author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/sfc/efx.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c  539
1 file changed, 381 insertions(+), 158 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ba674c5ca29e..c914729f9554 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -21,9 +21,9 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
-#include "mdio_10g.h"
 #include "nic.h"
 
 #include "mcdi.h"
@@ -68,14 +68,6 @@ const char *efx_loopback_mode_names[] = {
    [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
 };
 
-/* Interrupt mode names (see INT_MODE())) */
-const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
-const char *efx_interrupt_mode_names[] = {
-   [EFX_INT_MODE_MSIX]   = "MSI-X",
-   [EFX_INT_MODE_MSI]    = "MSI",
-   [EFX_INT_MODE_LEGACY] = "legacy",
-};
-
 const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
 const char *efx_reset_type_names[] = {
    [RESET_TYPE_INVISIBLE]     = "INVISIBLE",
@@ -114,7 +106,7 @@ static struct workqueue_struct *reset_workqueue;
  * This is only used in MSI-X interrupt mode
  */
 static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0644);
+module_param(separate_tx_channels, uint, 0444);
 MODULE_PARM_DESC(separate_tx_channels,
         "Use separate channels for TX and RX");
 
@@ -124,10 +116,11 @@ MODULE_PARM_DESC(separate_tx_channels,
 static int napi_weight = 64;
 
 /* This is the time (in jiffies) between invocations of the hardware
- * monitor, which checks for known hardware bugs and resets the
- * hardware and driver as necessary.
+ * monitor. On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
  */
-unsigned int efx_monitor_interval = 1 * HZ;
+static unsigned int efx_monitor_interval = 1 * HZ;
 
 /* This controls whether or not the driver will initialise devices
  * with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -201,10 +194,15 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
  * Utility functions and prototypes
  *
  *************************************************************************/
-static void efx_remove_channel(struct efx_channel *channel);
+
+static void efx_remove_channels(struct efx_nic *efx);
 static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
 static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_channels(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)       \
    do {                        \
@@ -248,7 +246,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 
    efx_rx_strategy(channel);
 
-   efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+   efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 
    return spent;
 }
@@ -310,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
            channel->irq_mod_score = 0;
        }
 
+       efx_filter_rfs_expire(channel);
+
        /* There is no race here; although napi_disable() will
         * only wait for napi_complete(), this isn't a problem
         * since efx_channel_processed() will have no effect if
@@ -328,18 +328,23 @@
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
    struct efx_nic *efx = channel->efx;
 
+   BUG_ON(channel->channel >= efx->n_channels);
    BUG_ON(!channel->enabled);
+   BUG_ON(!efx->loopback_selftest);
 
    /* Disable interrupts and wait for ISRs to complete */
    efx_nic_disable_interrupts(efx);
-   if (efx->legacy_irq)
+   if (efx->legacy_irq) {
        synchronize_irq(efx->legacy_irq);
+       efx->legacy_irq_enabled = false;
+   }
    if (channel->irq)
        synchronize_irq(channel->irq);
 
@@ -347,13 +352,15 @@ void efx_process_channel_now(struct efx_channel *channel)
    napi_disable(&channel->napi_str);
 
    /* Poll the channel */
-   efx_process_channel(channel, EFX_EVQ_SIZE);
+   efx_process_channel(channel, channel->eventq_mask + 1);
 
    /* Ack the eventq. This may cause an interrupt to be generated
     * when they are reenabled */
    efx_channel_processed(channel);
 
    napi_enable(&channel->napi_str);
+   if (efx->legacy_irq)
+       efx->legacy_irq_enabled = true;
    efx_nic_enable_interrupts(efx);
 }
 
@@ -364,9 +371,18 @@ void efx_process_channel_now(struct efx_channel *channel)
  */
 static int efx_probe_eventq(struct efx_channel *channel)
 {
+   struct efx_nic *efx = channel->efx;
+   unsigned long entries;
+
    netif_dbg(channel->efx, probe, channel->efx->net_dev,
          "chan %d create event queue\n", channel->channel);
 
+   /* Build an event queue with room for one event per tx and rx buffer,
+    * plus some extra for link state events and MCDI completions. */
+   entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+   EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+   channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
    return efx_nic_probe_eventq(channel);
 }
 
@@ -403,6 +419,61 @@ static void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+   struct efx_channel *channel;
+   struct efx_rx_queue *rx_queue;
+   struct efx_tx_queue *tx_queue;
+   int j;
+
+   if (old_channel) {
+       channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+       if (!channel)
+           return NULL;
+
+       *channel = *old_channel;
+
+       channel->napi_dev = NULL;
+       memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+       rx_queue = &channel->rx_queue;
+       rx_queue->buffer = NULL;
+       memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+       for (j = 0; j < EFX_TXQ_TYPES; j++) {
+           tx_queue = &channel->tx_queue[j];
+           if (tx_queue->channel)
+               tx_queue->channel = channel;
+           tx_queue->buffer = NULL;
+           memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+       }
+   } else {
+       channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+       if (!channel)
+           return NULL;
+
+       channel->efx = efx;
+       channel->channel = i;
+
+       for (j = 0; j < EFX_TXQ_TYPES; j++) {
+           tx_queue = &channel->tx_queue[j];
+           tx_queue->efx = efx;
+           tx_queue->queue = i * EFX_TXQ_TYPES + j;
+           tx_queue->channel = channel;
+       }
+   }
+
+   rx_queue = &channel->rx_queue;
+   rx_queue->efx = efx;
+   setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+           (unsigned long)rx_queue);
+
+   return channel;
+}
+
 static int efx_probe_channel(struct efx_channel *channel)
 {
    struct efx_tx_queue *tx_queue;
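[Annotation] efx_alloc_channel() above relies on a clone idiom: a single struct assignment copies every parameter from the old channel, and the resource handles (NAPI device, event queue, ring buffers) are then explicitly zeroed so the clone owns no hardware or memory until it is probed. A reduced illustration with hypothetical types (chan and ring are stand-ins, not the driver's structures):

/* Clone a configuration-carrying struct without its resources.
 * Hypothetical types; this shows the idiom, not the driver API. */
#include <stdlib.h>
#include <string.h>

struct ring { void *buffer; };

struct chan {
    int index;
    int irq_moderation; /* parameter: inherited by the clone */
    struct ring rxd;    /* resource: must start out empty    */
};

static struct chan *clone_chan(const struct chan *old)
{
    struct chan *c = malloc(sizeof(*c));
    if (!c)
        return NULL;
    *c = *old;                          /* copy all parameters...       */
    memset(&c->rxd, 0, sizeof(c->rxd)); /* ...but none of the resources */
    return c;
}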
@@ -459,11 +530,38 @@ static void efx_set_channel_names(struct efx_nic *efx)
                number -= efx->n_rx_channels;
            }
        }
-       snprintf(channel->name, sizeof(channel->name),
+       snprintf(efx->channel_name[channel->channel],
+            sizeof(efx->channel_name[0]),
             "%s%s-%d", efx->name, type, number);
    }
 }
 
+static int efx_probe_channels(struct efx_nic *efx)
+{
+   struct efx_channel *channel;
+   int rc;
+
+   /* Restart special buffer allocation */
+   efx->next_buffer_table = 0;
+
+   efx_for_each_channel(channel, efx) {
+       rc = efx_probe_channel(channel);
+       if (rc) {
+           netif_err(efx, probe, efx->net_dev,
+                 "failed to create channel %d\n",
+                 channel->channel);
+           goto fail;
+       }
+   }
+   efx_set_channel_names(efx);
+
+   return 0;
+
+fail:
+   efx_remove_channels(efx);
+   return rc;
+}
+
 /* Channels are shutdown and reinitialised whilst the NIC is running
  * to propagate configuration changes (mtu, checksum offload), or
  * to clear hardware error conditions
@@ -580,7 +678,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
        efx_for_each_channel_rx_queue(rx_queue, channel)
            efx_fini_rx_queue(rx_queue);
-       efx_for_each_channel_tx_queue(tx_queue, channel)
+       efx_for_each_possible_channel_tx_queue(tx_queue, channel)
            efx_fini_tx_queue(tx_queue);
        efx_fini_eventq(channel);
    }
@@ -596,11 +694,84 @@ static void efx_remove_channel(struct efx_channel *channel)
 
    efx_for_each_channel_rx_queue(rx_queue, channel)
        efx_remove_rx_queue(rx_queue);
-   efx_for_each_channel_tx_queue(tx_queue, channel)
+   efx_for_each_possible_channel_tx_queue(tx_queue, channel)
        efx_remove_tx_queue(tx_queue);
    efx_remove_eventq(channel);
 }
 
+static void efx_remove_channels(struct efx_nic *efx)
+{
+   struct efx_channel *channel;
+
+   efx_for_each_channel(channel, efx)
+       efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+   struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+   u32 old_rxq_entries, old_txq_entries;
+   unsigned i;
+   int rc;
+
+   efx_stop_all(efx);
+   efx_fini_channels(efx);
+
+   /* Clone channels */
+   memset(other_channel, 0, sizeof(other_channel));
+   for (i = 0; i < efx->n_channels; i++) {
+       channel = efx_alloc_channel(efx, i, efx->channel[i]);
+       if (!channel) {
+           rc = -ENOMEM;
+           goto out;
+       }
+       other_channel[i] = channel;
+   }
+
+   /* Swap entry counts and channel pointers */
+   old_rxq_entries = efx->rxq_entries;
+   old_txq_entries = efx->txq_entries;
+   efx->rxq_entries = rxq_entries;
+   efx->txq_entries = txq_entries;
+   for (i = 0; i < efx->n_channels; i++) {
+       channel = efx->channel[i];
+       efx->channel[i] = other_channel[i];
+       other_channel[i] = channel;
+   }
+
+   rc = efx_probe_channels(efx);
+   if (rc)
+       goto rollback;
+
+   efx_init_napi(efx);
+
+   /* Destroy old channels */
+   for (i = 0; i < efx->n_channels; i++) {
+       efx_fini_napi_channel(other_channel[i]);
+       efx_remove_channel(other_channel[i]);
+   }
+out:
+   /* Free unused channel structures */
+   for (i = 0; i < efx->n_channels; i++)
+       kfree(other_channel[i]);
+
+   efx_init_channels(efx);
+   efx_start_all(efx);
+   return rc;
+
+rollback:
+   /* Swap back */
+   efx->rxq_entries = old_rxq_entries;
+   efx->txq_entries = old_txq_entries;
+   for (i = 0; i < efx->n_channels; i++) {
+       channel = efx->channel[i];
+       efx->channel[i] = other_channel[i];
+       other_channel[i] = channel;
+   }
+   goto out;
+}
+
 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
    mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
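[Annotation] efx_realloc_channels() above is organised around a swap-and-rollback pattern: the clones are built off to the side, pointer-swapped into place, and swapped straight back if probing fails, so the live channel array is never left half-initialised. The single out: label then frees whichever set is unused, the clones on failure or the old channels on success. A compressed stand-alone sketch of that control flow (make_chan() and probe_all() are hypothetical stand-ins):

/* Swap-and-rollback resize, reduced to its control flow; not driver code. */
#include <stdlib.h>

#define NCHAN 4

static void *make_chan(void) { return malloc(1); }                /* stand-in */
static int probe_all(void *chan[NCHAN]) { (void)chan; return 0; } /* stand-in */

static void swap_all(void *a[NCHAN], void *b[NCHAN])
{
    for (int i = 0; i < NCHAN; i++) {
        void *tmp = a[i];
        a[i] = b[i];
        b[i] = tmp;
    }
}

int resize(void *live[NCHAN])
{
    void *other[NCHAN] = { 0 };
    int rc = 0;

    for (int i = 0; i < NCHAN; i++) {
        other[i] = make_chan();
        if (!other[i]) {
            rc = -1;
            goto out;
        }
    }

    swap_all(live, other);         /* clones go live */
    if (probe_all(live)) {
        swap_all(live, other);     /* rollback: old state restored intact */
        rc = -1;
    }
out:
    /* frees the clones on failure, the replaced channels on success */
    for (int i = 0; i < NCHAN; i++)
        free(other[i]);
    return rc;
}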
@@ -627,11 +798,6 @@ void efx_link_status_changed(struct efx_nic *efx)
    if (!netif_running(efx->net_dev))
        return;
 
-   if (efx->port_inhibited) {
-       netif_carrier_off(efx->net_dev);
-       return;
-   }
-
    if (link_state->up != netif_carrier_ok(efx->net_dev)) {
        efx->n_link_state_changes++;
 
@@ -667,7 +833,7 @@ void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
    }
 }
 
-void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc)
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 {
    efx->wanted_fc = wanted_fc;
    if (efx->link_advertising) {
@@ -751,6 +917,7 @@ static void efx_mac_work(struct work_struct *data)
 
 static int efx_probe_port(struct efx_nic *efx)
 {
+   unsigned char *perm_addr;
    int rc;
 
    netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -761,14 +928,15 @@ static int efx_probe_port(struct efx_nic *efx)
    /* Connect up MAC/PHY operations table */
    rc = efx->type->probe_port(efx);
    if (rc)
-       goto err;
+       return rc;
 
    /* Sanity check MAC address */
-   if (is_valid_ether_addr(efx->mac_address)) {
-       memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+   perm_addr = efx->net_dev->perm_addr;
+   if (is_valid_ether_addr(perm_addr)) {
+       memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
    } else {
        netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
-             efx->mac_address);
+             perm_addr);
        if (!allow_bad_hwaddr) {
            rc = -EINVAL;
            goto err;
@@ -782,7 +950,7 @@ static int efx_probe_port(struct efx_nic *efx)
    return 0;
 
 err:
-   efx_remove_port(efx);
+   efx->type->remove_port(efx);
    return rc;
 }
 
@@ -883,6 +1051,7 @@ static int efx_init_io(struct efx_nic *efx)
 {
    struct pci_dev *pci_dev = efx->pci_dev;
    dma_addr_t dma_mask = efx->type->max_dma_mask;
+   bool use_wc;
    int rc;
 
    netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -933,8 +1102,21 @@ static int efx_init_io(struct efx_nic *efx)
        rc = -EIO;
        goto fail3;
    }
-   efx->membase = ioremap_nocache(efx->membase_phys,
-                      efx->type->mem_map_size);
+
+   /* bug22643: If SR-IOV is enabled then tx push over a write combined
+    * mapping is unsafe. We need to disable write combining in this case.
+    * MSI is unsupported when SR-IOV is enabled, and the firmware will
+    * have removed the MSI capability. So write combining is safe if
+    * there is an MSI capability.
+    */
+   use_wc = (!EFX_WORKAROUND_22643(efx) ||
+         pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
+   if (use_wc)
+       efx->membase = ioremap_wc(efx->membase_phys,
+                     efx->type->mem_map_size);
+   else
+       efx->membase = ioremap_nocache(efx->membase_phys,
+                          efx->type->mem_map_size);
    if (!efx->membase) {
        netif_err(efx, probe, efx->net_dev,
              "could not map memory BAR at %llx+%x\n",
@@ -985,6 +1167,9 @@ static int efx_wanted_channels(void)
    int count;
    int cpu;
 
+   if (rss_cpus)
+       return rss_cpus;
+
    if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
        printk(KERN_WARNING
               "sfc: RSS disabled due to allocation failure\n");
@@ -1004,10 +1189,32 @@ static int efx_wanted_channels(void)
    return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+   int i, rc;
+
+   efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+   if (!efx->net_dev->rx_cpu_rmap)
+       return -ENOMEM;
+   for (i = 0; i < efx->n_rx_channels; i++) {
+       rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+                     xentries[i].vector);
+       if (rc) {
+           free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+           efx->net_dev->rx_cpu_rmap = NULL;
+           return rc;
+       }
+   }
+#endif
+   return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
    int max_channels =
        min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1049,8 +1256,14 @@ static void efx_probe_interrupts(struct efx_nic *efx)
            efx->n_tx_channels = efx->n_channels;
            efx->n_rx_channels = efx->n_channels;
        }
+       rc = efx_init_rx_cpu_rmap(efx, xentries);
+       if (rc) {
+           pci_disable_msix(efx->pci_dev);
+           return rc;
+       }
        for (i = 0; i < n_channels; i++)
-           efx->channel[i].irq = xentries[i].vector;
+           efx_get_channel(efx, i)->irq =
+               xentries[i].vector;
    } else {
        /* Fall back to single channel MSI */
        efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1279,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
        efx->n_tx_channels = 1;
        rc = pci_enable_msi(efx->pci_dev);
        if (rc == 0) {
-           efx->channel[0].irq = efx->pci_dev->irq;
+           efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
        } else {
            netif_err(efx, drv, efx->net_dev,
                  "could not enable MSI\n");
@@ -1081,6 +1294,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
        efx->n_tx_channels = 1;
        efx->legacy_irq = efx->pci_dev->irq;
    }
+
+   return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1101,22 +1316,18 @@ static void efx_set_channels(struct efx_nic *efx)
 {
    struct efx_channel *channel;
    struct efx_tx_queue *tx_queue;
-   struct efx_rx_queue *rx_queue;
-   unsigned tx_channel_offset =
+
+   efx->tx_channel_offset =
        separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
+   /* We need to adjust the TX queue numbers if we have separate
+    * RX-only and TX-only channels.
+    */
    efx_for_each_channel(channel, efx) {
-       if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
-           channel->tx_queue = &efx->tx_queue[
-               (channel->channel - tx_channel_offset) *
-               EFX_TXQ_TYPES];
-           efx_for_each_channel_tx_queue(tx_queue, channel)
-               tx_queue->channel = channel;
-       }
+       efx_for_each_channel_tx_queue(tx_queue, channel)
+           tx_queue->queue -= (efx->tx_channel_offset *
+                       EFX_TXQ_TYPES);
    }
-
-   efx_for_each_rx_queue(rx_queue, efx)
-       rx_queue->channel = &efx->channel[rx_queue->queue];
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1133,7 +1344,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 
    /* Determine the number of channels and queues by trying to hook
     * in MSI-X interrupts. */
-   efx_probe_interrupts(efx);
+   rc = efx_probe_interrupts(efx);
+   if (rc)
+       goto fail;
 
    if (efx->n_channels > 1)
        get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1141,12 +1354,17 @@ static int efx_probe_nic(struct efx_nic *efx)
        efx->rx_indir_table[i] = i % efx->n_rx_channels;
 
    efx_set_channels(efx);
-   efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
+   netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+   netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
 
    /* Initialise the interrupt moderation settings */
    efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
    return 0;
+
+fail:
+   efx->type->remove(efx);
+   return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1165,40 +1383,37 @@ static void efx_remove_nic(struct efx_nic *efx)
 
 static int efx_probe_all(struct efx_nic *efx)
 {
-   struct efx_channel *channel;
    int rc;
 
-   /* Create NIC */
    rc = efx_probe_nic(efx);
    if (rc) {
        netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
        goto fail1;
    }
 
-   /* Create port */
    rc = efx_probe_port(efx);
    if (rc) {
        netif_err(efx, probe, efx->net_dev, "failed to create port\n");
        goto fail2;
    }
 
-   /* Create channels */
-   efx_for_each_channel(channel, efx) {
-       rc = efx_probe_channel(channel);
-       if (rc) {
-           netif_err(efx, probe, efx->net_dev,
-                 "failed to create channel %d\n",
-                 channel->channel);
-           goto fail3;
-       }
+   efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+   rc = efx_probe_channels(efx);
+   if (rc)
+       goto fail3;
+
+   rc = efx_probe_filters(efx);
+   if (rc) {
+       netif_err(efx, probe, efx->net_dev,
+             "failed to create filter tables\n");
+       goto fail4;
    }
-   efx_set_channel_names(efx);
 
    return 0;
 
+ fail4:
+   efx_remove_channels(efx);
 fail3:
-   efx_for_each_channel(channel, efx)
-       efx_remove_channel(channel);
    efx_remove_port(efx);
 fail2:
    efx_remove_nic(efx);
@@ -1230,12 +1445,14 @@ static void efx_start_all(struct efx_nic *efx)
     * restart the transmit interface early so the watchdog timer stops */
    efx_start_port(efx);
 
-   efx_for_each_channel(channel, efx) {
-       if (efx_dev_registered(efx))
-           efx_wake_queue(channel);
+   if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
+       netif_tx_wake_all_queues(efx->net_dev);
+
+   efx_for_each_channel(channel, efx)
        efx_start_channel(channel);
-   }
 
+   if (efx->legacy_irq)
+       efx->legacy_irq_enabled = true;
    efx_nic_enable_interrupts(efx);
 
    /* Switch to event based MCDI completions after enabling interrupts.
@@ -1296,8 +1513,10 @@ static void efx_stop_all(struct efx_nic *efx)
 
    /* Disable interrupts and wait for ISR to complete */
    efx_nic_disable_interrupts(efx);
-   if (efx->legacy_irq)
+   if (efx->legacy_irq) {
        synchronize_irq(efx->legacy_irq);
+       efx->legacy_irq_enabled = false;
+   }
    efx_for_each_channel(channel, efx) {
        if (channel->irq)
            synchronize_irq(channel->irq);
@@ -1318,9 +1537,7 @@ static void efx_stop_all(struct efx_nic *efx)
    /* Stop the kernel transmit interface late, so the watchdog
     * timer isn't ticking over the flush */
    if (efx_dev_registered(efx)) {
-       struct efx_channel *channel;
-       efx_for_each_channel(channel, efx)
-           efx_stop_queue(channel);
+       netif_tx_stop_all_queues(efx->net_dev);
        netif_tx_lock_bh(efx->net_dev);
        netif_tx_unlock_bh(efx->net_dev);
    }
@@ -1328,10 +1545,8 @@ static void efx_stop_all(struct efx_nic *efx)
 
 static void efx_remove_all(struct efx_nic *efx)
 {
-   struct efx_channel *channel;
-
-   efx_for_each_channel(channel, efx)
-       efx_remove_channel(channel);
+   efx_remove_filters(efx);
+   efx_remove_channels(efx);
    efx_remove_port(efx);
    efx_remove_nic(efx);
 }
@@ -1355,20 +1570,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
                 bool rx_adaptive)
 {
-   struct efx_tx_queue *tx_queue;
-   struct efx_rx_queue *rx_queue;
+   struct efx_channel *channel;
    unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
    unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 
    EFX_ASSERT_RESET_SERIALISED(efx);
 
-   efx_for_each_tx_queue(tx_queue, efx)
-       tx_queue->channel->irq_moderation = tx_ticks;
-
    efx->irq_rx_adaptive = rx_adaptive;
    efx->irq_rx_moderation = rx_ticks;
-   efx_for_each_rx_queue(rx_queue, efx)
-       rx_queue->channel->irq_moderation = rx_ticks;
+   efx_for_each_channel(channel, efx) {
+       if (efx_channel_has_rx_queue(channel))
+           channel->irq_moderation = rx_ticks;
+       else if (efx_channel_has_tx_queues(channel))
+           channel->irq_moderation = tx_ticks;
+   }
 }
 
 /**************************************************************************
@@ -1377,8 +1592,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
  *
  **************************************************************************/
 
-/* Run periodically off the general workqueue. Serialised against
- * efx_reconfigure_port via the mac_lock */
+/* Run periodically off the general workqueue */
 static void efx_monitor(struct work_struct *data)
 {
    struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1391,16 +1605,13 @@ static void efx_monitor(struct work_struct *data)
 
    /* If the mac_lock is already held then it is likely a port
     * reconfiguration is already in place, which will likely do
-    * most of the work of check_hw() anyway. */
-   if (!mutex_trylock(&efx->mac_lock))
-       goto out_requeue;
-   if (!efx->port_enabled)
-       goto out_unlock;
-   efx->type->monitor(efx);
+    * most of the work of monitor() anyway. */
+   if (mutex_trylock(&efx->mac_lock)) {
+       if (efx->port_enabled)
+           efx->type->monitor(efx);
+       mutex_unlock(&efx->mac_lock);
+   }
 
-out_unlock:
-   mutex_unlock(&efx->mac_lock);
-out_requeue:
    queue_delayed_work(efx->workqueue, &efx->monitor_work,
               efx_monitor_interval);
 }
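[Annotation] The efx_monitor() rewrite above flattens a goto ladder into one conditional: take the lock only if it is uncontended, do the work, and requeue unconditionally, since a contended lock means a reconfiguration is already doing the same work. A stand-alone sketch of that shape using POSIX threads (monitor_hw() and requeue() are hypothetical stand-ins; the kernel's workqueue API has no user-space equivalent):

/* Trylock-or-skip periodic monitor, sketched with pthreads. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;
static bool port_enabled = true;

static void monitor_hw(void) { /* poll hardware state here */ }
static void requeue(void)    { /* re-arm the delayed work here */ }

static void monitor_work(void)
{
    /* If someone else holds the lock, a reconfiguration is in progress
     * and will do most of this work anyway: skip, do not block. */
    if (pthread_mutex_trylock(&mac_lock) == 0) {
        if (port_enabled)
            monitor_hw();
        pthread_mutex_unlock(&mac_lock);
    }
    requeue(); /* always runs again, whether or not we did the work */
}

int main(void)
{
    monitor_work();
    return 0;
}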
@@ -1435,7 +1646,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
  *
  **************************************************************************/
 
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
 {
    struct efx_channel *channel;
 
@@ -1444,18 +1655,21 @@ static int efx_init_napi(struct efx_nic *efx)
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                   efx_poll, napi_weight);
    }
-   return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+   if (channel->napi_dev)
+       netif_napi_del(&channel->napi_str);
+   channel->napi_dev = NULL;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
 {
    struct efx_channel *channel;
 
-   efx_for_each_channel(channel, efx) {
-       if (channel->napi_dev)
-           netif_napi_del(&channel->napi_str);
-       channel->napi_dev = NULL;
-   }
+   efx_for_each_channel(channel, efx)
+       efx_fini_napi_channel(channel);
 }
 
 /**************************************************************************
@@ -1546,11 +1760,11 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
    stats->tx_packets = mac_stats->tx_packets;
    stats->rx_bytes = mac_stats->rx_bytes;
    stats->tx_bytes = mac_stats->tx_bytes;
+   stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
    stats->multicast = mac_stats->rx_multicast;
    stats->collisions = mac_stats->tx_collision;
    stats->rx_length_errors = (mac_stats->rx_gtjumbo +
                   mac_stats->rx_length_error);
-   stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
    stats->rx_crc_errors = mac_stats->rx_bad;
    stats->rx_frame_errors = mac_stats->rx_align_error;
    stats->rx_fifo_errors = mac_stats->rx_overflow;
@@ -1669,6 +1883,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
    /* Otherwise efx_start_port() will do this */
 }
 
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+   struct efx_nic *efx = netdev_priv(net_dev);
+
+   /* If disabling RX n-tuple filtering, clear existing filters */
+   if (net_dev->features & ~data & NETIF_F_NTUPLE)
+       efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+   return 0;
+}
+
 static const struct net_device_ops efx_netdev_ops = {
    .ndo_open       = efx_net_open,
    .ndo_stop       = efx_net_stop,
@@ -1680,9 +1905,14 @@ static const struct net_device_ops efx_netdev_ops = {
    .ndo_change_mtu     = efx_change_mtu,
    .ndo_set_mac_address    = efx_set_mac_address,
    .ndo_set_multicast_list = efx_set_multicast_list,
+   .ndo_set_features   = efx_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = efx_netpoll,
 #endif
+   .ndo_setup_tc       = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+   .ndo_rx_flow_steer  = efx_filter_rfs,
+#endif
 };
 
 static void efx_update_name(struct efx_nic *efx)
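[Annotation] The test in efx_set_features() (hooked up as .ndo_set_features above) uses a compact bit idiom: features & ~data & NETIF_F_NTUPLE is non-zero only when the flag is currently set and about to be cleared, the one transition that requires dropping the already-installed filters. A stand-alone check of the idiom; the flag value below is made up for illustration and is not the kernel's:

/* The "flag being cleared" idiom, shown with a hypothetical flag value. */
#include <assert.h>

#define F_NTUPLE 0x8000u /* made-up bit for illustration */

static int being_cleared(unsigned int old, unsigned int new_)
{
    /* set in old, clear in new */
    return (old & ~new_ & F_NTUPLE) != 0;
}

int main(void)
{
    assert(being_cleared(F_NTUPLE, 0));         /* on -> off: act    */
    assert(!being_cleared(F_NTUPLE, F_NTUPLE)); /* stays on: no-op   */
    assert(!being_cleared(0, F_NTUPLE));        /* off -> on: no-op  */
    assert(!being_cleared(0, 0));               /* stays off: no-op  */
    return 0;
}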
@@ -1719,6 +1949,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
    struct net_device *net_dev = efx->net_dev;
+   struct efx_channel *channel;
    int rc;
 
    net_dev->watchdog_timeo = 5 * HZ;
@@ -1741,6 +1972,12 @@ static int efx_register_netdev(struct efx_nic *efx)
    if (rc)
        goto fail_locked;
 
+   efx_for_each_channel(channel, efx) {
+       struct efx_tx_queue *tx_queue;
+       efx_for_each_channel_tx_queue(tx_queue, channel)
+           efx_init_tx_queue_core_txq(tx_queue);
+   }
+
    /* Always start with carrier off; PHY events will detect the link */
    netif_carrier_off(efx->net_dev);
 
@@ -1767,6 +2004,7 @@
 
 static void efx_unregister_netdev(struct efx_nic *efx)
 {
+   struct efx_channel *channel;
    struct efx_tx_queue *tx_queue;
 
    if (!efx->net_dev)
@@ -1777,8 +2015,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
    /* Free up any skbs still remaining. This has to happen before
     * we try to unregister the netdev as running their destructors
     * may be needed to get the device ref. count to 0. */
-   efx_for_each_tx_queue(tx_queue, efx)
-       efx_release_tx_buffers(tx_queue);
+   efx_for_each_channel(channel, efx) {
+       efx_for_each_channel_tx_queue(tx_queue, channel)
+           efx_release_tx_buffers(tx_queue);
+   }
 
    if (efx_dev_registered(efx)) {
        strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -1801,7 +2041,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 
    efx_stop_all(efx);
    mutex_lock(&efx->mac_lock);
-   mutex_lock(&efx->spi_lock);
 
    efx_fini_channels(efx);
    if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -1841,8 +2080,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        efx->mac_op->reconfigure(efx);
 
    efx_init_channels(efx);
+   efx_restore_filters(efx);
 
-   mutex_unlock(&efx->spi_lock);
    mutex_unlock(&efx->mac_lock);
 
    efx_start_all(efx);
@@ -1852,7 +2091,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 fail:
    efx->port_initialized = false;
 
-   mutex_unlock(&efx->spi_lock);
    mutex_unlock(&efx->mac_lock);
 
    return rc;
@@ -1871,6 +2109,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
    netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
           RESET_TYPE(method));
 
+   netif_device_detach(efx->net_dev);
    efx_reset_down(efx, method);
 
    rc = efx->type->reset(efx, method);
@@ -1904,6 +2143,7 @@ out:
        efx->state = STATE_DISABLED;
    } else {
        netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+       netif_device_attach(efx->net_dev);
    }
    return rc;
 }
@@ -2010,15 +2250,13 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
    return 0;
 }
 void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
-{
-}
-bool efx_port_dummy_op_poll(struct efx_nic *efx)
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
 {
    return false;
 }
 
-static struct efx_phy_operations efx_dummy_phy_operations = {
+static const struct efx_phy_operations efx_dummy_phy_operations = {
    .init        = efx_port_dummy_op_int,
    .reconfigure = efx_port_dummy_op_int,
    .poll        = efx_port_dummy_op_poll,
@@ -2034,19 +2272,14 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
               struct pci_dev *pci_dev, struct net_device *net_dev)
 {
-   struct efx_channel *channel;
-   struct efx_tx_queue *tx_queue;
-   struct efx_rx_queue *rx_queue;
    int i;
 
    /* Initialise common structures */
    memset(efx, 0, sizeof(*efx));
    spin_lock_init(&efx->biu_lock);
-   mutex_init(&efx->mdio_lock);
-   mutex_init(&efx->spi_lock);
 #ifdef CONFIG_SFC_MTD
    INIT_LIST_HEAD(&efx->mtd_list);
 #endif
@@ -2059,7 +2292,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
    strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
    efx->net_dev = net_dev;
-   efx->rx_checksum_enabled = true;
    spin_lock_init(&efx->stats_lock);
    mutex_init(&efx->mac_lock);
    efx->mac_op = type->default_mac_ops;
@@ -2068,36 +2300,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
    INIT_WORK(&efx->mac_work, efx_mac_work);
 
    for (i = 0; i < EFX_MAX_CHANNELS; i++) {
-       channel = &efx->channel[i];
-       channel->efx = efx;
-       channel->channel = i;
-       channel->work_pending = false;
-       spin_lock_init(&channel->tx_stop_lock);
-       atomic_set(&channel->tx_stop_count, 1);
-   }
-   for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
-       tx_queue = &efx->tx_queue[i];
-       tx_queue->efx = efx;
-       tx_queue->queue = i;
-       tx_queue->buffer = NULL;
-       tx_queue->channel = &efx->channel[0]; /* for safety */
-       tx_queue->tso_headers_free = NULL;
-   }
-   for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
-       rx_queue = &efx->rx_queue[i];
-       rx_queue->efx = efx;
-       rx_queue->queue = i;
-       rx_queue->channel = &efx->channel[0]; /* for safety */
-       rx_queue->buffer = NULL;
-       setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
-               (unsigned long)rx_queue);
+       efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+       if (!efx->channel[i])
+           goto fail;
    }
 
    efx->type = type;
 
-   /* As close as we can get to guaranteeing that we don't overflow */
-   BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
-
    EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
    /* Higher numbered interrupt modes are less capable! */
@@ -2109,13 +2318,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
         pci_name(pci_dev));
    efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
    if (!efx->workqueue)
-       return -ENOMEM;
+       goto fail;
 
    return 0;
+
+fail:
+   efx_fini_struct(efx);
+   return -ENOMEM;
 }
 
 static void efx_fini_struct(struct efx_nic *efx)
 {
+   int i;
+
+   for (i = 0; i < EFX_MAX_CHANNELS; i++)
+       kfree(efx->channel[i]);
+
    if (efx->workqueue) {
        destroy_workqueue(efx->workqueue);
        efx->workqueue = NULL;
@@ -2133,6 +2351,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+   free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+   efx->net_dev->rx_cpu_rmap = NULL;
+#endif
    efx_nic_fini_interrupt(efx);
    efx_fini_channels(efx);
    efx_fini_port(efx);
@@ -2192,9 +2414,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
    if (rc)
        goto fail1;
 
-   rc = efx_init_napi(efx);
-   if (rc)
-       goto fail2;
+   efx_init_napi(efx);
 
    rc = efx->type->init(efx);
    if (rc) {
@@ -2225,7 +2445,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
    efx->type->fini(efx);
 fail3:
    efx_fini_napi(efx);
-fail2:
    efx_remove_all(efx);
 fail1:
    return rc;
@@ -2243,23 +2462,27 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                   const struct pci_device_id *entry)
 {
-   struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
+   const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
    struct net_device *net_dev;
    struct efx_nic *efx;
    int i, rc;
 
    /* Allocate and initialise a struct net_device and struct efx_nic */
-   net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+   net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+                    EFX_MAX_RX_QUEUES);
    if (!net_dev)
        return -ENOMEM;
    net_dev->features |= (type->offload_features | NETIF_F_SG |
                  NETIF_F_HIGHDMA | NETIF_F_TSO |
-                 NETIF_F_GRO);
+                 NETIF_F_RXCSUM);
    if (type->offload_features & NETIF_F_V6_CSUM)
        net_dev->features |= NETIF_F_TSO6;
    /* Mask for features that also apply to VLAN devices */
    net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
-                  NETIF_F_HIGHDMA | NETIF_F_TSO);
+                  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+                  NETIF_F_RXCSUM);
+   /* All offloads can be toggled */
+   net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
    efx = netdev_priv(net_dev);
    pci_set_drvdata(pci_dev, efx);
    SET_NETDEV_DEV(net_dev, &pci_dev->dev);