author		David S. Miller <davem@davemloft.net>	2012-02-16 17:08:06 -0500
committer	David S. Miller <davem@davemloft.net>	2012-02-16 17:08:06 -0500
commit		d5df7c415688ca7e5c0e57bfa830803f221736a5 (patch)
tree		6b5cc7abaa48ea655a2d9417a2aab35b9762748a
parent		80703d265b7e8a801560d907b1bfe340e574dbca (diff)
parent		cd2d5b529cdb9bd274f3e4bc68d37d4d63b7f383 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
-rw-r--r--	drivers/net/ethernet/sfc/Kconfig	|    8
-rw-r--r--	drivers/net/ethernet/sfc/Makefile	|    1
-rw-r--r--	drivers/net/ethernet/sfc/efx.c		|  685
-rw-r--r--	drivers/net/ethernet/sfc/efx.h		|    1
-rw-r--r--	drivers/net/ethernet/sfc/ethtool.c	|   62
-rw-r--r--	drivers/net/ethernet/sfc/falcon.c	|   12
-rw-r--r--	drivers/net/ethernet/sfc/filter.c	|  255
-rw-r--r--	drivers/net/ethernet/sfc/filter.h	|   20
-rw-r--r--	drivers/net/ethernet/sfc/mcdi.c		|   34
-rw-r--r--	drivers/net/ethernet/sfc/mcdi.h		|    2
-rw-r--r--	drivers/net/ethernet/sfc/mcdi_mac.c	|    4
-rw-r--r--	drivers/net/ethernet/sfc/mtd.c		|    2
-rw-r--r--	drivers/net/ethernet/sfc/net_driver.h	|  123
-rw-r--r--	drivers/net/ethernet/sfc/nic.c		|  524
-rw-r--r--	drivers/net/ethernet/sfc/nic.h		|  102
-rw-r--r--	drivers/net/ethernet/sfc/regs.h		|   20
-rw-r--r--	drivers/net/ethernet/sfc/rx.c		|    7
-rw-r--r--	drivers/net/ethernet/sfc/siena.c	|   14
-rw-r--r--	drivers/net/ethernet/sfc/siena_sriov.c	| 1642
-rw-r--r--	drivers/net/ethernet/sfc/tx.c		|    2
-rw-r--r--	drivers/net/ethernet/sfc/vfdi.h		|  254
21 files changed, 3170 insertions(+), 604 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8d423544a7e6..fb3cbc27063c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -26,3 +26,11 @@ config SFC_MCDI_MON
 	---help---
 	  This exposes the on-board firmware-managed sensors as a
 	  hardware monitor device.
+config SFC_SRIOV
+	bool "Solarflare SFC9000-family SR-IOV support"
+	depends on SFC && PCI_IOV
+	default y
+	---help---
+	  This enables support for the SFC9000 I/O Virtualization
+	  features, allowing accelerated network performance in
+	  virtualized environments.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3fa2e25ccc45..ea1f8db57318 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -4,5 +4,6 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
 	   tenxpress.o txc43128_phy.o falcon_boards.o \
 	   mcdi.o mcdi_phy.o mcdi_mon.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
+sfc-$(CONFIG_SFC_SRIOV)	+= siena_sriov.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 952d0bf7695a..ac571cf14485 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -186,9 +186,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
  *
  *************************************************************************/
 
+static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_remove_channel(struct efx_channel *channel);
 static void efx_remove_channels(struct efx_nic *efx);
+static const struct efx_channel_type efx_default_channel_type;
 static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi(struct efx_nic *efx);
+static void efx_init_napi_channel(struct efx_channel *channel);
 static void efx_fini_napi(struct efx_nic *efx);
 static void efx_fini_napi_channel(struct efx_channel *channel);
 static void efx_fini_struct(struct efx_nic *efx);
@@ -217,26 +221,27 @@ static void efx_stop_all(struct efx_nic *efx);
  */
 static int efx_process_channel(struct efx_channel *channel, int budget)
 {
-	struct efx_nic *efx = channel->efx;
 	int spent;
 
-	if (unlikely(efx->reset_pending || !channel->enabled))
+	if (unlikely(!channel->enabled))
 		return 0;
 
 	spent = efx_nic_process_eventq(channel, budget);
-	if (spent == 0)
-		return 0;
-
-	/* Deliver last RX packet. */
-	if (channel->rx_pkt) {
-		__efx_rx_packet(channel, channel->rx_pkt);
-		channel->rx_pkt = NULL;
+	if (spent && efx_channel_has_rx_queue(channel)) {
+		struct efx_rx_queue *rx_queue =
+			efx_channel_get_rx_queue(channel);
+
+		/* Deliver last RX packet. */
+		if (channel->rx_pkt) {
+			__efx_rx_packet(channel, channel->rx_pkt);
+			channel->rx_pkt = NULL;
+		}
+		if (rx_queue->enabled) {
+			efx_rx_strategy(channel);
+			efx_fast_push_rx_descriptors(rx_queue);
+		}
 	}
 
-	efx_rx_strategy(channel);
-
-	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
-
 	return spent;
 }
 
@@ -276,7 +281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	spent = efx_process_channel(channel, budget);
 
 	if (spent < budget) {
-		if (channel->channel < efx->n_rx_channels &&
+		if (efx_channel_has_rx_queue(channel) &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
 			if (unlikely(channel->irq_mod_score <
@@ -386,6 +391,34 @@ static void efx_init_eventq(struct efx_channel *channel)
 	efx_nic_init_eventq(channel);
 }
 
+/* Enable event queue processing and NAPI */
+static void efx_start_eventq(struct efx_channel *channel)
+{
+	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+		  "chan %d start event queue\n", channel->channel);
+
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we enable it. Make sure it's cleared before
+	 * then. Similarly, make sure it sees the enabled flag set.
+	 */
+	channel->work_pending = false;
+	channel->enabled = true;
+	smp_wmb();
+
+	napi_enable(&channel->napi_str);
+	efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+static void efx_stop_eventq(struct efx_channel *channel)
+{
+	if (!channel->enabled)
+		return;
+
+	napi_disable(&channel->napi_str);
+	channel->enabled = false;
+}
+
 static void efx_fini_eventq(struct efx_channel *channel)
 {
 	netif_dbg(channel->efx, drv, channel->efx->net_dev,
@@ -408,8 +441,7 @@ static void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
-/* Allocate and initialise a channel structure, optionally copying
- * parameters (but not resources) from an old channel structure. */
+/* Allocate and initialise a channel structure. */
 static struct efx_channel *
 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 {
@@ -418,45 +450,60 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 	struct efx_tx_queue *tx_queue;
 	int j;
 
-	if (old_channel) {
-		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
-		if (!channel)
-			return NULL;
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
 
-		*channel = *old_channel;
+	channel->efx = efx;
+	channel->channel = i;
+	channel->type = &efx_default_channel_type;
 
-		channel->napi_dev = NULL;
-		memset(&channel->eventq, 0, sizeof(channel->eventq));
+	for (j = 0; j < EFX_TXQ_TYPES; j++) {
+		tx_queue = &channel->tx_queue[j];
+		tx_queue->efx = efx;
+		tx_queue->queue = i * EFX_TXQ_TYPES + j;
+		tx_queue->channel = channel;
+	}
 
-		rx_queue = &channel->rx_queue;
-		rx_queue->buffer = NULL;
-		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+	rx_queue = &channel->rx_queue;
+	rx_queue->efx = efx;
+	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+		    (unsigned long)rx_queue);
 
-		for (j = 0; j < EFX_TXQ_TYPES; j++) {
-			tx_queue = &channel->tx_queue[j];
-			if (tx_queue->channel)
-				tx_queue->channel = channel;
-			tx_queue->buffer = NULL;
-			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
-		}
-	} else {
-		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-		if (!channel)
-			return NULL;
+	return channel;
+}
 
-		channel->efx = efx;
-		channel->channel = i;
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static struct efx_channel *
+efx_copy_channel(const struct efx_channel *old_channel)
+{
+	struct efx_channel *channel;
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int j;
+
+	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
+
+	*channel = *old_channel;
+
+	channel->napi_dev = NULL;
+	memset(&channel->eventq, 0, sizeof(channel->eventq));
 
-		for (j = 0; j < EFX_TXQ_TYPES; j++) {
-			tx_queue = &channel->tx_queue[j];
-			tx_queue->efx = efx;
-			tx_queue->queue = i * EFX_TXQ_TYPES + j;
-			tx_queue->channel = channel;
-		}
+	for (j = 0; j < EFX_TXQ_TYPES; j++) {
+		tx_queue = &channel->tx_queue[j];
+		if (tx_queue->channel)
+			tx_queue->channel = channel;
+		tx_queue->buffer = NULL;
+		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 	}
 
 	rx_queue = &channel->rx_queue;
-	rx_queue->efx = efx;
+	rx_queue->buffer = NULL;
+	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
 		    (unsigned long)rx_queue);
 
@@ -472,57 +519,62 @@ static int efx_probe_channel(struct efx_channel *channel)
 	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 		  "creating channel %d\n", channel->channel);
 
+	rc = channel->type->pre_probe(channel);
+	if (rc)
+		goto fail;
+
 	rc = efx_probe_eventq(channel);
 	if (rc)
-		goto fail1;
+		goto fail;
 
 	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		rc = efx_probe_tx_queue(tx_queue);
 		if (rc)
-			goto fail2;
+			goto fail;
 	}
 
 	efx_for_each_channel_rx_queue(rx_queue, channel) {
 		rc = efx_probe_rx_queue(rx_queue);
 		if (rc)
-			goto fail3;
+			goto fail;
 	}
 
 	channel->n_rx_frm_trunc = 0;
 
 	return 0;
 
- fail3:
-	efx_for_each_channel_rx_queue(rx_queue, channel)
-		efx_remove_rx_queue(rx_queue);
- fail2:
-	efx_for_each_channel_tx_queue(tx_queue, channel)
-		efx_remove_tx_queue(tx_queue);
- fail1:
+fail:
+	efx_remove_channel(channel);
 	return rc;
 }
 
+static void
+efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+	struct efx_nic *efx = channel->efx;
+	const char *type;
+	int number;
+
+	number = channel->channel;
+	if (efx->tx_channel_offset == 0) {
+		type = "";
+	} else if (channel->channel < efx->tx_channel_offset) {
+		type = "-rx";
+	} else {
+		type = "-tx";
+		number -= efx->tx_channel_offset;
+	}
+	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
 
 static void efx_set_channel_names(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
-	const char *type = "";
-	int number;
 
-	efx_for_each_channel(channel, efx) {
-		number = channel->channel;
-		if (efx->n_channels > efx->n_rx_channels) {
-			if (channel->channel < efx->n_rx_channels) {
-				type = "-rx";
-			} else {
-				type = "-tx";
-				number -= efx->n_rx_channels;
-			}
-		}
-		snprintf(efx->channel_name[channel->channel],
-			 sizeof(efx->channel_name[0]),
-			 "%s%s-%d", efx->name, type, number);
-	}
+	efx_for_each_channel(channel, efx)
+		channel->type->get_name(channel,
+					efx->channel_name[channel->channel],
+					sizeof(efx->channel_name[0]));
 }
 
 static int efx_probe_channels(struct efx_nic *efx)
@@ -555,7 +607,7 @@ fail:
  * to propagate configuration changes (mtu, checksum offload), or
  * to clear hardware error conditions
  */
-static void efx_init_channels(struct efx_nic *efx)
+static void efx_start_datapath(struct efx_nic *efx)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
@@ -574,68 +626,26 @@ static void efx_init_channels(struct efx_nic *efx)
 
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
-		netif_dbg(channel->efx, drv, channel->efx->net_dev,
-			  "init chan %d\n", channel->channel);
-
-		efx_init_eventq(channel);
-
 		efx_for_each_channel_tx_queue(tx_queue, channel)
 			efx_init_tx_queue(tx_queue);
 
 		/* The rx buffer allocation strategy is MTU dependent */
 		efx_rx_strategy(channel);
 
-		efx_for_each_channel_rx_queue(rx_queue, channel)
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
 			efx_init_rx_queue(rx_queue);
+			efx_nic_generate_fill_event(rx_queue);
+		}
 
 		WARN_ON(channel->rx_pkt != NULL);
 		efx_rx_strategy(channel);
 	}
-}
-
-/* This enables event queue processing and packet transmission.
- *
- * Note that this function is not allowed to fail, since that would
- * introduce too much complexity into the suspend/resume path.
- */
-static void efx_start_channel(struct efx_channel *channel)
-{
-	struct efx_rx_queue *rx_queue;
-
-	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
-		  "starting chan %d\n", channel->channel);
-
-	/* The interrupt handler for this channel may set work_pending
-	 * as soon as we enable it. Make sure it's cleared before
-	 * then. Similarly, make sure it sees the enabled flag set. */
-	channel->work_pending = false;
-	channel->enabled = true;
-	smp_wmb();
-
-	/* Fill the queues before enabling NAPI */
-	efx_for_each_channel_rx_queue(rx_queue, channel)
-		efx_fast_push_rx_descriptors(rx_queue);
-
-	napi_enable(&channel->napi_str);
-}
-
-/* This disables event queue processing and packet transmission.
- * This function does not guarantee that all queue processing
- * (e.g. RX refill) is complete.
- */
-static void efx_stop_channel(struct efx_channel *channel)
-{
-	if (!channel->enabled)
-		return;
-
-	netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
-		  "stop chan %d\n", channel->channel);
 
-	channel->enabled = false;
-	napi_disable(&channel->napi_str);
+	if (netif_device_present(efx->net_dev))
+		netif_tx_wake_all_queues(efx->net_dev);
 }
 
-static void efx_fini_channels(struct efx_nic *efx)
+static void efx_stop_datapath(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
@@ -662,14 +672,21 @@ static void efx_fini_channels(struct efx_nic *efx)
 	}
 
 	efx_for_each_channel(channel, efx) {
-		netif_dbg(channel->efx, drv, channel->efx->net_dev,
-			  "shut down chan %d\n", channel->channel);
+		/* RX packet processing is pipelined, so wait for the
+		 * NAPI handler to complete. At least event queue 0
+		 * might be kept active by non-data events, so don't
+		 * use napi_synchronize() but actually disable NAPI
+		 * temporarily.
+		 */
+		if (efx_channel_has_rx_queue(channel)) {
+			efx_stop_eventq(channel);
+			efx_start_eventq(channel);
+		}
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
 		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
-		efx_fini_eventq(channel);
 	}
 }
 
@@ -701,16 +718,40 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
 	u32 old_rxq_entries, old_txq_entries;
-	unsigned i;
-	int rc;
+	unsigned i, next_buffer_table = 0;
+	int rc = 0;
+
+	/* Not all channels should be reallocated. We must avoid
+	 * reallocating their buffer table entries.
+	 */
+	efx_for_each_channel(channel, efx) {
+		struct efx_rx_queue *rx_queue;
+		struct efx_tx_queue *tx_queue;
+
+		if (channel->type->copy)
+			continue;
+		next_buffer_table = max(next_buffer_table,
+					channel->eventq.index +
+					channel->eventq.entries);
+		efx_for_each_channel_rx_queue(rx_queue, channel)
+			next_buffer_table = max(next_buffer_table,
+						rx_queue->rxd.index +
+						rx_queue->rxd.entries);
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			next_buffer_table = max(next_buffer_table,
+						tx_queue->txd.index +
+						tx_queue->txd.entries);
+	}
 
 	efx_stop_all(efx);
-	efx_fini_channels(efx);
+	efx_stop_interrupts(efx, true);
 
-	/* Clone channels */
+	/* Clone channels (where possible) */
 	memset(other_channel, 0, sizeof(other_channel));
 	for (i = 0; i < efx->n_channels; i++) {
-		channel = efx_alloc_channel(efx, i, efx->channel[i]);
+		channel = efx->channel[i];
+		if (channel->type->copy)
+			channel = channel->type->copy(channel);
 		if (!channel) {
 			rc = -ENOMEM;
 			goto out;
@@ -729,23 +770,31 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		other_channel[i] = channel;
 	}
 
-	rc = efx_probe_channels(efx);
-	if (rc)
-		goto rollback;
-
-	efx_init_napi(efx);
+	/* Restart buffer table allocation */
+	efx->next_buffer_table = next_buffer_table;
 
-	/* Destroy old channels */
 	for (i = 0; i < efx->n_channels; i++) {
-		efx_fini_napi_channel(other_channel[i]);
-		efx_remove_channel(other_channel[i]);
+		channel = efx->channel[i];
+		if (!channel->type->copy)
+			continue;
+		rc = efx_probe_channel(channel);
+		if (rc)
+			goto rollback;
+		efx_init_napi_channel(efx->channel[i]);
 	}
+
 out:
-	/* Free unused channel structures */
-	for (i = 0; i < efx->n_channels; i++)
-		kfree(other_channel[i]);
+	/* Destroy unused channel structures */
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = other_channel[i];
+		if (channel && channel->type->copy) {
+			efx_fini_napi_channel(channel);
+			efx_remove_channel(channel);
+			kfree(channel);
+		}
+	}
 
-	efx_init_channels(efx);
+	efx_start_interrupts(efx, true);
 	efx_start_all(efx);
 	return rc;
 
@@ -766,6 +815,18 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 
+static const struct efx_channel_type efx_default_channel_type = {
+	.pre_probe		= efx_channel_dummy_op_int,
+	.get_name		= efx_get_channel_name,
+	.copy			= efx_copy_channel,
+	.keep_eventq		= false,
+};
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+	return 0;
+}
+
 /**************************************************************************
  *
  * Port handling
@@ -1108,31 +1169,46 @@ static void efx_fini_io(struct efx_nic *efx)
 	pci_disable_device(efx->pci_dev);
 }
 
-static int efx_wanted_parallelism(void)
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 {
 	cpumask_var_t thread_mask;
-	int count;
+	unsigned int count;
 	int cpu;
 
-	if (rss_cpus)
-		return rss_cpus;
+	if (rss_cpus) {
+		count = rss_cpus;
+	} else {
+		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+			netif_warn(efx, probe, efx->net_dev,
+				   "RSS disabled due to allocation failure\n");
+			return 1;
+		}
+
+		count = 0;
+		for_each_online_cpu(cpu) {
+			if (!cpumask_test_cpu(cpu, thread_mask)) {
+				++count;
+				cpumask_or(thread_mask, thread_mask,
+					   topology_thread_cpumask(cpu));
+			}
+		}
 
-	if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
-		printk(KERN_WARNING
-		       "sfc: RSS disabled due to allocation failure\n");
-		return 1;
+		free_cpumask_var(thread_mask);
 	}
 
-	count = 0;
-	for_each_online_cpu(cpu) {
-		if (!cpumask_test_cpu(cpu, thread_mask)) {
-			++count;
-			cpumask_or(thread_mask, thread_mask,
-				   topology_thread_cpumask(cpu));
-		}
+	/* If RSS is requested for the PF *and* VFs then we can't write RSS
+	 * table entries that are inaccessible to VFs
+	 */
+	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+	    count > efx_vf_size(efx)) {
+		netif_warn(efx, probe, efx->net_dev,
+			   "Reducing number of RSS channels from %u to %u for "
+			   "VF support. Increase vf-msix-limit to use more "
+			   "channels on the PF.\n",
+			   count, efx_vf_size(efx));
+		count = efx_vf_size(efx);
 	}
 
-	free_cpumask_var(thread_mask);
 	return count;
 }
 
@@ -1140,7 +1216,8 @@ static int
 efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
 {
 #ifdef CONFIG_RFS_ACCEL
-	int i, rc;
+	unsigned int i;
+	int rc;
 
 	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
 	if (!efx->net_dev->rx_cpu_rmap)
@@ -1163,17 +1240,24 @@ efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
  */
 static int efx_probe_interrupts(struct efx_nic *efx)
 {
-	int max_channels =
-		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
-	int rc, i;
+	unsigned int max_channels =
+		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
+	unsigned int extra_channels = 0;
+	unsigned int i, j;
+	int rc;
+
+	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+		if (efx->extra_channel_type[i])
+			++extra_channels;
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
-		int n_channels;
+		unsigned int n_channels;
 
-		n_channels = efx_wanted_parallelism();
+		n_channels = efx_wanted_parallelism(efx);
 		if (separate_tx_channels)
 			n_channels *= 2;
+		n_channels += extra_channels;
 		n_channels = min(n_channels, max_channels);
 
 		for (i = 0; i < n_channels; i++)
@@ -1182,7 +1266,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		if (rc > 0) {
 			netif_err(efx, drv, efx->net_dev,
 				  "WARNING: Insufficient MSI-X vectors"
-				  " available (%d < %d).\n", rc, n_channels);
+				  " available (%d < %u).\n", rc, n_channels);
 			netif_err(efx, drv, efx->net_dev,
 				  "WARNING: Performance may be reduced.\n");
 			EFX_BUG_ON_PARANOID(rc >= n_channels);
@@ -1193,22 +1277,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 
 		if (rc == 0) {
 			efx->n_channels = n_channels;
+			if (n_channels > extra_channels)
+				n_channels -= extra_channels;
 			if (separate_tx_channels) {
-				efx->n_tx_channels =
-					max(efx->n_channels / 2, 1U);
-				efx->n_rx_channels =
-					max(efx->n_channels -
-					    efx->n_tx_channels, 1U);
+				efx->n_tx_channels = max(n_channels / 2, 1U);
+				efx->n_rx_channels = max(n_channels -
+							 efx->n_tx_channels,
+							 1U);
 			} else {
-				efx->n_tx_channels = efx->n_channels;
-				efx->n_rx_channels = efx->n_channels;
+				efx->n_tx_channels = n_channels;
+				efx->n_rx_channels = n_channels;
 			}
 			rc = efx_init_rx_cpu_rmap(efx, xentries);
 			if (rc) {
 				pci_disable_msix(efx->pci_dev);
 				return rc;
 			}
-			for (i = 0; i < n_channels; i++)
+			for (i = 0; i < efx->n_channels; i++)
 				efx_get_channel(efx, i)->irq =
 					xentries[i].vector;
 		} else {
@@ -1242,9 +1327,68 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 
+	/* Assign extra channels if possible */
+	j = efx->n_channels;
+	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+		if (!efx->extra_channel_type[i])
+			continue;
+		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
+		    efx->n_channels <= extra_channels) {
+			efx->extra_channel_type[i]->handle_no_channel(efx);
+		} else {
+			--j;
+			efx_get_channel(efx, j)->type =
+				efx->extra_channel_type[i];
+		}
+	}
+
+	/* RSS might be usable on VFs even if it is disabled on the PF */
+	efx->rss_spread = (efx->n_rx_channels > 1 ?
+			   efx->n_rx_channels : efx_vf_size(efx));
+
 	return 0;
 }
 
+/* Enable interrupts, then probe and start the event queues */
+static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+{
+	struct efx_channel *channel;
+
+	if (efx->legacy_irq)
+		efx->legacy_irq_enabled = true;
+	efx_nic_enable_interrupts(efx);
+
+	efx_for_each_channel(channel, efx) {
+		if (!channel->type->keep_eventq || !may_keep_eventq)
+			efx_init_eventq(channel);
+		efx_start_eventq(channel);
+	}
+
+	efx_mcdi_mode_event(efx);
+}
+
+static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+{
+	struct efx_channel *channel;
+
+	efx_mcdi_mode_poll(efx);
+
+	efx_nic_disable_interrupts(efx);
+	if (efx->legacy_irq) {
+		synchronize_irq(efx->legacy_irq);
+		efx->legacy_irq_enabled = false;
+	}
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->irq)
+			synchronize_irq(channel->irq);
+
+		efx_stop_eventq(channel);
+		if (!channel->type->keep_eventq || !may_keep_eventq)
+			efx_fini_eventq(channel);
+	}
+}
+
 static void efx_remove_interrupts(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
@@ -1295,11 +1439,13 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		goto fail;
 
+	efx->type->dimension_resources(efx);
+
 	if (efx->n_channels > 1)
 		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
 	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 		efx->rx_indir_table[i] =
-			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
+			ethtool_rxfh_indir_default(i, efx->rss_spread);
 
 	efx_set_channels(efx);
 	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1347,21 +1493,22 @@ static int efx_probe_all(struct efx_nic *efx)
 	}
 
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
-	rc = efx_probe_channels(efx);
-	if (rc)
-		goto fail3;
 
 	rc = efx_probe_filters(efx);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev,
 			  "failed to create filter tables\n");
-		goto fail4;
+		goto fail3;
 	}
 
+	rc = efx_probe_channels(efx);
+	if (rc)
+		goto fail4;
+
 	return 0;
 
  fail4:
-	efx_remove_channels(efx);
+	efx_remove_filters(efx);
  fail3:
 	efx_remove_port(efx);
  fail2:
@@ -1370,15 +1517,13 @@ static int efx_probe_all(struct efx_nic *efx)
 	return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the
- * port, kernel transmit queue, NAPI processing and hardware interrupts,
- * and ensures that the port is scheduled to be reconfigured.
- * This function is safe to call multiple times when the NIC is in any
- * state. */
+/* Called after previous invocation(s) of efx_stop_all, restarts the port,
+ * kernel transmit queues and NAPI processing, and ensures that the port is
+ * scheduled to be reconfigured. This function is safe to call multiple
+ * times when the NIC is in any state.
+ */
 static void efx_start_all(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	/* Check that it is appropriate to restart the interface. All
@@ -1390,28 +1535,8 @@ static void efx_start_all(struct efx_nic *efx)
 	if (!netif_running(efx->net_dev))
 		return;
 
-	/* Mark the port as enabled so port reconfigurations can start, then
-	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-
-	if (netif_device_present(efx->net_dev))
-		netif_tx_wake_all_queues(efx->net_dev);
-
-	efx_for_each_channel(channel, efx)
-		efx_start_channel(channel);
-
-	if (efx->legacy_irq)
-		efx->legacy_irq_enabled = true;
-	efx_nic_enable_interrupts(efx);
-
-	/* Switch to event based MCDI completions after enabling interrupts.
-	 * If a reset has been scheduled, then we need to stay in polled mode.
-	 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
-	 * reset_pending [modified from an atomic context], we instead guarantee
-	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
-	efx_mcdi_mode_event(efx);
-	if (efx->reset_pending)
-		efx_mcdi_mode_poll(efx);
+	efx_start_datapath(efx);
 
 	/* Start the hardware monitor if there is one. Otherwise (we're link
 	 * event driven), we have to poll the PHY because after an event queue
@@ -1447,8 +1572,6 @@ static void efx_flush_all(struct efx_nic *efx)
  * taking locks. */
 static void efx_stop_all(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	/* port_enabled can be read safely under the rtnl lock */
@@ -1456,28 +1579,6 @@ static void efx_stop_all(struct efx_nic *efx)
 		return;
 
 	efx->type->stop_stats(efx);
-
-	/* Switch to MCDI polling on Siena before disabling interrupts */
-	efx_mcdi_mode_poll(efx);
-
-	/* Disable interrupts and wait for ISR to complete */
-	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq) {
-		synchronize_irq(efx->legacy_irq);
-		efx->legacy_irq_enabled = false;
-	}
-	efx_for_each_channel(channel, efx) {
-		if (channel->irq)
-			synchronize_irq(channel->irq);
-	}
-
-	/* Stop all NAPI processing and synchronous rx refills */
-	efx_for_each_channel(channel, efx)
-		efx_stop_channel(channel);
-
-	/* Stop all asynchronous port reconfigurations. Since all
-	 * event processing has already been stopped, there is no
-	 * window to loose phy events */
 	efx_stop_port(efx);
 
 	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
@@ -1485,15 +1586,15 @@ static void efx_stop_all(struct efx_nic *efx)
 
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
-	netif_tx_stop_all_queues(efx->net_dev);
-	netif_tx_lock_bh(efx->net_dev);
-	netif_tx_unlock_bh(efx->net_dev);
+	netif_tx_disable(efx->net_dev);
+
+	efx_stop_datapath(efx);
 }
 
 static void efx_remove_all(struct efx_nic *efx)
 {
-	efx_remove_filters(efx);
 	efx_remove_channels(efx);
+	efx_remove_filters(efx);
 	efx_remove_port(efx);
 	efx_remove_nic(efx);
 }
@@ -1637,15 +1738,21 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
  *
  **************************************************************************/
 
+static void efx_init_napi_channel(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+
+	channel->napi_dev = efx->net_dev;
+	netif_napi_add(channel->napi_dev, &channel->napi_str,
+		       efx_poll, napi_weight);
+}
+
 static void efx_init_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
-	efx_for_each_channel(channel, efx) {
-		channel->napi_dev = efx->net_dev;
-		netif_napi_add(channel->napi_dev, &channel->napi_str,
-			       efx_poll, napi_weight);
-	}
+	efx_for_each_channel(channel, efx)
+		efx_init_napi_channel(channel);
 }
 
 static void efx_fini_napi_channel(struct efx_channel *channel)
@@ -1730,8 +1837,6 @@ static int efx_net_stop(struct net_device *net_dev)
 	if (efx->state != STATE_DISABLED) {
 		/* Stop the device and flush all the channels */
 		efx_stop_all(efx);
-		efx_fini_channels(efx);
-		efx_init_channels(efx);
 	}
 
 	return 0;
@@ -1802,8 +1907,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
-	efx_fini_channels(efx);
-
 	mutex_lock(&efx->mac_lock);
 	/* Reconfigure the MAC before enabling the dma queues so that
 	 * the RX buffers don't overflow */
@@ -1811,8 +1914,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
 
-	efx_init_channels(efx);
-
 	efx_start_all(efx);
 	return 0;
 }
@@ -1833,6 +1934,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 	}
 
 	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+	efx_sriov_mac_address_changed(efx);
 
 	/* Reconfigure the MAC */
 	mutex_lock(&efx->mac_lock);
@@ -1899,6 +2001,12 @@ static const struct net_device_ops efx_netdev_ops = {
 	.ndo_set_mac_address	= efx_set_mac_address,
 	.ndo_set_rx_mode	= efx_set_rx_mode,
 	.ndo_set_features	= efx_set_features,
+#ifdef CONFIG_SFC_SRIOV
+	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
+	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
+	.ndo_get_vf_config	= efx_sriov_get_vf_config,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
@@ -2029,7 +2137,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 	efx_stop_all(efx);
 	mutex_lock(&efx->mac_lock);
 
-	efx_fini_channels(efx);
+	efx_stop_interrupts(efx, false);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 		efx->phy_op->fini(efx);
 	efx->type->fini(efx);
@@ -2066,8 +2174,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 
 	efx->type->reconfigure_mac(efx);
 
-	efx_init_channels(efx);
+	efx_start_interrupts(efx, false);
 	efx_restore_filters(efx);
+	efx_sriov_reset(efx);
 
 	mutex_unlock(&efx->mac_lock);
 
@@ -2272,6 +2381,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
 	INIT_WORK(&efx->mac_work, efx_mac_work);
+	init_waitqueue_head(&efx->flush_wq);
 
 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
 		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
@@ -2329,8 +2439,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
 	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
+	efx_stop_interrupts(efx, false);
 	efx_nic_fini_interrupt(efx);
-	efx_fini_channels(efx);
 	efx_fini_port(efx);
 	efx->type->fini(efx);
 	efx_fini_napi(efx);
@@ -2356,6 +2466,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 	/* Allow any queued efx_resets() to complete */
 	rtnl_unlock();
 
+	efx_stop_interrupts(efx, false);
+	efx_sriov_fini(efx);
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
@@ -2404,16 +2516,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 		goto fail4;
 	}
 
-	efx_init_channels(efx);
-
 	rc = efx_nic_init_interrupt(efx);
 	if (rc)
 		goto fail5;
+	efx_start_interrupts(efx, false);
 
 	return 0;
 
  fail5:
-	efx_fini_channels(efx);
 	efx_fini_port(efx);
  fail4:
 	efx->type->fini(efx);
@@ -2439,7 +2549,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 	struct net_device *net_dev;
 	struct efx_nic *efx;
-	int i, rc;
+	int rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
 	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
@@ -2472,39 +2582,22 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	if (rc)
 		goto fail2;
 
-	/* No serialisation is required with the reset path because
-	 * we're in STATE_INIT. */
-	for (i = 0; i < 5; i++) {
-		rc = efx_pci_probe_main(efx);
+	rc = efx_pci_probe_main(efx);
 
-		/* Serialise against efx_reset(). No more resets will be
-		 * scheduled since efx_stop_all() has been called, and we
-		 * have not and never have been registered with either
-		 * the rtnetlink or driverlink layers. */
-		cancel_work_sync(&efx->reset_work);
-
-		if (rc == 0) {
-			if (efx->reset_pending) {
-				/* If there was a scheduled reset during
-				 * probe, the NIC is probably hosed anyway */
-				efx_pci_remove_main(efx);
-				rc = -EIO;
-			} else {
-				break;
-			}
-		}
-
-		/* Retry if a recoverably reset event has been scheduled */
-		if (efx->reset_pending &
-		    ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
-		    !efx->reset_pending)
-			goto fail3;
+	/* Serialise against efx_reset(). No more resets will be
+	 * scheduled since efx_stop_all() has been called, and we have
+	 * not and never have been registered.
+	 */
+	cancel_work_sync(&efx->reset_work);
 
-		efx->reset_pending = 0;
-	}
+	if (rc)
+		goto fail3;
 
-	if (rc) {
-		netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
+	/* If there was a scheduled reset during probe, the NIC is
+	 * probably hosed anyway.
+	 */
+	if (efx->reset_pending) {
+		rc = -EIO;
 		goto fail4;
 	}
 
@@ -2514,18 +2607,27 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 
 	rc = efx_register_netdev(efx);
 	if (rc)
-		goto fail5;
+		goto fail4;
+
+	rc = efx_sriov_init(efx);
+	if (rc)
+		netif_err(efx, probe, efx->net_dev,
+			  "SR-IOV can't be enabled rc %d\n", rc);
 
 	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 
+	/* Try to create MTDs, but allow this to fail */
 	rtnl_lock();
-	efx_mtd_probe(efx); /* allowed to fail */
+	rc = efx_mtd_probe(efx);
 	rtnl_unlock();
+	if (rc)
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to create MTDs (%d)\n", rc);
+
 	return 0;
 
- fail5:
-	efx_pci_remove_main(efx);
  fail4:
+	efx_pci_remove_main(efx);
  fail3:
 	efx_fini_io(efx);
  fail2:
@@ -2546,7 +2648,7 @@ static int efx_pm_freeze(struct device *dev)
 	netif_device_detach(efx->net_dev);
 
 	efx_stop_all(efx);
-	efx_fini_channels(efx);
+	efx_stop_interrupts(efx, false);
 
 	return 0;
 }
@@ -2557,7 +2659,7 @@ static int efx_pm_thaw(struct device *dev)
 
 	efx->state = STATE_INIT;
 
-	efx_init_channels(efx);
+	efx_start_interrupts(efx, false);
 
 	mutex_lock(&efx->mac_lock);
 	efx->phy_op->reconfigure(efx);
@@ -2663,6 +2765,10 @@ static int __init efx_init_module(void)
 	if (rc)
 		goto err_notifier;
 
+	rc = efx_init_sriov();
+	if (rc)
+		goto err_sriov;
+
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
 		rc = -ENOMEM;
@@ -2678,6 +2784,8 @@ static int __init efx_init_module(void)
  err_pci:
 	destroy_workqueue(reset_workqueue);
  err_reset:
+	efx_fini_sriov();
+ err_sriov:
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
 	return rc;
@@ -2689,6 +2797,7 @@ static void __exit efx_exit_module(void)
 
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
+	efx_fini_sriov();
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 7f546e2c39e2..4debfe07fb88 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -95,6 +95,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #endif
 
 /* Channels */
+extern int efx_channel_dummy_op_int(struct efx_channel *channel);
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f887f65e4189..f22f45f515a8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -808,11 +808,16 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 	return efx_reset(efx, rc);
 }
 
+/* MAC address mask including only MC flag */
+static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+
 static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 				      struct ethtool_rx_flow_spec *rule)
 {
 	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
 	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
 	struct efx_filter_spec spec;
 	u16 vid;
 	u8 proto;
@@ -828,11 +833,18 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 	else
 		rule->ring_cookie = spec.dmaq_id;
 
-	rc = efx_filter_get_eth_local(&spec, &vid,
-				      rule->h_u.ether_spec.h_dest);
+	if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
+		rule->flow_type = ETHER_FLOW;
+		memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
+		if (spec.type == EFX_FILTER_MC_DEF)
+			memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
+		return 0;
+	}
+
+	rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
 	if (rc == 0) {
 		rule->flow_type = ETHER_FLOW;
-		memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+		memset(mac_mask->h_dest, ~0, ETH_ALEN);
 		if (vid != EFX_FILTER_VID_UNSPEC) {
 			rule->flow_type |= FLOW_EXT;
 			rule->h_ext.vlan_tci = htons(vid);
@@ -1001,27 +1013,40 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
 	}
 
 	case ETHER_FLOW | FLOW_EXT:
-		/* Must match all or none of VID */
-		if (rule->m_ext.vlan_tci != htons(0xfff) &&
-		    rule->m_ext.vlan_tci != 0)
-			return -EINVAL;
-	case ETHER_FLOW:
-		/* Must match all of destination */
-		if (!is_broadcast_ether_addr(mac_mask->h_dest))
-			return -EINVAL;
-		/* and nothing else */
+	case ETHER_FLOW: {
+		u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
+				     ntohs(rule->m_ext.vlan_tci) : 0);
+
+		/* Must not match on source address or Ethertype */
 		if (!is_zero_ether_addr(mac_mask->h_source) ||
 		    mac_mask->h_proto)
 			return -EINVAL;
 
-		rc = efx_filter_set_eth_local(
-			&spec,
-			(rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
-			ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
-			mac_entry->h_dest);
+		/* Is it a default UC or MC filter? */
+		if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) &&
+		    vlan_tag_mask == 0) {
+			if (is_multicast_ether_addr(mac_entry->h_dest))
+				rc = efx_filter_set_mc_def(&spec);
+			else
+				rc = efx_filter_set_uc_def(&spec);
+		}
+		/* Otherwise, it must match all of destination and all
+		 * or none of VID.
+		 */
+		else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
+			 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
+			rc = efx_filter_set_eth_local(
+				&spec,
+				vlan_tag_mask ?
+				ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
+				mac_entry->h_dest);
+		} else {
+			rc = -EINVAL;
+		}
 		if (rc)
 			return rc;
 		break;
+	}
 
 	default:
 		return -EINVAL;
@@ -1060,7 +1085,8 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+	return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
+		 efx->n_rx_channels == 1) ?
 		0 : ARRAY_SIZE(efx->rx_indir_table));
 }
 
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 98285115df10..3a1ca2bd1548 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1333,6 +1333,12 @@ out:
 	return rc;
 }
 
+static void falcon_dimension_resources(struct efx_nic *efx)
+{
+	efx->rx_dc_base = 0x20000;
+	efx->tx_dc_base = 0x26000;
+}
+
 /* Probe all SPI devices on the NIC */
 static void falcon_probe_spi_devices(struct efx_nic *efx)
 {
@@ -1749,6 +1755,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
+	.dimension_resources = falcon_dimension_resources,
 	.fini = efx_port_dummy_op_void,
 	.monitor = falcon_monitor,
 	.map_reset_reason = falcon_map_reset_reason,
@@ -1783,8 +1790,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
 	.phys_addr_channels = 4,
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
-	.tx_dc_base = 0x130000,
-	.rx_dc_base = 0x100000,
 	.offload_features = NETIF_F_IP_CSUM,
 };
 
@@ -1792,6 +1797,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
+	.dimension_resources = falcon_dimension_resources,
 	.fini = efx_port_dummy_op_void,
 	.monitor = falcon_monitor,
 	.map_reset_reason = falcon_map_reset_reason,
@@ -1835,8 +1841,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
 					 * interrupt handler only supports 32
 					 * channels */
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
-	.tx_dc_base = 0x130000,
-	.rx_dc_base = 0x100000,
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
 };
 
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 1fbbbee7b1ae..fea7f7300675 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -35,9 +35,17 @@
35enum efx_filter_table_id { 35enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_RX_DEF,
39 EFX_FILTER_TABLE_TX_MAC,
38 EFX_FILTER_TABLE_COUNT, 40 EFX_FILTER_TABLE_COUNT,
39}; 41};
40 42
43enum efx_filter_index {
44 EFX_FILTER_INDEX_UC_DEF,
45 EFX_FILTER_INDEX_MC_DEF,
46 EFX_FILTER_SIZE_RX_DEF,
47};
48
41struct efx_filter_table { 49struct efx_filter_table {
42 enum efx_filter_table_id id; 50 enum efx_filter_table_id id;
43 u32 offset; /* address of table relative to BAR */ 51 u32 offset; /* address of table relative to BAR */
@@ -90,8 +98,9 @@ efx_filter_spec_table_id(const struct efx_filter_spec *spec)
90 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2)); 98 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
91 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2)); 99 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
92 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2)); 100 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
101 BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
93 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); 102 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
94 return spec->type >> 2; 103 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
95} 104}
96 105
97static struct efx_filter_table * 106static struct efx_filter_table *
@@ -109,7 +118,7 @@ static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
109 memset(table->search_depth, 0, sizeof(table->search_depth)); 118 memset(table->search_depth, 0, sizeof(table->search_depth));
110} 119}
111 120
112static void efx_filter_push_rx_limits(struct efx_nic *efx) 121static void efx_filter_push_rx_config(struct efx_nic *efx)
113{ 122{
114 struct efx_filter_state *state = efx->filter_state; 123 struct efx_filter_state *state = efx->filter_state;
115 struct efx_filter_table *table; 124 struct efx_filter_table *table;
@@ -143,9 +152,58 @@ static void efx_filter_push_rx_limits(struct efx_nic *efx)
143 FILTER_CTL_SRCH_FUDGE_WILD); 152 FILTER_CTL_SRCH_FUDGE_WILD);
144 } 153 }
145 154
155 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
156 if (table->size) {
157 EFX_SET_OWORD_FIELD(
158 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
159 table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
160 EFX_SET_OWORD_FIELD(
161 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
163 EFX_FILTER_FLAG_RX_RSS));
164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS));
175 EFX_SET_OWORD_FIELD(
176 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
177 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
178 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
179 }
180
146 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 181 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
147} 182}
148 183
184static void efx_filter_push_tx_limits(struct efx_nic *efx)
185{
186 struct efx_filter_state *state = efx->filter_state;
187 struct efx_filter_table *table;
188 efx_oword_t tx_cfg;
189
190 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
191
192 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
193 if (table->size) {
194 EFX_SET_OWORD_FIELD(
195 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
196 table->search_depth[EFX_FILTER_MAC_FULL] +
197 FILTER_CTL_SRCH_FUDGE_FULL);
198 EFX_SET_OWORD_FIELD(
199 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
200 table->search_depth[EFX_FILTER_MAC_WILD] +
201 FILTER_CTL_SRCH_FUDGE_WILD);
202 }
203
204 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
205}
206
149static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, 207static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
150 __be32 host1, __be16 port1, 208 __be32 host1, __be16 port1,
151 __be32 host2, __be16 port2) 209 __be32 host2, __be16 port2)
@@ -300,7 +358,8 @@ int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
300int efx_filter_set_eth_local(struct efx_filter_spec *spec, 358int efx_filter_set_eth_local(struct efx_filter_spec *spec,
301 u16 vid, const u8 *addr) 359 u16 vid, const u8 *addr)
302{ 360{
303 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); 361 EFX_BUG_ON_PARANOID(!(spec->flags &
362 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
304 363
305 /* This cannot currently be combined with other filtering */ 364 /* This cannot currently be combined with other filtering */
306 if (spec->type != EFX_FILTER_UNSPEC) 365 if (spec->type != EFX_FILTER_UNSPEC)
@@ -319,6 +378,52 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
319 return 0; 378 return 0;
320} 379}
321 380
381/**
382 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
383 * @spec: Specification to initialise
384 */
385int efx_filter_set_uc_def(struct efx_filter_spec *spec)
386{
387 EFX_BUG_ON_PARANOID(!(spec->flags &
388 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
389
390 if (spec->type != EFX_FILTER_UNSPEC)
391 return -EINVAL;
392
393 spec->type = EFX_FILTER_UC_DEF;
394 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
395 return 0;
396}
397
398/**
399 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
400 * @spec: Specification to initialise
401 */
402int efx_filter_set_mc_def(struct efx_filter_spec *spec)
403{
404 EFX_BUG_ON_PARANOID(!(spec->flags &
405 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
406
407 if (spec->type != EFX_FILTER_UNSPEC)
408 return -EINVAL;
409
410 spec->type = EFX_FILTER_MC_DEF;
411 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
412 return 0;
413}
414
415static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
416{
417 struct efx_filter_state *state = efx->filter_state;
418 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
419 struct efx_filter_spec *spec = &table->spec[filter_idx];
420
421 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
422 EFX_FILTER_FLAG_RX_RSS, 0);
423 spec->type = EFX_FILTER_UC_DEF + filter_idx;
424 table->used_bitmap[0] |= 1 << filter_idx;
425}
426
322int efx_filter_get_eth_local(const struct efx_filter_spec *spec, 427int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
323 u16 *vid, u8 *addr) 428 u16 *vid, u8 *addr)
324{ 429{
@@ -366,6 +471,13 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
366 break; 471 break;
367 } 472 }
368 473
474 case EFX_FILTER_TABLE_RX_DEF:
475 /* One filter spec per type */
476 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
477 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
478 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
479 return spec->type - EFX_FILTER_UC_DEF;
480
369 case EFX_FILTER_TABLE_RX_MAC: { 481 case EFX_FILTER_TABLE_RX_MAC: {
370 bool is_wild = spec->type == EFX_FILTER_MAC_WILD; 482 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
371 EFX_POPULATE_OWORD_8( 483 EFX_POPULATE_OWORD_8(
@@ -385,6 +497,18 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
385 break; 497 break;
386 } 498 }
387 499
500 case EFX_FILTER_TABLE_TX_MAC: {
501 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
502 EFX_POPULATE_OWORD_5(*filter,
503 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
504 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
505 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
506 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
507 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
508 data3 = is_wild | spec->dmaq_id << 1;
509 break;
510 }
511
388 default: 512 default:
389 BUG(); 513 BUG();
390 } 514 }
@@ -399,6 +523,10 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
399 memcmp(left->data, right->data, sizeof(left->data))) 523 memcmp(left->data, right->data, sizeof(left->data)))
400 return false; 524 return false;
401 525
526 if (left->flags & EFX_FILTER_FLAG_TX &&
527 left->dmaq_id != right->dmaq_id)
528 return false;
529
402 return true; 530 return true;
403} 531}
404 532
@@ -448,23 +576,40 @@ static int efx_filter_search(struct efx_filter_table *table,
448 * MAC filters without overriding behaviour. 576 * MAC filters without overriding behaviour.
449 */ 577 */
450 578
579#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0
580#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1
581#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2
582
451#define EFX_FILTER_INDEX_WIDTH 13 583#define EFX_FILTER_INDEX_WIDTH 13
452#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) 584#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
453 585
454static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, 586static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
455 unsigned int index, u8 flags) 587 unsigned int index, u8 flags)
456{ 588{
457 return (table_id == EFX_FILTER_TABLE_RX_MAC && 589 unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
458 flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? 590
459 index : 591 if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
460 (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index; 592 if (table_id == EFX_FILTER_TABLE_RX_MAC)
593 match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
594 else if (table_id == EFX_FILTER_TABLE_RX_DEF)
595 match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
596 }
597
598 return match_pri << EFX_FILTER_INDEX_WIDTH | index;
461} 599}
462 600
463static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) 601static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
464{ 602{
465 return (id <= EFX_FILTER_INDEX_MASK) ? 603 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
466 EFX_FILTER_TABLE_RX_MAC : 604
467 (id >> EFX_FILTER_INDEX_WIDTH) - 1; 605 switch (match_pri) {
606 case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
607 return EFX_FILTER_TABLE_RX_MAC;
608 case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
609 return EFX_FILTER_TABLE_RX_DEF;
610 default:
611 return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
612 }
468} 613}
469 614
470static inline unsigned int efx_filter_id_index(u32 id) 615static inline unsigned int efx_filter_id_index(u32 id)
@@ -474,23 +619,30 @@ static inline unsigned int efx_filter_id_index(u32 id)
474 619
475static inline u8 efx_filter_id_flags(u32 id) 620static inline u8 efx_filter_id_flags(u32 id)
476{ 621{
477 return (id <= EFX_FILTER_INDEX_MASK) ? 622 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
478 EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP : 623
479 EFX_FILTER_FLAG_RX; 624 if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
625 return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
626 else if (match_pri <=
627 EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
628 return EFX_FILTER_FLAG_RX;
629 else
630 return EFX_FILTER_FLAG_TX;
480} 631}
481 632
482u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) 633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
483{ 634{
484 struct efx_filter_state *state = efx->filter_state; 635 struct efx_filter_state *state = efx->filter_state;
636 unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
485 637
486 if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0) 638 do {
487 return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH) 639 if (state->table[table_id].size != 0)
488 + state->table[EFX_FILTER_TABLE_RX_MAC].size; 640 return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
489 else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0) 641 << EFX_FILTER_INDEX_WIDTH) +
490 return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH) 642 state->table[table_id].size;
491 + state->table[EFX_FILTER_TABLE_RX_IP].size; 643 } while (table_id--);
492 else 644
493 return 0; 645 return 0;
494} 646}
495 647
496/** 648/**
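
The new filter ID layout replaces the old two-way split with an explicit match priority packed above a 13-bit index, so OVERRIDE_IP entries in both the MAC and default tables sort first and the remaining tables keep their natural order. A small standalone model of the packing and unpacking; names mirror the patch but the program is only illustrative:

#include <stdio.h>

#define INDEX_WIDTH	13
#define INDEX_MASK	((1u << INDEX_WIDTH) - 1)

enum table_id { RX_IP, RX_MAC, RX_DEF, TX_MAC };
enum { PRI_RX_MAC_OVERRIDE_IP, PRI_RX_DEF_OVERRIDE_IP, PRI_NORMAL_BASE };

static unsigned int make_id(enum table_id table, unsigned int index,
			    int override_ip)
{
	unsigned int pri = PRI_NORMAL_BASE + table;

	if (override_ip && table == RX_MAC)
		pri = PRI_RX_MAC_OVERRIDE_IP;
	else if (override_ip && table == RX_DEF)
		pri = PRI_RX_DEF_OVERRIDE_IP;
	return pri << INDEX_WIDTH | index;
}

int main(void)
{
	unsigned int id = make_id(RX_MAC, 42, 0);

	printf("id=%#x table=%u index=%u\n", id,
	       (id >> INDEX_WIDTH) - PRI_NORMAL_BASE, id & INDEX_MASK);
	return 0;
}
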
@@ -548,12 +700,20 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
548 } 700 }
549 *saved_spec = *spec; 701 *saved_spec = *spec;
550 702
551 if (table->search_depth[spec->type] < depth) { 703 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
552 table->search_depth[spec->type] = depth; 704 efx_filter_push_rx_config(efx);
553 efx_filter_push_rx_limits(efx); 705 } else {
554 } 706 if (table->search_depth[spec->type] < depth) {
707 table->search_depth[spec->type] = depth;
708 if (spec->flags & EFX_FILTER_FLAG_TX)
709 efx_filter_push_tx_limits(efx);
710 else
711 efx_filter_push_rx_config(efx);
712 }
555 713
556 efx_writeo(efx, &filter, table->offset + table->step * filter_idx); 714 efx_writeo(efx, &filter,
715 table->offset + table->step * filter_idx);
716 }
557 717
558 netif_vdbg(efx, hw, efx->net_dev, 718 netif_vdbg(efx, hw, efx->net_dev,
559 "%s: filter type %d index %d rxq %u set", 719 "%s: filter type %d index %d rxq %u set",
@@ -571,7 +731,11 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
571{ 731{
572 static efx_oword_t filter; 732 static efx_oword_t filter;
573 733
574 if (test_bit(filter_idx, table->used_bitmap)) { 734 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
735 /* RX default filters must always exist */
736 efx_filter_reset_rx_def(efx, filter_idx);
737 efx_filter_push_rx_config(efx);
738 } else if (test_bit(filter_idx, table->used_bitmap)) {
575 __clear_bit(filter_idx, table->used_bitmap); 739 __clear_bit(filter_idx, table->used_bitmap);
576 --table->used; 740 --table->used;
577 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); 741 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
@@ -617,7 +781,8 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
617 spin_lock_bh(&state->lock); 781 spin_lock_bh(&state->lock);
618 782
619 if (test_bit(filter_idx, table->used_bitmap) && 783 if (test_bit(filter_idx, table->used_bitmap) &&
620 spec->priority == priority && spec->flags == filter_flags) { 784 spec->priority == priority &&
785 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
621 efx_filter_table_clear_entry(efx, table, filter_idx); 786 efx_filter_table_clear_entry(efx, table, filter_idx);
622 if (table->used == 0) 787 if (table->used == 0)
623 efx_filter_table_reset_search_depth(table); 788 efx_filter_table_reset_search_depth(table);
@@ -668,7 +833,8 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
668 spin_lock_bh(&state->lock); 833 spin_lock_bh(&state->lock);
669 834
670 if (test_bit(filter_idx, table->used_bitmap) && 835 if (test_bit(filter_idx, table->used_bitmap) &&
671 spec->priority == priority && spec->flags == filter_flags) { 836 spec->priority == priority &&
837 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
672 *spec_buf = *spec; 838 *spec_buf = *spec;
673 rc = 0; 839 rc = 0;
674 } else { 840 } else {
@@ -722,7 +888,7 @@ u32 efx_filter_count_rx_used(struct efx_nic *efx,
722 spin_lock_bh(&state->lock); 888 spin_lock_bh(&state->lock);
723 889
724 for (table_id = EFX_FILTER_TABLE_RX_IP; 890 for (table_id = EFX_FILTER_TABLE_RX_IP;
725 table_id <= EFX_FILTER_TABLE_RX_MAC; 891 table_id <= EFX_FILTER_TABLE_RX_DEF;
726 table_id++) { 892 table_id++) {
727 table = &state->table[table_id]; 893 table = &state->table[table_id];
728 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 894 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -750,7 +916,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
750 spin_lock_bh(&state->lock); 916 spin_lock_bh(&state->lock);
751 917
752 for (table_id = EFX_FILTER_TABLE_RX_IP; 918 for (table_id = EFX_FILTER_TABLE_RX_IP;
753 table_id <= EFX_FILTER_TABLE_RX_MAC; 919 table_id <= EFX_FILTER_TABLE_RX_DEF;
754 table_id++) { 920 table_id++) {
755 table = &state->table[table_id]; 921 table = &state->table[table_id];
756 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 922 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -785,6 +951,11 @@ void efx_restore_filters(struct efx_nic *efx)
785 951
786 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { 952 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
787 table = &state->table[table_id]; 953 table = &state->table[table_id];
954
955 /* Check whether this is a regular register table */
956 if (table->step == 0)
957 continue;
958
788 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 959 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
789 if (!test_bit(filter_idx, table->used_bitmap)) 960 if (!test_bit(filter_idx, table->used_bitmap))
790 continue; 961 continue;
@@ -794,7 +965,8 @@ void efx_restore_filters(struct efx_nic *efx)
794 } 965 }
795 } 966 }
796 967
797 efx_filter_push_rx_limits(efx); 968 efx_filter_push_rx_config(efx);
969 efx_filter_push_tx_limits(efx);
798 970
799 spin_unlock_bh(&state->lock); 971 spin_unlock_bh(&state->lock);
800} 972}
@@ -833,6 +1005,16 @@ int efx_probe_filters(struct efx_nic *efx)
833 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; 1005 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
834 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; 1006 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
835 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; 1007 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1008
1009 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1010 table->id = EFX_FILTER_TABLE_RX_DEF;
1011 table->size = EFX_FILTER_SIZE_RX_DEF;
1012
1013 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1014 table->id = EFX_FILTER_TABLE_TX_MAC;
1015 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1016 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1017 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
836 } 1018 }
837 1019
838 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { 1020 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
@@ -849,6 +1031,15 @@ int efx_probe_filters(struct efx_nic *efx)
849 goto fail; 1031 goto fail;
850 } 1032 }
851 1033
1034 if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1035 /* RX default filters must always exist */
1036 unsigned i;
1037 for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1038 efx_filter_reset_rx_def(efx, i);
1039 }
1040
1041 efx_filter_push_rx_config(efx);
1042
852 return 0; 1043 return 0;
853 1044
854fail: 1045fail:
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3d4108cd90ca..3c77802aed6c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -20,6 +20,8 @@
20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) 20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID 21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address 22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
23 * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
24 * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
23 * @EFX_FILTER_UNSPEC: Match type is unspecified 25 * @EFX_FILTER_UNSPEC: Match type is unspecified
24 * 26 *
25 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. 27 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
@@ -31,6 +33,8 @@ enum efx_filter_type {
31 EFX_FILTER_UDP_WILD, 33 EFX_FILTER_UDP_WILD,
32 EFX_FILTER_MAC_FULL = 4, 34 EFX_FILTER_MAC_FULL = 4,
33 EFX_FILTER_MAC_WILD, 35 EFX_FILTER_MAC_WILD,
36 EFX_FILTER_UC_DEF = 8,
37 EFX_FILTER_MC_DEF,
34 EFX_FILTER_TYPE_COUNT, /* number of specific types */ 38 EFX_FILTER_TYPE_COUNT, /* number of specific types */
35 EFX_FILTER_UNSPEC = 0xf, 39 EFX_FILTER_UNSPEC = 0xf,
36}; 40};
@@ -39,7 +43,8 @@ enum efx_filter_type {
39 * enum efx_filter_priority - priority of a hardware filter specification 43 * enum efx_filter_priority - priority of a hardware filter specification
40 * @EFX_FILTER_PRI_HINT: Performance hint 44 * @EFX_FILTER_PRI_HINT: Performance hint
41 * @EFX_FILTER_PRI_MANUAL: Manually configured filter 45 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
42 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour 46 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
47 * networking and SR-IOV)
43 */ 48 */
44enum efx_filter_priority { 49enum efx_filter_priority {
45 EFX_FILTER_PRI_HINT = 0, 50 EFX_FILTER_PRI_HINT = 0,
@@ -60,12 +65,14 @@ enum efx_filter_priority {
60 * any IP filter that matches the same packet. By default, IP 65 * any IP filter that matches the same packet. By default, IP
61 * filters take precedence. 66 * filters take precedence.
62 * @EFX_FILTER_FLAG_RX: Filter is for RX 67 * @EFX_FILTER_FLAG_RX: Filter is for RX
68 * @EFX_FILTER_FLAG_TX: Filter is for TX
63 */ 69 */
64enum efx_filter_flags { 70enum efx_filter_flags {
65 EFX_FILTER_FLAG_RX_RSS = 0x01, 71 EFX_FILTER_FLAG_RX_RSS = 0x01,
66 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 72 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
67 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, 73 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
68 EFX_FILTER_FLAG_RX = 0x08, 74 EFX_FILTER_FLAG_RX = 0x08,
75 EFX_FILTER_FLAG_TX = 0x10,
69}; 76};
70 77
71/** 78/**
@@ -103,6 +110,15 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
103 spec->dmaq_id = rxq_id; 110 spec->dmaq_id = rxq_id;
104} 111}
105 112
113static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
114 unsigned txq_id)
115{
116 spec->type = EFX_FILTER_UNSPEC;
117 spec->priority = EFX_FILTER_PRI_REQUIRED;
118 spec->flags = EFX_FILTER_FLAG_TX;
119 spec->dmaq_id = txq_id;
120}
121
106extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, 122extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
107 __be32 host, __be16 port); 123 __be32 host, __be16 port);
108extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, 124extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
@@ -117,6 +133,8 @@ extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
117 u16 vid, const u8 *addr); 133 u16 vid, const u8 *addr);
118extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, 134extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
119 u16 *vid, u8 *addr); 135 u16 *vid, u8 *addr);
136extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
137extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
120enum { 138enum {
121 EFX_FILTER_VID_UNSPEC = 0xffff, 139 EFX_FILTER_VID_UNSPEC = 0xffff,
122}; 140};
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 619f63a66ce7..17b6463e459c 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -560,6 +560,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
560 case MCDI_EVENT_CODE_MAC_STATS_DMA: 560 case MCDI_EVENT_CODE_MAC_STATS_DMA:
561 /* MAC stats are gather lazily. We can ignore this. */ 561 /* MAC stats are gather lazily. We can ignore this. */
562 break; 562 break;
563 case MCDI_EVENT_CODE_FLR:
564 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
565 break;
563 566
564 default: 567 default:
565 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 568 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -1154,6 +1157,37 @@ fail:
1154 return rc; 1157 return rc;
1155} 1158}
1156 1159
1160int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1161{
1162 struct efx_channel *channel;
1163 struct efx_rx_queue *rx_queue;
1164 __le32 *qid;
1165 int rc, count;
1166
1167 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1168 if (qid == NULL)
1169 return -ENOMEM;
1170
1171 count = 0;
1172 efx_for_each_channel(channel, efx) {
1173 efx_for_each_channel_rx_queue(rx_queue, channel) {
1174 if (rx_queue->flush_pending) {
1175 rx_queue->flush_pending = false;
1176 atomic_dec(&efx->rxq_flush_pending);
1177 qid[count++] = cpu_to_le32(
1178 efx_rx_queue_index(rx_queue));
1179 }
1180 }
1181 }
1182
1183 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
1184 count * sizeof(*qid), NULL, 0, NULL);
1185 WARN_ON(rc > 0);
1186
1187 kfree(qid);
1188
1189 return rc;
1190}
1157 1191
1158int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1192int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1159{ 1193{
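
efx_mcdi_flush_rxqs() builds the MC_CMD_FLUSH_RX_QUEUES request as a flat array of little-endian 32-bit queue indices, one per RX queue still marked flush_pending. A standalone sketch of that payload construction; the endian helper and the queue list are stand-ins, not driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t cpu_to_le32_model(uint32_t v)
{
	/* Produce a value whose in-memory byte order is little-endian,
	 * whatever the host byte order is. */
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
			 (v >> 16) & 0xff, (v >> 24) & 0xff };
	uint32_t le;

	memcpy(&le, b, sizeof(le));
	return le;
}

int main(void)
{
	unsigned int pending[] = { 0, 2, 5 };	/* hypothetical RX queue indices */
	uint32_t qid[8];
	unsigned int count = 0, i;

	for (i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
		qid[count++] = cpu_to_le32_model(pending[i]);

	printf("MCDI request body: %u bytes (%u queue IDs)\n",
	       (unsigned int)(count * sizeof(qid[0])), count);
	return 0;
}
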
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index fbaa6efcd744..0bdf3e331832 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -146,6 +146,8 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
146extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 146extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
147extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 147extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
148extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 148extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
149extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
150extern int efx_mcdi_set_mac(struct efx_nic *efx);
149extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 151extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
150 u32 dma_len, int enable, int clear); 152 u32 dma_len, int enable, int clear);
151extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx); 153extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
index f67cf921bd1b..1003f309cba7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
@@ -12,7 +12,7 @@
12#include "mcdi.h" 12#include "mcdi.h"
13#include "mcdi_pcol.h" 13#include "mcdi_pcol.h"
14 14
15static int efx_mcdi_set_mac(struct efx_nic *efx) 15int efx_mcdi_set_mac(struct efx_nic *efx)
16{ 16{
17 u32 reject, fcntl; 17 u32 reject, fcntl;
18 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; 18 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
@@ -44,6 +44,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
44 } 44 }
45 if (efx->wanted_fc & EFX_FC_AUTO) 45 if (efx->wanted_fc & EFX_FC_AUTO)
46 fcntl = MC_CMD_FCNTL_AUTO; 46 fcntl = MC_CMD_FCNTL_AUTO;
47 if (efx->fc_disable)
48 fcntl = MC_CMD_FCNTL_OFF;
47 49
48 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); 50 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
49 51
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 79c192272047..26b3c23b0b6f 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -280,7 +280,7 @@ fail:
280 --part; 280 --part;
281 efx_mtd_remove_partition(part); 281 efx_mtd_remove_partition(part);
282 } 282 }
283 /* mtd_device_register() returns 1 if the MTD table is full */ 283 /* Failure is unlikely here, but probably means we're out of memory */
284 return -ENOMEM; 284 return -ENOMEM;
285} 285}
286 286
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 53864014c2b4..3fbec458c323 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -24,6 +24,7 @@
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/highmem.h> 25#include <linux/highmem.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mutex.h>
27#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29 30
@@ -52,8 +53,10 @@
52 * 53 *
53 **************************************************************************/ 54 **************************************************************************/
54 55
55#define EFX_MAX_CHANNELS 32 56#define EFX_MAX_CHANNELS 32U
56#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
58#define EFX_EXTRA_CHANNEL_IOV 0
59#define EFX_MAX_EXTRA_CHANNELS 1U
57 60
58/* Checksum generation is a per-queue option in hardware, so each 61/* Checksum generation is a per-queue option in hardware, so each
59 * queue visible to the networking core is backed by two hardware TX 62 * queue visible to the networking core is backed by two hardware TX
@@ -81,15 +84,8 @@ struct efx_special_buffer {
81 void *addr; 84 void *addr;
82 dma_addr_t dma_addr; 85 dma_addr_t dma_addr;
83 unsigned int len; 86 unsigned int len;
84 int index; 87 unsigned int index;
85 int entries; 88 unsigned int entries;
86};
87
88enum efx_flush_state {
89 FLUSH_NONE,
90 FLUSH_PENDING,
91 FLUSH_FAILED,
92 FLUSH_DONE,
93}; 89};
94 90
95/** 91/**
@@ -138,7 +134,6 @@ struct efx_tx_buffer {
138 * @txd: The hardware descriptor ring 134 * @txd: The hardware descriptor ring
139 * @ptr_mask: The size of the ring minus 1. 135 * @ptr_mask: The size of the ring minus 1.
140 * @initialised: Has hardware queue been initialised? 136 * @initialised: Has hardware queue been initialised?
141 * @flushed: Used when handling queue flushing
142 * @read_count: Current read pointer. 137 * @read_count: Current read pointer.
143 * This is the number of buffers that have been removed from both rings. 138 * This is the number of buffers that have been removed from both rings.
144 * @old_write_count: The value of @write_count when last checked. 139 * @old_write_count: The value of @write_count when last checked.
@@ -181,7 +176,6 @@ struct efx_tx_queue {
181 struct efx_special_buffer txd; 176 struct efx_special_buffer txd;
182 unsigned int ptr_mask; 177 unsigned int ptr_mask;
183 bool initialised; 178 bool initialised;
184 enum efx_flush_state flushed;
185 179
186 /* Members used mainly on the completion path */ 180 /* Members used mainly on the completion path */
187 unsigned int read_count ____cacheline_aligned_in_smp; 181 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -249,6 +243,9 @@ struct efx_rx_page_state {
249 * @buffer: The software buffer ring 243 * @buffer: The software buffer ring
250 * @rxd: The hardware descriptor ring 244 * @rxd: The hardware descriptor ring
251 * @ptr_mask: The size of the ring minus 1. 245 * @ptr_mask: The size of the ring minus 1.
246 * @enabled: Receive queue enabled indicator.
247 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
248 * @rxq_flush_pending.
252 * @added_count: Number of buffers added to the receive queue. 249 * @added_count: Number of buffers added to the receive queue.
253 * @notified_count: Number of buffers given to NIC (<= @added_count). 250 * @notified_count: Number of buffers given to NIC (<= @added_count).
254 * @removed_count: Number of buffers removed from the receive queue. 251 * @removed_count: Number of buffers removed from the receive queue.
@@ -263,13 +260,14 @@ struct efx_rx_page_state {
263 * @alloc_page_count: RX allocation strategy counter. 260 * @alloc_page_count: RX allocation strategy counter.
264 * @alloc_skb_count: RX allocation strategy counter. 261 * @alloc_skb_count: RX allocation strategy counter.
265 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 262 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
266 * @flushed: Use when handling queue flushing
267 */ 263 */
268struct efx_rx_queue { 264struct efx_rx_queue {
269 struct efx_nic *efx; 265 struct efx_nic *efx;
270 struct efx_rx_buffer *buffer; 266 struct efx_rx_buffer *buffer;
271 struct efx_special_buffer rxd; 267 struct efx_special_buffer rxd;
272 unsigned int ptr_mask; 268 unsigned int ptr_mask;
269 bool enabled;
270 bool flush_pending;
273 271
274 int added_count; 272 int added_count;
275 int notified_count; 273 int notified_count;
@@ -283,8 +281,6 @@ struct efx_rx_queue {
283 unsigned int alloc_skb_count; 281 unsigned int alloc_skb_count;
284 struct timer_list slow_fill; 282 struct timer_list slow_fill;
285 unsigned int slow_fill_count; 283 unsigned int slow_fill_count;
286
287 enum efx_flush_state flushed;
288}; 284};
289 285
290/** 286/**
@@ -318,6 +314,7 @@ enum efx_rx_alloc_method {
318 * 314 *
319 * @efx: Associated Efx NIC 315 * @efx: Associated Efx NIC
320 * @channel: Channel instance number 316 * @channel: Channel instance number
317 * @type: Channel type definition
321 * @enabled: Channel enabled indicator 318 * @enabled: Channel enabled indicator
322 * @irq: IRQ number (MSI and MSI-X only) 319 * @irq: IRQ number (MSI and MSI-X only)
323 * @irq_moderation: IRQ moderation value (in hardware ticks) 320 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -348,6 +345,7 @@ enum efx_rx_alloc_method {
348struct efx_channel { 345struct efx_channel {
349 struct efx_nic *efx; 346 struct efx_nic *efx;
350 int channel; 347 int channel;
348 const struct efx_channel_type *type;
351 bool enabled; 349 bool enabled;
352 int irq; 350 int irq;
353 unsigned int irq_moderation; 351 unsigned int irq_moderation;
@@ -386,6 +384,26 @@ struct efx_channel {
386 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 384 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
387}; 385};
388 386
387/**
388 * struct efx_channel_type - distinguishes traffic and extra channels
389 * @handle_no_channel: Handle failure to allocate an extra channel
390 * @pre_probe: Set up extra state prior to initialisation
391 * @post_remove: Tear down extra state after finalisation, if allocated.
392 * May be called on channels that have not been probed.
393 * @get_name: Generate the channel's name (used for its IRQ handler)
394 * @copy: Copy the channel state prior to reallocation. May be %NULL if
395 * reallocation is not supported.
396 * @keep_eventq: Flag for whether event queue should be kept initialised
397 * while the device is stopped
398 */
399struct efx_channel_type {
400 void (*handle_no_channel)(struct efx_nic *);
401 int (*pre_probe)(struct efx_channel *);
402 void (*get_name)(struct efx_channel *, char *buf, size_t len);
403 struct efx_channel *(*copy)(const struct efx_channel *);
404 bool keep_eventq;
405};
406
389enum efx_led_mode { 407enum efx_led_mode {
390 EFX_LED_OFF = 0, 408 EFX_LED_OFF = 0,
391 EFX_LED_ON = 1, 409 EFX_LED_ON = 1,
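
To make the efx_channel_type callback contract concrete, here is a compilable standalone model of an extra-channel type; the struct is redeclared locally so the example builds outside the driver, and every name is hypothetical rather than part of the patch:

#include <stdio.h>
#include <stddef.h>

struct channel;				/* opaque stand-in for struct efx_channel */

struct channel_type {			/* local mirror of the callbacks above */
	void (*handle_no_channel)(void *nic);
	int (*pre_probe)(struct channel *channel);
	void (*get_name)(struct channel *channel, char *buf, size_t len);
	struct channel *(*copy)(const struct channel *channel);
	int keep_eventq;
};

static void example_handle_no_channel(void *nic)
{
	fprintf(stderr, "no spare channel; extra feature disabled\n");
}

static int example_pre_probe(struct channel *channel)
{
	return 0;			/* allocate per-channel state here */
}

static void example_get_name(struct channel *channel, char *buf, size_t len)
{
	snprintf(buf, len, "eth0-example");
}

static const struct channel_type example_channel_type = {
	.handle_no_channel	= example_handle_no_channel,
	.pre_probe		= example_pre_probe,
	.get_name		= example_get_name,
	.copy			= NULL,	/* reallocation not supported */
	.keep_eventq		= 1,	/* keep event queue while stopped */
};

int main(void)
{
	char name[16];

	example_channel_type.get_name(NULL, name, sizeof(name));
	printf("IRQ name: %s, keep_eventq=%d\n", name,
	       example_channel_type.keep_eventq);
	return 0;
}
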
@@ -613,6 +631,8 @@ union efx_multicast_hash {
613}; 631};
614 632
615struct efx_filter_state; 633struct efx_filter_state;
634struct efx_vf;
635struct vfdi_status;
616 636
617/** 637/**
618 * struct efx_nic - an Efx NIC 638 * struct efx_nic - an Efx NIC
@@ -638,8 +658,13 @@ struct efx_filter_state;
638 * @rx_queue: RX DMA queues 658 * @rx_queue: RX DMA queues
639 * @channel: Channels 659 * @channel: Channels
640 * @channel_name: Names for channels and their IRQs 660 * @channel_name: Names for channels and their IRQs
661 * @extra_channel_types: Types of extra (non-traffic) channels that
662 * should be allocated for this NIC
641 * @rxq_entries: Size of receive queues requested by user. 663 * @rxq_entries: Size of receive queues requested by user.
642 * @txq_entries: Size of transmit queues requested by user. 664 * @txq_entries: Size of transmit queues requested by user.
665 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
666 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
667 * @sram_lim_qw: Qword address limit of SRAM
643 * @next_buffer_table: First available buffer table id 668 * @next_buffer_table: First available buffer table id
644 * @n_channels: Number of channels in use 669 * @n_channels: Number of channels in use
645 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 670 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -677,10 +702,31 @@ struct efx_filter_state;
677 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 702 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
678 * @multicast_hash: Multicast hash table 703 * @multicast_hash: Multicast hash table
679 * @wanted_fc: Wanted flow control flags 704 * @wanted_fc: Wanted flow control flags
705 * @fc_disable: When non-zero flow control is disabled. Typically used to
706 * ensure that network back pressure doesn't delay dma queue flushes.
707 * Serialised by the rtnl lock.
680 * @mac_work: Work item for changing MAC promiscuity and multicast hash 708 * @mac_work: Work item for changing MAC promiscuity and multicast hash
681 * @loopback_mode: Loopback status 709 * @loopback_mode: Loopback status
682 * @loopback_modes: Supported loopback mode bitmask 710 * @loopback_modes: Supported loopback mode bitmask
683 * @loopback_selftest: Offline self-test private state 711 * @loopback_selftest: Offline self-test private state
712 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 713 * @rxq_flush_pending: Count of receive queues that need to be flushed.
 714 * Decremented when efx_flush_rx_queue() is called.
 715 * @rxq_flush_outstanding: Count of RX flushes started but not yet
716 * completed (either success or failure). Not used when MCDI is used to
717 * flush receive queues.
718 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
719 * @vf: Array of &struct efx_vf objects.
720 * @vf_count: Number of VFs intended to be enabled.
721 * @vf_init_count: Number of VFs that have been fully initialised.
722 * @vi_scale: log2 number of vnics per VF.
723 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
724 * @vfdi_status: Common VFDI status page to be dmad to VF address space.
725 * @local_addr_list: List of local addresses. Protected by %local_lock.
726 * @local_page_list: List of DMA addressable pages used to broadcast
727 * %local_addr_list. Protected by %local_lock.
728 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
729 * @peer_work: Work item to broadcast peer addresses to VMs.
684 * @monitor_work: Hardware monitor workitem 730 * @monitor_work: Hardware monitor workitem
685 * @biu_lock: BIU (bus interface unit) lock 731 * @biu_lock: BIU (bus interface unit) lock
686 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 732 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -720,12 +766,18 @@ struct efx_nic {
720 766
721 struct efx_channel *channel[EFX_MAX_CHANNELS]; 767 struct efx_channel *channel[EFX_MAX_CHANNELS];
722 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 768 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
769 const struct efx_channel_type *
770 extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
723 771
724 unsigned rxq_entries; 772 unsigned rxq_entries;
725 unsigned txq_entries; 773 unsigned txq_entries;
774 unsigned tx_dc_base;
775 unsigned rx_dc_base;
776 unsigned sram_lim_qw;
726 unsigned next_buffer_table; 777 unsigned next_buffer_table;
727 unsigned n_channels; 778 unsigned n_channels;
728 unsigned n_rx_channels; 779 unsigned n_rx_channels;
780 unsigned rss_spread;
729 unsigned tx_channel_offset; 781 unsigned tx_channel_offset;
730 unsigned n_tx_channels; 782 unsigned n_tx_channels;
731 unsigned int rx_buffer_len; 783 unsigned int rx_buffer_len;
@@ -769,6 +821,7 @@ struct efx_nic {
769 bool promiscuous; 821 bool promiscuous;
770 union efx_multicast_hash multicast_hash; 822 union efx_multicast_hash multicast_hash;
771 u8 wanted_fc; 823 u8 wanted_fc;
824 unsigned fc_disable;
772 825
773 atomic_t rx_reset; 826 atomic_t rx_reset;
774 enum efx_loopback_mode loopback_mode; 827 enum efx_loopback_mode loopback_mode;
@@ -778,6 +831,25 @@ struct efx_nic {
778 831
779 struct efx_filter_state *filter_state; 832 struct efx_filter_state *filter_state;
780 833
834 atomic_t drain_pending;
835 atomic_t rxq_flush_pending;
836 atomic_t rxq_flush_outstanding;
837 wait_queue_head_t flush_wq;
838
839#ifdef CONFIG_SFC_SRIOV
840 struct efx_channel *vfdi_channel;
841 struct efx_vf *vf;
842 unsigned vf_count;
843 unsigned vf_init_count;
844 unsigned vi_scale;
845 unsigned vf_buftbl_base;
846 struct efx_buffer vfdi_status;
847 struct list_head local_addr_list;
848 struct list_head local_page_list;
849 struct mutex local_lock;
850 struct work_struct peer_work;
851#endif
852
781 /* The following fields may be written more often */ 853 /* The following fields may be written more often */
782 854
783 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 855 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -803,6 +875,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
803 * @probe: Probe the controller 875 * @probe: Probe the controller
804 * @remove: Free resources allocated by probe() 876 * @remove: Free resources allocated by probe()
805 * @init: Initialise the controller 877 * @init: Initialise the controller
878 * @dimension_resources: Dimension controller resources (buffer table,
879 * and VIs once the available interrupt resources are clear)
806 * @fini: Shut down the controller 880 * @fini: Shut down the controller
807 * @monitor: Periodic function for polling link state and hardware monitor 881 * @monitor: Periodic function for polling link state and hardware monitor
808 * @map_reset_reason: Map ethtool reset reason to a reset method 882 * @map_reset_reason: Map ethtool reset reason to a reset method
@@ -842,8 +916,6 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
842 * @phys_addr_channels: Number of channels with physically addressed 916 * @phys_addr_channels: Number of channels with physically addressed
843 * descriptors 917 * descriptors
844 * @timer_period_max: Maximum period of interrupt timer (in ticks) 918 * @timer_period_max: Maximum period of interrupt timer (in ticks)
845 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
846 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
847 * @offload_features: net_device feature flags for protocol offload 919 * @offload_features: net_device feature flags for protocol offload
848 * features implemented in hardware 920 * features implemented in hardware
849 */ 921 */
@@ -851,6 +923,7 @@ struct efx_nic_type {
851 int (*probe)(struct efx_nic *efx); 923 int (*probe)(struct efx_nic *efx);
852 void (*remove)(struct efx_nic *efx); 924 void (*remove)(struct efx_nic *efx);
853 int (*init)(struct efx_nic *efx); 925 int (*init)(struct efx_nic *efx);
926 void (*dimension_resources)(struct efx_nic *efx);
854 void (*fini)(struct efx_nic *efx); 927 void (*fini)(struct efx_nic *efx);
855 void (*monitor)(struct efx_nic *efx); 928 void (*monitor)(struct efx_nic *efx);
856 enum reset_type (*map_reset_reason)(enum reset_type reason); 929 enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -887,8 +960,6 @@ struct efx_nic_type {
887 unsigned int max_interrupt_mode; 960 unsigned int max_interrupt_mode;
888 unsigned int phys_addr_channels; 961 unsigned int phys_addr_channels;
889 unsigned int timer_period_max; 962 unsigned int timer_period_max;
890 unsigned int tx_dc_base;
891 unsigned int rx_dc_base;
892 netdev_features_t offload_features; 963 netdev_features_t offload_features;
893}; 964};
894 965
@@ -912,6 +983,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
912 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 983 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
913 (_efx)->channel[_channel->channel + 1] : NULL) 984 (_efx)->channel[_channel->channel + 1] : NULL)
914 985
986/* Iterate over all used channels in reverse */
987#define efx_for_each_channel_rev(_channel, _efx) \
988 for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
989 _channel; \
990 _channel = _channel->channel ? \
991 (_efx)->channel[_channel->channel - 1] : NULL)
992
915static inline struct efx_tx_queue * 993static inline struct efx_tx_queue *
916efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) 994efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
917{ 995{
@@ -956,13 +1034,6 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
956 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 1034 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
957 _tx_queue++) 1035 _tx_queue++)
958 1036
959static inline struct efx_rx_queue *
960efx_get_rx_queue(struct efx_nic *efx, unsigned index)
961{
962 EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
963 return &efx->channel[index]->rx_queue;
964}
965
966static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) 1037static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
967{ 1038{
968 return channel->channel < channel->efx->n_rx_channels; 1039 return channel->channel < channel->efx->n_rx_channels;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index a43d1ca270c0..2bf4283f05fe 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -49,24 +49,29 @@
49#define EFX_INT_ERROR_EXPIRE 3600 49#define EFX_INT_ERROR_EXPIRE 3600
50#define EFX_MAX_INT_ERRORS 5 50#define EFX_MAX_INT_ERRORS 5
51 51
52/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
53 */
54#define EFX_FLUSH_INTERVAL 10
55#define EFX_FLUSH_POLL_COUNT 100
56
57/* Size and alignment of special buffers (4KB) */
58#define EFX_BUF_SIZE 4096
59
60/* Depth of RX flush request fifo */ 52/* Depth of RX flush request fifo */
61#define EFX_RX_FLUSH_COUNT 4 53#define EFX_RX_FLUSH_COUNT 4
62 54
63/* Generated event code for efx_generate_test_event() */ 55/* Driver generated events */
64#define EFX_CHANNEL_MAGIC_TEST(_channel) \ 56#define _EFX_CHANNEL_MAGIC_TEST 0x000101
65 (0x00010100 + (_channel)->channel) 57#define _EFX_CHANNEL_MAGIC_FILL 0x000102
66 58#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
67/* Generated event code for efx_generate_fill_event() */ 59#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
68#define EFX_CHANNEL_MAGIC_FILL(_channel) \ 60
69 (0x00010200 + (_channel)->channel) 61#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
63
64#define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue)
70 75
71/************************************************************************** 76/**************************************************************************
72 * 77 *
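
The rewritten magic values carry an 8-bit payload (a channel or queue index) below a code field, so a single DRV_GEN event space now distinguishes test, fill and the two new drain events. A trivial standalone model of the encoding, for illustration only:

#include <stdio.h>

#define MAGIC_TEST	0x000101
#define MAGIC_FILL	0x000102
#define MAGIC_RX_DRAIN	0x000103
#define MAGIC_TX_DRAIN	0x000104

#define MAGIC(code, data)	((code) << 8 | (data))
#define MAGIC_CODE(magic)	((magic) >> 8)
#define MAGIC_DATA(magic)	((magic) & 0xff)

int main(void)
{
	unsigned int magic = MAGIC(MAGIC_RX_DRAIN, 3);	/* RX queue 3 drained */

	printf("magic=%#x code=%#x data=%u\n",
	       magic, MAGIC_CODE(magic), MAGIC_DATA(magic));
	return 0;
}
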
@@ -187,7 +192,7 @@ static void
187efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 192efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
188{ 193{
189 efx_qword_t buf_desc; 194 efx_qword_t buf_desc;
190 int index; 195 unsigned int index;
191 dma_addr_t dma_addr; 196 dma_addr_t dma_addr;
192 int i; 197 int i;
193 198
@@ -196,7 +201,7 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
196 /* Write buffer descriptors to NIC */ 201 /* Write buffer descriptors to NIC */
197 for (i = 0; i < buffer->entries; i++) { 202 for (i = 0; i < buffer->entries; i++) {
198 index = buffer->index + i; 203 index = buffer->index + i;
199 dma_addr = buffer->dma_addr + (i * 4096); 204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
200 netif_dbg(efx, probe, efx->net_dev, 205 netif_dbg(efx, probe, efx->net_dev,
201 "mapping special buffer %d at %llx\n", 206 "mapping special buffer %d at %llx\n",
202 index, (unsigned long long)dma_addr); 207 index, (unsigned long long)dma_addr);
@@ -259,6 +264,10 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
259 /* Select new buffer ID */ 264 /* Select new buffer ID */
260 buffer->index = efx->next_buffer_table; 265 buffer->index = efx->next_buffer_table;
261 efx->next_buffer_table += buffer->entries; 266 efx->next_buffer_table += buffer->entries;
267#ifdef CONFIG_SFC_SRIOV
268 BUG_ON(efx_sriov_enabled(efx) &&
269 efx->vf_buftbl_base < efx->next_buffer_table);
270#endif
262 271
263 netif_dbg(efx, probe, efx->net_dev, 272 netif_dbg(efx, probe, efx->net_dev,
264 "allocating special buffers %d-%d at %llx+%x " 273 "allocating special buffers %d-%d at %llx+%x "
@@ -430,8 +439,6 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
430 struct efx_nic *efx = tx_queue->efx; 439 struct efx_nic *efx = tx_queue->efx;
431 efx_oword_t reg; 440 efx_oword_t reg;
432 441
433 tx_queue->flushed = FLUSH_NONE;
434
435 /* Pin TX descriptor ring */ 442 /* Pin TX descriptor ring */
436 efx_init_special_buffer(efx, &tx_queue->txd); 443 efx_init_special_buffer(efx, &tx_queue->txd);
437 444
@@ -488,9 +495,6 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
488 struct efx_nic *efx = tx_queue->efx; 495 struct efx_nic *efx = tx_queue->efx;
489 efx_oword_t tx_flush_descq; 496 efx_oword_t tx_flush_descq;
490 497
491 tx_queue->flushed = FLUSH_PENDING;
492
493 /* Post a flush command */
494 EFX_POPULATE_OWORD_2(tx_flush_descq, 498 EFX_POPULATE_OWORD_2(tx_flush_descq,
495 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 499 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
496 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 500 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -502,9 +506,6 @@ void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
502 struct efx_nic *efx = tx_queue->efx; 506 struct efx_nic *efx = tx_queue->efx;
503 efx_oword_t tx_desc_ptr; 507 efx_oword_t tx_desc_ptr;
504 508
505 /* The queue should have been flushed */
506 WARN_ON(tx_queue->flushed != FLUSH_DONE);
507
508 /* Remove TX descriptor ring from card */ 509 /* Remove TX descriptor ring from card */
509 EFX_ZERO_OWORD(tx_desc_ptr); 510 EFX_ZERO_OWORD(tx_desc_ptr);
510 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 511 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -595,8 +596,6 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
595 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 596 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
596 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 597 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
597 598
598 rx_queue->flushed = FLUSH_NONE;
599
600 /* Pin RX descriptor ring */ 599 /* Pin RX descriptor ring */
601 efx_init_special_buffer(efx, &rx_queue->rxd); 600 efx_init_special_buffer(efx, &rx_queue->rxd);
602 601
@@ -625,9 +624,6 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
625 struct efx_nic *efx = rx_queue->efx; 624 struct efx_nic *efx = rx_queue->efx;
626 efx_oword_t rx_flush_descq; 625 efx_oword_t rx_flush_descq;
627 626
628 rx_queue->flushed = FLUSH_PENDING;
629
630 /* Post a flush command */
631 EFX_POPULATE_OWORD_2(rx_flush_descq, 627 EFX_POPULATE_OWORD_2(rx_flush_descq,
632 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 628 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
633 FRF_AZ_RX_FLUSH_DESCQ, 629 FRF_AZ_RX_FLUSH_DESCQ,
@@ -640,9 +636,6 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
640 efx_oword_t rx_desc_ptr; 636 efx_oword_t rx_desc_ptr;
641 struct efx_nic *efx = rx_queue->efx; 637 struct efx_nic *efx = rx_queue->efx;
642 638
643 /* The queue should already have been flushed */
644 WARN_ON(rx_queue->flushed != FLUSH_DONE);
645
646 /* Remove RX descriptor ring from card */ 639 /* Remove RX descriptor ring from card */
647 EFX_ZERO_OWORD(rx_desc_ptr); 640 EFX_ZERO_OWORD(rx_desc_ptr);
648 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 641 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
@@ -660,6 +653,103 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
660 653
661/************************************************************************** 654/**************************************************************************
662 * 655 *
656 * Flush handling
657 *
658 **************************************************************************/
659
660/* efx_nic_flush_queues() must be woken up when all flushes are completed,
661 * or more RX flushes can be kicked off.
662 */
663static bool efx_flush_wake(struct efx_nic *efx)
664{
665 /* Ensure that all updates are visible to efx_nic_flush_queues() */
666 smp_mb();
667
668 return (atomic_read(&efx->drain_pending) == 0 ||
669 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
670 && atomic_read(&efx->rxq_flush_pending) > 0));
671}
672
673/* Flush all the transmit queues, and continue flushing receive queues until
 674 * they're all flushed. Wait for the DRAIN events to be received so that there
675 * are no more RX and TX events left on any channel. */
676int efx_nic_flush_queues(struct efx_nic *efx)
677{
678 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
679 struct efx_channel *channel;
680 struct efx_rx_queue *rx_queue;
681 struct efx_tx_queue *tx_queue;
682 int rc = 0;
683
684 efx->fc_disable++;
685 efx->type->prepare_flush(efx);
686
687 efx_for_each_channel(channel, efx) {
688 efx_for_each_channel_tx_queue(tx_queue, channel) {
689 atomic_inc(&efx->drain_pending);
690 efx_flush_tx_queue(tx_queue);
691 }
692 efx_for_each_channel_rx_queue(rx_queue, channel) {
693 atomic_inc(&efx->drain_pending);
694 rx_queue->flush_pending = true;
695 atomic_inc(&efx->rxq_flush_pending);
696 }
697 }
698
699 while (timeout && atomic_read(&efx->drain_pending) > 0) {
700 /* If SRIOV is enabled, then offload receive queue flushing to
701 * the firmware (though we will still have to poll for
702 * completion). If that fails, fall back to the old scheme.
703 */
704 if (efx_sriov_enabled(efx)) {
705 rc = efx_mcdi_flush_rxqs(efx);
706 if (!rc)
707 goto wait;
708 }
709
710 /* The hardware supports four concurrent rx flushes, each of
711 * which may need to be retried if there is an outstanding
712 * descriptor fetch
713 */
714 efx_for_each_channel(channel, efx) {
715 efx_for_each_channel_rx_queue(rx_queue, channel) {
716 if (atomic_read(&efx->rxq_flush_outstanding) >=
717 EFX_RX_FLUSH_COUNT)
718 break;
719
720 if (rx_queue->flush_pending) {
721 rx_queue->flush_pending = false;
722 atomic_dec(&efx->rxq_flush_pending);
723 atomic_inc(&efx->rxq_flush_outstanding);
724 efx_flush_rx_queue(rx_queue);
725 }
726 }
727 }
728
729 wait:
730 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
731 timeout);
732 }
733
734 if (atomic_read(&efx->drain_pending)) {
735 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
736 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
737 atomic_read(&efx->rxq_flush_outstanding),
738 atomic_read(&efx->rxq_flush_pending));
739 rc = -ETIMEDOUT;
740
741 atomic_set(&efx->drain_pending, 0);
742 atomic_set(&efx->rxq_flush_pending, 0);
743 atomic_set(&efx->rxq_flush_outstanding, 0);
744 }
745
746 efx->fc_disable--;
747
748 return rc;
749}
750
751/**************************************************************************
752 *
663 * Event queue processing 753 * Event queue processing
664 * Event queues are processed by per-channel tasklets. 754 * Event queues are processed by per-channel tasklets.
665 * 755 *
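
The three counters behave as a small state machine: drain_pending counts every TX and RX queue still awaiting a drain event, rxq_flush_pending counts RX flushes not yet requested, and at most EFX_RX_FLUSH_COUNT requests are outstanding at once. The predicate below is the same wake condition efx_flush_wake() tests, modelled as a standalone program:

#include <stdbool.h>
#include <stdio.h>

#define RX_FLUSH_COUNT 4	/* hardware limit on concurrent RX flushes */

struct flush_state {
	int drain_pending;		/* TX+RX queues not yet drained */
	int rxq_flush_pending;		/* RX flushes not yet requested */
	int rxq_flush_outstanding;	/* RX flushes requested, not completed */
};

static bool flush_wake(const struct flush_state *s)
{
	/* Wake the waiter when everything has drained, or when another
	 * RX flush can be kicked off. */
	return s->drain_pending == 0 ||
	       (s->rxq_flush_outstanding < RX_FLUSH_COUNT &&
		s->rxq_flush_pending > 0);
}

int main(void)
{
	struct flush_state s = {
		.drain_pending = 6,
		.rxq_flush_pending = 2,
		.rxq_flush_outstanding = 4,
	};

	printf("wake=%d\n", flush_wake(&s));	/* 0: all four slots busy */
	s.rxq_flush_outstanding = 3;
	printf("wake=%d\n", flush_wake(&s));	/* 1: a slot has freed up */
	return 0;
}
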
@@ -682,7 +772,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
682} 772}
683 773
684/* Use HW to insert a SW defined event */ 774/* Use HW to insert a SW defined event */
685static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 775void efx_generate_event(struct efx_nic *efx, unsigned int evq,
776 efx_qword_t *event)
686{ 777{
687 efx_oword_t drv_ev_reg; 778 efx_oword_t drv_ev_reg;
688 779
@@ -692,8 +783,18 @@ static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
692 drv_ev_reg.u32[1] = event->u32[1]; 783 drv_ev_reg.u32[1] = event->u32[1];
693 drv_ev_reg.u32[2] = 0; 784 drv_ev_reg.u32[2] = 0;
694 drv_ev_reg.u32[3] = 0; 785 drv_ev_reg.u32[3] = 0;
695 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); 786 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
696 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); 787 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
788}
789
790static void efx_magic_event(struct efx_channel *channel, u32 magic)
791{
792 efx_qword_t event;
793
794 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
795 FSE_AZ_EV_CODE_DRV_GEN_EV,
796 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
797 efx_generate_event(channel->efx, channel->channel, &event);
697} 798}
698 799
699/* Handle a transmit completion event 800/* Handle a transmit completion event
@@ -710,6 +811,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
710 struct efx_nic *efx = channel->efx; 811 struct efx_nic *efx = channel->efx;
711 int tx_packets = 0; 812 int tx_packets = 0;
712 813
814 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
815 return 0;
816
713 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 817 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
714 /* Transmit completion */ 818 /* Transmit completion */
715 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 819 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
@@ -851,6 +955,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
851 bool rx_ev_pkt_ok; 955 bool rx_ev_pkt_ok;
852 u16 flags; 956 u16 flags;
853 struct efx_rx_queue *rx_queue; 957 struct efx_rx_queue *rx_queue;
958 struct efx_nic *efx = channel->efx;
959
960 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
961 return;
854 962
855 /* Basic packet information */ 963 /* Basic packet information */
856 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 964 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -897,24 +1005,101 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
897 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); 1005 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
898} 1006}
899 1007
1008/* If this flush done event corresponds to a &struct efx_tx_queue, then
1009 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1010 * of all transmit completions.
1011 */
1012static void
1013efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1014{
1015 struct efx_tx_queue *tx_queue;
1016 int qid;
1017
1018 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1019 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1020 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1021 qid % EFX_TXQ_TYPES);
1022
1023 efx_magic_event(tx_queue->channel,
1024 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1025 }
1026}
1027
1028/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 1029 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1030 * the RX queue back to the mask of RX queues in need of flushing.
1031 */
1032static void
1033efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1034{
1035 struct efx_channel *channel;
1036 struct efx_rx_queue *rx_queue;
1037 int qid;
1038 bool failed;
1039
1040 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1041 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1042 if (qid >= efx->n_channels)
1043 return;
1044 channel = efx_get_channel(efx, qid);
1045 if (!efx_channel_has_rx_queue(channel))
1046 return;
1047 rx_queue = efx_channel_get_rx_queue(channel);
1048
1049 if (failed) {
1050 netif_info(efx, hw, efx->net_dev,
1051 "RXQ %d flush retry\n", qid);
1052 rx_queue->flush_pending = true;
1053 atomic_inc(&efx->rxq_flush_pending);
1054 } else {
1055 efx_magic_event(efx_rx_queue_channel(rx_queue),
1056 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1057 }
1058 atomic_dec(&efx->rxq_flush_outstanding);
1059 if (efx_flush_wake(efx))
1060 wake_up(&efx->flush_wq);
1061}
1062
1063static void
1064efx_handle_drain_event(struct efx_channel *channel)
1065{
1066 struct efx_nic *efx = channel->efx;
1067
1068 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1069 atomic_dec(&efx->drain_pending);
1070 if (efx_flush_wake(efx))
1071 wake_up(&efx->flush_wq);
1072}
1073
900static void 1074static void
901efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1075efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
902{ 1076{
903 struct efx_nic *efx = channel->efx; 1077 struct efx_nic *efx = channel->efx;
904 unsigned code; 1078 struct efx_rx_queue *rx_queue =
1079 efx_channel_has_rx_queue(channel) ?
1080 efx_channel_get_rx_queue(channel) : NULL;
1081 unsigned magic, code;
1082
1083 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1084 code = _EFX_CHANNEL_MAGIC_CODE(magic);
905 1085
906 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1086 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
907 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 1087 /* ignore */
908 ; /* ignore */ 1088 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
909 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
910 /* The queue must be empty, so we won't receive any rx 1089 /* The queue must be empty, so we won't receive any rx
911 * events, so efx_process_channel() won't refill the 1090 * events, so efx_process_channel() won't refill the
912 * queue. Refill it here */ 1091 * queue. Refill it here */
913 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); 1092 efx_fast_push_rx_descriptors(rx_queue);
914 else 1093 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1094 rx_queue->enabled = false;
1095 efx_handle_drain_event(channel);
1096 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1097 efx_handle_drain_event(channel);
1098 } else {
915 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1099 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
916 "generated event "EFX_QWORD_FMT"\n", 1100 "generated event "EFX_QWORD_FMT"\n",
917 channel->channel, EFX_QWORD_VAL(*event)); 1101 channel->channel, EFX_QWORD_VAL(*event));
1102 }
918} 1103}
919 1104
920static void 1105static void
@@ -931,10 +1116,14 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
931 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1116 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
932 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1117 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
933 channel->channel, ev_sub_data); 1118 channel->channel, ev_sub_data);
1119 efx_handle_tx_flush_done(efx, event);
1120 efx_sriov_tx_flush_done(efx, event);
934 break; 1121 break;
935 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1122 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
936 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1123 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
937 channel->channel, ev_sub_data); 1124 channel->channel, ev_sub_data);
1125 efx_handle_rx_flush_done(efx, event);
1126 efx_sriov_rx_flush_done(efx, event);
938 break; 1127 break;
939 case FSE_AZ_EVQ_INIT_DONE_EV: 1128 case FSE_AZ_EVQ_INIT_DONE_EV:
940 netif_dbg(efx, hw, efx->net_dev, 1129 netif_dbg(efx, hw, efx->net_dev,
@@ -966,16 +1155,24 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
966 RESET_TYPE_DISABLE); 1155 RESET_TYPE_DISABLE);
967 break; 1156 break;
968 case FSE_BZ_RX_DSC_ERROR_EV: 1157 case FSE_BZ_RX_DSC_ERROR_EV:
969 netif_err(efx, rx_err, efx->net_dev, 1158 if (ev_sub_data < EFX_VI_BASE) {
970 "RX DMA Q %d reports descriptor fetch error." 1159 netif_err(efx, rx_err, efx->net_dev,
971 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1160 "RX DMA Q %d reports descriptor fetch error."
972 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1161 " RX Q %d is disabled.\n", ev_sub_data,
1162 ev_sub_data);
1163 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1164 } else
1165 efx_sriov_desc_fetch_err(efx, ev_sub_data);
973 break; 1166 break;
974 case FSE_BZ_TX_DSC_ERROR_EV: 1167 case FSE_BZ_TX_DSC_ERROR_EV:
975 netif_err(efx, tx_err, efx->net_dev, 1168 if (ev_sub_data < EFX_VI_BASE) {
976 "TX DMA Q %d reports descriptor fetch error." 1169 netif_err(efx, tx_err, efx->net_dev,
977 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1170 "TX DMA Q %d reports descriptor fetch error."
978 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1171 " TX Q %d is disabled.\n", ev_sub_data,
1172 ev_sub_data);
1173 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1174 } else
1175 efx_sriov_desc_fetch_err(efx, ev_sub_data);
979 break; 1176 break;
980 default: 1177 default:
981 netif_vdbg(efx, hw, efx->net_dev, 1178 netif_vdbg(efx, hw, efx->net_dev,
@@ -1035,6 +1232,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1035 case FSE_AZ_EV_CODE_DRIVER_EV: 1232 case FSE_AZ_EV_CODE_DRIVER_EV:
1036 efx_handle_driver_event(channel, &event); 1233 efx_handle_driver_event(channel, &event);
1037 break; 1234 break;
1235 case FSE_CZ_EV_CODE_USER_EV:
1236 efx_sriov_event(channel, &event);
1237 break;
1038 case FSE_CZ_EV_CODE_MCDI_EV: 1238 case FSE_CZ_EV_CODE_MCDI_EV:
1039 efx_mcdi_process_event(channel, &event); 1239 efx_mcdi_process_event(channel, &event);
1040 break; 1240 break;
@@ -1135,161 +1335,13 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
1135 1335
1136void efx_nic_generate_test_event(struct efx_channel *channel) 1336void efx_nic_generate_test_event(struct efx_channel *channel)
1137{ 1337{
1138 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); 1338 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1139 efx_qword_t test_event;
1140
1141 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1142 FSE_AZ_EV_CODE_DRV_GEN_EV,
1143 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1144 efx_generate_event(channel, &test_event);
1145}
1146
1147void efx_nic_generate_fill_event(struct efx_channel *channel)
1148{
1149 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1150 efx_qword_t test_event;
1151
1152 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1153 FSE_AZ_EV_CODE_DRV_GEN_EV,
1154 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1155 efx_generate_event(channel, &test_event);
1156} 1339}
1157 1340
1158/************************************************************************** 1341void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1159 *
1160 * Flush handling
1161 *
1162 **************************************************************************/
1163
1164
1165static void efx_poll_flush_events(struct efx_nic *efx)
1166{
1167 struct efx_channel *channel = efx_get_channel(efx, 0);
1168 struct efx_tx_queue *tx_queue;
1169 struct efx_rx_queue *rx_queue;
1170 unsigned int read_ptr = channel->eventq_read_ptr;
1171 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1172
1173 do {
1174 efx_qword_t *event = efx_event(channel, read_ptr);
1175 int ev_code, ev_sub_code, ev_queue;
1176 bool ev_failed;
1177
1178 if (!efx_event_present(event))
1179 break;
1180
1181 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1182 ev_sub_code = EFX_QWORD_FIELD(*event,
1183 FSF_AZ_DRIVER_EV_SUBCODE);
1184 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1185 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1186 ev_queue = EFX_QWORD_FIELD(*event,
1187 FSF_AZ_DRIVER_EV_SUBDATA);
1188 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1189 tx_queue = efx_get_tx_queue(
1190 efx, ev_queue / EFX_TXQ_TYPES,
1191 ev_queue % EFX_TXQ_TYPES);
1192 tx_queue->flushed = FLUSH_DONE;
1193 }
1194 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1195 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1196 ev_queue = EFX_QWORD_FIELD(
1197 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1198 ev_failed = EFX_QWORD_FIELD(
1199 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1200 if (ev_queue < efx->n_rx_channels) {
1201 rx_queue = efx_get_rx_queue(efx, ev_queue);
1202 rx_queue->flushed =
1203 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1204 }
1205 }
1206
1207 /* We're about to destroy the queue anyway, so
1208 * it's ok to throw away every non-flush event */
1209 EFX_SET_QWORD(*event);
1210
1211 ++read_ptr;
1212 } while (read_ptr != end_ptr);
1213
1214 channel->eventq_read_ptr = read_ptr;
1215}
1216
1217/* Handle tx and rx flushes at the same time, since they run in
1218 * parallel in the hardware and there's no reason for us to
1219 * serialise them */
1220int efx_nic_flush_queues(struct efx_nic *efx)
1221{ 1342{
1222 struct efx_channel *channel; 1343 efx_magic_event(efx_rx_queue_channel(rx_queue),
1223 struct efx_rx_queue *rx_queue; 1344 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1224 struct efx_tx_queue *tx_queue;
1225 int i, tx_pending, rx_pending;
1226
1227 /* If necessary prepare the hardware for flushing */
1228 efx->type->prepare_flush(efx);
1229
1230 /* Flush all tx queues in parallel */
1231 efx_for_each_channel(channel, efx) {
1232 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1233 if (tx_queue->initialised)
1234 efx_flush_tx_queue(tx_queue);
1235 }
1236 }
1237
1238 /* The hardware supports four concurrent rx flushes, each of which may
1239 * need to be retried if there is an outstanding descriptor fetch */
1240 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1241 rx_pending = tx_pending = 0;
1242 efx_for_each_channel(channel, efx) {
1243 efx_for_each_channel_rx_queue(rx_queue, channel) {
1244 if (rx_queue->flushed == FLUSH_PENDING)
1245 ++rx_pending;
1246 }
1247 }
1248 efx_for_each_channel(channel, efx) {
1249 efx_for_each_channel_rx_queue(rx_queue, channel) {
1250 if (rx_pending == EFX_RX_FLUSH_COUNT)
1251 break;
1252 if (rx_queue->flushed == FLUSH_FAILED ||
1253 rx_queue->flushed == FLUSH_NONE) {
1254 efx_flush_rx_queue(rx_queue);
1255 ++rx_pending;
1256 }
1257 }
1258 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1259 if (tx_queue->initialised &&
1260 tx_queue->flushed != FLUSH_DONE)
1261 ++tx_pending;
1262 }
1263 }
1264
1265 if (rx_pending == 0 && tx_pending == 0)
1266 return 0;
1267
1268 msleep(EFX_FLUSH_INTERVAL);
1269 efx_poll_flush_events(efx);
1270 }
1271
1272 /* Mark the queues as all flushed. We're going to return failure
1273 * leading to a reset, or fake up success anyway */
1274 efx_for_each_channel(channel, efx) {
1275 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1276 if (tx_queue->initialised &&
1277 tx_queue->flushed != FLUSH_DONE)
1278 netif_err(efx, hw, efx->net_dev,
1279 "tx queue %d flush command timed out\n",
1280 tx_queue->queue);
1281 tx_queue->flushed = FLUSH_DONE;
1282 }
1283 efx_for_each_channel_rx_queue(rx_queue, channel) {
1284 if (rx_queue->flushed != FLUSH_DONE)
1285 netif_err(efx, hw, efx->net_dev,
1286 "rx queue %d flush command timed out\n",
1287 efx_rx_queue_index(rx_queue));
1288 rx_queue->flushed = FLUSH_DONE;
1289 }
1290 }
1291
1292 return -ETIMEDOUT;
1293} 1345}
1294 1346
1295/************************************************************************** 1347/**************************************************************************
@@ -1315,18 +1367,10 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1315 1367
1316void efx_nic_enable_interrupts(struct efx_nic *efx) 1368void efx_nic_enable_interrupts(struct efx_nic *efx)
1317{ 1369{
1318 struct efx_channel *channel;
1319
1320 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1370 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1321 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1371 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1322 1372
1323 /* Enable interrupts */
1324 efx_nic_interrupts(efx, true, false); 1373 efx_nic_interrupts(efx, true, false);
1325
1326 /* Force processing of all the channels to get the EVQ RPTRs up to
1327 date */
1328 efx_for_each_channel(channel, efx)
1329 efx_schedule_channel(channel);
1330} 1374}
1331 1375
1332void efx_nic_disable_interrupts(struct efx_nic *efx) 1376void efx_nic_disable_interrupts(struct efx_nic *efx)
@@ -1593,6 +1637,58 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1593 free_irq(efx->legacy_irq, efx); 1637 free_irq(efx->legacy_irq, efx);
1594} 1638}
1595 1639
1640/* Looks at available SRAM resources and works out how many queues we
1641 * can support, and where things like descriptor caches should live.
1642 *
1643 * SRAM is split up as follows:
1644 * 0 buftbl entries for channels
1645 * efx->vf_buftbl_base buftbl entries for SR-IOV
1646 * efx->rx_dc_base RX descriptor caches
1647 * efx->tx_dc_base TX descriptor caches
1648 */
1649void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1650{
1651 unsigned vi_count, buftbl_min;
1652
1653 /* Account for the buffer table entries backing the datapath channels
1654 * and the descriptor caches for those channels.
1655 */
1656 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1657 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1658 efx->n_channels * EFX_MAX_EVQ_SIZE)
1659 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1660 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1661
1662#ifdef CONFIG_SFC_SRIOV
1663 if (efx_sriov_wanted(efx)) {
1664 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1665
1666 efx->vf_buftbl_base = buftbl_min;
1667
1668 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1669 vi_count = max(vi_count, EFX_VI_BASE);
1670 buftbl_free = (sram_lim_qw - buftbl_min -
1671 vi_count * vi_dc_entries);
1672
1673 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1674 efx_vf_size(efx));
1675 vf_limit = min(buftbl_free / entries_per_vf,
1676 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1677
1678 if (efx->vf_count > vf_limit) {
1679 netif_err(efx, probe, efx->net_dev,
1680 "Reducing VF count from from %d to %d\n",
1681 efx->vf_count, vf_limit);
1682 efx->vf_count = vf_limit;
1683 }
1684 vi_count += efx->vf_count * efx_vf_size(efx);
1685 }
1686#endif
1687
1688 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1689 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1690}
1691
1596u32 efx_nic_fpga_ver(struct efx_nic *efx) 1692u32 efx_nic_fpga_ver(struct efx_nic *efx)
1597{ 1693{
1598 efx_oword_t altera_build; 1694 efx_oword_t altera_build;
@@ -1605,11 +1701,9 @@ void efx_nic_init_common(struct efx_nic *efx)
1605 efx_oword_t temp; 1701 efx_oword_t temp;
1606 1702
1607 /* Set positions of descriptor caches in SRAM. */ 1703 /* Set positions of descriptor caches in SRAM. */
1608 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, 1704 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1609 efx->type->tx_dc_base / 8);
1610 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1705 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1611 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, 1706 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1612 efx->type->rx_dc_base / 8);
1613 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1707 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1614 1708
1615 /* Set TX descriptor cache size. */ 1709 /* Set TX descriptor cache size. */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 905a1877d603..246c4140453c 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -65,6 +65,11 @@ enum {
65#define FALCON_GMAC_LOOPBACKS \ 65#define FALCON_GMAC_LOOPBACKS \
66 (1 << LOOPBACK_GMAC) 66 (1 << LOOPBACK_GMAC)
67 67
68/* Alignment of PCIe DMA boundaries (4KB) */
69#define EFX_PAGE_SIZE 4096
70/* Size and alignment of buffer table entries (same) */
71#define EFX_BUF_SIZE EFX_PAGE_SIZE
72
68/** 73/**
69 * struct falcon_board_type - board operations and type information 74 * struct falcon_board_type - board operations and type information
70 * @id: Board type id, as found in NVRAM 75 * @id: Board type id, as found in NVRAM
@@ -164,6 +169,95 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
164} 169}
165#endif 170#endif
166 171
172/*
173 * On the SFC9000 family each port is associated with 1 PCI physical
174 * function (PF) handled by sfc and a configurable number of virtual
175 * functions (VFs) that may be handled by some other driver, often in
176 * a VM guest. The queue pointer registers are mapped in both PF and
177 * VF BARs such that an 8K region provides access to a single RX, TX
178 * and event queue (collectively a Virtual Interface, VI or VNIC).
179 *
180 * The PF has access to all 1024 VIs while VFs are mapped to VIs
181 * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
 182 * in range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
183 * The number of VIs and the VI_SCALE value are configurable but must
184 * be established at boot time by firmware.
185 */
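/* Illustrative sketch, not part of the patch (helper name is made up):
 * with the mapping described above, VF i owns (1 << VI_SCALE) consecutive
 * VIs starting at VI_BASE + (i << VI_SCALE).  The helper below computes
 * that half-open range.
 */
static inline void example_vf_vi_range(unsigned int vf_index,
				       unsigned int vi_base,
				       unsigned int vi_scale,
				       unsigned int *first_vi,
				       unsigned int *end_vi)
{
	unsigned int vis_per_vf = 1U << vi_scale;

	*first_vi = vi_base + (vf_index << vi_scale);
	*end_vi = *first_vi + vis_per_vf;	/* exclusive upper bound */
}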
186
187/* Maximum VI_SCALE parameter supported by Siena */
188#define EFX_VI_SCALE_MAX 6
189/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
190 * so this is the smallest allowed value. */
191#define EFX_VI_BASE 128U
192/* Maximum number of VFs allowed */
193#define EFX_VF_COUNT_MAX 127
194/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
195#define EFX_MAX_VF_EVQ_SIZE 8192UL
196/* The number of buffer table entries reserved for each VI on a VF */
197#define EFX_VF_BUFTBL_PER_VI \
198 ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
199 sizeof(efx_qword_t) / EFX_BUF_SIZE)
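/* Worked example (editor's note; assumes EFX_MAX_DMAQ_SIZE is 4096
 * descriptors, EFX_BUF_SIZE is 4096 and sizeof(efx_qword_t) is 8):
 * each VI on a VF then reserves (8192 + 2 * 4096) * 8 / 4096 = 32 buffer
 * table entries: 16 backing the event queue plus 8 each for the TX and
 * RX descriptor queues.
 */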
200
201#ifdef CONFIG_SFC_SRIOV
202
203static inline bool efx_sriov_wanted(struct efx_nic *efx)
204{
205 return efx->vf_count != 0;
206}
207static inline bool efx_sriov_enabled(struct efx_nic *efx)
208{
209 return efx->vf_init_count != 0;
210}
211static inline unsigned int efx_vf_size(struct efx_nic *efx)
212{
213 return 1 << efx->vi_scale;
214}
215
216extern int efx_init_sriov(void);
217extern void efx_sriov_probe(struct efx_nic *efx);
218extern int efx_sriov_init(struct efx_nic *efx);
219extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
220extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
221extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
222extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
223extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
224extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
225extern void efx_sriov_reset(struct efx_nic *efx);
226extern void efx_sriov_fini(struct efx_nic *efx);
227extern void efx_fini_sriov(void);
228
229#else
230
231static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
232static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
233static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
234
235static inline int efx_init_sriov(void) { return 0; }
236static inline void efx_sriov_probe(struct efx_nic *efx) {}
237static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
238static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
239static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
240 efx_qword_t *event) {}
241static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
242 efx_qword_t *event) {}
243static inline void efx_sriov_event(struct efx_channel *channel,
244 efx_qword_t *event) {}
245static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
246static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
247static inline void efx_sriov_reset(struct efx_nic *efx) {}
248static inline void efx_sriov_fini(struct efx_nic *efx) {}
249static inline void efx_fini_sriov(void) {}
250
251#endif
252
253extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
254extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
255 u16 vlan, u8 qos);
256extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
257 struct ifla_vf_info *ivf);
258extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
259 bool spoofchk);
260
167extern const struct efx_nic_type falcon_a1_nic_type; 261extern const struct efx_nic_type falcon_a1_nic_type;
168extern const struct efx_nic_type falcon_b0_nic_type; 262extern const struct efx_nic_type falcon_b0_nic_type;
169extern const struct efx_nic_type siena_a0_nic_type; 263extern const struct efx_nic_type siena_a0_nic_type;
@@ -190,6 +284,7 @@ extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
190extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); 284extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
191extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); 285extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
192extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); 286extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
287extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
193 288
194/* Event data path */ 289/* Event data path */
195extern int efx_nic_probe_eventq(struct efx_channel *channel); 290extern int efx_nic_probe_eventq(struct efx_channel *channel);
@@ -211,7 +306,6 @@ extern void falcon_update_stats_xmac(struct efx_nic *efx);
211extern int efx_nic_init_interrupt(struct efx_nic *efx); 306extern int efx_nic_init_interrupt(struct efx_nic *efx);
212extern void efx_nic_enable_interrupts(struct efx_nic *efx); 307extern void efx_nic_enable_interrupts(struct efx_nic *efx);
213extern void efx_nic_generate_test_event(struct efx_channel *channel); 308extern void efx_nic_generate_test_event(struct efx_channel *channel);
214extern void efx_nic_generate_fill_event(struct efx_channel *channel);
215extern void efx_nic_generate_interrupt(struct efx_nic *efx); 309extern void efx_nic_generate_interrupt(struct efx_nic *efx);
216extern void efx_nic_disable_interrupts(struct efx_nic *efx); 310extern void efx_nic_disable_interrupts(struct efx_nic *efx);
217extern void efx_nic_fini_interrupt(struct efx_nic *efx); 311extern void efx_nic_fini_interrupt(struct efx_nic *efx);
@@ -225,6 +319,8 @@ extern void falcon_start_nic_stats(struct efx_nic *efx);
225extern void falcon_stop_nic_stats(struct efx_nic *efx); 319extern void falcon_stop_nic_stats(struct efx_nic *efx);
226extern void falcon_setup_xaui(struct efx_nic *efx); 320extern void falcon_setup_xaui(struct efx_nic *efx);
227extern int falcon_reset_xaui(struct efx_nic *efx); 321extern int falcon_reset_xaui(struct efx_nic *efx);
322extern void
323efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
228extern void efx_nic_init_common(struct efx_nic *efx); 324extern void efx_nic_init_common(struct efx_nic *efx);
229extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); 325extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
230 326
@@ -278,8 +374,8 @@ extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
278#define MAC_DATA_LBN 0 374#define MAC_DATA_LBN 0
279#define MAC_DATA_WIDTH 32 375#define MAC_DATA_WIDTH 32
280 376
281extern void efx_nic_generate_event(struct efx_channel *channel, 377extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
282 efx_qword_t *event); 378 efx_qword_t *event);
283 379
284extern void falcon_poll_xmac(struct efx_nic *efx); 380extern void falcon_poll_xmac(struct efx_nic *efx);
285 381
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h
index cc2c86b76a7b..ade4c4dc56ca 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/regs.h
@@ -2446,8 +2446,8 @@
2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
2449#define FRF_CZ_RMFT_DEST_MAC_LBN 16 2449#define FRF_CZ_RMFT_DEST_MAC_LBN 12
2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44 2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0 2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0
2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
2453 2453
@@ -2523,8 +2523,8 @@
2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
2526#define FRF_CZ_TMFT_SRC_MAC_LBN 16 2526#define FRF_CZ_TMFT_SRC_MAC_LBN 12
2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44 2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0 2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0
2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
2530 2530
@@ -2895,17 +2895,17 @@
2895 2895
2896/* RX_MAC_FILTER_TBL0 */ 2896/* RX_MAC_FILTER_TBL0 */
2897/* RMFT_DEST_MAC is wider than 32 bits */ 2897/* RMFT_DEST_MAC is wider than 32 bits */
2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12 2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44 2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16 2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
2902 2902
2903/* TX_MAC_FILTER_TBL0 */ 2903/* TX_MAC_FILTER_TBL0 */
2904/* TMFT_SRC_MAC is wider than 32 bits */ 2904/* TMFT_SRC_MAC is wider than 32 bits */
2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12 2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
2909 2909
2910/* TX_PACE_TBL */ 2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going 2911/* Values >20 are documented as reserved, but will result in a queue going
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 1dfda5e27919..506d24669956 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -405,10 +405,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
405void efx_rx_slow_fill(unsigned long context) 405void efx_rx_slow_fill(unsigned long context)
406{ 406{
407 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; 407 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
408 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
409 408
410 /* Post an event to cause NAPI to run and refill the queue */ 409 /* Post an event to cause NAPI to run and refill the queue */
411 efx_nic_generate_fill_event(channel); 410 efx_nic_generate_fill_event(rx_queue);
412 ++rx_queue->slow_fill_count; 411 ++rx_queue->slow_fill_count;
413} 412}
414 413
@@ -706,6 +705,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
706 rx_queue->fast_fill_limit = limit; 705 rx_queue->fast_fill_limit = limit;
707 706
708 /* Set up RX descriptor ring */ 707 /* Set up RX descriptor ring */
708 rx_queue->enabled = true;
709 efx_nic_init_rx(rx_queue); 709 efx_nic_init_rx(rx_queue);
710} 710}
711 711
@@ -717,6 +717,9 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
717 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 717 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
718 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 718 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
719 719
720 /* A flush failure might have left rx_queue->enabled */
721 rx_queue->enabled = false;
722
720 del_timer_sync(&rx_queue->slow_fill); 723 del_timer_sync(&rx_queue->slow_fill);
721 efx_nic_fini_rx(rx_queue); 724 efx_nic_fini_rx(rx_queue);
722 725
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d3c4169e2a0b..7bea79017a05 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -225,6 +225,15 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
225 return rc; 225 return rc;
226} 226}
227 227
228static void siena_dimension_resources(struct efx_nic *efx)
229{
230 /* Each port has a small block of internal SRAM dedicated to
231 * the buffer table and descriptor caches. In theory we can
232 * map both blocks to one port, but we don't.
233 */
234 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
235}
236
228static int siena_probe_nic(struct efx_nic *efx) 237static int siena_probe_nic(struct efx_nic *efx)
229{ 238{
230 struct siena_nic_data *nic_data; 239 struct siena_nic_data *nic_data;
@@ -304,6 +313,8 @@ static int siena_probe_nic(struct efx_nic *efx)
304 if (rc) 313 if (rc)
305 goto fail5; 314 goto fail5;
306 315
316 efx_sriov_probe(efx);
317
307 return 0; 318 return 0;
308 319
309fail5: 320fail5:
@@ -619,6 +630,7 @@ const struct efx_nic_type siena_a0_nic_type = {
619 .probe = siena_probe_nic, 630 .probe = siena_probe_nic,
620 .remove = siena_remove_nic, 631 .remove = siena_remove_nic,
621 .init = siena_init_nic, 632 .init = siena_init_nic,
633 .dimension_resources = siena_dimension_resources,
622 .fini = efx_port_dummy_op_void, 634 .fini = efx_port_dummy_op_void,
623 .monitor = NULL, 635 .monitor = NULL,
624 .map_reset_reason = siena_map_reset_reason, 636 .map_reset_reason = siena_map_reset_reason,
@@ -657,8 +669,6 @@ const struct efx_nic_type siena_a0_nic_type = {
657 * interrupt handler only supports 32 669 * interrupt handler only supports 32
658 * channels */ 670 * channels */
659 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, 671 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
660 .tx_dc_base = 0x88000,
661 .rx_dc_base = 0x68000,
662 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 672 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
663 NETIF_F_RXHASH | NETIF_F_NTUPLE), 673 NETIF_F_RXHASH | NETIF_F_NTUPLE),
664}; 674};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
new file mode 100644
index 000000000000..5c6839ec3a83
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -0,0 +1,1642 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2010-2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9#include <linux/pci.h>
10#include <linux/module.h>
11#include "net_driver.h"
12#include "efx.h"
13#include "nic.h"
14#include "io.h"
15#include "mcdi.h"
16#include "filter.h"
17#include "mcdi_pcol.h"
18#include "regs.h"
19#include "vfdi.h"
20
21/* Number of longs required to track all the VIs in a VF */
22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
23
24/**
25 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
26 * @VF_TX_FILTER_OFF: Disabled
27 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
28 * 2 TX queues allowed per VF.
29 * @VF_TX_FILTER_ON: Enabled
30 */
31enum efx_vf_tx_filter_mode {
32 VF_TX_FILTER_OFF,
33 VF_TX_FILTER_AUTO,
34 VF_TX_FILTER_ON,
35};
36
37/**
38 * struct efx_vf - Back-end resource and protocol state for a PCI VF
39 * @efx: The Efx NIC owning this VF
40 * @pci_rid: The PCI requester ID for this VF
41 * @pci_name: The PCI name (formatted address) of this VF
42 * @index: Index of VF within its port and PF.
43 * @req: VFDI incoming request work item. Incoming USR_EV events are received
44 * by the NAPI handler, but must be handled by executing MCDI requests
45 * inside a work item.
46 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
47 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
48 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
49 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
50 * @status_lock
51 * @busy: VFDI request queued to be processed or being processed. Receiving
52 * a VFDI request when @busy is set is an error condition.
 53 * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
54 * @buftbl_base: Buffer table entries for this VF start at this index.
55 * @rx_filtering: Receive filtering has been requested by the VF driver.
56 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
57 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
58 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
59 * @tx_filter_mode: Transmit MAC filtering mode.
60 * @tx_filter_id: Transmit MAC filter ID.
61 * @addr: The MAC address and outer vlan tag of the VF.
62 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
63 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
64 * @peer_page_addrs and @peer_page_count from simultaneous
65 * updates by the VM and consumption by
66 * efx_sriov_update_vf_addr()
67 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 68 * @peer_page_count: Number of entries in @peer_page_addrs.
69 * @evq0_addrs: Array of guest pages backing evq0.
70 * @evq0_count: Number of entries in @evq0_addrs.
71 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
72 * to wait for flush completions.
73 * @txq_lock: Mutex for TX queue allocation.
74 * @txq_mask: Mask of initialized transmit queues.
75 * @txq_count: Number of initialized transmit queues.
76 * @rxq_mask: Mask of initialized receive queues.
77 * @rxq_count: Number of initialized receive queues.
 78 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
79 * due to flush failure.
80 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
81 * @reset_work: Work item to schedule a VF reset.
82 */
83struct efx_vf {
84 struct efx_nic *efx;
85 unsigned int pci_rid;
86 char pci_name[13]; /* dddd:bb:dd.f */
87 unsigned int index;
88 struct work_struct req;
89 u64 req_addr;
90 int req_type;
91 unsigned req_seqno;
92 unsigned msg_seqno;
93 bool busy;
94 struct efx_buffer buf;
95 unsigned buftbl_base;
96 bool rx_filtering;
97 enum efx_filter_flags rx_filter_flags;
98 unsigned rx_filter_qid;
99 int rx_filter_id;
100 enum efx_vf_tx_filter_mode tx_filter_mode;
101 int tx_filter_id;
102 struct vfdi_endpoint addr;
103 u64 status_addr;
104 struct mutex status_lock;
105 u64 *peer_page_addrs;
106 unsigned peer_page_count;
107 u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
108 EFX_BUF_SIZE];
109 unsigned evq0_count;
110 wait_queue_head_t flush_waitq;
111 struct mutex txq_lock;
112 unsigned long txq_mask[VI_MASK_LENGTH];
113 unsigned txq_count;
114 unsigned long rxq_mask[VI_MASK_LENGTH];
115 unsigned rxq_count;
116 unsigned long rxq_retry_mask[VI_MASK_LENGTH];
117 atomic_t rxq_retry_count;
118 struct work_struct reset_work;
119};
120
121struct efx_memcpy_req {
122 unsigned int from_rid;
123 void *from_buf;
124 u64 from_addr;
125 unsigned int to_rid;
126 u64 to_addr;
127 unsigned length;
128};
129
130/**
131 * struct efx_local_addr - A MAC address on the vswitch without a VF.
132 *
133 * Siena does not have a switch, so VFs can't transmit data to each
134 * other. Instead the VFs must be made aware of the local addresses
135 * on the vswitch, so that they can arrange for an alternative
136 * software datapath to be used.
137 *
138 * @link: List head for insertion into efx->local_addr_list.
139 * @addr: Ethernet address
140 */
141struct efx_local_addr {
142 struct list_head link;
143 u8 addr[ETH_ALEN];
144};
145
146/**
147 * struct efx_endpoint_page - Page of vfdi_endpoint structures
148 *
149 * @link: List head for insertion into efx->local_page_list.
150 * @ptr: Pointer to page.
151 * @addr: DMA address of page.
152 */
153struct efx_endpoint_page {
154 struct list_head link;
155 void *ptr;
156 dma_addr_t addr;
157};
158
159/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
160#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \
161 ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
162#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \
163 (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
164 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
165#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \
166 (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
167 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
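/* Worked example (editor's note; same assumptions as the nic.h note above:
 * EFX_MAX_DMAQ_SIZE == 4096, EFX_BUF_SIZE == 4096, 8-byte efx_qword_t).
 * Each VI's 32-entry block then splits as TXQ 0-7, RXQ 8-15, EVQ 16-31:
 *   EFX_BUFTBL_TXQ_BASE(vf, qid) == vf->buftbl_base + 32 * qid
 *   EFX_BUFTBL_RXQ_BASE(vf, qid) == EFX_BUFTBL_TXQ_BASE(vf, qid) + 8
 *   EFX_BUFTBL_EVQ_BASE(vf, qid) == EFX_BUFTBL_TXQ_BASE(vf, qid) + 16
 */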
168
169#define EFX_FIELD_MASK(_field) \
170 ((1 << _field ## _WIDTH) - 1)
171
172/* VFs can only use this many transmit channels */
173static unsigned int vf_max_tx_channels = 2;
174module_param(vf_max_tx_channels, uint, 0444);
175MODULE_PARM_DESC(vf_max_tx_channels,
176 "Limit the number of TX channels VFs can use");
177
178static int max_vfs = -1;
179module_param(max_vfs, int, 0444);
180MODULE_PARM_DESC(max_vfs,
181 "Reduce the number of VFs initialized by the driver");
182
183/* Workqueue used by VFDI communication. We can't use the global
184 * workqueue because it may be running the VF driver's probe()
185 * routine, which will be blocked there waiting for a VFDI response.
186 */
187static struct workqueue_struct *vfdi_workqueue;
188
189static unsigned abs_index(struct efx_vf *vf, unsigned index)
190{
191 return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
192}
193
194static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
195 unsigned *vi_scale_out, unsigned *vf_total_out)
196{
197 u8 inbuf[MC_CMD_SRIOV_IN_LEN];
198 u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
199 unsigned vi_scale, vf_total;
200 size_t outlen;
201 int rc;
202
203 MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
204 MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
205 MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
206
207 rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
208 outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
209 if (rc)
210 return rc;
211 if (outlen < MC_CMD_SRIOV_OUT_LEN)
212 return -EIO;
213
214 vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
215 vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
216 if (vi_scale > EFX_VI_SCALE_MAX)
217 return -EOPNOTSUPP;
218
219 if (vi_scale_out)
220 *vi_scale_out = vi_scale;
221 if (vf_total_out)
222 *vf_total_out = vf_total;
223
224 return 0;
225}
226
227static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
228{
229 efx_oword_t reg;
230
231 EFX_POPULATE_OWORD_2(reg,
232 FRF_CZ_USREV_DIS, enabled ? 0 : 1,
233 FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
234 efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
235}
236
237static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
238 unsigned int count)
239{
240 u8 *inbuf, *record;
241 unsigned int used;
242 u32 from_rid, from_hi, from_lo;
243 int rc;
244
245 mb(); /* Finish writing source/reading dest before DMA starts */
246
247 used = MC_CMD_MEMCPY_IN_LEN(count);
248 if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
249 return -ENOBUFS;
250
251 /* Allocate room for the largest request */
252 inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
253 if (inbuf == NULL)
254 return -ENOMEM;
255
256 record = inbuf;
257 MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
258 while (count-- > 0) {
259 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
260 req->to_rid);
261 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
262 (u32)req->to_addr);
263 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
264 (u32)(req->to_addr >> 32));
265 if (req->from_buf == NULL) {
266 from_rid = req->from_rid;
267 from_lo = (u32)req->from_addr;
268 from_hi = (u32)(req->from_addr >> 32);
269 } else {
270 if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
271 rc = -ENOBUFS;
272 goto out;
273 }
274
275 from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
276 from_lo = used;
277 from_hi = 0;
278 memcpy(inbuf + used, req->from_buf, req->length);
279 used += req->length;
280 }
281
282 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
283 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
284 from_lo);
285 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
286 from_hi);
287 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
288 req->length);
289
290 ++req;
291 record += MC_CMD_MEMCPY_IN_RECORD_LEN;
292 }
293
294 rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
295out:
296 kfree(inbuf);
297
298 mb(); /* Don't write source/read dest before DMA is complete */
299
300 return rc;
301}
302
303/* The TX filter is entirely controlled by this driver, and is modified
304 * underneath the feet of the VF
305 */
306static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
307{
308 struct efx_nic *efx = vf->efx;
309 struct efx_filter_spec filter;
310 u16 vlan;
311 int rc;
312
313 if (vf->tx_filter_id != -1) {
314 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
315 vf->tx_filter_id);
316 netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
317 vf->pci_name, vf->tx_filter_id);
318 vf->tx_filter_id = -1;
319 }
320
321 if (is_zero_ether_addr(vf->addr.mac_addr))
322 return;
323
324 /* Turn on TX filtering automatically if not explicitly
325 * enabled or disabled.
326 */
327 if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
328 vf->tx_filter_mode = VF_TX_FILTER_ON;
329
330 vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
331 efx_filter_init_tx(&filter, abs_index(vf, 0));
332 rc = efx_filter_set_eth_local(&filter,
333 vlan ? vlan : EFX_FILTER_VID_UNSPEC,
334 vf->addr.mac_addr);
335 BUG_ON(rc);
336
337 rc = efx_filter_insert_filter(efx, &filter, true);
338 if (rc < 0) {
339 netif_warn(efx, hw, efx->net_dev,
340 "Unable to migrate tx filter for vf %s\n",
341 vf->pci_name);
342 } else {
343 netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
344 vf->pci_name, rc);
345 vf->tx_filter_id = rc;
346 }
347}
348
349/* The RX filter is managed here on behalf of the VF driver */
350static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
351{
352 struct efx_nic *efx = vf->efx;
353 struct efx_filter_spec filter;
354 u16 vlan;
355 int rc;
356
357 if (vf->rx_filter_id != -1) {
358 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
359 vf->rx_filter_id);
360 netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
361 vf->pci_name, vf->rx_filter_id);
362 vf->rx_filter_id = -1;
363 }
364
365 if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
366 return;
367
368 vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
369 efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
370 vf->rx_filter_flags,
371 abs_index(vf, vf->rx_filter_qid));
372 rc = efx_filter_set_eth_local(&filter,
373 vlan ? vlan : EFX_FILTER_VID_UNSPEC,
374 vf->addr.mac_addr);
375 BUG_ON(rc);
376
377 rc = efx_filter_insert_filter(efx, &filter, true);
378 if (rc < 0) {
379 netif_warn(efx, hw, efx->net_dev,
380 "Unable to insert rx filter for vf %s\n",
381 vf->pci_name);
382 } else {
383 netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
384 vf->pci_name, rc);
385 vf->rx_filter_id = rc;
386 }
387}
388
389static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
390{
391 efx_sriov_reset_tx_filter(vf);
392 efx_sriov_reset_rx_filter(vf);
393 queue_work(vfdi_workqueue, &vf->efx->peer_work);
394}
395
396/* Push the peer list to this VF. The caller must hold status_lock to interlock
397 * with VFDI requests, and they must be serialised against manipulation of
398 * local_page_list, either by acquiring local_lock or by running from
399 * efx_sriov_peer_work()
400 */
401static void __efx_sriov_push_vf_status(struct efx_vf *vf)
402{
403 struct efx_nic *efx = vf->efx;
404 struct vfdi_status *status = efx->vfdi_status.addr;
405 struct efx_memcpy_req copy[4];
406 struct efx_endpoint_page *epp;
407 unsigned int pos, count;
408 unsigned data_offset;
409 efx_qword_t event;
410
411 WARN_ON(!mutex_is_locked(&vf->status_lock));
412 WARN_ON(!vf->status_addr);
413
414 status->local = vf->addr;
415 status->generation_end = ++status->generation_start;
416
417 memset(copy, '\0', sizeof(copy));
418 /* Write generation_start */
419 copy[0].from_buf = &status->generation_start;
420 copy[0].to_rid = vf->pci_rid;
421 copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
422 generation_start);
423 copy[0].length = sizeof(status->generation_start);
424 /* DMA the rest of the structure (excluding the generations). This
425 * assumes that the non-generation portion of vfdi_status is in
426 * one chunk starting at the version member.
427 */
428 data_offset = offsetof(struct vfdi_status, version);
429 copy[1].from_rid = efx->pci_dev->devfn;
430 copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
431 copy[1].to_rid = vf->pci_rid;
432 copy[1].to_addr = vf->status_addr + data_offset;
433 copy[1].length = status->length - data_offset;
434
435 /* Copy the peer pages */
436 pos = 2;
437 count = 0;
438 list_for_each_entry(epp, &efx->local_page_list, link) {
439 if (count == vf->peer_page_count) {
 440 /* The VF driver will know it needs to provide more
441 * pages because peer_addr_count is too large.
442 */
443 break;
444 }
445 copy[pos].from_buf = NULL;
446 copy[pos].from_rid = efx->pci_dev->devfn;
447 copy[pos].from_addr = epp->addr;
448 copy[pos].to_rid = vf->pci_rid;
449 copy[pos].to_addr = vf->peer_page_addrs[count];
450 copy[pos].length = EFX_PAGE_SIZE;
451
452 if (++pos == ARRAY_SIZE(copy)) {
453 efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
454 pos = 0;
455 }
456 ++count;
457 }
458
459 /* Write generation_end */
460 copy[pos].from_buf = &status->generation_end;
461 copy[pos].to_rid = vf->pci_rid;
462 copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
463 generation_end);
464 copy[pos].length = sizeof(status->generation_end);
465 efx_sriov_memcpy(efx, copy, pos + 1);
466
467 /* Notify the guest */
468 EFX_POPULATE_QWORD_3(event,
469 FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
470 VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
471 VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
472 ++vf->msg_seqno;
473 efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
474 &event);
475}
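/* Editor's sketch of the consumer side (hypothetical, not part of this
 * patch; assumes the MEMCPY records built above are executed in order, so
 * generation_start lands before the body and generation_end is written
 * last).  A VF driver can then take a consistent snapshot of its status
 * page with a seqlock-style retry loop:
 */
static void example_read_vfdi_status(volatile struct vfdi_status *status,
				     struct vfdi_status *snapshot)
{
	u32 gen;

	do {
		gen = status->generation_start;
		rmb();			/* read body after generation_start */
		memcpy(snapshot, (void *)status, sizeof(*snapshot));
		rmb();			/* read generation_end last */
	} while (gen != status->generation_end ||
		 gen != snapshot->generation_start);
}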
476
477static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
478 u64 *addr, unsigned count)
479{
480 efx_qword_t buf;
481 unsigned pos;
482
483 for (pos = 0; pos < count; ++pos) {
484 EFX_POPULATE_QWORD_3(buf,
485 FRF_AZ_BUF_ADR_REGION, 0,
486 FRF_AZ_BUF_ADR_FBUF,
487 addr ? addr[pos] >> 12 : 0,
488 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
489 efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
490 &buf, offset + pos);
491 }
492}
493
494static bool bad_vf_index(struct efx_nic *efx, unsigned index)
495{
496 return index >= efx_vf_size(efx);
497}
498
499static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
500{
501 unsigned max_buf_count = max_entry_count *
502 sizeof(efx_qword_t) / EFX_BUF_SIZE;
503
504 return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
505}
506
507/* Check that VI specified by per-port index belongs to a VF.
508 * Optionally set VF index and VI index within the VF.
509 */
510static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
511 struct efx_vf **vf_out, unsigned *rel_index_out)
512{
513 unsigned vf_i;
514
515 if (abs_index < EFX_VI_BASE)
516 return true;
 517 vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
518 if (vf_i >= efx->vf_init_count)
519 return true;
520
521 if (vf_out)
522 *vf_out = efx->vf + vf_i;
523 if (rel_index_out)
524 *rel_index_out = abs_index % efx_vf_size(efx);
525 return false;
526}
527
528static int efx_vfdi_init_evq(struct efx_vf *vf)
529{
530 struct efx_nic *efx = vf->efx;
531 struct vfdi_req *req = vf->buf.addr;
532 unsigned vf_evq = req->u.init_evq.index;
533 unsigned buf_count = req->u.init_evq.buf_count;
534 unsigned abs_evq = abs_index(vf, vf_evq);
535 unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
536 efx_oword_t reg;
537
538 if (bad_vf_index(efx, vf_evq) ||
539 bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
540 if (net_ratelimit())
541 netif_err(efx, hw, efx->net_dev,
542 "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
543 vf->pci_name, vf_evq, buf_count);
544 return VFDI_RC_EINVAL;
545 }
546
547 efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
548
549 EFX_POPULATE_OWORD_3(reg,
550 FRF_CZ_TIMER_Q_EN, 1,
551 FRF_CZ_HOST_NOTIFY_MODE, 0,
552 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
553 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
554 EFX_POPULATE_OWORD_3(reg,
555 FRF_AZ_EVQ_EN, 1,
556 FRF_AZ_EVQ_SIZE, __ffs(buf_count),
557 FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
558 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
559
560 if (vf_evq == 0) {
561 memcpy(vf->evq0_addrs, req->u.init_evq.addr,
562 buf_count * sizeof(u64));
563 vf->evq0_count = buf_count;
564 }
565
566 return VFDI_RC_SUCCESS;
567}
568
569static int efx_vfdi_init_rxq(struct efx_vf *vf)
570{
571 struct efx_nic *efx = vf->efx;
572 struct vfdi_req *req = vf->buf.addr;
573 unsigned vf_rxq = req->u.init_rxq.index;
574 unsigned vf_evq = req->u.init_rxq.evq;
575 unsigned buf_count = req->u.init_rxq.buf_count;
576 unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
577 unsigned label;
578 efx_oword_t reg;
579
580 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
581 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
582 if (net_ratelimit())
583 netif_err(efx, hw, efx->net_dev,
584 "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
585 "buf_count %d\n", vf->pci_name, vf_rxq,
586 vf_evq, buf_count);
587 return VFDI_RC_EINVAL;
588 }
589 if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
590 ++vf->rxq_count;
591 efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
592
593 label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
594 EFX_POPULATE_OWORD_6(reg,
595 FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
596 FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
597 FRF_AZ_RX_DESCQ_LABEL, label,
598 FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
599 FRF_AZ_RX_DESCQ_JUMBO,
600 !!(req->u.init_rxq.flags &
601 VFDI_RXQ_FLAG_SCATTER_EN),
602 FRF_AZ_RX_DESCQ_EN, 1);
603 efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
604 abs_index(vf, vf_rxq));
605
606 return VFDI_RC_SUCCESS;
607}
608
609static int efx_vfdi_init_txq(struct efx_vf *vf)
610{
611 struct efx_nic *efx = vf->efx;
612 struct vfdi_req *req = vf->buf.addr;
613 unsigned vf_txq = req->u.init_txq.index;
614 unsigned vf_evq = req->u.init_txq.evq;
615 unsigned buf_count = req->u.init_txq.buf_count;
616 unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
617 unsigned label, eth_filt_en;
618 efx_oword_t reg;
619
620 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
621 vf_txq >= vf_max_tx_channels ||
622 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
623 if (net_ratelimit())
624 netif_err(efx, hw, efx->net_dev,
625 "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
626 "buf_count %d\n", vf->pci_name, vf_txq,
627 vf_evq, buf_count);
628 return VFDI_RC_EINVAL;
629 }
630
631 mutex_lock(&vf->txq_lock);
632 if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
633 ++vf->txq_count;
634 mutex_unlock(&vf->txq_lock);
635 efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
636
637 eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
638
639 label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
640 EFX_POPULATE_OWORD_8(reg,
641 FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
642 FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
643 FRF_AZ_TX_DESCQ_EN, 1,
644 FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
645 FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
646 FRF_AZ_TX_DESCQ_LABEL, label,
647 FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
648 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
649 efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
650 abs_index(vf, vf_txq));
651
652 return VFDI_RC_SUCCESS;
653}
654
655/* Returns true when efx_vfdi_fini_all_queues should wake */
656static bool efx_vfdi_flush_wake(struct efx_vf *vf)
657{
658 /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
659 smp_mb();
660
661 return (!vf->txq_count && !vf->rxq_count) ||
662 atomic_read(&vf->rxq_retry_count);
663}
664
665static void efx_vfdi_flush_clear(struct efx_vf *vf)
666{
667 memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
668 vf->txq_count = 0;
669 memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
670 vf->rxq_count = 0;
671 memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
672 atomic_set(&vf->rxq_retry_count, 0);
673}
674
675static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
676{
677 struct efx_nic *efx = vf->efx;
678 efx_oword_t reg;
679 unsigned count = efx_vf_size(efx);
680 unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
681 unsigned timeout = HZ;
682 unsigned index, rxqs_count;
683 __le32 *rxqs;
684 int rc;
685
686 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
687 if (rxqs == NULL)
688 return VFDI_RC_ENOMEM;
689
690 rtnl_lock();
691 if (efx->fc_disable++ == 0)
692 efx_mcdi_set_mac(efx);
693 rtnl_unlock();
694
695 /* Flush all the initialized queues */
696 rxqs_count = 0;
697 for (index = 0; index < count; ++index) {
698 if (test_bit(index, vf->txq_mask)) {
699 EFX_POPULATE_OWORD_2(reg,
700 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
701 FRF_AZ_TX_FLUSH_DESCQ,
702 vf_offset + index);
703 efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
704 }
705 if (test_bit(index, vf->rxq_mask))
706 rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
707 }
708
709 atomic_set(&vf->rxq_retry_count, 0);
710 while (timeout && (vf->rxq_count || vf->txq_count)) {
711 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
712 rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
713 WARN_ON(rc < 0);
714
715 timeout = wait_event_timeout(vf->flush_waitq,
716 efx_vfdi_flush_wake(vf),
717 timeout);
718 rxqs_count = 0;
719 for (index = 0; index < count; ++index) {
720 if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
721 atomic_dec(&vf->rxq_retry_count);
722 rxqs[rxqs_count++] =
723 cpu_to_le32(vf_offset + index);
724 }
725 }
726 }
727
728 rtnl_lock();
729 if (--efx->fc_disable == 0)
730 efx_mcdi_set_mac(efx);
731 rtnl_unlock();
732
733 /* Irrespective of success/failure, fini the queues */
734 EFX_ZERO_OWORD(reg);
735 for (index = 0; index < count; ++index) {
736 efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
737 vf_offset + index);
738 efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
739 vf_offset + index);
740 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
741 vf_offset + index);
742 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
743 vf_offset + index);
744 }
745 efx_sriov_bufs(efx, vf->buftbl_base, NULL,
746 EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
747 kfree(rxqs);
748 efx_vfdi_flush_clear(vf);
749
750 vf->evq0_count = 0;
751
752 return timeout ? 0 : VFDI_RC_ETIMEDOUT;
753}
754
755static int efx_vfdi_insert_filter(struct efx_vf *vf)
756{
757 struct efx_nic *efx = vf->efx;
758 struct vfdi_req *req = vf->buf.addr;
759 unsigned vf_rxq = req->u.mac_filter.rxq;
760 unsigned flags;
761
762 if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
763 if (net_ratelimit())
764 netif_err(efx, hw, efx->net_dev,
765 "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
766 "flags 0x%x\n", vf->pci_name, vf_rxq,
767 req->u.mac_filter.flags);
768 return VFDI_RC_EINVAL;
769 }
770
771 flags = 0;
772 if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
773 flags |= EFX_FILTER_FLAG_RX_RSS;
774 if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
775 flags |= EFX_FILTER_FLAG_RX_SCATTER;
776 vf->rx_filter_flags = flags;
777 vf->rx_filter_qid = vf_rxq;
778 vf->rx_filtering = true;
779
780 efx_sriov_reset_rx_filter(vf);
781 queue_work(vfdi_workqueue, &efx->peer_work);
782
783 return VFDI_RC_SUCCESS;
784}
785
786static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
787{
788 vf->rx_filtering = false;
789 efx_sriov_reset_rx_filter(vf);
790 queue_work(vfdi_workqueue, &vf->efx->peer_work);
791
792 return VFDI_RC_SUCCESS;
793}
794
795static int efx_vfdi_set_status_page(struct efx_vf *vf)
796{
797 struct efx_nic *efx = vf->efx;
798 struct vfdi_req *req = vf->buf.addr;
799 unsigned int page_count;
800
801 page_count = req->u.set_status_page.peer_page_count;
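	/* All page_count peer page addresses must fit within the single
	 * EFX_PAGE_SIZE request page that was copied into vf->buf.
	 */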
802 if (!req->u.set_status_page.dma_addr || EFX_PAGE_SIZE <
803 offsetof(struct vfdi_req,
804 u.set_status_page.peer_page_addr[page_count])) {
805 if (net_ratelimit())
806 netif_err(efx, hw, efx->net_dev,
807 "ERROR: Invalid SET_STATUS_PAGE from %s\n",
808 vf->pci_name);
809 return VFDI_RC_EINVAL;
810 }
811
812 mutex_lock(&efx->local_lock);
813 mutex_lock(&vf->status_lock);
814 vf->status_addr = req->u.set_status_page.dma_addr;
815
816 kfree(vf->peer_page_addrs);
817 vf->peer_page_addrs = NULL;
818 vf->peer_page_count = 0;
819
820 if (page_count) {
821 vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
822 GFP_KERNEL);
823 if (vf->peer_page_addrs) {
824 memcpy(vf->peer_page_addrs,
825 req->u.set_status_page.peer_page_addr,
826 page_count * sizeof(u64));
827 vf->peer_page_count = page_count;
828 }
829 }
830
831 __efx_sriov_push_vf_status(vf);
832 mutex_unlock(&vf->status_lock);
833 mutex_unlock(&efx->local_lock);
834
835 return VFDI_RC_SUCCESS;
836}
837
838static int efx_vfdi_clear_status_page(struct efx_vf *vf)
839{
840 mutex_lock(&vf->status_lock);
841 vf->status_addr = 0;
842 mutex_unlock(&vf->status_lock);
843
844 return VFDI_RC_SUCCESS;
845}
846
847typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
848
849static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
850 [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
851 [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
852 [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
853 [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
854 [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
855 [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
856 [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
857 [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
858};
859
860static void efx_sriov_vfdi(struct work_struct *work)
861{
862 struct efx_vf *vf = container_of(work, struct efx_vf, req);
863 struct efx_nic *efx = vf->efx;
864 struct vfdi_req *req = vf->buf.addr;
865 struct efx_memcpy_req copy[2];
866 int rc;
867
868 /* Copy this page into the local address space */
869 memset(copy, '\0', sizeof(copy));
870 copy[0].from_rid = vf->pci_rid;
871 copy[0].from_addr = vf->req_addr;
872 copy[0].to_rid = efx->pci_dev->devfn;
873 copy[0].to_addr = vf->buf.dma_addr;
874 copy[0].length = EFX_PAGE_SIZE;
875 rc = efx_sriov_memcpy(efx, copy, 1);
876 if (rc) {
877 /* If we can't get the request, we can't reply to the caller */
878 if (net_ratelimit())
879 netif_err(efx, hw, efx->net_dev,
880 "ERROR: Unable to fetch VFDI request from %s rc %d\n",
881 vf->pci_name, -rc);
882 vf->busy = false;
883 return;
884 }
885
886 if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
887 rc = vfdi_ops[req->op](vf);
888 if (rc == 0) {
889 netif_dbg(efx, hw, efx->net_dev,
890 "vfdi request %d from %s ok\n",
891 req->op, vf->pci_name);
892 }
893 } else {
894 netif_dbg(efx, hw, efx->net_dev,
895 "ERROR: Unrecognised request %d from VF %s addr "
896 "%llx\n", req->op, vf->pci_name,
897 (unsigned long long)vf->req_addr);
898 rc = VFDI_RC_EOPNOTSUPP;
899 }
900
901 /* Allow subsequent VF requests */
902 vf->busy = false;
903 smp_wmb();
904
905 /* Respond to the request */
906 req->rc = rc;
907 req->op = VFDI_OP_RESPONSE;
908
909 memset(copy, '\0', sizeof(copy));
910 copy[0].from_buf = &req->rc;
911 copy[0].to_rid = vf->pci_rid;
912 copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
913 copy[0].length = sizeof(req->rc);
914 copy[1].from_buf = &req->op;
915 copy[1].to_rid = vf->pci_rid;
916 copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
917 copy[1].length = sizeof(req->op);
918
919 (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
920}
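The VF side of this exchange is outside the scope of this patch; purely for illustration, a hypothetical VF driver could wait for the response roughly as follows, assuming req points at the VF's own mapping of the request page:

	while (ACCESS_ONCE(req->op) != VFDI_OP_RESPONSE)
		cpu_relax();
	rmb();			/* read rc only after op has been rewritten */
	rc = req->rc;		/* rc is DMAd back before op, see above */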
921
922
923
924/* After a reset the event queues inside the guests no longer exist. Fill the
925 * event ring in guest memory with VFDI reset events, then re-initialise the
926 * event queue to raise an interrupt. The guest driver will then recover.
927 */
928static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
929{
930 struct efx_nic *efx = vf->efx;
931 struct efx_memcpy_req copy_req[4];
932 efx_qword_t event;
933 unsigned int pos, count, k, buftbl, abs_evq;
934 efx_oword_t reg;
935 efx_dword_t ptr;
936 int rc;
937
938 BUG_ON(buffer->len != EFX_PAGE_SIZE);
939
940 if (!vf->evq0_count)
941 return;
942 BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
943
944 mutex_lock(&vf->status_lock);
945 EFX_POPULATE_QWORD_3(event,
946 FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
947 VFDI_EV_SEQ, vf->msg_seqno,
948 VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
949 vf->msg_seqno++;
950 for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
951 memcpy(buffer->addr + pos, &event, sizeof(event));
952
953 for (pos = 0; pos < vf->evq0_count; pos += count) {
954 count = min_t(unsigned, vf->evq0_count - pos,
955 ARRAY_SIZE(copy_req));
956 for (k = 0; k < count; k++) {
957 copy_req[k].from_buf = NULL;
958 copy_req[k].from_rid = efx->pci_dev->devfn;
959 copy_req[k].from_addr = buffer->dma_addr;
960 copy_req[k].to_rid = vf->pci_rid;
961 copy_req[k].to_addr = vf->evq0_addrs[pos + k];
962 copy_req[k].length = EFX_PAGE_SIZE;
963 }
964 rc = efx_sriov_memcpy(efx, copy_req, count);
965 if (rc) {
966 if (net_ratelimit())
967 netif_err(efx, hw, efx->net_dev,
968 "ERROR: Unable to notify %s of reset"
969 ": %d\n", vf->pci_name, -rc);
970 break;
971 }
972 }
973
974 /* Reinitialise, arm and trigger evq0 */
975 abs_evq = abs_index(vf, 0);
976 buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
977 efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
978
979 EFX_POPULATE_OWORD_3(reg,
980 FRF_CZ_TIMER_Q_EN, 1,
981 FRF_CZ_HOST_NOTIFY_MODE, 0,
982 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
983 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
984 EFX_POPULATE_OWORD_3(reg,
985 FRF_AZ_EVQ_EN, 1,
986 FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
987 FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
988 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
989 EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
990 efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
991
992 mutex_unlock(&vf->status_lock);
993}
994
995static void efx_sriov_reset_vf_work(struct work_struct *work)
996{
997 struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
998 struct efx_nic *efx = vf->efx;
999 struct efx_buffer buf;
1000
1001 if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
1002 efx_sriov_reset_vf(vf, &buf);
1003 efx_nic_free_buffer(efx, &buf);
1004 }
1005}
1006
1007static void efx_sriov_handle_no_channel(struct efx_nic *efx)
1008{
1009 netif_err(efx, drv, efx->net_dev,
1010 "ERROR: IOV requires MSI-X and 1 additional interrupt"
1011 "vector. IOV disabled\n");
1012 efx->vf_count = 0;
1013}
1014
1015static int efx_sriov_probe_channel(struct efx_channel *channel)
1016{
1017 channel->efx->vfdi_channel = channel;
1018 return 0;
1019}
1020
1021static void
1022efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
1023{
1024 snprintf(buf, len, "%s-iov", channel->efx->name);
1025}
1026
1027static const struct efx_channel_type efx_sriov_channel_type = {
1028 .handle_no_channel = efx_sriov_handle_no_channel,
1029 .pre_probe = efx_sriov_probe_channel,
1030 .get_name = efx_sriov_get_channel_name,
1031 /* no copy operation; channel must not be reallocated */
1032 .keep_eventq = true,
1033};
1034
1035void efx_sriov_probe(struct efx_nic *efx)
1036{
1037 unsigned count;
1038
1039 if (!max_vfs)
1040 return;
1041
1042 if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
1043 return;
1044 if (count > 0 && count > max_vfs)
1045 count = max_vfs;
1046
1047 /* efx_nic_dimension_resources() will reduce vf_count as appropriate */
1048 efx->vf_count = count;
1049
1050 efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
1051}
1052
1053/* Copy the list of individual addresses into the vfdi_status.peers
1054 * array and auxiliary pages, protected by %local_lock. Drop that lock
1055 * and then broadcast the address list to every VF.
1056 */
1057static void efx_sriov_peer_work(struct work_struct *data)
1058{
1059 struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
1060 struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
1061 struct efx_vf *vf;
1062 struct efx_local_addr *local_addr;
1063 struct vfdi_endpoint *peer;
1064 struct efx_endpoint_page *epp;
1065 struct list_head pages;
1066 unsigned int peer_space;
1067 unsigned int peer_count;
1068 unsigned int pos;
1069
1070 mutex_lock(&efx->local_lock);
1071
1072 /* Move the existing peer pages off %local_page_list */
1073 INIT_LIST_HEAD(&pages);
1074 list_splice_tail_init(&efx->local_page_list, &pages);
1075
1076 /* Populate the VF addresses starting from entry 1 (entry 0 is
1077 * the PF address)
1078 */
1079 peer = vfdi_status->peers + 1;
1080 peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
1081 peer_count = 1;
1082 for (pos = 0; pos < efx->vf_count; ++pos) {
1083 vf = efx->vf + pos;
1084
1085 mutex_lock(&vf->status_lock);
1086 if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
1087 *peer++ = vf->addr;
1088 ++peer_count;
1089 --peer_space;
1090 BUG_ON(peer_space == 0);
1091 }
1092 mutex_unlock(&vf->status_lock);
1093 }
1094
1095 /* Fill the remaining addresses */
1096 list_for_each_entry(local_addr, &efx->local_addr_list, link) {
1097 memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
1098 peer->tci = 0;
1099 ++peer;
1100 ++peer_count;
1101 if (--peer_space == 0) {
1102 if (list_empty(&pages)) {
1103 epp = kmalloc(sizeof(*epp), GFP_KERNEL);
1104 if (!epp)
1105 break;
1106 epp->ptr = dma_alloc_coherent(
1107 &efx->pci_dev->dev, EFX_PAGE_SIZE,
1108 &epp->addr, GFP_KERNEL);
1109 if (!epp->ptr) {
1110 kfree(epp);
1111 break;
1112 }
1113 } else {
1114 epp = list_first_entry(
1115 &pages, struct efx_endpoint_page, link);
1116 list_del(&epp->link);
1117 }
1118
1119 list_add_tail(&epp->link, &efx->local_page_list);
1120 peer = (struct vfdi_endpoint *)epp->ptr;
1121 peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
1122 }
1123 }
1124 vfdi_status->peer_count = peer_count;
1125 mutex_unlock(&efx->local_lock);
1126
1127 /* Free any now unused endpoint pages */
1128 while (!list_empty(&pages)) {
1129 epp = list_first_entry(
1130 &pages, struct efx_endpoint_page, link);
1131 list_del(&epp->link);
1132 dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
1133 epp->ptr, epp->addr);
1134 kfree(epp);
1135 }
1136
1137 /* Finally, push the pages */
1138 for (pos = 0; pos < efx->vf_count; ++pos) {
1139 vf = efx->vf + pos;
1140
1141 mutex_lock(&vf->status_lock);
1142 if (vf->status_addr)
1143 __efx_sriov_push_vf_status(vf);
1144 mutex_unlock(&vf->status_lock);
1145 }
1146}
1147
1148static void efx_sriov_free_local(struct efx_nic *efx)
1149{
1150 struct efx_local_addr *local_addr;
1151 struct efx_endpoint_page *epp;
1152
1153 while (!list_empty(&efx->local_addr_list)) {
1154 local_addr = list_first_entry(&efx->local_addr_list,
1155 struct efx_local_addr, link);
1156 list_del(&local_addr->link);
1157 kfree(local_addr);
1158 }
1159
1160 while (!list_empty(&efx->local_page_list)) {
1161 epp = list_first_entry(&efx->local_page_list,
1162 struct efx_endpoint_page, link);
1163 list_del(&epp->link);
1164 dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
1165 epp->ptr, epp->addr);
1166 kfree(epp);
1167 }
1168}
1169
1170static int efx_sriov_vf_alloc(struct efx_nic *efx)
1171{
1172 unsigned index;
1173 struct efx_vf *vf;
1174
1175 efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
1176 if (!efx->vf)
1177 return -ENOMEM;
1178
1179 for (index = 0; index < efx->vf_count; ++index) {
1180 vf = efx->vf + index;
1181
1182 vf->efx = efx;
1183 vf->index = index;
1184 vf->rx_filter_id = -1;
1185 vf->tx_filter_mode = VF_TX_FILTER_AUTO;
1186 vf->tx_filter_id = -1;
1187 INIT_WORK(&vf->req, efx_sriov_vfdi);
1188 INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
1189 init_waitqueue_head(&vf->flush_waitq);
1190 mutex_init(&vf->status_lock);
1191 mutex_init(&vf->txq_lock);
1192 }
1193
1194 return 0;
1195}
1196
1197static void efx_sriov_vfs_fini(struct efx_nic *efx)
1198{
1199 struct efx_vf *vf;
1200 unsigned int pos;
1201
1202 for (pos = 0; pos < efx->vf_count; ++pos) {
1203 vf = efx->vf + pos;
1204
1205 efx_nic_free_buffer(efx, &vf->buf);
1206 kfree(vf->peer_page_addrs);
1207 vf->peer_page_addrs = NULL;
1208 vf->peer_page_count = 0;
1209
1210 vf->evq0_count = 0;
1211 }
1212}
1213
1214static int efx_sriov_vfs_init(struct efx_nic *efx)
1215{
1216 struct pci_dev *pci_dev = efx->pci_dev;
1217 unsigned index, devfn, sriov, buftbl_base;
1218 u16 offset, stride;
1219 struct efx_vf *vf;
1220 int rc;
1221
1222 sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
1223 if (!sriov)
1224 return -ENOENT;
1225
1226 pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
1227 pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
1228
1229 buftbl_base = efx->vf_buftbl_base;
1230 devfn = pci_dev->devfn + offset;
1231 for (index = 0; index < efx->vf_count; ++index) {
1232 vf = efx->vf + index;
1233
1234 /* Reserve buffer entries */
1235 vf->buftbl_base = buftbl_base;
1236 buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);
1237
1238 vf->pci_rid = devfn;
1239 snprintf(vf->pci_name, sizeof(vf->pci_name),
1240 "%04x:%02x:%02x.%d",
1241 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
1242 PCI_SLOT(devfn), PCI_FUNC(devfn));
1243
1244 rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
1245 if (rc)
1246 goto fail;
1247
1248 devfn += stride;
1249 }
1250
1251 return 0;
1252
1253fail:
1254 efx_sriov_vfs_fini(efx);
1255 return rc;
1256}
1257
1258int efx_sriov_init(struct efx_nic *efx)
1259{
1260 struct net_device *net_dev = efx->net_dev;
1261 struct vfdi_status *vfdi_status;
1262 int rc;
1263
1264 /* Ensure there's room for vf_channel */
1265 BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
1266 /* Ensure that VI_BASE is aligned on VI_SCALE */
1267 BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));
1268
1269 if (efx->vf_count == 0)
1270 return 0;
1271
1272 rc = efx_sriov_cmd(efx, true, NULL, NULL);
1273 if (rc)
1274 goto fail_cmd;
1275
1276 rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
1277 if (rc)
1278 goto fail_status;
1279 vfdi_status = efx->vfdi_status.addr;
1280 memset(vfdi_status, 0, sizeof(*vfdi_status));
1281 vfdi_status->version = 1;
1282 vfdi_status->length = sizeof(*vfdi_status);
1283 vfdi_status->max_tx_channels = vf_max_tx_channels;
1284 vfdi_status->vi_scale = efx->vi_scale;
1285 vfdi_status->rss_rxq_count = efx->rss_spread;
1286 vfdi_status->peer_count = 1 + efx->vf_count;
1287 vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
1288
1289 rc = efx_sriov_vf_alloc(efx);
1290 if (rc)
1291 goto fail_alloc;
1292
1293 mutex_init(&efx->local_lock);
1294 INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
1295 INIT_LIST_HEAD(&efx->local_addr_list);
1296 INIT_LIST_HEAD(&efx->local_page_list);
1297
1298 rc = efx_sriov_vfs_init(efx);
1299 if (rc)
1300 goto fail_vfs;
1301
1302 rtnl_lock();
1303 memcpy(vfdi_status->peers[0].mac_addr,
1304 net_dev->dev_addr, ETH_ALEN);
1305 efx->vf_init_count = efx->vf_count;
1306 rtnl_unlock();
1307
1308 efx_sriov_usrev(efx, true);
1309
1310 /* At this point we must be ready to accept VFDI requests */
1311
1312 rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
1313 if (rc)
1314 goto fail_pci;
1315
1316 netif_info(efx, probe, net_dev,
1317 "enabled SR-IOV for %d VFs, %d VI per VF\n",
1318 efx->vf_count, efx_vf_size(efx));
1319 return 0;
1320
1321fail_pci:
1322 efx_sriov_usrev(efx, false);
1323 rtnl_lock();
1324 efx->vf_init_count = 0;
1325 rtnl_unlock();
1326 efx_sriov_vfs_fini(efx);
1327fail_vfs:
1328 cancel_work_sync(&efx->peer_work);
1329 efx_sriov_free_local(efx);
1330 kfree(efx->vf);
1331fail_alloc:
1332 efx_nic_free_buffer(efx, &efx->vfdi_status);
1333fail_status:
1334 efx_sriov_cmd(efx, false, NULL, NULL);
1335fail_cmd:
1336 return rc;
1337}
1338
1339void efx_sriov_fini(struct efx_nic *efx)
1340{
1341 struct efx_vf *vf;
1342 unsigned int pos;
1343
1344 if (efx->vf_init_count == 0)
1345 return;
1346
1347 /* Disable every interface by which reconfiguration could be requested */
1348 BUG_ON(efx->vfdi_channel->enabled);
1349 efx_sriov_usrev(efx, false);
1350 rtnl_lock();
1351 efx->vf_init_count = 0;
1352 rtnl_unlock();
1353
1354 /* Flush all reconfiguration work */
1355 for (pos = 0; pos < efx->vf_count; ++pos) {
1356 vf = efx->vf + pos;
1357 cancel_work_sync(&vf->req);
1358 cancel_work_sync(&vf->reset_work);
1359 }
1360 cancel_work_sync(&efx->peer_work);
1361
1362 pci_disable_sriov(efx->pci_dev);
1363
1364 /* Tear down back-end state */
1365 efx_sriov_vfs_fini(efx);
1366 efx_sriov_free_local(efx);
1367 kfree(efx->vf);
1368 efx_nic_free_buffer(efx, &efx->vfdi_status);
1369 efx_sriov_cmd(efx, false, NULL, NULL);
1370}
1371
1372void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
1373{
1374 struct efx_nic *efx = channel->efx;
1375 struct efx_vf *vf;
1376 unsigned qid, seq, type, data;
1377
1378 qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
1379
1380 /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
1381 BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
1382 seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
1383 type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
1384 data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);
1385
1386 netif_vdbg(efx, hw, efx->net_dev,
1387 "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
1388 qid, seq, type, data);
1389
1390 if (map_vi_index(efx, qid, &vf, NULL))
1391 return;
1392 if (vf->busy)
1393 goto error;
1394
1395 if (type == VFDI_EV_TYPE_REQ_WORD0) {
1396 /* Resynchronise */
1397 vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1398 vf->req_seqno = seq + 1;
1399 vf->req_addr = 0;
1400 } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
1401 goto error;
1402
1403 switch (vf->req_type) {
1404 case VFDI_EV_TYPE_REQ_WORD0:
1405 case VFDI_EV_TYPE_REQ_WORD1:
1406 case VFDI_EV_TYPE_REQ_WORD2:
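		/* REQ_WORD0..2 carry address bits 0-47; the word type
		 * (0, 1 or 2) doubles as the index of the 16-bit chunk.
		 */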
1407 vf->req_addr |= (u64)data << (vf->req_type << 4);
1408 ++vf->req_type;
1409 return;
1410
1411 case VFDI_EV_TYPE_REQ_WORD3:
1412 vf->req_addr |= (u64)data << 48;
1413 vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1414 vf->busy = true;
1415 queue_work(vfdi_workqueue, &vf->req);
1416 return;
1417 }
1418
1419error:
1420 if (net_ratelimit())
1421 netif_err(efx, hw, efx->net_dev,
1422 "ERROR: Screaming VFDI request from %s\n",
1423 vf->pci_name);
1424 /* Reset the request and sequence number */
1425 vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1426 vf->req_seqno = seq + 1;
1427}
1428
1429void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
1430{
1431 struct efx_vf *vf;
1432
1433 if (vf_i >= efx->vf_init_count)
1434 return;
1435 vf = efx->vf + vf_i;
1436 netif_info(efx, hw, efx->net_dev,
1437 "FLR on VF %s\n", vf->pci_name);
1438
1439 vf->status_addr = 0;
1440 efx_vfdi_remove_all_filters(vf);
1441 efx_vfdi_flush_clear(vf);
1442
1443 vf->evq0_count = 0;
1444}
1445
1446void efx_sriov_mac_address_changed(struct efx_nic *efx)
1447{
1448 struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
1449
1450 if (!efx->vf_init_count)
1451 return;
1452 memcpy(vfdi_status->peers[0].mac_addr,
1453 efx->net_dev->dev_addr, ETH_ALEN);
1454 queue_work(vfdi_workqueue, &efx->peer_work);
1455}
1456
1457void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1458{
1459 struct efx_vf *vf;
1460 unsigned queue, qid;
1461
1462 queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1463 if (map_vi_index(efx, queue, &vf, &qid))
1464 return;
1465 /* Ignore flush completions triggered by an FLR */
1466 if (!test_bit(qid, vf->txq_mask))
1467 return;
1468
1469 __clear_bit(qid, vf->txq_mask);
1470 --vf->txq_count;
1471
1472 if (efx_vfdi_flush_wake(vf))
1473 wake_up(&vf->flush_waitq);
1474}
1475
1476void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1477{
1478 struct efx_vf *vf;
1479 unsigned ev_failed, queue, qid;
1480
1481 queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1482 ev_failed = EFX_QWORD_FIELD(*event,
1483 FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1484 if (map_vi_index(efx, queue, &vf, &qid))
1485 return;
1486 if (!test_bit(qid, vf->rxq_mask))
1487 return;
1488
1489 if (ev_failed) {
1490 set_bit(qid, vf->rxq_retry_mask);
1491 atomic_inc(&vf->rxq_retry_count);
1492 } else {
1493 __clear_bit(qid, vf->rxq_mask);
1494 --vf->rxq_count;
1495 }
1496 if (efx_vfdi_flush_wake(vf))
1497 wake_up(&vf->flush_waitq);
1498}
1499
1500/* Called from napi. Schedule the reset work item */
1501void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
1502{
1503 struct efx_vf *vf;
1504 unsigned int rel;
1505
1506 if (map_vi_index(efx, dmaq, &vf, &rel))
1507 return;
1508
1509 if (net_ratelimit())
1510 netif_err(efx, hw, efx->net_dev,
1511 "VF %d DMA Q %d reports descriptor fetch error.\n",
1512 vf->index, rel);
1513 queue_work(vfdi_workqueue, &vf->reset_work);
1514}
1515
1516/* Reset all VFs */
1517void efx_sriov_reset(struct efx_nic *efx)
1518{
1519 unsigned int vf_i;
1520 struct efx_buffer buf;
1521 struct efx_vf *vf;
1522
1523 ASSERT_RTNL();
1524
1525 if (efx->vf_init_count == 0)
1526 return;
1527
1528 efx_sriov_usrev(efx, true);
1529 (void)efx_sriov_cmd(efx, true, NULL, NULL);
1530
1531 if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
1532 return;
1533
1534 for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
1535 vf = efx->vf + vf_i;
1536 efx_sriov_reset_vf(vf, &buf);
1537 }
1538
1539 efx_nic_free_buffer(efx, &buf);
1540}
1541
1542int efx_init_sriov(void)
1543{
1544 /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
1545 * efx_sriov_peer_work() spend almost all their time sleeping for
1546 * MCDI to complete anyway
1547 */
1548 vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
1549 if (!vfdi_workqueue)
1550 return -ENOMEM;
1551
1552 return 0;
1553}
1554
1555void efx_fini_sriov(void)
1556{
1557 destroy_workqueue(vfdi_workqueue);
1558}
1559
1560int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
1561{
1562 struct efx_nic *efx = netdev_priv(net_dev);
1563 struct efx_vf *vf;
1564
1565 if (vf_i >= efx->vf_init_count)
1566 return -EINVAL;
1567 vf = efx->vf + vf_i;
1568
1569 mutex_lock(&vf->status_lock);
1570 memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
1571 __efx_sriov_update_vf_addr(vf);
1572 mutex_unlock(&vf->status_lock);
1573
1574 return 0;
1575}
1576
1577int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
1578 u16 vlan, u8 qos)
1579{
1580 struct efx_nic *efx = netdev_priv(net_dev);
1581 struct efx_vf *vf;
1582 u16 tci;
1583
1584 if (vf_i >= efx->vf_init_count)
1585 return -EINVAL;
1586 vf = efx->vf + vf_i;
1587
1588 mutex_lock(&vf->status_lock);
1589 tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
1590 vf->addr.tci = htons(tci);
1591 __efx_sriov_update_vf_addr(vf);
1592 mutex_unlock(&vf->status_lock);
1593
1594 return 0;
1595}
1596
1597int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
1598 bool spoofchk)
1599{
1600 struct efx_nic *efx = netdev_priv(net_dev);
1601 struct efx_vf *vf;
1602 int rc;
1603
1604 if (vf_i >= efx->vf_init_count)
1605 return -EINVAL;
1606 vf = efx->vf + vf_i;
1607
1608 mutex_lock(&vf->txq_lock);
1609 if (vf->txq_count == 0) {
1610 vf->tx_filter_mode =
1611 spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
1612 rc = 0;
1613 } else {
1614 /* This cannot be changed while TX queues are running */
1615 rc = -EBUSY;
1616 }
1617 mutex_unlock(&vf->txq_lock);
1618 return rc;
1619}
1620
1621int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1622 struct ifla_vf_info *ivi)
1623{
1624 struct efx_nic *efx = netdev_priv(net_dev);
1625 struct efx_vf *vf;
1626 u16 tci;
1627
1628 if (vf_i >= efx->vf_init_count)
1629 return -EINVAL;
1630 vf = efx->vf + vf_i;
1631
1632 ivi->vf = vf_i;
1633 memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
1634 ivi->tx_rate = 0;
1635 tci = ntohs(vf->addr.tci);
1636 ivi->vlan = tci & VLAN_VID_MASK;
1637 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
1638 ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
1639
1640 return 0;
1641}
1642
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5cb81fa3fcbd..a096e287e95f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -110,7 +110,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
110 * little benefit from using descriptors that cross those 110 * little benefit from using descriptors that cross those
111 * boundaries and we keep things simple by not doing so. 111 * boundaries and we keep things simple by not doing so.
112 */ 112 */
113 unsigned len = (~dma_addr & 0xfff) + 1; 113 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
114 114
115 /* Work around hardware bug for unaligned buffers. */ 115 /* Work around hardware bug for unaligned buffers. */
116 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) 116 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
new file mode 100644
index 000000000000..656fa70f9993
--- /dev/null
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -0,0 +1,254 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2010-2012 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9#ifndef _VFDI_H
10#define _VFDI_H
11
12/**
13 * DOC: Virtual Function Driver Interface
14 *
15 * This file contains software structures used to form a two way
16 * communication channel between the VF driver and the PF driver,
17 * named Virtual Function Driver Interface (VFDI).
18 *
19 * For the purposes of VFDI, a page is a memory region with size and
20 * alignment of 4K. All addresses are DMA addresses to be used within
21 * the domain of the relevant VF.
22 *
23 * The only hardware-defined channels for a VF driver to communicate
24 * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
25 * registers). Writing to these registers generates an event with
26 * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
27 * and USER_EV_REG_VALUE set to the value written. The PF driver may
28 * direct or disable delivery of these events by setting
29 * %FR_CZ_USR_EV_CFG.
30 *
31 * The PF driver can send arbitrary events to arbitrary event queues.
32 * However, for consistency, VFDI events from the PF are defined to
33 * follow the same form and be sent to the first event queue assigned
34 * to the VF while that queue is enabled by the VF driver.
35 *
36 * The general form of the variable bits of VFDI events is:
37 *
38 * 0 16 24 31
39 * | DATA | TYPE | SEQ |
40 *
41 * SEQ is a sequence number which should be incremented by 1 (modulo
42 * 256) for each event. The sequence numbers used in each direction
43 * are independent.
44 *
45 * The VF submits requests of type &struct vfdi_req by sending the
46 * address of the request (ADDR) in a series of 4 events:
47 *
48 * 0 16 24 31
49 * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
50 * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
51 * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
52 * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
53 *
54 * The address must be page-aligned. After receiving such a valid
55 * series of events, the PF driver will attempt to read the request
56 * and write a response to the same address. In case of an invalid
57 * sequence of events or a DMA error, there will be no response.
58 *
59 * The VF driver may request that the PF driver writes status
60 * information into its domain asynchronously. After writing the
61 * status, the PF driver will send an event of the form:
62 *
63 * 0 16 24 31
64 * | reserved | VFDI_EV_TYPE_STATUS | SEQ |
65 *
66 * In case the VF must be reset for any reason, the PF driver will
67 * send an event of the form:
68 *
69 * 0 16 24 31
70 * | reserved | VFDI_EV_TYPE_RESET | SEQ |
71 *
72 * It is then the responsibility of the VF driver to request
73 * reinitialisation of its queues.
74 */
75#define VFDI_EV_SEQ_LBN 24
76#define VFDI_EV_SEQ_WIDTH 8
77#define VFDI_EV_TYPE_LBN 16
78#define VFDI_EV_TYPE_WIDTH 8
79#define VFDI_EV_TYPE_REQ_WORD0 0
80#define VFDI_EV_TYPE_REQ_WORD1 1
81#define VFDI_EV_TYPE_REQ_WORD2 2
82#define VFDI_EV_TYPE_REQ_WORD3 3
83#define VFDI_EV_TYPE_STATUS 4
84#define VFDI_EV_TYPE_RESET 5
85#define VFDI_EV_DATA_LBN 0
86#define VFDI_EV_DATA_WIDTH 16
87
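As an illustration of the request submission sequence described above, a VF driver might compose the four address events roughly as follows. This is only a sketch: vfdi_post_user_event() is a hypothetical helper that writes the value into the VF's FR_CZ_USR_EV mailbox register, and seqno is the caller-maintained sequence counter.

static void vfdi_submit_req_addr(u64 req_dma_addr, u8 *seqno,
				 void (*vfdi_post_user_event)(u32 value))
{
	unsigned int word;
	u32 value;

	for (word = 0; word < 4; word++) {
		/* DATA[0:15]: next 16-bit chunk of the request address */
		value = (u32)(req_dma_addr >> (16 * word)) & 0xffff;
		/* TYPE[16:23]: REQ_WORD0..REQ_WORD3 */
		value |= (VFDI_EV_TYPE_REQ_WORD0 + word) << VFDI_EV_TYPE_LBN;
		/* SEQ[24:31]: incremented by 1 (mod 256) per event */
		value |= (u32)(*seqno)++ << VFDI_EV_SEQ_LBN;
		vfdi_post_user_event(value);
	}
}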
88struct vfdi_endpoint {
89 u8 mac_addr[ETH_ALEN];
90 __be16 tci;
91};
92
93/**
94 * enum vfdi_op - VFDI operation enumeration
95 * @VFDI_OP_RESPONSE: Indicates a response to the request.
96 * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
97 * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
98 * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
99 * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
100 * finalize the SRAM entries.
101 * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
102 * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
103 * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
104 * from PF and write the initial status.
105 * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
106 * updates from PF.
107 */
108enum vfdi_op {
109 VFDI_OP_RESPONSE = 0,
110 VFDI_OP_INIT_EVQ = 1,
111 VFDI_OP_INIT_RXQ = 2,
112 VFDI_OP_INIT_TXQ = 3,
113 VFDI_OP_FINI_ALL_QUEUES = 4,
114 VFDI_OP_INSERT_FILTER = 5,
115 VFDI_OP_REMOVE_ALL_FILTERS = 6,
116 VFDI_OP_SET_STATUS_PAGE = 7,
117 VFDI_OP_CLEAR_STATUS_PAGE = 8,
118 VFDI_OP_LIMIT,
119};
120
121/* Response codes for VFDI operations. Other values may be used in future. */
122#define VFDI_RC_SUCCESS 0
123#define VFDI_RC_ENOMEM (-12)
124#define VFDI_RC_EINVAL (-22)
125#define VFDI_RC_EOPNOTSUPP (-95)
126#define VFDI_RC_ETIMEDOUT (-110)
127
128/**
129 * struct vfdi_req - Request from VF driver to PF driver
130 * @op: Operation code or response indicator, taken from &enum vfdi_op.
131 * @rc: Response code. Set to 0 on success or a negative error code on failure.
132 * @u.init_evq.index: Index of event queue to create.
133 * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
134 * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
135 * address of each page backing the event queue.
136 * @u.init_rxq.index: Index of receive queue to create.
137 * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
138 * @u.init_rxq.evq: Instance of event queue to target receive events at.
139 * @u.init_rxq.label: Label used in receive events.
140 * @u.init_rxq.flags: Unused.
141 * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
142 * address of each page backing the receive queue.
143 * @u.init_txq.index: Index of transmit queue to create.
144 * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
145 * @u.init_txq.evq: Instance of event queue to target transmit completion
146 * events at.
147 * @u.init_txq.label: Label used in transmit completion events.
148 * @u.init_txq.flags: Checksum offload flags.
149 * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
150 * address of each page backing the transmit queue.
151 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
152 * all traffic at this receive queue.
153 * @u.mac_filter.flags: MAC filter flags.
154 * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
155 * This address must be such that the structure fits within a page.
156 * @u.set_status_page.peer_page_count: Number of additional pages the VF
157 * has provided into which peer addresses may be DMAd.
158 * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
159 * If the number of peers exceeds 256, then the VF must provide
160 * additional pages in this array. The PF will then DMA up to
161 * 512 vfdi_endpoint structures into each page. These addresses
162 * must be page-aligned.
163 */
164struct vfdi_req {
165 u32 op;
166 u32 reserved1;
167 s32 rc;
168 u32 reserved2;
169 union {
170 struct {
171 u32 index;
172 u32 buf_count;
173 u64 addr[];
174 } init_evq;
175 struct {
176 u32 index;
177 u32 buf_count;
178 u32 evq;
179 u32 label;
180 u32 flags;
181#define VFDI_RXQ_FLAG_SCATTER_EN 1
182 u32 reserved;
183 u64 addr[];
184 } init_rxq;
185 struct {
186 u32 index;
187 u32 buf_count;
188 u32 evq;
189 u32 label;
190 u32 flags;
191#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
192#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
193 u32 reserved;
194 u64 addr[];
195 } init_txq;
196 struct {
197 u32 rxq;
198 u32 flags;
199#define VFDI_MAC_FILTER_FLAG_RSS 1
200#define VFDI_MAC_FILTER_FLAG_SCATTER 2
201 } mac_filter;
202 struct {
203 u64 dma_addr;
204 u64 peer_page_count;
205 u64 peer_page_addr[];
206 } set_status_page;
207 } u;
208};
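By way of example (not part of the interface itself), a VF driver might fill in the request page for VFDI_OP_INIT_EVQ along these lines, where req_page and evq_page_dma[] are assumed names for the VF's DMA-coherent request page and the DMA addresses of the pages backing the new event queue, and EFX_PAGE_SIZE is the 4K page size used throughout:

	struct vfdi_req *req = req_page;

	memset(req, 0, EFX_PAGE_SIZE);
	req->op = VFDI_OP_INIT_EVQ;
	req->u.init_evq.index = 0;		/* first VI owned by this VF */
	req->u.init_evq.buf_count = 2;		/* two 4K pages of events */
	req->u.init_evq.addr[0] = evq_page_dma[0];
	req->u.init_evq.addr[1] = evq_page_dma[1];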
209
210/**
211 * struct vfdi_status - Status provided by PF driver to VF driver
212 * @generation_start: A generation count DMA'd to VF *before* the
213 * rest of the structure.
214 * @generation_end: A generation count DMA'd to VF *after* the
215 * rest of the structure.
216 * @version: Version of this structure; currently set to 1. Later
217 * versions must either be layout-compatible or only be sent to VFs
218 * that specifically request them.
219 * @length: Total length of this structure including embedded tables
220 * @vi_scale: log2 of the number of VIs available on this VF. This quantity
221 * is used by the hardware for register decoding.
222 * @max_tx_channels: The maximum number of transmit queues the VF can use.
223 * @rss_rxq_count: The number of receive queues present in the shared RSS
224 * indirection table.
225 * @peer_count: Total number of peers in the complete peer list. If larger
226 * than ARRAY_SIZE(%peers), then the VF must provide sufficient
227 * additional pages each of which is filled with vfdi_endpoint structures.
228 * @local: The MAC address and outer VLAN tag of *this* VF
229 * @peers: Table of peer addresses. The @tci fields in these structures
230 * are currently unused and must be ignored. Additional peers are
231 * written into any additional pages provided by the VF.
232 * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
233 * for interrupt moderation timers, in nanoseconds. This member is only
234 * present if @length is sufficiently large.
235 */
236struct vfdi_status {
237 u32 generation_start;
238 u32 generation_end;
239 u32 version;
240 u32 length;
241 u8 vi_scale;
242 u8 max_tx_channels;
243 u8 rss_rxq_count;
244 u8 reserved1;
245 u16 peer_count;
246 u16 reserved2;
247 struct vfdi_endpoint local;
248 struct vfdi_endpoint peers[256];
249
250 /* Members below here extend version 1 of this structure */
251 u32 timer_quantum_ns;
252};
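The paired generation counts allow a VF driver to snapshot the status page without locking. A minimal, hypothetical consumer sketch (DMA-mapping details and error handling omitted):

static void vfdi_read_status(const struct vfdi_status *status,
			     struct vfdi_status *copy)
{
	/* The PF DMAs generation_start before the body and generation_end
	 * after it; retry until both match, i.e. no update raced with us.
	 */
	do {
		rmb();		/* see the latest DMAd contents */
		memcpy(copy, status, sizeof(*copy));
		rmb();
	} while (copy->generation_start != copy->generation_end);
}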
253
254#endif