author    Ben Hutchings <bhutchings@solarflare.com>    2010-06-23 07:30:07 -0400
committer David S. Miller <davem@davemloft.net>        2010-06-25 01:13:22 -0400
commit    62776d034cc40c49bafdb3551a6ba35f78e3f08d (patch)
tree      1cd2132940ced266ad53619a0c947e153cc83a5e
parent    0c605a2061670412d3b5580c92f1e161b1a693d2 (diff)
sfc: Implement message level control
Replace EFX_ERR() with netif_err(), EFX_INFO() with netif_info(),
EFX_LOG() with netif_dbg(), and EFX_TRACE() and EFX_REGDUMP() with
netif_vdbg().
Replace EFX_ERR_RL(), EFX_INFO_RL() and EFX_LOG_RL() with the
corresponding netif_*() calls guarded by explicit net_ratelimit()
checks.
Implement the ethtool operations to get and set message level flags,
and add a 'debug' module parameter for the initial value.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
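For context, here is a minimal, self-contained sketch of the message-level pattern this patch rolls out across the driver: a per-device msg_enable bitmask seeded from a module parameter, consulted by the netif_*() logging helpers, and exposed through the ethtool get_msglevel/set_msglevel operations. The foo_* names are placeholders, not sfc identifiers; the real implementation is in the efx.c, ethtool.c and net_driver.h hunks below.

```c
/* Minimal sketch of the message-level pattern used by this patch.
 * "foo" is a placeholder driver name; the real code lives in the sfc hunks.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct foo_nic {
	struct net_device *net_dev;
	u32 msg_enable;			/* NETIF_MSG_* bitmask */
};

/* Initial message level, overridable at load time: modprobe foo debug=<mask> */
static unsigned int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

static u32 foo_get_msglevel(struct net_device *net_dev)
{
	struct foo_nic *nic = netdev_priv(net_dev);
	return nic->msg_enable;
}

static void foo_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct foo_nic *nic = netdev_priv(net_dev);
	nic->msg_enable = msg_enable;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_msglevel	= foo_get_msglevel,
	.set_msglevel	= foo_set_msglevel,
};

static void foo_log_examples(struct foo_nic *nic)
{
	/* Printed only when NETIF_MSG_LINK is set in nic->msg_enable */
	netif_info(nic, link, nic->net_dev, "link is up\n");

	/* Gated on NETIF_MSG_HW and on the kernel's DEBUG/dynamic-debug settings */
	netif_dbg(nic, hw, nic->net_dev, "register dump follows\n");
}
```

Once a driver wires this up, the mask can be inspected and changed at runtime with `ethtool <iface>` and `ethtool -s <iface> msglvl <value>`, which is exactly what the new get_msglevel/set_msglevel hooks enable for sfc.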
-rw-r--r--  drivers/net/sfc/efx.c            | 223
-rw-r--r--  drivers/net/sfc/efx.h            |   5
-rw-r--r--  drivers/net/sfc/ethtool.c        |  46
-rw-r--r--  drivers/net/sfc/falcon.c         | 176
-rw-r--r--  drivers/net/sfc/falcon_boards.c  |  30
-rw-r--r--  drivers/net/sfc/falcon_xmac.c    |   5
-rw-r--r--  drivers/net/sfc/io.h             |  30
-rw-r--r--  drivers/net/sfc/mcdi.c           |  98
-rw-r--r--  drivers/net/sfc/mcdi_mac.c       |   8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c       |  20
-rw-r--r--  drivers/net/sfc/mdio_10g.c       |  39
-rw-r--r--  drivers/net/sfc/mdio_10g.h       |   3
-rw-r--r--  drivers/net/sfc/net_driver.h     |  35
-rw-r--r--  drivers/net/sfc/nic.c            | 219
-rw-r--r--  drivers/net/sfc/qt202x_phy.c     |  42
-rw-r--r--  drivers/net/sfc/rx.c             |  56
-rw-r--r--  drivers/net/sfc/selftest.c       | 126
-rw-r--r--  drivers/net/sfc/siena.c          |  42
-rw-r--r--  drivers/net/sfc/tenxpress.c      |  12
-rw-r--r--  drivers/net/sfc/tx.c             |  41
20 files changed, 727 insertions, 529 deletions
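None of the hunks shown below happens to include one of the rate-limited call sites mentioned in the commit message, so the following is a hedged sketch (hypothetical function and message text, placeholder foo_* names) of what the EFX_ERR_RL() conversion looks like: the old wrapper's implicit rate limiting becomes an explicit net_ratelimit() check around a plain netif_err() call.

```c
#include <linux/net.h>		/* net_ratelimit() */
#include <linux/netdevice.h>	/* netif_err() */

/* Placeholder private struct; the real driver uses struct efx_nic. */
struct foo_nic {
	struct net_device *net_dev;
	u32 msg_enable;
};

/* Hypothetical call site illustrating the EFX_ERR_RL() -> netif_err() change */
static void foo_report_rx_error(struct foo_nic *nic, int queue)
{
	if (net_ratelimit())
		netif_err(nic, rx_err, nic->net_dev,
			  "RX queue %d reported an error\n", queue);
}
```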
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 8ad476a19d95..72514005c2a3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -189,6 +189,13 @@ module_param(irq_adapt_high_thresh, uint, 0644); | |||
189 | MODULE_PARM_DESC(irq_adapt_high_thresh, | 189 | MODULE_PARM_DESC(irq_adapt_high_thresh, |
190 | "Threshold score for increasing IRQ moderation"); | 190 | "Threshold score for increasing IRQ moderation"); |
191 | 191 | ||
192 | static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | | ||
193 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | | ||
194 | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | | ||
195 | NETIF_MSG_TX_ERR | NETIF_MSG_HW); | ||
196 | module_param(debug, uint, 0); | ||
197 | MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); | ||
198 | |||
192 | /************************************************************************** | 199 | /************************************************************************** |
193 | * | 200 | * |
194 | * Utility functions and prototypes | 201 | * Utility functions and prototypes |
@@ -272,16 +279,16 @@ static int efx_poll(struct napi_struct *napi, int budget) | |||
272 | { | 279 | { |
273 | struct efx_channel *channel = | 280 | struct efx_channel *channel = |
274 | container_of(napi, struct efx_channel, napi_str); | 281 | container_of(napi, struct efx_channel, napi_str); |
282 | struct efx_nic *efx = channel->efx; | ||
275 | int spent; | 283 | int spent; |
276 | 284 | ||
277 | EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", | 285 | netif_vdbg(efx, intr, efx->net_dev, |
278 | channel->channel, raw_smp_processor_id()); | 286 | "channel %d NAPI poll executing on CPU %d\n", |
287 | channel->channel, raw_smp_processor_id()); | ||
279 | 288 | ||
280 | spent = efx_process_channel(channel, budget); | 289 | spent = efx_process_channel(channel, budget); |
281 | 290 | ||
282 | if (spent < budget) { | 291 | if (spent < budget) { |
283 | struct efx_nic *efx = channel->efx; | ||
284 | |||
285 | if (channel->channel < efx->n_rx_channels && | 292 | if (channel->channel < efx->n_rx_channels && |
286 | efx->irq_rx_adaptive && | 293 | efx->irq_rx_adaptive && |
287 | unlikely(++channel->irq_count == 1000)) { | 294 | unlikely(++channel->irq_count == 1000)) { |
@@ -357,7 +364,8 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
357 | */ | 364 | */ |
358 | static int efx_probe_eventq(struct efx_channel *channel) | 365 | static int efx_probe_eventq(struct efx_channel *channel) |
359 | { | 366 | { |
360 | EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); | 367 | netif_dbg(channel->efx, probe, channel->efx->net_dev, |
368 | "chan %d create event queue\n", channel->channel); | ||
361 | 369 | ||
362 | return efx_nic_probe_eventq(channel); | 370 | return efx_nic_probe_eventq(channel); |
363 | } | 371 | } |
@@ -365,7 +373,8 @@ static int efx_probe_eventq(struct efx_channel *channel) | |||
365 | /* Prepare channel's event queue */ | 373 | /* Prepare channel's event queue */ |
366 | static void efx_init_eventq(struct efx_channel *channel) | 374 | static void efx_init_eventq(struct efx_channel *channel) |
367 | { | 375 | { |
368 | EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); | 376 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
377 | "chan %d init event queue\n", channel->channel); | ||
369 | 378 | ||
370 | channel->eventq_read_ptr = 0; | 379 | channel->eventq_read_ptr = 0; |
371 | 380 | ||
@@ -374,14 +383,16 @@ static void efx_init_eventq(struct efx_channel *channel) | |||
374 | 383 | ||
375 | static void efx_fini_eventq(struct efx_channel *channel) | 384 | static void efx_fini_eventq(struct efx_channel *channel) |
376 | { | 385 | { |
377 | EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); | 386 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
387 | "chan %d fini event queue\n", channel->channel); | ||
378 | 388 | ||
379 | efx_nic_fini_eventq(channel); | 389 | efx_nic_fini_eventq(channel); |
380 | } | 390 | } |
381 | 391 | ||
382 | static void efx_remove_eventq(struct efx_channel *channel) | 392 | static void efx_remove_eventq(struct efx_channel *channel) |
383 | { | 393 | { |
384 | EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); | 394 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
395 | "chan %d remove event queue\n", channel->channel); | ||
385 | 396 | ||
386 | efx_nic_remove_eventq(channel); | 397 | efx_nic_remove_eventq(channel); |
387 | } | 398 | } |
@@ -398,7 +409,8 @@ static int efx_probe_channel(struct efx_channel *channel) | |||
398 | struct efx_rx_queue *rx_queue; | 409 | struct efx_rx_queue *rx_queue; |
399 | int rc; | 410 | int rc; |
400 | 411 | ||
401 | EFX_LOG(channel->efx, "creating channel %d\n", channel->channel); | 412 | netif_dbg(channel->efx, probe, channel->efx->net_dev, |
413 | "creating channel %d\n", channel->channel); | ||
402 | 414 | ||
403 | rc = efx_probe_eventq(channel); | 415 | rc = efx_probe_eventq(channel); |
404 | if (rc) | 416 | if (rc) |
@@ -474,7 +486,8 @@ static void efx_init_channels(struct efx_nic *efx) | |||
474 | 486 | ||
475 | /* Initialise the channels */ | 487 | /* Initialise the channels */ |
476 | efx_for_each_channel(channel, efx) { | 488 | efx_for_each_channel(channel, efx) { |
477 | EFX_LOG(channel->efx, "init chan %d\n", channel->channel); | 489 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
490 | "init chan %d\n", channel->channel); | ||
478 | 491 | ||
479 | efx_init_eventq(channel); | 492 | efx_init_eventq(channel); |
480 | 493 | ||
@@ -501,7 +514,8 @@ static void efx_start_channel(struct efx_channel *channel) | |||
501 | { | 514 | { |
502 | struct efx_rx_queue *rx_queue; | 515 | struct efx_rx_queue *rx_queue; |
503 | 516 | ||
504 | EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); | 517 | netif_dbg(channel->efx, ifup, channel->efx->net_dev, |
518 | "starting chan %d\n", channel->channel); | ||
505 | 519 | ||
506 | /* The interrupt handler for this channel may set work_pending | 520 | /* The interrupt handler for this channel may set work_pending |
507 | * as soon as we enable it. Make sure it's cleared before | 521 | * as soon as we enable it. Make sure it's cleared before |
@@ -526,7 +540,8 @@ static void efx_stop_channel(struct efx_channel *channel) | |||
526 | if (!channel->enabled) | 540 | if (!channel->enabled) |
527 | return; | 541 | return; |
528 | 542 | ||
529 | EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); | 543 | netif_dbg(channel->efx, ifdown, channel->efx->net_dev, |
544 | "stop chan %d\n", channel->channel); | ||
530 | 545 | ||
531 | channel->enabled = false; | 546 | channel->enabled = false; |
532 | napi_disable(&channel->napi_str); | 547 | napi_disable(&channel->napi_str); |
@@ -548,16 +563,19 @@ static void efx_fini_channels(struct efx_nic *efx) | |||
548 | * descriptor caches reference memory we're about to free, | 563 | * descriptor caches reference memory we're about to free, |
549 | * but falcon_reconfigure_mac_wrapper() won't reconnect | 564 | * but falcon_reconfigure_mac_wrapper() won't reconnect |
550 | * the MACs because of the pending reset. */ | 565 | * the MACs because of the pending reset. */ |
551 | EFX_ERR(efx, "Resetting to recover from flush failure\n"); | 566 | netif_err(efx, drv, efx->net_dev, |
567 | "Resetting to recover from flush failure\n"); | ||
552 | efx_schedule_reset(efx, RESET_TYPE_ALL); | 568 | efx_schedule_reset(efx, RESET_TYPE_ALL); |
553 | } else if (rc) { | 569 | } else if (rc) { |
554 | EFX_ERR(efx, "failed to flush queues\n"); | 570 | netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); |
555 | } else { | 571 | } else { |
556 | EFX_LOG(efx, "successfully flushed all queues\n"); | 572 | netif_dbg(efx, drv, efx->net_dev, |
573 | "successfully flushed all queues\n"); | ||
557 | } | 574 | } |
558 | 575 | ||
559 | efx_for_each_channel(channel, efx) { | 576 | efx_for_each_channel(channel, efx) { |
560 | EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); | 577 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
578 | "shut down chan %d\n", channel->channel); | ||
561 | 579 | ||
562 | efx_for_each_channel_rx_queue(rx_queue, channel) | 580 | efx_for_each_channel_rx_queue(rx_queue, channel) |
563 | efx_fini_rx_queue(rx_queue); | 581 | efx_fini_rx_queue(rx_queue); |
@@ -572,7 +590,8 @@ static void efx_remove_channel(struct efx_channel *channel) | |||
572 | struct efx_tx_queue *tx_queue; | 590 | struct efx_tx_queue *tx_queue; |
573 | struct efx_rx_queue *rx_queue; | 591 | struct efx_rx_queue *rx_queue; |
574 | 592 | ||
575 | EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel); | 593 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
594 | "destroy chan %d\n", channel->channel); | ||
576 | 595 | ||
577 | efx_for_each_channel_rx_queue(rx_queue, channel) | 596 | efx_for_each_channel_rx_queue(rx_queue, channel) |
578 | efx_remove_rx_queue(rx_queue); | 597 | efx_remove_rx_queue(rx_queue); |
@@ -623,12 +642,13 @@ void efx_link_status_changed(struct efx_nic *efx) | |||
623 | 642 | ||
624 | /* Status message for kernel log */ | 643 | /* Status message for kernel log */ |
625 | if (link_state->up) { | 644 | if (link_state->up) { |
626 | EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", | 645 | netif_info(efx, link, efx->net_dev, |
627 | link_state->speed, link_state->fd ? "full" : "half", | 646 | "link up at %uMbps %s-duplex (MTU %d)%s\n", |
628 | efx->net_dev->mtu, | 647 | link_state->speed, link_state->fd ? "full" : "half", |
629 | (efx->promiscuous ? " [PROMISC]" : "")); | 648 | efx->net_dev->mtu, |
649 | (efx->promiscuous ? " [PROMISC]" : "")); | ||
630 | } else { | 650 | } else { |
631 | EFX_INFO(efx, "link down\n"); | 651 | netif_info(efx, link, efx->net_dev, "link down\n"); |
632 | } | 652 | } |
633 | 653 | ||
634 | } | 654 | } |
@@ -732,7 +752,7 @@ static int efx_probe_port(struct efx_nic *efx) | |||
732 | { | 752 | { |
733 | int rc; | 753 | int rc; |
734 | 754 | ||
735 | EFX_LOG(efx, "create port\n"); | 755 | netif_dbg(efx, probe, efx->net_dev, "create port\n"); |
736 | 756 | ||
737 | if (phy_flash_cfg) | 757 | if (phy_flash_cfg) |
738 | efx->phy_mode = PHY_MODE_SPECIAL; | 758 | efx->phy_mode = PHY_MODE_SPECIAL; |
@@ -746,15 +766,16 @@ static int efx_probe_port(struct efx_nic *efx) | |||
746 | if (is_valid_ether_addr(efx->mac_address)) { | 766 | if (is_valid_ether_addr(efx->mac_address)) { |
747 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); | 767 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); |
748 | } else { | 768 | } else { |
749 | EFX_ERR(efx, "invalid MAC address %pM\n", | 769 | netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", |
750 | efx->mac_address); | 770 | efx->mac_address); |
751 | if (!allow_bad_hwaddr) { | 771 | if (!allow_bad_hwaddr) { |
752 | rc = -EINVAL; | 772 | rc = -EINVAL; |
753 | goto err; | 773 | goto err; |
754 | } | 774 | } |
755 | random_ether_addr(efx->net_dev->dev_addr); | 775 | random_ether_addr(efx->net_dev->dev_addr); |
756 | EFX_INFO(efx, "using locally-generated MAC %pM\n", | 776 | netif_info(efx, probe, efx->net_dev, |
757 | efx->net_dev->dev_addr); | 777 | "using locally-generated MAC %pM\n", |
778 | efx->net_dev->dev_addr); | ||
758 | } | 779 | } |
759 | 780 | ||
760 | return 0; | 781 | return 0; |
@@ -768,7 +789,7 @@ static int efx_init_port(struct efx_nic *efx) | |||
768 | { | 789 | { |
769 | int rc; | 790 | int rc; |
770 | 791 | ||
771 | EFX_LOG(efx, "init port\n"); | 792 | netif_dbg(efx, drv, efx->net_dev, "init port\n"); |
772 | 793 | ||
773 | mutex_lock(&efx->mac_lock); | 794 | mutex_lock(&efx->mac_lock); |
774 | 795 | ||
@@ -799,7 +820,7 @@ fail1: | |||
799 | 820 | ||
800 | static void efx_start_port(struct efx_nic *efx) | 821 | static void efx_start_port(struct efx_nic *efx) |
801 | { | 822 | { |
802 | EFX_LOG(efx, "start port\n"); | 823 | netif_dbg(efx, ifup, efx->net_dev, "start port\n"); |
803 | BUG_ON(efx->port_enabled); | 824 | BUG_ON(efx->port_enabled); |
804 | 825 | ||
805 | mutex_lock(&efx->mac_lock); | 826 | mutex_lock(&efx->mac_lock); |
@@ -816,7 +837,7 @@ static void efx_start_port(struct efx_nic *efx) | |||
816 | /* Prevent efx_mac_work() and efx_monitor() from working */ | 837 | /* Prevent efx_mac_work() and efx_monitor() from working */ |
817 | static void efx_stop_port(struct efx_nic *efx) | 838 | static void efx_stop_port(struct efx_nic *efx) |
818 | { | 839 | { |
819 | EFX_LOG(efx, "stop port\n"); | 840 | netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); |
820 | 841 | ||
821 | mutex_lock(&efx->mac_lock); | 842 | mutex_lock(&efx->mac_lock); |
822 | efx->port_enabled = false; | 843 | efx->port_enabled = false; |
@@ -831,7 +852,7 @@ static void efx_stop_port(struct efx_nic *efx) | |||
831 | 852 | ||
832 | static void efx_fini_port(struct efx_nic *efx) | 853 | static void efx_fini_port(struct efx_nic *efx) |
833 | { | 854 | { |
834 | EFX_LOG(efx, "shut down port\n"); | 855 | netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); |
835 | 856 | ||
836 | if (!efx->port_initialized) | 857 | if (!efx->port_initialized) |
837 | return; | 858 | return; |
@@ -845,7 +866,7 @@ static void efx_fini_port(struct efx_nic *efx) | |||
845 | 866 | ||
846 | static void efx_remove_port(struct efx_nic *efx) | 867 | static void efx_remove_port(struct efx_nic *efx) |
847 | { | 868 | { |
848 | EFX_LOG(efx, "destroying port\n"); | 869 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); |
849 | 870 | ||
850 | efx->type->remove_port(efx); | 871 | efx->type->remove_port(efx); |
851 | } | 872 | } |
@@ -863,11 +884,12 @@ static int efx_init_io(struct efx_nic *efx) | |||
863 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 884 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
864 | int rc; | 885 | int rc; |
865 | 886 | ||
866 | EFX_LOG(efx, "initialising I/O\n"); | 887 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
867 | 888 | ||
868 | rc = pci_enable_device(pci_dev); | 889 | rc = pci_enable_device(pci_dev); |
869 | if (rc) { | 890 | if (rc) { |
870 | EFX_ERR(efx, "failed to enable PCI device\n"); | 891 | netif_err(efx, probe, efx->net_dev, |
892 | "failed to enable PCI device\n"); | ||
871 | goto fail1; | 893 | goto fail1; |
872 | } | 894 | } |
873 | 895 | ||
@@ -885,39 +907,45 @@ static int efx_init_io(struct efx_nic *efx) | |||
885 | dma_mask >>= 1; | 907 | dma_mask >>= 1; |
886 | } | 908 | } |
887 | if (rc) { | 909 | if (rc) { |
888 | EFX_ERR(efx, "could not find a suitable DMA mask\n"); | 910 | netif_err(efx, probe, efx->net_dev, |
911 | "could not find a suitable DMA mask\n"); | ||
889 | goto fail2; | 912 | goto fail2; |
890 | } | 913 | } |
891 | EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask); | 914 | netif_dbg(efx, probe, efx->net_dev, |
915 | "using DMA mask %llx\n", (unsigned long long) dma_mask); | ||
892 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | 916 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); |
893 | if (rc) { | 917 | if (rc) { |
894 | /* pci_set_consistent_dma_mask() is not *allowed* to | 918 | /* pci_set_consistent_dma_mask() is not *allowed* to |
895 | * fail with a mask that pci_set_dma_mask() accepted, | 919 | * fail with a mask that pci_set_dma_mask() accepted, |
896 | * but just in case... | 920 | * but just in case... |
897 | */ | 921 | */ |
898 | EFX_ERR(efx, "failed to set consistent DMA mask\n"); | 922 | netif_err(efx, probe, efx->net_dev, |
923 | "failed to set consistent DMA mask\n"); | ||
899 | goto fail2; | 924 | goto fail2; |
900 | } | 925 | } |
901 | 926 | ||
902 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); | 927 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); |
903 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); | 928 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); |
904 | if (rc) { | 929 | if (rc) { |
905 | EFX_ERR(efx, "request for memory BAR failed\n"); | 930 | netif_err(efx, probe, efx->net_dev, |
931 | "request for memory BAR failed\n"); | ||
906 | rc = -EIO; | 932 | rc = -EIO; |
907 | goto fail3; | 933 | goto fail3; |
908 | } | 934 | } |
909 | efx->membase = ioremap_nocache(efx->membase_phys, | 935 | efx->membase = ioremap_nocache(efx->membase_phys, |
910 | efx->type->mem_map_size); | 936 | efx->type->mem_map_size); |
911 | if (!efx->membase) { | 937 | if (!efx->membase) { |
912 | EFX_ERR(efx, "could not map memory BAR at %llx+%x\n", | 938 | netif_err(efx, probe, efx->net_dev, |
913 | (unsigned long long)efx->membase_phys, | 939 | "could not map memory BAR at %llx+%x\n", |
914 | efx->type->mem_map_size); | 940 | (unsigned long long)efx->membase_phys, |
941 | efx->type->mem_map_size); | ||
915 | rc = -ENOMEM; | 942 | rc = -ENOMEM; |
916 | goto fail4; | 943 | goto fail4; |
917 | } | 944 | } |
918 | EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n", | 945 | netif_dbg(efx, probe, efx->net_dev, |
919 | (unsigned long long)efx->membase_phys, | 946 | "memory BAR at %llx+%x (virtual %p)\n", |
920 | efx->type->mem_map_size, efx->membase); | 947 | (unsigned long long)efx->membase_phys, |
948 | efx->type->mem_map_size, efx->membase); | ||
921 | 949 | ||
922 | return 0; | 950 | return 0; |
923 | 951 | ||
@@ -933,7 +961,7 @@ static int efx_init_io(struct efx_nic *efx) | |||
933 | 961 | ||
934 | static void efx_fini_io(struct efx_nic *efx) | 962 | static void efx_fini_io(struct efx_nic *efx) |
935 | { | 963 | { |
936 | EFX_LOG(efx, "shutting down I/O\n"); | 964 | netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); |
937 | 965 | ||
938 | if (efx->membase) { | 966 | if (efx->membase) { |
939 | iounmap(efx->membase); | 967 | iounmap(efx->membase); |
@@ -997,9 +1025,11 @@ static void efx_probe_interrupts(struct efx_nic *efx) | |||
997 | xentries[i].entry = i; | 1025 | xentries[i].entry = i; |
998 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); | 1026 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); |
999 | if (rc > 0) { | 1027 | if (rc > 0) { |
1000 | EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" | 1028 | netif_err(efx, drv, efx->net_dev, |
1001 | " available (%d < %d).\n", rc, n_channels); | 1029 | "WARNING: Insufficient MSI-X vectors" |
1002 | EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); | 1030 | " available (%d < %d).\n", rc, n_channels); |
1031 | netif_err(efx, drv, efx->net_dev, | ||
1032 | "WARNING: Performance may be reduced.\n"); | ||
1003 | EFX_BUG_ON_PARANOID(rc >= n_channels); | 1033 | EFX_BUG_ON_PARANOID(rc >= n_channels); |
1004 | n_channels = rc; | 1034 | n_channels = rc; |
1005 | rc = pci_enable_msix(efx->pci_dev, xentries, | 1035 | rc = pci_enable_msix(efx->pci_dev, xentries, |
@@ -1023,7 +1053,8 @@ static void efx_probe_interrupts(struct efx_nic *efx) | |||
1023 | } else { | 1053 | } else { |
1024 | /* Fall back to single channel MSI */ | 1054 | /* Fall back to single channel MSI */ |
1025 | efx->interrupt_mode = EFX_INT_MODE_MSI; | 1055 | efx->interrupt_mode = EFX_INT_MODE_MSI; |
1026 | EFX_ERR(efx, "could not enable MSI-X\n"); | 1056 | netif_err(efx, drv, efx->net_dev, |
1057 | "could not enable MSI-X\n"); | ||
1027 | } | 1058 | } |
1028 | } | 1059 | } |
1029 | 1060 | ||
@@ -1036,7 +1067,8 @@ static void efx_probe_interrupts(struct efx_nic *efx) | |||
1036 | if (rc == 0) { | 1067 | if (rc == 0) { |
1037 | efx->channel[0].irq = efx->pci_dev->irq; | 1068 | efx->channel[0].irq = efx->pci_dev->irq; |
1038 | } else { | 1069 | } else { |
1039 | EFX_ERR(efx, "could not enable MSI\n"); | 1070 | netif_err(efx, drv, efx->net_dev, |
1071 | "could not enable MSI\n"); | ||
1040 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; | 1072 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; |
1041 | } | 1073 | } |
1042 | } | 1074 | } |
@@ -1090,7 +1122,7 @@ static int efx_probe_nic(struct efx_nic *efx) | |||
1090 | { | 1122 | { |
1091 | int rc; | 1123 | int rc; |
1092 | 1124 | ||
1093 | EFX_LOG(efx, "creating NIC\n"); | 1125 | netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); |
1094 | 1126 | ||
1095 | /* Carry out hardware-type specific initialisation */ | 1127 | /* Carry out hardware-type specific initialisation */ |
1096 | rc = efx->type->probe(efx); | 1128 | rc = efx->type->probe(efx); |
@@ -1112,7 +1144,7 @@ static int efx_probe_nic(struct efx_nic *efx) | |||
1112 | 1144 | ||
1113 | static void efx_remove_nic(struct efx_nic *efx) | 1145 | static void efx_remove_nic(struct efx_nic *efx) |
1114 | { | 1146 | { |
1115 | EFX_LOG(efx, "destroying NIC\n"); | 1147 | netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); |
1116 | 1148 | ||
1117 | efx_remove_interrupts(efx); | 1149 | efx_remove_interrupts(efx); |
1118 | efx->type->remove(efx); | 1150 | efx->type->remove(efx); |
@@ -1132,14 +1164,14 @@ static int efx_probe_all(struct efx_nic *efx) | |||
1132 | /* Create NIC */ | 1164 | /* Create NIC */ |
1133 | rc = efx_probe_nic(efx); | 1165 | rc = efx_probe_nic(efx); |
1134 | if (rc) { | 1166 | if (rc) { |
1135 | EFX_ERR(efx, "failed to create NIC\n"); | 1167 | netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); |
1136 | goto fail1; | 1168 | goto fail1; |
1137 | } | 1169 | } |
1138 | 1170 | ||
1139 | /* Create port */ | 1171 | /* Create port */ |
1140 | rc = efx_probe_port(efx); | 1172 | rc = efx_probe_port(efx); |
1141 | if (rc) { | 1173 | if (rc) { |
1142 | EFX_ERR(efx, "failed to create port\n"); | 1174 | netif_err(efx, probe, efx->net_dev, "failed to create port\n"); |
1143 | goto fail2; | 1175 | goto fail2; |
1144 | } | 1176 | } |
1145 | 1177 | ||
@@ -1147,8 +1179,9 @@ static int efx_probe_all(struct efx_nic *efx) | |||
1147 | efx_for_each_channel(channel, efx) { | 1179 | efx_for_each_channel(channel, efx) { |
1148 | rc = efx_probe_channel(channel); | 1180 | rc = efx_probe_channel(channel); |
1149 | if (rc) { | 1181 | if (rc) { |
1150 | EFX_ERR(efx, "failed to create channel %d\n", | 1182 | netif_err(efx, probe, efx->net_dev, |
1151 | channel->channel); | 1183 | "failed to create channel %d\n", |
1184 | channel->channel); | ||
1152 | goto fail3; | 1185 | goto fail3; |
1153 | } | 1186 | } |
1154 | } | 1187 | } |
@@ -1344,8 +1377,9 @@ static void efx_monitor(struct work_struct *data) | |||
1344 | struct efx_nic *efx = container_of(data, struct efx_nic, | 1377 | struct efx_nic *efx = container_of(data, struct efx_nic, |
1345 | monitor_work.work); | 1378 | monitor_work.work); |
1346 | 1379 | ||
1347 | EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", | 1380 | netif_vdbg(efx, timer, efx->net_dev, |
1348 | raw_smp_processor_id()); | 1381 | "hardware monitor executing on CPU %d\n", |
1382 | raw_smp_processor_id()); | ||
1349 | BUG_ON(efx->type->monitor == NULL); | 1383 | BUG_ON(efx->type->monitor == NULL); |
1350 | 1384 | ||
1351 | /* If the mac_lock is already held then it is likely a port | 1385 | /* If the mac_lock is already held then it is likely a port |
@@ -1452,8 +1486,8 @@ static int efx_net_open(struct net_device *net_dev) | |||
1452 | struct efx_nic *efx = netdev_priv(net_dev); | 1486 | struct efx_nic *efx = netdev_priv(net_dev); |
1453 | EFX_ASSERT_RESET_SERIALISED(efx); | 1487 | EFX_ASSERT_RESET_SERIALISED(efx); |
1454 | 1488 | ||
1455 | EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, | 1489 | netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", |
1456 | raw_smp_processor_id()); | 1490 | raw_smp_processor_id()); |
1457 | 1491 | ||
1458 | if (efx->state == STATE_DISABLED) | 1492 | if (efx->state == STATE_DISABLED) |
1459 | return -EIO; | 1493 | return -EIO; |
@@ -1478,8 +1512,8 @@ static int efx_net_stop(struct net_device *net_dev) | |||
1478 | { | 1512 | { |
1479 | struct efx_nic *efx = netdev_priv(net_dev); | 1513 | struct efx_nic *efx = netdev_priv(net_dev); |
1480 | 1514 | ||
1481 | EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, | 1515 | netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", |
1482 | raw_smp_processor_id()); | 1516 | raw_smp_processor_id()); |
1483 | 1517 | ||
1484 | if (efx->state != STATE_DISABLED) { | 1518 | if (efx->state != STATE_DISABLED) { |
1485 | /* Stop the device and flush all the channels */ | 1519 | /* Stop the device and flush all the channels */ |
@@ -1532,8 +1566,9 @@ static void efx_watchdog(struct net_device *net_dev) | |||
1532 | { | 1566 | { |
1533 | struct efx_nic *efx = netdev_priv(net_dev); | 1567 | struct efx_nic *efx = netdev_priv(net_dev); |
1534 | 1568 | ||
1535 | EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n", | 1569 | netif_err(efx, tx_err, efx->net_dev, |
1536 | efx->port_enabled); | 1570 | "TX stuck with port_enabled=%d: resetting channels\n", |
1571 | efx->port_enabled); | ||
1537 | 1572 | ||
1538 | efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); | 1573 | efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); |
1539 | } | 1574 | } |
@@ -1552,7 +1587,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) | |||
1552 | 1587 | ||
1553 | efx_stop_all(efx); | 1588 | efx_stop_all(efx); |
1554 | 1589 | ||
1555 | EFX_LOG(efx, "changing MTU to %d\n", new_mtu); | 1590 | netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); |
1556 | 1591 | ||
1557 | efx_fini_channels(efx); | 1592 | efx_fini_channels(efx); |
1558 | 1593 | ||
@@ -1578,8 +1613,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) | |||
1578 | EFX_ASSERT_RESET_SERIALISED(efx); | 1613 | EFX_ASSERT_RESET_SERIALISED(efx); |
1579 | 1614 | ||
1580 | if (!is_valid_ether_addr(new_addr)) { | 1615 | if (!is_valid_ether_addr(new_addr)) { |
1581 | EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n", | 1616 | netif_err(efx, drv, efx->net_dev, |
1582 | new_addr); | 1617 | "invalid ethernet MAC address requested: %pM\n", |
1618 | new_addr); | ||
1583 | return -EINVAL; | 1619 | return -EINVAL; |
1584 | } | 1620 | } |
1585 | 1621 | ||
@@ -1682,7 +1718,6 @@ static int efx_register_netdev(struct efx_nic *efx) | |||
1682 | net_dev->watchdog_timeo = 5 * HZ; | 1718 | net_dev->watchdog_timeo = 5 * HZ; |
1683 | net_dev->irq = efx->pci_dev->irq; | 1719 | net_dev->irq = efx->pci_dev->irq; |
1684 | net_dev->netdev_ops = &efx_netdev_ops; | 1720 | net_dev->netdev_ops = &efx_netdev_ops; |
1685 | SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); | ||
1686 | SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); | 1721 | SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); |
1687 | 1722 | ||
1688 | /* Clear MAC statistics */ | 1723 | /* Clear MAC statistics */ |
@@ -1707,7 +1742,8 @@ static int efx_register_netdev(struct efx_nic *efx) | |||
1707 | 1742 | ||
1708 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); | 1743 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); |
1709 | if (rc) { | 1744 | if (rc) { |
1710 | EFX_ERR(efx, "failed to init net dev attributes\n"); | 1745 | netif_err(efx, drv, efx->net_dev, |
1746 | "failed to init net dev attributes\n"); | ||
1711 | goto fail_registered; | 1747 | goto fail_registered; |
1712 | } | 1748 | } |
1713 | 1749 | ||
@@ -1715,7 +1751,7 @@ static int efx_register_netdev(struct efx_nic *efx) | |||
1715 | 1751 | ||
1716 | fail_locked: | 1752 | fail_locked: |
1717 | rtnl_unlock(); | 1753 | rtnl_unlock(); |
1718 | EFX_ERR(efx, "could not register net dev\n"); | 1754 | netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); |
1719 | return rc; | 1755 | return rc; |
1720 | 1756 | ||
1721 | fail_registered: | 1757 | fail_registered: |
@@ -1780,7 +1816,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) | |||
1780 | 1816 | ||
1781 | rc = efx->type->init(efx); | 1817 | rc = efx->type->init(efx); |
1782 | if (rc) { | 1818 | if (rc) { |
1783 | EFX_ERR(efx, "failed to initialise NIC\n"); | 1819 | netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); |
1784 | goto fail; | 1820 | goto fail; |
1785 | } | 1821 | } |
1786 | 1822 | ||
@@ -1792,7 +1828,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) | |||
1792 | if (rc) | 1828 | if (rc) |
1793 | goto fail; | 1829 | goto fail; |
1794 | if (efx->phy_op->reconfigure(efx)) | 1830 | if (efx->phy_op->reconfigure(efx)) |
1795 | EFX_ERR(efx, "could not restore PHY settings\n"); | 1831 | netif_err(efx, drv, efx->net_dev, |
1832 | "could not restore PHY settings\n"); | ||
1796 | } | 1833 | } |
1797 | 1834 | ||
1798 | efx->mac_op->reconfigure(efx); | 1835 | efx->mac_op->reconfigure(efx); |
@@ -1825,13 +1862,14 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) | |||
1825 | int rc, rc2; | 1862 | int rc, rc2; |
1826 | bool disabled; | 1863 | bool disabled; |
1827 | 1864 | ||
1828 | EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method)); | 1865 | netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", |
1866 | RESET_TYPE(method)); | ||
1829 | 1867 | ||
1830 | efx_reset_down(efx, method); | 1868 | efx_reset_down(efx, method); |
1831 | 1869 | ||
1832 | rc = efx->type->reset(efx, method); | 1870 | rc = efx->type->reset(efx, method); |
1833 | if (rc) { | 1871 | if (rc) { |
1834 | EFX_ERR(efx, "failed to reset hardware\n"); | 1872 | netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); |
1835 | goto out; | 1873 | goto out; |
1836 | } | 1874 | } |
1837 | 1875 | ||
@@ -1856,10 +1894,10 @@ out: | |||
1856 | 1894 | ||
1857 | if (disabled) { | 1895 | if (disabled) { |
1858 | dev_close(efx->net_dev); | 1896 | dev_close(efx->net_dev); |
1859 | EFX_ERR(efx, "has been disabled\n"); | 1897 | netif_err(efx, drv, efx->net_dev, "has been disabled\n"); |
1860 | efx->state = STATE_DISABLED; | 1898 | efx->state = STATE_DISABLED; |
1861 | } else { | 1899 | } else { |
1862 | EFX_LOG(efx, "reset complete\n"); | 1900 | netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); |
1863 | } | 1901 | } |
1864 | return rc; | 1902 | return rc; |
1865 | } | 1903 | } |
@@ -1877,7 +1915,8 @@ static void efx_reset_work(struct work_struct *data) | |||
1877 | /* If we're not RUNNING then don't reset. Leave the reset_pending | 1915 | /* If we're not RUNNING then don't reset. Leave the reset_pending |
1878 | * flag set so that efx_pci_probe_main will be retried */ | 1916 | * flag set so that efx_pci_probe_main will be retried */ |
1879 | if (efx->state != STATE_RUNNING) { | 1917 | if (efx->state != STATE_RUNNING) { |
1880 | EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); | 1918 | netif_info(efx, drv, efx->net_dev, |
1919 | "scheduled reset quenched. NIC not RUNNING\n"); | ||
1881 | return; | 1920 | return; |
1882 | } | 1921 | } |
1883 | 1922 | ||
@@ -1891,7 +1930,8 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
1891 | enum reset_type method; | 1930 | enum reset_type method; |
1892 | 1931 | ||
1893 | if (efx->reset_pending != RESET_TYPE_NONE) { | 1932 | if (efx->reset_pending != RESET_TYPE_NONE) { |
1894 | EFX_INFO(efx, "quenching already scheduled reset\n"); | 1933 | netif_info(efx, drv, efx->net_dev, |
1934 | "quenching already scheduled reset\n"); | ||
1895 | return; | 1935 | return; |
1896 | } | 1936 | } |
1897 | 1937 | ||
@@ -1915,10 +1955,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
1915 | } | 1955 | } |
1916 | 1956 | ||
1917 | if (method != type) | 1957 | if (method != type) |
1918 | EFX_LOG(efx, "scheduling %s reset for %s\n", | 1958 | netif_dbg(efx, drv, efx->net_dev, |
1919 | RESET_TYPE(method), RESET_TYPE(type)); | 1959 | "scheduling %s reset for %s\n", |
1960 | RESET_TYPE(method), RESET_TYPE(type)); | ||
1920 | else | 1961 | else |
1921 | EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method)); | 1962 | netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", |
1963 | RESET_TYPE(method)); | ||
1922 | 1964 | ||
1923 | efx->reset_pending = method; | 1965 | efx->reset_pending = method; |
1924 | 1966 | ||
@@ -2005,6 +2047,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
2005 | INIT_WORK(&efx->reset_work, efx_reset_work); | 2047 | INIT_WORK(&efx->reset_work, efx_reset_work); |
2006 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); | 2048 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); |
2007 | efx->pci_dev = pci_dev; | 2049 | efx->pci_dev = pci_dev; |
2050 | efx->msg_enable = debug; | ||
2008 | efx->state = STATE_INIT; | 2051 | efx->state = STATE_INIT; |
2009 | efx->reset_pending = RESET_TYPE_NONE; | 2052 | efx->reset_pending = RESET_TYPE_NONE; |
2010 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); | 2053 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); |
@@ -2124,7 +2167,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) | |||
2124 | efx_pci_remove_main(efx); | 2167 | efx_pci_remove_main(efx); |
2125 | 2168 | ||
2126 | efx_fini_io(efx); | 2169 | efx_fini_io(efx); |
2127 | EFX_LOG(efx, "shutdown successful\n"); | 2170 | netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); |
2128 | 2171 | ||
2129 | pci_set_drvdata(pci_dev, NULL); | 2172 | pci_set_drvdata(pci_dev, NULL); |
2130 | efx_fini_struct(efx); | 2173 | efx_fini_struct(efx); |
@@ -2149,13 +2192,15 @@ static int efx_pci_probe_main(struct efx_nic *efx) | |||
2149 | 2192 | ||
2150 | rc = efx->type->init(efx); | 2193 | rc = efx->type->init(efx); |
2151 | if (rc) { | 2194 | if (rc) { |
2152 | EFX_ERR(efx, "failed to initialise NIC\n"); | 2195 | netif_err(efx, probe, efx->net_dev, |
2196 | "failed to initialise NIC\n"); | ||
2153 | goto fail3; | 2197 | goto fail3; |
2154 | } | 2198 | } |
2155 | 2199 | ||
2156 | rc = efx_init_port(efx); | 2200 | rc = efx_init_port(efx); |
2157 | if (rc) { | 2201 | if (rc) { |
2158 | EFX_ERR(efx, "failed to initialise port\n"); | 2202 | netif_err(efx, probe, efx->net_dev, |
2203 | "failed to initialise port\n"); | ||
2159 | goto fail4; | 2204 | goto fail4; |
2160 | } | 2205 | } |
2161 | 2206 | ||
@@ -2211,11 +2256,13 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2211 | NETIF_F_HIGHDMA | NETIF_F_TSO); | 2256 | NETIF_F_HIGHDMA | NETIF_F_TSO); |
2212 | efx = netdev_priv(net_dev); | 2257 | efx = netdev_priv(net_dev); |
2213 | pci_set_drvdata(pci_dev, efx); | 2258 | pci_set_drvdata(pci_dev, efx); |
2259 | SET_NETDEV_DEV(net_dev, &pci_dev->dev); | ||
2214 | rc = efx_init_struct(efx, type, pci_dev, net_dev); | 2260 | rc = efx_init_struct(efx, type, pci_dev, net_dev); |
2215 | if (rc) | 2261 | if (rc) |
2216 | goto fail1; | 2262 | goto fail1; |
2217 | 2263 | ||
2218 | EFX_INFO(efx, "Solarflare Communications NIC detected\n"); | 2264 | netif_info(efx, probe, efx->net_dev, |
2265 | "Solarflare Communications NIC detected\n"); | ||
2219 | 2266 | ||
2220 | /* Set up basic I/O (BAR mappings etc) */ | 2267 | /* Set up basic I/O (BAR mappings etc) */ |
2221 | rc = efx_init_io(efx); | 2268 | rc = efx_init_io(efx); |
@@ -2253,7 +2300,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2253 | } | 2300 | } |
2254 | 2301 | ||
2255 | if (rc) { | 2302 | if (rc) { |
2256 | EFX_ERR(efx, "Could not reset NIC\n"); | 2303 | netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); |
2257 | goto fail4; | 2304 | goto fail4; |
2258 | } | 2305 | } |
2259 | 2306 | ||
@@ -2265,7 +2312,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2265 | if (rc) | 2312 | if (rc) |
2266 | goto fail5; | 2313 | goto fail5; |
2267 | 2314 | ||
2268 | EFX_LOG(efx, "initialisation successful\n"); | 2315 | netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); |
2269 | 2316 | ||
2270 | rtnl_lock(); | 2317 | rtnl_lock(); |
2271 | efx_mtd_probe(efx); /* allowed to fail */ | 2318 | efx_mtd_probe(efx); /* allowed to fail */ |
@@ -2281,7 +2328,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2281 | efx_fini_struct(efx); | 2328 | efx_fini_struct(efx); |
2282 | fail1: | 2329 | fail1: |
2283 | WARN_ON(rc > 0); | 2330 | WARN_ON(rc > 0); |
2284 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); | 2331 | netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); |
2285 | free_netdev(net_dev); | 2332 | free_netdev(net_dev); |
2286 | return rc; | 2333 | return rc; |
2287 | } | 2334 | } |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index e1e448887dfc..060dc952a0fd 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -106,8 +106,9 @@ extern unsigned int efx_monitor_interval; | |||
106 | 106 | ||
107 | static inline void efx_schedule_channel(struct efx_channel *channel) | 107 | static inline void efx_schedule_channel(struct efx_channel *channel) |
108 | { | 108 | { |
109 | EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", | 109 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
110 | channel->channel, raw_smp_processor_id()); | 110 | "channel %d scheduling NAPI poll on CPU%d\n", |
111 | channel->channel, raw_smp_processor_id()); | ||
111 | channel->work_pending = true; | 112 | channel->work_pending = true; |
112 | 113 | ||
113 | napi_schedule(&channel->napi_str); | 114 | napi_schedule(&channel->napi_str); |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 81b7f39ca5fb..27230a992893 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -218,8 +218,8 @@ int efx_ethtool_set_settings(struct net_device *net_dev, | |||
218 | 218 | ||
219 | /* GMAC does not support 1000Mbps HD */ | 219 | /* GMAC does not support 1000Mbps HD */ |
220 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { | 220 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { |
221 | EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" | 221 | netif_dbg(efx, drv, efx->net_dev, |
222 | " setting\n"); | 222 | "rejecting unsupported 1000Mbps HD setting\n"); |
223 | return -EINVAL; | 223 | return -EINVAL; |
224 | } | 224 | } |
225 | 225 | ||
@@ -256,6 +256,18 @@ static void efx_ethtool_get_regs(struct net_device *net_dev, | |||
256 | efx_nic_get_regs(efx, buf); | 256 | efx_nic_get_regs(efx, buf); |
257 | } | 257 | } |
258 | 258 | ||
259 | static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) | ||
260 | { | ||
261 | struct efx_nic *efx = netdev_priv(net_dev); | ||
262 | return efx->msg_enable; | ||
263 | } | ||
264 | |||
265 | static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) | ||
266 | { | ||
267 | struct efx_nic *efx = netdev_priv(net_dev); | ||
268 | efx->msg_enable = msg_enable; | ||
269 | } | ||
270 | |||
259 | /** | 271 | /** |
260 | * efx_fill_test - fill in an individual self-test entry | 272 | * efx_fill_test - fill in an individual self-test entry |
261 | * @test_index: Index of the test | 273 | * @test_index: Index of the test |
@@ -553,7 +565,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
553 | if (!already_up) { | 565 | if (!already_up) { |
554 | rc = dev_open(efx->net_dev); | 566 | rc = dev_open(efx->net_dev); |
555 | if (rc) { | 567 | if (rc) { |
556 | EFX_ERR(efx, "failed opening device.\n"); | 568 | netif_err(efx, drv, efx->net_dev, |
569 | "failed opening device.\n"); | ||
557 | goto fail2; | 570 | goto fail2; |
558 | } | 571 | } |
559 | } | 572 | } |
@@ -565,9 +578,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
565 | if (!already_up) | 578 | if (!already_up) |
566 | dev_close(efx->net_dev); | 579 | dev_close(efx->net_dev); |
567 | 580 | ||
568 | EFX_LOG(efx, "%s %sline self-tests\n", | 581 | netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", |
569 | rc == 0 ? "passed" : "failed", | 582 | rc == 0 ? "passed" : "failed", |
570 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 583 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
571 | 584 | ||
572 | fail2: | 585 | fail2: |
573 | fail1: | 586 | fail1: |
@@ -693,8 +706,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, | |||
693 | return -EOPNOTSUPP; | 706 | return -EOPNOTSUPP; |
694 | 707 | ||
695 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { | 708 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { |
696 | EFX_ERR(efx, "invalid coalescing setting. " | 709 | netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. " |
697 | "Only rx/tx_coalesce_usecs_irq are supported\n"); | 710 | "Only rx/tx_coalesce_usecs_irq are supported\n"); |
698 | return -EOPNOTSUPP; | 711 | return -EOPNOTSUPP; |
699 | } | 712 | } |
700 | 713 | ||
@@ -706,8 +719,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, | |||
706 | efx_for_each_tx_queue(tx_queue, efx) { | 719 | efx_for_each_tx_queue(tx_queue, efx) { |
707 | if ((tx_queue->channel->channel < efx->n_rx_channels) && | 720 | if ((tx_queue->channel->channel < efx->n_rx_channels) && |
708 | tx_usecs) { | 721 | tx_usecs) { |
709 | EFX_ERR(efx, "Channel is shared. " | 722 | netif_err(efx, drv, efx->net_dev, "Channel is shared. " |
710 | "Only RX coalescing may be set\n"); | 723 | "Only RX coalescing may be set\n"); |
711 | return -EOPNOTSUPP; | 724 | return -EOPNOTSUPP; |
712 | } | 725 | } |
713 | } | 726 | } |
@@ -735,13 +748,15 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | |||
735 | (pause->autoneg ? EFX_FC_AUTO : 0)); | 748 | (pause->autoneg ? EFX_FC_AUTO : 0)); |
736 | 749 | ||
737 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { | 750 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { |
738 | EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); | 751 | netif_dbg(efx, drv, efx->net_dev, |
752 | "Flow control unsupported: tx ON rx OFF\n"); | ||
739 | rc = -EINVAL; | 753 | rc = -EINVAL; |
740 | goto out; | 754 | goto out; |
741 | } | 755 | } |
742 | 756 | ||
743 | if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { | 757 | if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { |
744 | EFX_LOG(efx, "Autonegotiation is disabled\n"); | 758 | netif_dbg(efx, drv, efx->net_dev, |
759 | "Autonegotiation is disabled\n"); | ||
745 | rc = -EINVAL; | 760 | rc = -EINVAL; |
746 | goto out; | 761 | goto out; |
747 | } | 762 | } |
@@ -772,8 +787,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | |||
772 | (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { | 787 | (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { |
773 | rc = efx->phy_op->reconfigure(efx); | 788 | rc = efx->phy_op->reconfigure(efx); |
774 | if (rc) { | 789 | if (rc) { |
775 | EFX_ERR(efx, "Unable to advertise requested flow " | 790 | netif_err(efx, drv, efx->net_dev, |
776 | "control setting\n"); | 791 | "Unable to advertise requested flow " |
792 | "control setting\n"); | ||
777 | goto out; | 793 | goto out; |
778 | } | 794 | } |
779 | } | 795 | } |
@@ -850,6 +866,8 @@ const struct ethtool_ops efx_ethtool_ops = { | |||
850 | .get_drvinfo = efx_ethtool_get_drvinfo, | 866 | .get_drvinfo = efx_ethtool_get_drvinfo, |
851 | .get_regs_len = efx_ethtool_get_regs_len, | 867 | .get_regs_len = efx_ethtool_get_regs_len, |
852 | .get_regs = efx_ethtool_get_regs, | 868 | .get_regs = efx_ethtool_get_regs, |
869 | .get_msglevel = efx_ethtool_get_msglevel, | ||
870 | .set_msglevel = efx_ethtool_set_msglevel, | ||
853 | .nway_reset = efx_ethtool_nway_reset, | 871 | .nway_reset = efx_ethtool_nway_reset, |
854 | .get_link = efx_ethtool_get_link, | 872 | .get_link = efx_ethtool_get_link, |
855 | .get_eeprom_len = efx_ethtool_get_eeprom_len, | 873 | .get_eeprom_len = efx_ethtool_get_eeprom_len, |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 8558865ff380..92d38ede6bef 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -167,13 +167,15 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
167 | * exit without having touched the hardware. | 167 | * exit without having touched the hardware. |
168 | */ | 168 | */ |
169 | if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { | 169 | if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { |
170 | EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq, | 170 | netif_vdbg(efx, intr, efx->net_dev, |
171 | raw_smp_processor_id()); | 171 | "IRQ %d on CPU %d not for me\n", irq, |
172 | raw_smp_processor_id()); | ||
172 | return IRQ_NONE; | 173 | return IRQ_NONE; |
173 | } | 174 | } |
174 | efx->last_irq_cpu = raw_smp_processor_id(); | 175 | efx->last_irq_cpu = raw_smp_processor_id(); |
175 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 176 | netif_vdbg(efx, intr, efx->net_dev, |
176 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 177 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
178 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
177 | 179 | ||
178 | /* Determine interrupting queues, clear interrupt status | 180 | /* Determine interrupting queues, clear interrupt status |
179 | * register and acknowledge the device interrupt. | 181 | * register and acknowledge the device interrupt. |
@@ -239,7 +241,8 @@ static int falcon_spi_wait(struct efx_nic *efx) | |||
239 | if (!falcon_spi_poll(efx)) | 241 | if (!falcon_spi_poll(efx)) |
240 | return 0; | 242 | return 0; |
241 | if (time_after_eq(jiffies, timeout)) { | 243 | if (time_after_eq(jiffies, timeout)) { |
242 | EFX_ERR(efx, "timed out waiting for SPI\n"); | 244 | netif_err(efx, hw, efx->net_dev, |
245 | "timed out waiting for SPI\n"); | ||
243 | return -ETIMEDOUT; | 246 | return -ETIMEDOUT; |
244 | } | 247 | } |
245 | schedule_timeout_uninterruptible(1); | 248 | schedule_timeout_uninterruptible(1); |
@@ -333,9 +336,10 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) | |||
333 | if (!(status & SPI_STATUS_NRDY)) | 336 | if (!(status & SPI_STATUS_NRDY)) |
334 | return 0; | 337 | return 0; |
335 | if (time_after_eq(jiffies, timeout)) { | 338 | if (time_after_eq(jiffies, timeout)) { |
336 | EFX_ERR(efx, "SPI write timeout on device %d" | 339 | netif_err(efx, hw, efx->net_dev, |
337 | " last status=0x%02x\n", | 340 | "SPI write timeout on device %d" |
338 | spi->device_id, status); | 341 | " last status=0x%02x\n", |
342 | spi->device_id, status); | ||
339 | return -ETIMEDOUT; | 343 | return -ETIMEDOUT; |
340 | } | 344 | } |
341 | schedule_timeout_uninterruptible(1); | 345 | schedule_timeout_uninterruptible(1); |
@@ -469,7 +473,8 @@ static void falcon_reset_macs(struct efx_nic *efx) | |||
469 | udelay(10); | 473 | udelay(10); |
470 | } | 474 | } |
471 | 475 | ||
472 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); | 476 | netif_err(efx, hw, efx->net_dev, |
477 | "timed out waiting for XMAC core reset\n"); | ||
473 | } | 478 | } |
474 | } | 479 | } |
475 | 480 | ||
@@ -492,12 +497,13 @@ static void falcon_reset_macs(struct efx_nic *efx) | |||
492 | if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && | 497 | if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && |
493 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && | 498 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && |
494 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { | 499 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { |
495 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", | 500 | netif_dbg(efx, hw, efx->net_dev, |
496 | count); | 501 | "Completed MAC reset after %d loops\n", |
502 | count); | ||
497 | break; | 503 | break; |
498 | } | 504 | } |
499 | if (count > 20) { | 505 | if (count > 20) { |
500 | EFX_ERR(efx, "MAC reset failed\n"); | 506 | netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); |
501 | break; | 507 | break; |
502 | } | 508 | } |
503 | count++; | 509 | count++; |
@@ -627,7 +633,8 @@ static void falcon_stats_complete(struct efx_nic *efx) | |||
627 | rmb(); /* read the done flag before the stats */ | 633 | rmb(); /* read the done flag before the stats */ |
628 | efx->mac_op->update_stats(efx); | 634 | efx->mac_op->update_stats(efx); |
629 | } else { | 635 | } else { |
630 | EFX_ERR(efx, "timed out waiting for statistics\n"); | 636 | netif_err(efx, hw, efx->net_dev, |
637 | "timed out waiting for statistics\n"); | ||
631 | } | 638 | } |
632 | } | 639 | } |
633 | 640 | ||
@@ -717,16 +724,17 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
717 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { | 724 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { |
718 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || | 725 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || |
719 | EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { | 726 | EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { |
720 | EFX_ERR(efx, "error from GMII access " | 727 | netif_err(efx, hw, efx->net_dev, |
721 | EFX_OWORD_FMT"\n", | 728 | "error from GMII access " |
722 | EFX_OWORD_VAL(md_stat)); | 729 | EFX_OWORD_FMT"\n", |
730 | EFX_OWORD_VAL(md_stat)); | ||
723 | return -EIO; | 731 | return -EIO; |
724 | } | 732 | } |
725 | return 0; | 733 | return 0; |
726 | } | 734 | } |
727 | udelay(10); | 735 | udelay(10); |
728 | } | 736 | } |
729 | EFX_ERR(efx, "timed out waiting for GMII\n"); | 737 | netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); |
730 | return -ETIMEDOUT; | 738 | return -ETIMEDOUT; |
731 | } | 739 | } |
732 | 740 | ||
@@ -738,7 +746,8 @@ static int falcon_mdio_write(struct net_device *net_dev, | |||
738 | efx_oword_t reg; | 746 | efx_oword_t reg; |
739 | int rc; | 747 | int rc; |
740 | 748 | ||
741 | EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", | 749 | netif_vdbg(efx, hw, efx->net_dev, |
750 | "writing MDIO %d register %d.%d with 0x%04x\n", | ||
742 | prtad, devad, addr, value); | 751 | prtad, devad, addr, value); |
743 | 752 | ||
744 | mutex_lock(&efx->mdio_lock); | 753 | mutex_lock(&efx->mdio_lock); |
@@ -812,8 +821,9 @@ static int falcon_mdio_read(struct net_device *net_dev, | |||
812 | if (rc == 0) { | 821 | if (rc == 0) { |
813 | efx_reado(efx, ®, FR_AB_MD_RXD); | 822 | efx_reado(efx, ®, FR_AB_MD_RXD); |
814 | rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); | 823 | rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); |
815 | EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", | 824 | netif_vdbg(efx, hw, efx->net_dev, |
816 | prtad, devad, addr, rc); | 825 | "read from MDIO %d register %d.%d, got %04x\n", |
826 | prtad, devad, addr, rc); | ||
817 | } else { | 827 | } else { |
818 | /* Abort the read operation */ | 828 | /* Abort the read operation */ |
819 | EFX_POPULATE_OWORD_2(reg, | 829 | EFX_POPULATE_OWORD_2(reg, |
@@ -821,8 +831,9 @@ static int falcon_mdio_read(struct net_device *net_dev, | |||
821 | FRF_AB_MD_GC, 1); | 831 | FRF_AB_MD_GC, 1); |
822 | efx_writeo(efx, ®, FR_AB_MD_CS); | 832 | efx_writeo(efx, ®, FR_AB_MD_CS); |
823 | 833 | ||
824 | EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", | 834 | netif_dbg(efx, hw, efx->net_dev, |
825 | prtad, devad, addr, rc); | 835 | "read from MDIO %d register %d.%d, got error %d\n", |
836 | prtad, devad, addr, rc); | ||
826 | } | 837 | } |
827 | 838 | ||
828 | out: | 839 | out: |
@@ -873,7 +884,8 @@ static void falcon_switch_mac(struct efx_nic *efx) | |||
873 | 884 | ||
874 | falcon_clock_mac(efx); | 885 | falcon_clock_mac(efx); |
875 | 886 | ||
876 | EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); | 887 | netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n", |
888 | EFX_IS10G(efx) ? 'X' : 'G'); | ||
877 | /* Not all macs support a mac-level link state */ | 889 | /* Not all macs support a mac-level link state */ |
878 | efx->xmac_poll_required = false; | 890 | efx->xmac_poll_required = false; |
879 | falcon_reset_macs(efx); | 891 | falcon_reset_macs(efx); |
@@ -897,8 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx) | |||
897 | efx->phy_op = &falcon_qt202x_phy_ops; | 909 | efx->phy_op = &falcon_qt202x_phy_ops; |
898 | break; | 910 | break; |
899 | default: | 911 | default: |
900 | EFX_ERR(efx, "Unknown PHY type %d\n", | 912 | netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", |
901 | efx->phy_type); | 913 | efx->phy_type); |
902 | return -ENODEV; | 914 | return -ENODEV; |
903 | } | 915 | } |
904 | 916 | ||
@@ -926,10 +938,11 @@ static int falcon_probe_port(struct efx_nic *efx) | |||
926 | FALCON_MAC_STATS_SIZE); | 938 | FALCON_MAC_STATS_SIZE); |
927 | if (rc) | 939 | if (rc) |
928 | return rc; | 940 | return rc; |
929 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", | 941 | netif_dbg(efx, probe, efx->net_dev, |
930 | (u64)efx->stats_buffer.dma_addr, | 942 | "stats buffer at %llx (virt %p phys %llx)\n", |
931 | efx->stats_buffer.addr, | 943 | (u64)efx->stats_buffer.dma_addr, |
932 | (u64)virt_to_phys(efx->stats_buffer.addr)); | 944 | efx->stats_buffer.addr, |
945 | (u64)virt_to_phys(efx->stats_buffer.addr)); | ||
933 | 946 | ||
934 | return 0; | 947 | return 0; |
935 | } | 948 | } |
@@ -969,8 +982,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
969 | rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); | 982 | rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); |
970 | mutex_unlock(&efx->spi_lock); | 983 | mutex_unlock(&efx->spi_lock); |
971 | if (rc) { | 984 | if (rc) { |
972 | EFX_ERR(efx, "Failed to read %s\n", | 985 | netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", |
973 | efx->spi_flash ? "flash" : "EEPROM"); | 986 | efx->spi_flash ? "flash" : "EEPROM"); |
974 | rc = -EIO; | 987 | rc = -EIO; |
975 | goto out; | 988 | goto out; |
976 | } | 989 | } |
@@ -980,11 +993,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
980 | 993 | ||
981 | rc = -EINVAL; | 994 | rc = -EINVAL; |
982 | if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { | 995 | if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { |
983 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); | 996 | netif_err(efx, hw, efx->net_dev, |
997 | "NVRAM bad magic 0x%x\n", magic_num); | ||
984 | goto out; | 998 | goto out; |
985 | } | 999 | } |
986 | if (struct_ver < 2) { | 1000 | if (struct_ver < 2) { |
987 | EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver); | 1001 | netif_err(efx, hw, efx->net_dev, |
1002 | "NVRAM has ancient version 0x%x\n", struct_ver); | ||
988 | goto out; | 1003 | goto out; |
989 | } else if (struct_ver < 4) { | 1004 | } else if (struct_ver < 4) { |
990 | word = &nvconfig->board_magic_num; | 1005 | word = &nvconfig->board_magic_num; |
@@ -997,7 +1012,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
997 | csum += le16_to_cpu(*word); | 1012 | csum += le16_to_cpu(*word); |
998 | 1013 | ||
999 | if (~csum & 0xffff) { | 1014 | if (~csum & 0xffff) { |
1000 | EFX_ERR(efx, "NVRAM has incorrect checksum\n"); | 1015 | netif_err(efx, hw, efx->net_dev, |
1016 | "NVRAM has incorrect checksum\n"); | ||
1001 | goto out; | 1017 | goto out; |
1002 | } | 1018 | } |
1003 | 1019 | ||
@@ -1075,22 +1091,25 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1075 | efx_oword_t glb_ctl_reg_ker; | 1091 | efx_oword_t glb_ctl_reg_ker; |
1076 | int rc; | 1092 | int rc; |
1077 | 1093 | ||
1078 | EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method)); | 1094 | netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", |
1095 | RESET_TYPE(method)); | ||
1079 | 1096 | ||
1080 | /* Initiate device reset */ | 1097 | /* Initiate device reset */ |
1081 | if (method == RESET_TYPE_WORLD) { | 1098 | if (method == RESET_TYPE_WORLD) { |
1082 | rc = pci_save_state(efx->pci_dev); | 1099 | rc = pci_save_state(efx->pci_dev); |
1083 | if (rc) { | 1100 | if (rc) { |
1084 | EFX_ERR(efx, "failed to backup PCI state of primary " | 1101 | netif_err(efx, drv, efx->net_dev, |
1085 | "function prior to hardware reset\n"); | 1102 | "failed to backup PCI state of primary " |
1103 | "function prior to hardware reset\n"); | ||
1086 | goto fail1; | 1104 | goto fail1; |
1087 | } | 1105 | } |
1088 | if (efx_nic_is_dual_func(efx)) { | 1106 | if (efx_nic_is_dual_func(efx)) { |
1089 | rc = pci_save_state(nic_data->pci_dev2); | 1107 | rc = pci_save_state(nic_data->pci_dev2); |
1090 | if (rc) { | 1108 | if (rc) { |
1091 | EFX_ERR(efx, "failed to backup PCI state of " | 1109 | netif_err(efx, drv, efx->net_dev, |
1092 | "secondary function prior to " | 1110 | "failed to backup PCI state of " |
1093 | "hardware reset\n"); | 1111 | "secondary function prior to " |
1112 | "hardware reset\n"); | ||
1094 | goto fail2; | 1113 | goto fail2; |
1095 | } | 1114 | } |
1096 | } | 1115 | } |
@@ -1115,7 +1134,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1115 | } | 1134 | } |
1116 | efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); | 1135 | efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); |
1117 | 1136 | ||
1118 | EFX_LOG(efx, "waiting for hardware reset\n"); | 1137 | netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); |
1119 | schedule_timeout_uninterruptible(HZ / 20); | 1138 | schedule_timeout_uninterruptible(HZ / 20); |
1120 | 1139 | ||
1121 | /* Restore PCI configuration if needed */ | 1140 | /* Restore PCI configuration if needed */ |
@@ -1123,28 +1142,32 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1123 | if (efx_nic_is_dual_func(efx)) { | 1142 | if (efx_nic_is_dual_func(efx)) { |
1124 | rc = pci_restore_state(nic_data->pci_dev2); | 1143 | rc = pci_restore_state(nic_data->pci_dev2); |
1125 | if (rc) { | 1144 | if (rc) { |
1126 | EFX_ERR(efx, "failed to restore PCI config for " | 1145 | netif_err(efx, drv, efx->net_dev, |
1127 | "the secondary function\n"); | 1146 | "failed to restore PCI config for " |
1147 | "the secondary function\n"); | ||
1128 | goto fail3; | 1148 | goto fail3; |
1129 | } | 1149 | } |
1130 | } | 1150 | } |
1131 | rc = pci_restore_state(efx->pci_dev); | 1151 | rc = pci_restore_state(efx->pci_dev); |
1132 | if (rc) { | 1152 | if (rc) { |
1133 | EFX_ERR(efx, "failed to restore PCI config for the " | 1153 | netif_err(efx, drv, efx->net_dev, |
1134 | "primary function\n"); | 1154 | "failed to restore PCI config for the " |
1155 | "primary function\n"); | ||
1135 | goto fail4; | 1156 | goto fail4; |
1136 | } | 1157 | } |
1137 | EFX_LOG(efx, "successfully restored PCI config\n"); | 1158 | netif_dbg(efx, drv, efx->net_dev, |
1159 | "successfully restored PCI config\n"); | ||
1138 | } | 1160 | } |
1139 | 1161 | ||
1140 | /* Assert that reset complete */ | 1162 | /* Assert that reset complete */ |
1141 | efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); | 1163 | efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); |
1142 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { | 1164 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { |
1143 | rc = -ETIMEDOUT; | 1165 | rc = -ETIMEDOUT; |
1144 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); | 1166 | netif_err(efx, hw, efx->net_dev, |
1167 | "timed out waiting for hardware reset\n"); | ||
1145 | goto fail5; | 1168 | goto fail5; |
1146 | } | 1169 | } |
1147 | EFX_LOG(efx, "hardware reset complete\n"); | 1170 | netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); |
1148 | 1171 | ||
1149 | return 0; | 1172 | return 0; |
1150 | 1173 | ||
@@ -1167,8 +1190,9 @@ static void falcon_monitor(struct efx_nic *efx) | |||
1167 | 1190 | ||
1168 | rc = falcon_board(efx)->type->monitor(efx); | 1191 | rc = falcon_board(efx)->type->monitor(efx); |
1169 | if (rc) { | 1192 | if (rc) { |
1170 | EFX_ERR(efx, "Board sensor %s; shutting down PHY\n", | 1193 | netif_err(efx, hw, efx->net_dev, |
1171 | (rc == -ERANGE) ? "reported fault" : "failed"); | 1194 | "Board sensor %s; shutting down PHY\n", |
1195 | (rc == -ERANGE) ? "reported fault" : "failed"); | ||
1172 | efx->phy_mode |= PHY_MODE_LOW_POWER; | 1196 | efx->phy_mode |= PHY_MODE_LOW_POWER; |
1173 | rc = __efx_reconfigure_port(efx); | 1197 | rc = __efx_reconfigure_port(efx); |
1174 | WARN_ON(rc); | 1198 | WARN_ON(rc); |
@@ -1219,7 +1243,8 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
1219 | /* Wait for SRAM reset to complete */ | 1243 | /* Wait for SRAM reset to complete */ |
1220 | count = 0; | 1244 | count = 0; |
1221 | do { | 1245 | do { |
1222 | EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count); | 1246 | netif_dbg(efx, hw, efx->net_dev, |
1247 | "waiting for SRAM reset (attempt %d)...\n", count); | ||
1223 | 1248 | ||
1224 | /* SRAM reset is slow; expect around 16ms */ | 1249 | /* SRAM reset is slow; expect around 16ms */ |
1225 | schedule_timeout_uninterruptible(HZ / 50); | 1250 | schedule_timeout_uninterruptible(HZ / 50); |
@@ -1227,13 +1252,14 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
1227 | /* Check for reset complete */ | 1252 | /* Check for reset complete */ |
1228 | efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); | 1253 | efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); |
1229 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { | 1254 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { |
1230 | EFX_LOG(efx, "SRAM reset complete\n"); | 1255 | netif_dbg(efx, hw, efx->net_dev, |
1256 | "SRAM reset complete\n"); | ||
1231 | 1257 | ||
1232 | return 0; | 1258 | return 0; |
1233 | } | 1259 | } |
1234 | } while (++count < 20); /* wait upto 0.4 sec */ | 1260 | } while (++count < 20); /* wait upto 0.4 sec */ |
1235 | 1261 | ||
1236 | EFX_ERR(efx, "timed out waiting for SRAM reset\n"); | 1262 | netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); |
1237 | return -ETIMEDOUT; | 1263 | return -ETIMEDOUT; |
1238 | } | 1264 | } |
1239 | 1265 | ||
@@ -1292,7 +1318,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
1292 | 1318 | ||
1293 | rc = falcon_read_nvram(efx, nvconfig); | 1319 | rc = falcon_read_nvram(efx, nvconfig); |
1294 | if (rc == -EINVAL) { | 1320 | if (rc == -EINVAL) { |
1295 | EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); | 1321 | netif_err(efx, probe, efx->net_dev, |
1322 | "NVRAM is invalid therefore using defaults\n"); | ||
1296 | efx->phy_type = PHY_TYPE_NONE; | 1323 | efx->phy_type = PHY_TYPE_NONE; |
1297 | efx->mdio.prtad = MDIO_PRTAD_NONE; | 1324 | efx->mdio.prtad = MDIO_PRTAD_NONE; |
1298 | board_rev = 0; | 1325 | board_rev = 0; |
@@ -1326,7 +1353,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
1326 | /* Read the MAC addresses */ | 1353 | /* Read the MAC addresses */ |
1327 | memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); | 1354 | memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); |
1328 | 1355 | ||
1329 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); | 1356 | netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", |
1357 | efx->phy_type, efx->mdio.prtad); | ||
1330 | 1358 | ||
1331 | rc = falcon_probe_board(efx, board_rev); | 1359 | rc = falcon_probe_board(efx, board_rev); |
1332 | if (rc) | 1360 | if (rc) |
@@ -1355,14 +1383,16 @@ static void falcon_probe_spi_devices(struct efx_nic *efx) | |||
1355 | if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { | 1383 | if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { |
1356 | boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? | 1384 | boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? |
1357 | FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); | 1385 | FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); |
1358 | EFX_LOG(efx, "Booted from %s\n", | 1386 | netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", |
1359 | boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); | 1387 | boot_dev == FFE_AB_SPI_DEVICE_FLASH ? |
1388 | "flash" : "EEPROM"); | ||
1360 | } else { | 1389 | } else { |
1361 | /* Disable VPD and set clock dividers to safe | 1390 | /* Disable VPD and set clock dividers to safe |
1362 | * values for initial programming. */ | 1391 | * values for initial programming. */ |
1363 | boot_dev = -1; | 1392 | boot_dev = -1; |
1364 | EFX_LOG(efx, "Booted from internal ASIC settings;" | 1393 | netif_dbg(efx, probe, efx->net_dev, |
1365 | " setting SPI config\n"); | 1394 | "Booted from internal ASIC settings;" |
1395 | " setting SPI config\n"); | ||
1366 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, | 1396 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, |
1367 | /* 125 MHz / 7 ~= 20 MHz */ | 1397 | /* 125 MHz / 7 ~= 20 MHz */ |
1368 | FRF_AB_EE_SF_CLOCK_DIV, 7, | 1398 | FRF_AB_EE_SF_CLOCK_DIV, 7, |
@@ -1396,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1396 | rc = -ENODEV; | 1426 | rc = -ENODEV; |
1397 | 1427 | ||
1398 | if (efx_nic_fpga_ver(efx) != 0) { | 1428 | if (efx_nic_fpga_ver(efx) != 0) { |
1399 | EFX_ERR(efx, "Falcon FPGA not supported\n"); | 1429 | netif_err(efx, probe, efx->net_dev, |
1430 | "Falcon FPGA not supported\n"); | ||
1400 | goto fail1; | 1431 | goto fail1; |
1401 | } | 1432 | } |
1402 | 1433 | ||
@@ -1406,16 +1437,19 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1406 | u8 pci_rev = efx->pci_dev->revision; | 1437 | u8 pci_rev = efx->pci_dev->revision; |
1407 | 1438 | ||
1408 | if ((pci_rev == 0xff) || (pci_rev == 0)) { | 1439 | if ((pci_rev == 0xff) || (pci_rev == 0)) { |
1409 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | 1440 | netif_err(efx, probe, efx->net_dev, |
1441 | "Falcon rev A0 not supported\n"); | ||
1410 | goto fail1; | 1442 | goto fail1; |
1411 | } | 1443 | } |
1412 | efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); | 1444 | efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); |
1413 | if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { | 1445 | if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { |
1414 | EFX_ERR(efx, "Falcon rev A1 1G not supported\n"); | 1446 | netif_err(efx, probe, efx->net_dev, |
1447 | "Falcon rev A1 1G not supported\n"); | ||
1415 | goto fail1; | 1448 | goto fail1; |
1416 | } | 1449 | } |
1417 | if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { | 1450 | if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { |
1418 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); | 1451 | netif_err(efx, probe, efx->net_dev, |
1452 | "Falcon rev A1 PCI-X not supported\n"); | ||
1419 | goto fail1; | 1453 | goto fail1; |
1420 | } | 1454 | } |
1421 | 1455 | ||
@@ -1429,7 +1463,8 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1429 | } | 1463 | } |
1430 | } | 1464 | } |
1431 | if (!nic_data->pci_dev2) { | 1465 | if (!nic_data->pci_dev2) { |
1432 | EFX_ERR(efx, "failed to find secondary function\n"); | 1466 | netif_err(efx, probe, efx->net_dev, |
1467 | "failed to find secondary function\n"); | ||
1433 | rc = -ENODEV; | 1468 | rc = -ENODEV; |
1434 | goto fail2; | 1469 | goto fail2; |
1435 | } | 1470 | } |
@@ -1438,7 +1473,7 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1438 | /* Now we can reset the NIC */ | 1473 | /* Now we can reset the NIC */ |
1439 | rc = falcon_reset_hw(efx, RESET_TYPE_ALL); | 1474 | rc = falcon_reset_hw(efx, RESET_TYPE_ALL); |
1440 | if (rc) { | 1475 | if (rc) { |
1441 | EFX_ERR(efx, "failed to reset NIC\n"); | 1476 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); |
1442 | goto fail3; | 1477 | goto fail3; |
1443 | } | 1478 | } |
1444 | 1479 | ||
@@ -1448,9 +1483,11 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1448 | goto fail4; | 1483 | goto fail4; |
1449 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 1484 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
1450 | 1485 | ||
1451 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", | 1486 | netif_dbg(efx, probe, efx->net_dev, |
1452 | (u64)efx->irq_status.dma_addr, | 1487 | "INT_KER at %llx (virt %p phys %llx)\n", |
1453 | efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); | 1488 | (u64)efx->irq_status.dma_addr, |
1489 | efx->irq_status.addr, | ||
1490 | (u64)virt_to_phys(efx->irq_status.addr)); | ||
1454 | 1491 | ||
1455 | falcon_probe_spi_devices(efx); | 1492 | falcon_probe_spi_devices(efx); |
1456 | 1493 | ||
@@ -1474,7 +1511,8 @@ static int falcon_probe_nic(struct efx_nic *efx) | |||
1474 | 1511 | ||
1475 | rc = falcon_board(efx)->type->init(efx); | 1512 | rc = falcon_board(efx)->type->init(efx); |
1476 | if (rc) { | 1513 | if (rc) { |
1477 | EFX_ERR(efx, "failed to initialise board\n"); | 1514 | netif_err(efx, probe, efx->net_dev, |
1515 | "failed to initialise board\n"); | ||
1478 | goto fail6; | 1516 | goto fail6; |
1479 | } | 1517 | } |
1480 | 1518 | ||
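
The falcon.c hunks above all follow the same shape: an EFX_ERR() or EFX_LOG() call becomes netif_err() or netif_dbg() with an extra message-class argument (probe, hw or drv) and the net_device pointer. As a rough guide to how that extra argument works, the sketch below shows the gating idea behind the generic <linux/netdevice.h> helpers; it is an illustration only, with an invented helper name, not the kernel's exact macro expansion.

/* Illustrative gating of netif_err()/netif_dbg(): the message-class
 * argument selects a NETIF_MSG_* bit that is tested against the
 * per-device msg_enable word before anything is printed.
 */
#include <linux/netdevice.h>
#include "net_driver.h"			/* struct efx_nic (driver-private) */

static void efx_example_log(struct efx_nic *efx)	/* hypothetical helper */
{
	/* netif_err(efx, hw, efx->net_dev, "...") behaves roughly as: */
	if (efx->msg_enable & NETIF_MSG_HW)
		netdev_err(efx->net_dev, "timed out waiting for SRAM reset\n");

	/* netif_dbg() additionally compiles away unless DEBUG or dynamic
	 * debug is enabled, so debug-level chatter is free in production
	 * builds:
	 */
	if (efx->msg_enable & NETIF_MSG_PROBE)
		netdev_dbg(efx->net_dev, "PHY is %d phy_id %d\n",
			   efx->phy_type, efx->mdio.prtad);
}
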
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index c7a933a3292e..92b35e3d1100 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -106,12 +106,12 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask) | |||
106 | alarms1 &= mask; | 106 | alarms1 &= mask; |
107 | alarms2 &= mask >> 8; | 107 | alarms2 &= mask >> 8; |
108 | if (alarms1 || alarms2) { | 108 | if (alarms1 || alarms2) { |
109 | EFX_ERR(efx, | 109 | netif_err(efx, hw, efx->net_dev, |
110 | "LM87 detected a hardware failure (status %02x:%02x)" | 110 | "LM87 detected a hardware failure (status %02x:%02x)" |
111 | "%s%s\n", | 111 | "%s%s\n", |
112 | alarms1, alarms2, | 112 | alarms1, alarms2, |
113 | (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", | 113 | (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", |
114 | (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); | 114 | (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); |
115 | return -ERANGE; | 115 | return -ERANGE; |
116 | } | 116 | } |
117 | 117 | ||
@@ -243,7 +243,7 @@ static int sfe4001_poweron(struct efx_nic *efx) | |||
243 | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | | 243 | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | |
244 | (0 << P0_EN_1V0X_LBN)); | 244 | (0 << P0_EN_1V0X_LBN)); |
245 | if (rc != out) { | 245 | if (rc != out) { |
246 | EFX_INFO(efx, "power-cycling PHY\n"); | 246 | netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n"); |
247 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | 247 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); |
248 | if (rc) | 248 | if (rc) |
249 | goto fail_on; | 249 | goto fail_on; |
@@ -269,7 +269,8 @@ static int sfe4001_poweron(struct efx_nic *efx) | |||
269 | if (rc) | 269 | if (rc) |
270 | goto fail_on; | 270 | goto fail_on; |
271 | 271 | ||
272 | EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i); | 272 | netif_info(efx, hw, efx->net_dev, |
273 | "waiting for DSP boot (attempt %d)...\n", i); | ||
273 | 274 | ||
274 | /* In flash config mode, DSP does not turn on AFE, so | 275 | /* In flash config mode, DSP does not turn on AFE, so |
275 | * just wait 1 second. | 276 | * just wait 1 second. |
@@ -291,7 +292,7 @@ static int sfe4001_poweron(struct efx_nic *efx) | |||
291 | } | 292 | } |
292 | } | 293 | } |
293 | 294 | ||
294 | EFX_INFO(efx, "timed out waiting for DSP boot\n"); | 295 | netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n"); |
295 | rc = -ETIMEDOUT; | 296 | rc = -ETIMEDOUT; |
296 | fail_on: | 297 | fail_on: |
297 | sfe4001_poweroff(efx); | 298 | sfe4001_poweroff(efx); |
@@ -377,7 +378,7 @@ static void sfe4001_fini(struct efx_nic *efx) | |||
377 | { | 378 | { |
378 | struct falcon_board *board = falcon_board(efx); | 379 | struct falcon_board *board = falcon_board(efx); |
379 | 380 | ||
380 | EFX_INFO(efx, "%s\n", __func__); | 381 | netif_info(efx, drv, efx->net_dev, "%s\n", __func__); |
381 | 382 | ||
382 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | 383 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); |
383 | sfe4001_poweroff(efx); | 384 | sfe4001_poweroff(efx); |
@@ -461,7 +462,7 @@ static int sfe4001_init(struct efx_nic *efx) | |||
461 | if (rc) | 462 | if (rc) |
462 | goto fail_on; | 463 | goto fail_on; |
463 | 464 | ||
464 | EFX_INFO(efx, "PHY is powered on\n"); | 465 | netif_info(efx, hw, efx->net_dev, "PHY is powered on\n"); |
465 | return 0; | 466 | return 0; |
466 | 467 | ||
467 | fail_on: | 468 | fail_on: |
@@ -493,7 +494,7 @@ static int sfn4111t_check_hw(struct efx_nic *efx) | |||
493 | 494 | ||
494 | static void sfn4111t_fini(struct efx_nic *efx) | 495 | static void sfn4111t_fini(struct efx_nic *efx) |
495 | { | 496 | { |
496 | EFX_INFO(efx, "%s\n", __func__); | 497 | netif_info(efx, drv, efx->net_dev, "%s\n", __func__); |
497 | 498 | ||
498 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | 499 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); |
499 | i2c_unregister_device(falcon_board(efx)->hwmon_client); | 500 | i2c_unregister_device(falcon_board(efx)->hwmon_client); |
@@ -742,13 +743,14 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info) | |||
742 | board->type = &board_types[i]; | 743 | board->type = &board_types[i]; |
743 | 744 | ||
744 | if (board->type) { | 745 | if (board->type) { |
745 | EFX_INFO(efx, "board is %s rev %c%d\n", | 746 | netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", |
746 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | 747 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) |
747 | ? board->type->ref_model : board->type->gen_type, | 748 | ? board->type->ref_model : board->type->gen_type, |
748 | 'A' + board->major, board->minor); | 749 | 'A' + board->major, board->minor); |
749 | return 0; | 750 | return 0; |
750 | } else { | 751 | } else { |
751 | EFX_ERR(efx, "unknown board type %d\n", type_id); | 752 | netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", |
753 | type_id); | ||
752 | return -ENODEV; | 754 | return -ENODEV; |
753 | } | 755 | } |
754 | } | 756 | } |
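
The board-support conversions pick the message class to match the content: hw for sensor and power faults, probe for board identification, drv for teardown paths. How the msg_enable word that these classes are tested against gets its initial value is up to each driver; a common, generic pattern (sketched here with an assumed parameter name, not this driver's actual code) is to feed a module parameter through netif_msg_init():

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include "net_driver.h"			/* struct efx_nic */

static int debug_level = -1;	/* assumed name; negative means "use defaults" */
module_param(debug_level, int, 0);

static void efx_example_init_msg_enable(struct efx_nic *efx)	/* hypothetical */
{
	/* netif_msg_init() converts a numeric level into the low-order
	 * NETIF_MSG_* bits, falling back to the supplied default bitmap
	 * when the value is negative or out of range.
	 */
	efx->msg_enable = netif_msg_init(debug_level,
					 NETIF_MSG_DRV | NETIF_MSG_PROBE |
					 NETIF_MSG_LINK);
}
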
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index c84a2ce2ccbb..bae656dd2c4e 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -81,7 +81,8 @@ int falcon_reset_xaui(struct efx_nic *efx) | |||
81 | } | 81 | } |
82 | udelay(10); | 82 | udelay(10); |
83 | } | 83 | } |
84 | EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); | 84 | netif_err(efx, hw, efx->net_dev, |
85 | "timed out waiting for XAUI/XGXS reset\n"); | ||
85 | return -ETIMEDOUT; | 86 | return -ETIMEDOUT; |
86 | } | 87 | } |
87 | 88 | ||
@@ -256,7 +257,7 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) | |||
256 | falcon_stop_nic_stats(efx); | 257 | falcon_stop_nic_stats(efx); |
257 | 258 | ||
258 | while (!mac_up && tries) { | 259 | while (!mac_up && tries) { |
259 | EFX_LOG(efx, "bashing xaui\n"); | 260 | netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); |
260 | falcon_reset_xaui(efx); | 261 | falcon_reset_xaui(efx); |
261 | udelay(200); | 262 | udelay(200); |
262 | 263 | ||
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 4317574c772d..85a99fe87437 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -78,8 +78,9 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | |||
78 | { | 78 | { |
79 | unsigned long flags __attribute__ ((unused)); | 79 | unsigned long flags __attribute__ ((unused)); |
80 | 80 | ||
81 | EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, | 81 | netif_vdbg(efx, hw, efx->net_dev, |
82 | EFX_OWORD_VAL(*value)); | 82 | "writing register %x with " EFX_OWORD_FMT "\n", reg, |
83 | EFX_OWORD_VAL(*value)); | ||
83 | 84 | ||
84 | spin_lock_irqsave(&efx->biu_lock, flags); | 85 | spin_lock_irqsave(&efx->biu_lock, flags); |
85 | #ifdef EFX_USE_QWORD_IO | 86 | #ifdef EFX_USE_QWORD_IO |
@@ -105,8 +106,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | |||
105 | unsigned int addr = index * sizeof(*value); | 106 | unsigned int addr = index * sizeof(*value); |
106 | unsigned long flags __attribute__ ((unused)); | 107 | unsigned long flags __attribute__ ((unused)); |
107 | 108 | ||
108 | EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n", | 109 | netif_vdbg(efx, hw, efx->net_dev, |
109 | addr, EFX_QWORD_VAL(*value)); | 110 | "writing SRAM address %x with " EFX_QWORD_FMT "\n", |
111 | addr, EFX_QWORD_VAL(*value)); | ||
110 | 112 | ||
111 | spin_lock_irqsave(&efx->biu_lock, flags); | 113 | spin_lock_irqsave(&efx->biu_lock, flags); |
112 | #ifdef EFX_USE_QWORD_IO | 114 | #ifdef EFX_USE_QWORD_IO |
@@ -129,8 +131,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | |||
129 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | 131 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, |
130 | unsigned int reg) | 132 | unsigned int reg) |
131 | { | 133 | { |
132 | EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", | 134 | netif_vdbg(efx, hw, efx->net_dev, |
133 | reg, EFX_DWORD_VAL(*value)); | 135 | "writing partial register %x with "EFX_DWORD_FMT"\n", |
136 | reg, EFX_DWORD_VAL(*value)); | ||
134 | 137 | ||
135 | /* No lock required */ | 138 | /* No lock required */ |
136 | _efx_writed(efx, value->u32[0], reg); | 139 | _efx_writed(efx, value->u32[0], reg); |
@@ -155,8 +158,9 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
155 | value->u32[3] = _efx_readd(efx, reg + 12); | 158 | value->u32[3] = _efx_readd(efx, reg + 12); |
156 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 159 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
157 | 160 | ||
158 | EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, | 161 | netif_vdbg(efx, hw, efx->net_dev, |
159 | EFX_OWORD_VAL(*value)); | 162 | "read from register %x, got " EFX_OWORD_FMT "\n", reg, |
163 | EFX_OWORD_VAL(*value)); | ||
160 | } | 164 | } |
161 | 165 | ||
162 | /* Read an 8-byte SRAM entry through supplied mapping, | 166 | /* Read an 8-byte SRAM entry through supplied mapping, |
@@ -177,8 +181,9 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
177 | #endif | 181 | #endif |
178 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 182 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
179 | 183 | ||
180 | EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n", | 184 | netif_vdbg(efx, hw, efx->net_dev, |
181 | addr, EFX_QWORD_VAL(*value)); | 185 | "read from SRAM address %x, got "EFX_QWORD_FMT"\n", |
186 | addr, EFX_QWORD_VAL(*value)); | ||
182 | } | 187 | } |
183 | 188 | ||
184 | /* Read dword from register that allows partial writes (sic) */ | 189 | /* Read dword from register that allows partial writes (sic) */ |
@@ -186,8 +191,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, | |||
186 | unsigned int reg) | 191 | unsigned int reg) |
187 | { | 192 | { |
188 | value->u32[0] = _efx_readd(efx, reg); | 193 | value->u32[0] = _efx_readd(efx, reg); |
189 | EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", | 194 | netif_vdbg(efx, hw, efx->net_dev, |
190 | reg, EFX_DWORD_VAL(*value)); | 195 | "read from register %x, got "EFX_DWORD_FMT"\n", |
196 | reg, EFX_DWORD_VAL(*value)); | ||
191 | } | 197 | } |
192 | 198 | ||
193 | /* Write to a register forming part of a table */ | 199 | /* Write to a register forming part of a table */ |
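
The io.h register accessors are extremely hot paths, so their per-access traces go through netif_vdbg() rather than netif_dbg(). Based on the generic netdevice.h behaviour (not anything added by this patch), verbose-debug tracing of this kind disappears entirely unless VERBOSE_DEBUG is defined at build time, while the format arguments are still type-checked; a stripped-down sketch of that idea, using an invented macro name:

/* Sketch of the netif_vdbg() idea, not the kernel's exact definition. */
#ifdef VERBOSE_DEBUG
#define example_vdbg(priv, type, dev, fmt, args...)			\
	netif_dbg(priv, type, dev, fmt, ##args)
#else
#define example_vdbg(priv, type, dev, fmt, args...)			\
	do {								\
		if (0)	/* never emitted, but still type-checked */	\
			netif_dbg(priv, type, dev, fmt, ##args);	\
	} while (0)
#endif
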
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 93cc3c1b9450..3912b8fed912 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -168,11 +168,12 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | 168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); |
169 | 169 | ||
170 | if (error && mcdi->resplen == 0) { | 170 | if (error && mcdi->resplen == 0) { |
171 | EFX_ERR(efx, "MC rebooted\n"); | 171 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); |
172 | rc = EIO; | 172 | rc = EIO; |
173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | 173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { |
174 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | 174 | netif_err(efx, hw, efx->net_dev, |
175 | respseq, mcdi->seqno); | 175 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", |
176 | respseq, mcdi->seqno); | ||
176 | rc = EIO; | 177 | rc = EIO; |
177 | } else if (error) { | 178 | } else if (error) { |
178 | efx_readd(efx, ®, pdu + 4); | 179 | efx_readd(efx, ®, pdu + 4); |
@@ -303,8 +304,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | |||
303 | /* The request has been cancelled */ | 304 | /* The request has been cancelled */ |
304 | --mcdi->credits; | 305 | --mcdi->credits; |
305 | else | 306 | else |
306 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx " | 307 | netif_err(efx, hw, efx->net_dev, |
307 | "seq 0x%x\n", seqno, mcdi->seqno); | 308 | "MC response mismatch tx seq 0x%x rx " |
309 | "seq 0x%x\n", seqno, mcdi->seqno); | ||
308 | } else { | 310 | } else { |
309 | mcdi->resprc = errno; | 311 | mcdi->resprc = errno; |
310 | mcdi->resplen = datalen; | 312 | mcdi->resplen = datalen; |
@@ -352,8 +354,9 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | |||
352 | ++mcdi->credits; | 354 | ++mcdi->credits; |
353 | spin_unlock_bh(&mcdi->iface_lock); | 355 | spin_unlock_bh(&mcdi->iface_lock); |
354 | 356 | ||
355 | EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n", | 357 | netif_err(efx, hw, efx->net_dev, |
356 | cmd, (int)inlen, mcdi->mode); | 358 | "MC command 0x%x inlen %d mode %d timed out\n", |
359 | cmd, (int)inlen, mcdi->mode); | ||
357 | } else { | 360 | } else { |
358 | size_t resplen; | 361 | size_t resplen; |
359 | 362 | ||
@@ -374,11 +377,13 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | |||
374 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | 377 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) |
375 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | 378 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ |
376 | else if (rc == -EIO || rc == -EINTR) { | 379 | else if (rc == -EIO || rc == -EINTR) { |
377 | EFX_ERR(efx, "MC fatal error %d\n", -rc); | 380 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", |
381 | -rc); | ||
378 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | 382 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); |
379 | } else | 383 | } else |
380 | EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n", | 384 | netif_err(efx, hw, efx->net_dev, |
381 | cmd, (int)inlen, -rc); | 385 | "MC command 0x%x inlen %d failed rc=%d\n", |
386 | cmd, (int)inlen, -rc); | ||
382 | } | 387 | } |
383 | 388 | ||
384 | efx_mcdi_release(mcdi); | 389 | efx_mcdi_release(mcdi); |
@@ -534,8 +539,9 @@ static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | |||
534 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | 539 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); |
535 | state_txt = sensor_status_names[state]; | 540 | state_txt = sensor_status_names[state]; |
536 | 541 | ||
537 | EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n", | 542 | netif_err(efx, hw, efx->net_dev, |
538 | monitor, name, state_txt, value); | 543 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", |
544 | monitor, name, state_txt, value); | ||
539 | } | 545 | } |
540 | 546 | ||
541 | /* Called from falcon_process_eventq for MCDI events */ | 547 | /* Called from falcon_process_eventq for MCDI events */ |
@@ -548,12 +554,13 @@ void efx_mcdi_process_event(struct efx_channel *channel, | |||
548 | 554 | ||
549 | switch (code) { | 555 | switch (code) { |
550 | case MCDI_EVENT_CODE_BADSSERT: | 556 | case MCDI_EVENT_CODE_BADSSERT: |
551 | EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data); | 557 | netif_err(efx, hw, efx->net_dev, |
558 | "MC watchdog or assertion failure at 0x%x\n", data); | ||
552 | efx_mcdi_ev_death(efx, EINTR); | 559 | efx_mcdi_ev_death(efx, EINTR); |
553 | break; | 560 | break; |
554 | 561 | ||
555 | case MCDI_EVENT_CODE_PMNOTICE: | 562 | case MCDI_EVENT_CODE_PMNOTICE: |
556 | EFX_INFO(efx, "MCDI PM event.\n"); | 563 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); |
557 | break; | 564 | break; |
558 | 565 | ||
559 | case MCDI_EVENT_CODE_CMDDONE: | 566 | case MCDI_EVENT_CODE_CMDDONE: |
@@ -570,10 +577,11 @@ void efx_mcdi_process_event(struct efx_channel *channel, | |||
570 | efx_mcdi_sensor_event(efx, event); | 577 | efx_mcdi_sensor_event(efx, event); |
571 | break; | 578 | break; |
572 | case MCDI_EVENT_CODE_SCHEDERR: | 579 | case MCDI_EVENT_CODE_SCHEDERR: |
573 | EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data); | 580 | netif_info(efx, hw, efx->net_dev, |
581 | "MC Scheduler error address=0x%x\n", data); | ||
574 | break; | 582 | break; |
575 | case MCDI_EVENT_CODE_REBOOT: | 583 | case MCDI_EVENT_CODE_REBOOT: |
576 | EFX_INFO(efx, "MC Reboot\n"); | 584 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); |
577 | efx_mcdi_ev_death(efx, EIO); | 585 | efx_mcdi_ev_death(efx, EIO); |
578 | break; | 586 | break; |
579 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | 587 | case MCDI_EVENT_CODE_MAC_STATS_DMA: |
@@ -581,7 +589,8 @@ void efx_mcdi_process_event(struct efx_channel *channel, | |||
581 | break; | 589 | break; |
582 | 590 | ||
583 | default: | 591 | default: |
584 | EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code); | 592 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", |
593 | code); | ||
585 | } | 594 | } |
586 | } | 595 | } |
587 | 596 | ||
@@ -627,7 +636,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) | |||
627 | return 0; | 636 | return 0; |
628 | 637 | ||
629 | fail: | 638 | fail: |
630 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 639 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
631 | return rc; | 640 | return rc; |
632 | } | 641 | } |
633 | 642 | ||
@@ -657,7 +666,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | |||
657 | return 0; | 666 | return 0; |
658 | 667 | ||
659 | fail: | 668 | fail: |
660 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 669 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
661 | return rc; | 670 | return rc; |
662 | } | 671 | } |
663 | 672 | ||
@@ -695,7 +704,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | |||
695 | return 0; | 704 | return 0; |
696 | 705 | ||
697 | fail: | 706 | fail: |
698 | EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); | 707 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", |
708 | __func__, rc, (int)outlen); | ||
699 | 709 | ||
700 | return rc; | 710 | return rc; |
701 | } | 711 | } |
@@ -724,7 +734,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | |||
724 | return 0; | 734 | return 0; |
725 | 735 | ||
726 | fail: | 736 | fail: |
727 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 737 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
728 | return rc; | 738 | return rc; |
729 | } | 739 | } |
730 | 740 | ||
@@ -749,8 +759,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | |||
749 | return 0; | 759 | return 0; |
750 | 760 | ||
751 | fail: | 761 | fail: |
752 | EFX_ERR(efx, "%s: failed rc=%d\n", | 762 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
753 | __func__, rc); | 763 | __func__, rc); |
754 | return rc; | 764 | return rc; |
755 | } | 765 | } |
756 | 766 | ||
@@ -781,7 +791,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | |||
781 | return 0; | 791 | return 0; |
782 | 792 | ||
783 | fail: | 793 | fail: |
784 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 794 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
785 | return rc; | 795 | return rc; |
786 | } | 796 | } |
787 | 797 | ||
@@ -802,7 +812,7 @@ int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | |||
802 | return 0; | 812 | return 0; |
803 | 813 | ||
804 | fail: | 814 | fail: |
805 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 815 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
806 | return rc; | 816 | return rc; |
807 | } | 817 | } |
808 | 818 | ||
@@ -827,7 +837,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | |||
827 | return 0; | 837 | return 0; |
828 | 838 | ||
829 | fail: | 839 | fail: |
830 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 840 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
831 | return rc; | 841 | return rc; |
832 | } | 842 | } |
833 | 843 | ||
@@ -853,7 +863,7 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | |||
853 | return 0; | 863 | return 0; |
854 | 864 | ||
855 | fail: | 865 | fail: |
856 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 866 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
857 | return rc; | 867 | return rc; |
858 | } | 868 | } |
859 | 869 | ||
@@ -877,7 +887,7 @@ int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | |||
877 | return 0; | 887 | return 0; |
878 | 888 | ||
879 | fail: | 889 | fail: |
880 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 890 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
881 | return rc; | 891 | return rc; |
882 | } | 892 | } |
883 | 893 | ||
@@ -898,7 +908,7 @@ int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | |||
898 | return 0; | 908 | return 0; |
899 | 909 | ||
900 | fail: | 910 | fail: |
901 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 911 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
902 | return rc; | 912 | return rc; |
903 | } | 913 | } |
904 | 914 | ||
@@ -948,9 +958,10 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx) | |||
948 | return 0; | 958 | return 0; |
949 | 959 | ||
950 | fail2: | 960 | fail2: |
951 | EFX_ERR(efx, "%s: failed type=%u\n", __func__, type); | 961 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", |
962 | __func__, type); | ||
952 | fail1: | 963 | fail1: |
953 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 964 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
954 | return rc; | 965 | return rc; |
955 | } | 966 | } |
956 | 967 | ||
@@ -994,14 +1005,15 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) | |||
994 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | 1005 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) |
995 | ? "watchdog reset" | 1006 | ? "watchdog reset" |
996 | : "unknown assertion"; | 1007 | : "unknown assertion"; |
997 | EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | 1008 | netif_err(efx, hw, efx->net_dev, |
998 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | 1009 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, |
999 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | 1010 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), |
1011 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | ||
1000 | 1012 | ||
1001 | /* Print out the registers */ | 1013 | /* Print out the registers */ |
1002 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | 1014 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; |
1003 | for (index = 1; index < 32; index++) { | 1015 | for (index = 1; index < 32; index++) { |
1004 | EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, | 1016 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, |
1005 | MCDI_DWORD2(outbuf, ofst)); | 1017 | MCDI_DWORD2(outbuf, ofst)); |
1006 | ofst += sizeof(efx_dword_t); | 1018 | ofst += sizeof(efx_dword_t); |
1007 | } | 1019 | } |
@@ -1050,14 +1062,16 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | |||
1050 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | 1062 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), |
1051 | NULL, 0, NULL); | 1063 | NULL, 0, NULL); |
1052 | if (rc) | 1064 | if (rc) |
1053 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1065 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1066 | __func__, rc); | ||
1054 | } | 1067 | } |
1055 | 1068 | ||
1056 | int efx_mcdi_reset_port(struct efx_nic *efx) | 1069 | int efx_mcdi_reset_port(struct efx_nic *efx) |
1057 | { | 1070 | { |
1058 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | 1071 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); |
1059 | if (rc) | 1072 | if (rc) |
1060 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1073 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1074 | __func__, rc); | ||
1061 | return rc; | 1075 | return rc; |
1062 | } | 1076 | } |
1063 | 1077 | ||
@@ -1075,7 +1089,7 @@ int efx_mcdi_reset_mc(struct efx_nic *efx) | |||
1075 | return 0; | 1089 | return 0; |
1076 | if (rc == 0) | 1090 | if (rc == 0) |
1077 | rc = -EIO; | 1091 | rc = -EIO; |
1078 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1092 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1079 | return rc; | 1093 | return rc; |
1080 | } | 1094 | } |
1081 | 1095 | ||
@@ -1108,7 +1122,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | |||
1108 | 1122 | ||
1109 | fail: | 1123 | fail: |
1110 | *id_out = -1; | 1124 | *id_out = -1; |
1111 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1112 | return rc; | 1126 | return rc; |
1113 | 1127 | ||
1114 | } | 1128 | } |
@@ -1143,7 +1157,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | |||
1143 | 1157 | ||
1144 | fail: | 1158 | fail: |
1145 | *id_out = -1; | 1159 | *id_out = -1; |
1146 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1160 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1147 | return rc; | 1161 | return rc; |
1148 | } | 1162 | } |
1149 | 1163 | ||
@@ -1163,7 +1177,7 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | |||
1163 | return 0; | 1177 | return 0; |
1164 | 1178 | ||
1165 | fail: | 1179 | fail: |
1166 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1180 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1167 | return rc; | 1181 | return rc; |
1168 | } | 1182 | } |
1169 | 1183 | ||
@@ -1179,7 +1193,7 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | |||
1179 | return 0; | 1193 | return 0; |
1180 | 1194 | ||
1181 | fail: | 1195 | fail: |
1182 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 1196 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1183 | return rc; | 1197 | return rc; |
1184 | } | 1198 | } |
1185 | 1199 | ||
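
Every MCDI failure path above is now gated on the hw message class. At runtime such per-device message-level bits are conventionally read and written through the standard ethtool interface (plain ethtool <dev> reports the current message level; ethtool -s <dev> msglvl <value> sets it). The driver's ethtool.c is not part of this excerpt, so the hooks below are only a generic sketch of what those operations look like, with invented function names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "net_driver.h"			/* struct efx_nic */

static u32 efx_example_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

static void efx_example_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}

/* Wired up through struct ethtool_ops:
 *	.get_msglevel	= efx_example_get_msglevel,
 *	.set_msglevel	= efx_example_set_msglevel,
 */
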
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 39182631ac92..f88f4bf986ff 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -69,8 +69,8 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) | |||
69 | return 0; | 69 | return 0; |
70 | 70 | ||
71 | fail: | 71 | fail: |
72 | EFX_ERR(efx, "%s: failed rc=%d\n", | 72 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
73 | __func__, rc); | 73 | __func__, rc); |
74 | return rc; | 74 | return rc; |
75 | } | 75 | } |
76 | 76 | ||
@@ -110,8 +110,8 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, | |||
110 | return 0; | 110 | return 0; |
111 | 111 | ||
112 | fail: | 112 | fail: |
113 | EFX_ERR(efx, "%s: %s failed rc=%d\n", | 113 | netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n", |
114 | __func__, enable ? "enable" : "disable", rc); | 114 | __func__, enable ? "enable" : "disable", rc); |
115 | return rc; | 115 | return rc; |
116 | } | 116 | } |
117 | 117 | ||
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 86e43b1f7689..0121e71702bf 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -71,7 +71,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) | |||
71 | return 0; | 71 | return 0; |
72 | 72 | ||
73 | fail: | 73 | fail: |
74 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 74 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
75 | return rc; | 75 | return rc; |
76 | } | 76 | } |
77 | 77 | ||
@@ -97,7 +97,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, | |||
97 | return 0; | 97 | return 0; |
98 | 98 | ||
99 | fail: | 99 | fail: |
100 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 100 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
101 | return rc; | 101 | return rc; |
102 | } | 102 | } |
103 | 103 | ||
@@ -122,7 +122,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) | |||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | fail: | 124 | fail: |
125 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
126 | return rc; | 126 | return rc; |
127 | } | 127 | } |
128 | 128 | ||
@@ -150,7 +150,7 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | |||
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | fail: | 152 | fail: |
153 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 153 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
154 | return rc; | 154 | return rc; |
155 | } | 155 | } |
156 | 156 | ||
@@ -178,7 +178,7 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | |||
178 | return 0; | 178 | return 0; |
179 | 179 | ||
180 | fail: | 180 | fail: |
181 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 181 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
182 | return rc; | 182 | return rc; |
183 | } | 183 | } |
184 | 184 | ||
@@ -466,8 +466,8 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) | |||
466 | rmtadv |= ADVERTISED_Asym_Pause; | 466 | rmtadv |= ADVERTISED_Asym_Pause; |
467 | 467 | ||
468 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) | 468 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) |
469 | EFX_ERR(efx, "warning: link partner doesn't support " | 469 | netif_err(efx, link, efx->net_dev, |
470 | "pause frames"); | 470 | "warning: link partner doesn't support pause frames"); |
471 | } | 471 | } |
472 | 472 | ||
473 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) | 473 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) |
@@ -483,7 +483,8 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx) | |||
483 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | 483 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, |
484 | outbuf, sizeof(outbuf), NULL); | 484 | outbuf, sizeof(outbuf), NULL); |
485 | if (rc) { | 485 | if (rc) { |
486 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 486 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
487 | __func__, rc); | ||
487 | efx->link_state.up = false; | 488 | efx->link_state.up = false; |
488 | } else { | 489 | } else { |
489 | efx_mcdi_phy_decode_link( | 490 | efx_mcdi_phy_decode_link( |
@@ -526,7 +527,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e | |||
526 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | 527 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, |
527 | outbuf, sizeof(outbuf), NULL); | 528 | outbuf, sizeof(outbuf), NULL); |
528 | if (rc) { | 529 | if (rc) { |
529 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | 530 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
531 | __func__, rc); | ||
530 | return; | 532 | return; |
531 | } | 533 | } |
532 | ecmd->lp_advertising = | 534 | ecmd->lp_advertising = |
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 0548fcbbdcd0..eeaf0bd64bd3 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -63,7 +63,8 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) | |||
63 | /* Read MMD STATUS2 to check it is responding. */ | 63 | /* Read MMD STATUS2 to check it is responding. */ |
64 | status = efx_mdio_read(efx, mmd, MDIO_STAT2); | 64 | status = efx_mdio_read(efx, mmd, MDIO_STAT2); |
65 | if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { | 65 | if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { |
66 | EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd); | 66 | netif_err(efx, hw, efx->net_dev, |
67 | "PHY MMD %d not responding.\n", mmd); | ||
67 | return -EIO; | 68 | return -EIO; |
68 | } | 69 | } |
69 | } | 70 | } |
@@ -72,12 +73,14 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) | |||
72 | status = efx_mdio_read(efx, mmd, MDIO_STAT1); | 73 | status = efx_mdio_read(efx, mmd, MDIO_STAT1); |
73 | if (status & MDIO_STAT1_FAULT) { | 74 | if (status & MDIO_STAT1_FAULT) { |
74 | if (fault_fatal) { | 75 | if (fault_fatal) { |
75 | EFX_ERR(efx, "PHY MMD %d reporting fatal" | 76 | netif_err(efx, hw, efx->net_dev, |
76 | " fault: status %x\n", mmd, status); | 77 | "PHY MMD %d reporting fatal" |
78 | " fault: status %x\n", mmd, status); | ||
77 | return -EIO; | 79 | return -EIO; |
78 | } else { | 80 | } else { |
79 | EFX_LOG(efx, "PHY MMD %d reporting status" | 81 | netif_dbg(efx, hw, efx->net_dev, |
80 | " %x (expected)\n", mmd, status); | 82 | "PHY MMD %d reporting status" |
83 | " %x (expected)\n", mmd, status); | ||
81 | } | 84 | } |
82 | } | 85 | } |
83 | return 0; | 86 | return 0; |
@@ -103,8 +106,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) | |||
103 | if (mask & 1) { | 106 | if (mask & 1) { |
104 | stat = efx_mdio_read(efx, mmd, MDIO_CTRL1); | 107 | stat = efx_mdio_read(efx, mmd, MDIO_CTRL1); |
105 | if (stat < 0) { | 108 | if (stat < 0) { |
106 | EFX_ERR(efx, "failed to read status of" | 109 | netif_err(efx, hw, efx->net_dev, |
107 | " MMD %d\n", mmd); | 110 | "failed to read status of" |
111 | " MMD %d\n", mmd); | ||
108 | return -EIO; | 112 | return -EIO; |
109 | } | 113 | } |
110 | if (stat & MDIO_CTRL1_RESET) | 114 | if (stat & MDIO_CTRL1_RESET) |
@@ -119,8 +123,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) | |||
119 | msleep(spintime); | 123 | msleep(spintime); |
120 | } | 124 | } |
121 | if (in_reset != 0) { | 125 | if (in_reset != 0) { |
122 | EFX_ERR(efx, "not all MMDs came out of reset in time." | 126 | netif_err(efx, hw, efx->net_dev, |
123 | " MMDs still in reset: %x\n", in_reset); | 127 | "not all MMDs came out of reset in time." |
128 | " MMDs still in reset: %x\n", in_reset); | ||
124 | rc = -ETIMEDOUT; | 129 | rc = -ETIMEDOUT; |
125 | } | 130 | } |
126 | return rc; | 131 | return rc; |
@@ -142,16 +147,18 @@ int efx_mdio_check_mmds(struct efx_nic *efx, | |||
142 | devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); | 147 | devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); |
143 | devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); | 148 | devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); |
144 | if (devs1 < 0 || devs2 < 0) { | 149 | if (devs1 < 0 || devs2 < 0) { |
145 | EFX_ERR(efx, "failed to read devices present\n"); | 150 | netif_err(efx, hw, efx->net_dev, |
151 | "failed to read devices present\n"); | ||
146 | return -EIO; | 152 | return -EIO; |
147 | } | 153 | } |
148 | devices = devs1 | (devs2 << 16); | 154 | devices = devs1 | (devs2 << 16); |
149 | if ((devices & mmd_mask) != mmd_mask) { | 155 | if ((devices & mmd_mask) != mmd_mask) { |
150 | EFX_ERR(efx, "required MMDs not present: got %x, " | 156 | netif_err(efx, hw, efx->net_dev, |
151 | "wanted %x\n", devices, mmd_mask); | 157 | "required MMDs not present: got %x, wanted %x\n", |
158 | devices, mmd_mask); | ||
152 | return -ENODEV; | 159 | return -ENODEV; |
153 | } | 160 | } |
154 | EFX_TRACE(efx, "Devices present: %x\n", devices); | 161 | netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices); |
155 | 162 | ||
156 | /* Check all required MMDs are responding and happy. */ | 163 | /* Check all required MMDs are responding and happy. */ |
157 | while (mmd_mask) { | 164 | while (mmd_mask) { |
@@ -219,7 +226,7 @@ static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, | |||
219 | { | 226 | { |
220 | int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); | 227 | int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); |
221 | 228 | ||
222 | EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n", | 229 | netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n", |
223 | mmd, lpower); | 230 | mmd, lpower); |
224 | 231 | ||
225 | if (stat & MDIO_STAT1_LPOWERABLE) { | 232 | if (stat & MDIO_STAT1_LPOWERABLE) { |
@@ -349,8 +356,8 @@ int efx_mdio_test_alive(struct efx_nic *efx) | |||
349 | 356 | ||
350 | if ((physid1 == 0x0000) || (physid1 == 0xffff) || | 357 | if ((physid1 == 0x0000) || (physid1 == 0xffff) || |
351 | (physid2 == 0x0000) || (physid2 == 0xffff)) { | 358 | (physid2 == 0x0000) || (physid2 == 0xffff)) { |
352 | EFX_ERR(efx, "no MDIO PHY present with ID %d\n", | 359 | netif_err(efx, hw, efx->net_dev, |
353 | efx->mdio.prtad); | 360 | "no MDIO PHY present with ID %d\n", efx->mdio.prtad); |
354 | rc = -EINVAL; | 361 | rc = -EINVAL; |
355 | } else { | 362 | } else { |
356 | rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); | 363 | rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); |
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f89e71929603..75791d3d4963 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -51,7 +51,8 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx) | |||
51 | 51 | ||
52 | sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN); | 52 | sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN); |
53 | if (!sync) | 53 | if (!sync) |
54 | EFX_LOG(efx, "XGXS lane status: %x\n", lane_status); | 54 | netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n", |
55 | lane_status); | ||
55 | return sync; | 56 | return sync; |
56 | } | 57 | } |
57 | 58 | ||
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index ba636e086fc3..0cca44b4ee44 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -13,6 +13,10 @@ | |||
13 | #ifndef EFX_NET_DRIVER_H | 13 | #ifndef EFX_NET_DRIVER_H |
14 | #define EFX_NET_DRIVER_H | 14 | #define EFX_NET_DRIVER_H |
15 | 15 | ||
16 | #if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) | ||
17 | #define DEBUG | ||
18 | #endif | ||
19 | |||
16 | #include <linux/version.h> | 20 | #include <linux/version.h> |
17 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
18 | #include <linux/etherdevice.h> | 22 | #include <linux/etherdevice.h> |
@@ -48,35 +52,6 @@ | |||
48 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
49 | #endif | 53 | #endif |
50 | 54 | ||
51 | /* Un-rate-limited logging */ | ||
52 | #define EFX_ERR(efx, fmt, args...) \ | ||
53 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) | ||
54 | |||
55 | #define EFX_INFO(efx, fmt, args...) \ | ||
56 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) | ||
57 | |||
58 | #ifdef EFX_ENABLE_DEBUG | ||
59 | #define EFX_LOG(efx, fmt, args...) \ | ||
60 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) | ||
61 | #else | ||
62 | #define EFX_LOG(efx, fmt, args...) \ | ||
63 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) | ||
64 | #endif | ||
65 | |||
66 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | ||
67 | |||
68 | #define EFX_REGDUMP(efx, fmt, args...) do {} while (0) | ||
69 | |||
70 | /* Rate-limited logging */ | ||
71 | #define EFX_ERR_RL(efx, fmt, args...) \ | ||
72 | do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0) | ||
73 | |||
74 | #define EFX_INFO_RL(efx, fmt, args...) \ | ||
75 | do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | ||
76 | |||
77 | #define EFX_LOG_RL(efx, fmt, args...) \ | ||
78 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | ||
79 | |||
80 | /************************************************************************** | 55 | /************************************************************************** |
81 | * | 56 | * |
82 | * Efx data structures | 57 | * Efx data structures |
@@ -663,6 +638,7 @@ union efx_multicast_hash { | |||
663 | * @interrupt_mode: Interrupt mode | 638 | * @interrupt_mode: Interrupt mode |
664 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | 639 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues |
665 | * @irq_rx_moderation: IRQ moderation time for RX event queues | 640 | * @irq_rx_moderation: IRQ moderation time for RX event queues |
641 | * @msg_enable: Log message enable flags | ||
666 | * @state: Device state flag. Serialised by the rtnl_lock. | 642 | * @state: Device state flag. Serialised by the rtnl_lock. |
667 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) | 643 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) |
668 | * @tx_queue: TX DMA queues | 644 | * @tx_queue: TX DMA queues |
@@ -746,6 +722,7 @@ struct efx_nic { | |||
746 | enum efx_int_mode interrupt_mode; | 722 | enum efx_int_mode interrupt_mode; |
747 | bool irq_rx_adaptive; | 723 | bool irq_rx_adaptive; |
748 | unsigned int irq_rx_moderation; | 724 | unsigned int irq_rx_moderation; |
725 | u32 msg_enable; | ||
749 | 726 | ||
750 | enum nic_state state; | 727 | enum nic_state state; |
751 | enum reset_type reset_pending; | 728 | enum reset_type reset_pending; |
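
The net_driver.h hunk deletes the driver-private EFX_* logging macros, including the rate-limited EFX_*_RL variants, and adds the msg_enable word that the netif_* helpers test. Because those helpers have no built-in rate limiting, call sites that previously relied on EFX_ERR_RL() and friends now check net_ratelimit() explicitly (as the nic.c hunk below does for RX error events); a minimal sketch of the resulting pattern, with an invented message:

#include <linux/net.h>			/* net_ratelimit() */
#include <linux/netdevice.h>
#include "net_driver.h"			/* struct efx_nic */

static void efx_example_rate_limited_err(struct efx_nic *efx)	/* hypothetical */
{
	/* Old style:  EFX_ERR_RL(efx, "RX DMA error\n");
	 * New style:  gate the netif_err() call on net_ratelimit(), which
	 * returns false once messages are being emitted too fast.
	 */
	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev, "RX DMA error\n");
}
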
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 67235f1c2550..30836578c1cc 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -179,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx, | |||
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | fail: | 181 | fail: |
182 | EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | 182 | netif_err(efx, hw, efx->net_dev, |
183 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | 183 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT |
184 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | 184 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), |
185 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
185 | return -EIO; | 186 | return -EIO; |
186 | } | 187 | } |
187 | 188 | ||
@@ -214,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | |||
214 | for (i = 0; i < buffer->entries; i++) { | 215 | for (i = 0; i < buffer->entries; i++) { |
215 | index = buffer->index + i; | 216 | index = buffer->index + i; |
216 | dma_addr = buffer->dma_addr + (i * 4096); | 217 | dma_addr = buffer->dma_addr + (i * 4096); |
217 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", | 218 | netif_dbg(efx, probe, efx->net_dev, |
218 | index, (unsigned long long)dma_addr); | 219 | "mapping special buffer %d at %llx\n", |
220 | index, (unsigned long long)dma_addr); | ||
219 | EFX_POPULATE_QWORD_3(buf_desc, | 221 | EFX_POPULATE_QWORD_3(buf_desc, |
220 | FRF_AZ_BUF_ADR_REGION, 0, | 222 | FRF_AZ_BUF_ADR_REGION, 0, |
221 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | 223 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, |
@@ -235,8 +237,8 @@ efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | |||
235 | if (!buffer->entries) | 237 | if (!buffer->entries) |
236 | return; | 238 | return; |
237 | 239 | ||
238 | EFX_LOG(efx, "unmapping special buffers %d-%d\n", | 240 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", |
239 | buffer->index, buffer->index + buffer->entries - 1); | 241 | buffer->index, buffer->index + buffer->entries - 1); |
240 | 242 | ||
241 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | 243 | EFX_POPULATE_OWORD_4(buf_tbl_upd, |
242 | FRF_AZ_BUF_UPD_CMD, 0, | 244 | FRF_AZ_BUF_UPD_CMD, 0, |
@@ -276,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, | |||
276 | buffer->index = efx->next_buffer_table; | 278 | buffer->index = efx->next_buffer_table; |
277 | efx->next_buffer_table += buffer->entries; | 279 | efx->next_buffer_table += buffer->entries; |
278 | 280 | ||
279 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " | 281 | netif_dbg(efx, probe, efx->net_dev, |
280 | "(virt %p phys %llx)\n", buffer->index, | 282 | "allocating special buffers %d-%d at %llx+%x " |
281 | buffer->index + buffer->entries - 1, | 283 | "(virt %p phys %llx)\n", buffer->index, |
282 | (u64)buffer->dma_addr, len, | 284 | buffer->index + buffer->entries - 1, |
283 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 285 | (u64)buffer->dma_addr, len, |
286 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
284 | 287 | ||
285 | return 0; | 288 | return 0; |
286 | } | 289 | } |
@@ -291,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | |||
291 | if (!buffer->addr) | 294 | if (!buffer->addr) |
292 | return; | 295 | return; |
293 | 296 | ||
294 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " | 297 | netif_dbg(efx, hw, efx->net_dev, |
295 | "(virt %p phys %llx)\n", buffer->index, | 298 | "deallocating special buffers %d-%d at %llx+%x " |
296 | buffer->index + buffer->entries - 1, | 299 | "(virt %p phys %llx)\n", buffer->index, |
297 | (u64)buffer->dma_addr, buffer->len, | 300 | buffer->index + buffer->entries - 1, |
298 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 301 | (u64)buffer->dma_addr, buffer->len, |
302 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
299 | 303 | ||
300 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, | 304 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, |
301 | buffer->dma_addr); | 305 | buffer->dma_addr); |
@@ -555,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | |||
555 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | 559 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; |
556 | bool iscsi_digest_en = is_b0; | 560 | bool iscsi_digest_en = is_b0; |
557 | 561 | ||
558 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | 562 | netif_dbg(efx, hw, efx->net_dev, |
559 | rx_queue->queue, rx_queue->rxd.index, | 563 | "RX queue %d ring in special buffers %d-%d\n", |
560 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | 564 | rx_queue->queue, rx_queue->rxd.index, |
565 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
561 | 566 | ||
562 | rx_queue->flushed = FLUSH_NONE; | 567 | rx_queue->flushed = FLUSH_NONE; |
563 | 568 | ||
@@ -694,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
694 | EFX_WORKAROUND_10727(efx)) { | 699 | EFX_WORKAROUND_10727(efx)) { |
695 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 700 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
696 | } else { | 701 | } else { |
697 | EFX_ERR(efx, "channel %d unexpected TX event " | 702 | netif_err(efx, tx_err, efx->net_dev, |
698 | EFX_QWORD_FMT"\n", channel->channel, | 703 | "channel %d unexpected TX event " |
699 | EFX_QWORD_VAL(*event)); | 704 | EFX_QWORD_FMT"\n", channel->channel, |
705 | EFX_QWORD_VAL(*event)); | ||
700 | } | 706 | } |
701 | 707 | ||
702 | return tx_packets; | 708 | return tx_packets; |
@@ -759,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
759 | * to a FIFO overflow. | 765 | * to a FIFO overflow. |
760 | */ | 766 | */ |
761 | #ifdef EFX_ENABLE_DEBUG | 767 | #ifdef EFX_ENABLE_DEBUG |
762 | if (rx_ev_other_err) { | 768 | if (rx_ev_other_err && net_ratelimit()) { |
763 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " | 769 | netif_dbg(efx, rx_err, efx->net_dev, |
764 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | 770 | " RX queue %d unexpected RX event " |
765 | rx_queue->queue, EFX_QWORD_VAL(*event), | 771 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", |
766 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | 772 | rx_queue->queue, EFX_QWORD_VAL(*event), |
767 | rx_ev_ip_hdr_chksum_err ? | 773 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", |
768 | " [IP_HDR_CHKSUM_ERR]" : "", | 774 | rx_ev_ip_hdr_chksum_err ? |
769 | rx_ev_tcp_udp_chksum_err ? | 775 | " [IP_HDR_CHKSUM_ERR]" : "", |
770 | " [TCP_UDP_CHKSUM_ERR]" : "", | 776 | rx_ev_tcp_udp_chksum_err ? |
771 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | 777 | " [TCP_UDP_CHKSUM_ERR]" : "", |
772 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | 778 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", |
773 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | 779 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", |
774 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | 780 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", |
775 | rx_ev_pause_frm ? " [PAUSE]" : ""); | 781 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", |
782 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
776 | } | 783 | } |
777 | #endif | 784 | #endif |
778 | } | 785 | } |
@@ -786,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | |||
786 | 793 | ||
787 | expected = rx_queue->removed_count & EFX_RXQ_MASK; | 794 | expected = rx_queue->removed_count & EFX_RXQ_MASK; |
788 | dropped = (index - expected) & EFX_RXQ_MASK; | 795 | dropped = (index - expected) & EFX_RXQ_MASK; |
789 | EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", | 796 | netif_info(efx, rx_err, efx->net_dev, |
790 | dropped, index, expected); | 797 | "dropped %d events (index=%d expected=%d)\n", |
798 | dropped, index, expected); | ||
791 | 799 | ||
792 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | 800 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? |
793 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | 801 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
@@ -873,9 +881,9 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | |||
873 | * queue. Refill it here */ | 881 | * queue. Refill it here */ |
874 | efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); | 882 | efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); |
875 | else | 883 | else |
876 | EFX_LOG(efx, "channel %d received generated " | 884 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " |
877 | "event "EFX_QWORD_FMT"\n", channel->channel, | 885 | "generated event "EFX_QWORD_FMT"\n", |
878 | EFX_QWORD_VAL(*event)); | 886 | channel->channel, EFX_QWORD_VAL(*event)); |
879 | } | 887 | } |
880 | 888 | ||
881 | /* Global events are basically PHY events */ | 889 | /* Global events are basically PHY events */ |
@@ -901,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | |||
901 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? | 909 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? |
902 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : | 910 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : |
903 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { | 911 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { |
904 | EFX_ERR(efx, "channel %d seen global RX_RESET " | 912 | netif_err(efx, rx_err, efx->net_dev, |
905 | "event. Resetting.\n", channel->channel); | 913 | "channel %d seen global RX_RESET event. Resetting.\n", |
914 | channel->channel); | ||
906 | 915 | ||
907 | atomic_inc(&efx->rx_reset); | 916 | atomic_inc(&efx->rx_reset); |
908 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | 917 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? |
@@ -911,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | |||
911 | } | 920 | } |
912 | 921 | ||
913 | if (!handled) | 922 | if (!handled) |
914 | EFX_ERR(efx, "channel %d unknown global event " | 923 | netif_err(efx, hw, efx->net_dev, |
915 | EFX_QWORD_FMT "\n", channel->channel, | 924 | "channel %d unknown global event " |
916 | EFX_QWORD_VAL(*event)); | 925 | EFX_QWORD_FMT "\n", channel->channel, |
926 | EFX_QWORD_VAL(*event)); | ||
917 | } | 927 | } |
918 | 928 | ||
919 | static void | 929 | static void |
@@ -928,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
928 | 938 | ||
929 | switch (ev_sub_code) { | 939 | switch (ev_sub_code) { |
930 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | 940 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: |
931 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", | 941 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", |
932 | channel->channel, ev_sub_data); | 942 | channel->channel, ev_sub_data); |
933 | break; | 943 | break; |
934 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | 944 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
935 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", | 945 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", |
936 | channel->channel, ev_sub_data); | 946 | channel->channel, ev_sub_data); |
937 | break; | 947 | break; |
938 | case FSE_AZ_EVQ_INIT_DONE_EV: | 948 | case FSE_AZ_EVQ_INIT_DONE_EV: |
939 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", | 949 | netif_dbg(efx, hw, efx->net_dev, |
940 | channel->channel, ev_sub_data); | 950 | "channel %d EVQ %d initialised\n", |
951 | channel->channel, ev_sub_data); | ||
941 | break; | 952 | break; |
942 | case FSE_AZ_SRM_UPD_DONE_EV: | 953 | case FSE_AZ_SRM_UPD_DONE_EV: |
943 | EFX_TRACE(efx, "channel %d SRAM update done\n", | 954 | netif_vdbg(efx, hw, efx->net_dev, |
944 | channel->channel); | 955 | "channel %d SRAM update done\n", channel->channel); |
945 | break; | 956 | break; |
946 | case FSE_AZ_WAKE_UP_EV: | 957 | case FSE_AZ_WAKE_UP_EV: |
947 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", | 958 | netif_vdbg(efx, hw, efx->net_dev, |
948 | channel->channel, ev_sub_data); | 959 | "channel %d RXQ %d wakeup event\n", |
960 | channel->channel, ev_sub_data); | ||
949 | break; | 961 | break; |
950 | case FSE_AZ_TIMER_EV: | 962 | case FSE_AZ_TIMER_EV: |
951 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", | 963 | netif_vdbg(efx, hw, efx->net_dev, |
952 | channel->channel, ev_sub_data); | 964 | "channel %d RX queue %d timer expired\n", |
965 | channel->channel, ev_sub_data); | ||
953 | break; | 966 | break; |
954 | case FSE_AA_RX_RECOVER_EV: | 967 | case FSE_AA_RX_RECOVER_EV: |
955 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | 968 | netif_err(efx, rx_err, efx->net_dev, |
969 | "channel %d seen DRIVER RX_RESET event. " | ||
956 | "Resetting.\n", channel->channel); | 970 | "Resetting.\n", channel->channel); |
957 | atomic_inc(&efx->rx_reset); | 971 | atomic_inc(&efx->rx_reset); |
958 | efx_schedule_reset(efx, | 972 | efx_schedule_reset(efx, |
@@ -961,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
961 | RESET_TYPE_DISABLE); | 975 | RESET_TYPE_DISABLE); |
962 | break; | 976 | break; |
963 | case FSE_BZ_RX_DSC_ERROR_EV: | 977 | case FSE_BZ_RX_DSC_ERROR_EV: |
964 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." | 978 | netif_err(efx, rx_err, efx->net_dev, |
965 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 979 | "RX DMA Q %d reports descriptor fetch error." |
980 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
966 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | 981 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); |
967 | break; | 982 | break; |
968 | case FSE_BZ_TX_DSC_ERROR_EV: | 983 | case FSE_BZ_TX_DSC_ERROR_EV: |
969 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." | 984 | netif_err(efx, tx_err, efx->net_dev, |
970 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 985 | "TX DMA Q %d reports descriptor fetch error." |
986 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
971 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 987 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
972 | break; | 988 | break; |
973 | default: | 989 | default: |
974 | EFX_TRACE(efx, "channel %d unknown driver event code %d " | 990 | netif_vdbg(efx, hw, efx->net_dev, |
975 | "data %04x\n", channel->channel, ev_sub_code, | 991 | "channel %d unknown driver event code %d " |
976 | ev_sub_data); | 992 | "data %04x\n", channel->channel, ev_sub_code, |
993 | ev_sub_data); | ||
977 | break; | 994 | break; |
978 | } | 995 | } |
979 | } | 996 | } |
@@ -996,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
996 | /* End of events */ | 1013 | /* End of events */ |
997 | break; | 1014 | break; |
998 | 1015 | ||
999 | EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", | 1016 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
1000 | channel->channel, EFX_QWORD_VAL(event)); | 1017 | "channel %d event is "EFX_QWORD_FMT"\n", |
1018 | channel->channel, EFX_QWORD_VAL(event)); | ||
1001 | 1019 | ||
1002 | /* Clear this event by marking it all ones */ | 1020 | /* Clear this event by marking it all ones */ |
1003 | EFX_SET_QWORD(*p_event); | 1021 | EFX_SET_QWORD(*p_event); |
@@ -1033,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
1033 | efx_mcdi_process_event(channel, &event); | 1051 | efx_mcdi_process_event(channel, &event); |
1034 | break; | 1052 | break; |
1035 | default: | 1053 | default: |
1036 | EFX_ERR(channel->efx, "channel %d unknown event type %d" | 1054 | netif_err(channel->efx, hw, channel->efx->net_dev, |
1037 | " (data " EFX_QWORD_FMT ")\n", channel->channel, | 1055 | "channel %d unknown event type %d (data " |
1038 | ev_code, EFX_QWORD_VAL(event)); | 1056 | EFX_QWORD_FMT ")\n", channel->channel, |
1057 | ev_code, EFX_QWORD_VAL(event)); | ||
1039 | } | 1058 | } |
1040 | } | 1059 | } |
1041 | 1060 | ||
@@ -1060,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel) | |||
1060 | efx_oword_t reg; | 1079 | efx_oword_t reg; |
1061 | struct efx_nic *efx = channel->efx; | 1080 | struct efx_nic *efx = channel->efx; |
1062 | 1081 | ||
1063 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", | 1082 | netif_dbg(efx, hw, efx->net_dev, |
1064 | channel->channel, channel->eventq.index, | 1083 | "channel %d event queue in special buffers %d-%d\n", |
1065 | channel->eventq.index + channel->eventq.entries - 1); | 1084 | channel->channel, channel->eventq.index, |
1085 | channel->eventq.index + channel->eventq.entries - 1); | ||
1066 | 1086 | ||
1067 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | 1087 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { |
1068 | EFX_POPULATE_OWORD_3(reg, | 1088 | EFX_POPULATE_OWORD_3(reg, |
@@ -1240,14 +1260,16 @@ int efx_nic_flush_queues(struct efx_nic *efx) | |||
1240 | * leading to a reset, or fake up success anyway */ | 1260 | * leading to a reset, or fake up success anyway */ |
1241 | efx_for_each_tx_queue(tx_queue, efx) { | 1261 | efx_for_each_tx_queue(tx_queue, efx) { |
1242 | if (tx_queue->flushed != FLUSH_DONE) | 1262 | if (tx_queue->flushed != FLUSH_DONE) |
1243 | EFX_ERR(efx, "tx queue %d flush command timed out\n", | 1263 | netif_err(efx, hw, efx->net_dev, |
1244 | tx_queue->queue); | 1264 | "tx queue %d flush command timed out\n", |
1265 | tx_queue->queue); | ||
1245 | tx_queue->flushed = FLUSH_DONE; | 1266 | tx_queue->flushed = FLUSH_DONE; |
1246 | } | 1267 | } |
1247 | efx_for_each_rx_queue(rx_queue, efx) { | 1268 | efx_for_each_rx_queue(rx_queue, efx) { |
1248 | if (rx_queue->flushed != FLUSH_DONE) | 1269 | if (rx_queue->flushed != FLUSH_DONE) |
1249 | EFX_ERR(efx, "rx queue %d flush command timed out\n", | 1270 | netif_err(efx, hw, efx->net_dev, |
1250 | rx_queue->queue); | 1271 | "rx queue %d flush command timed out\n", |
1272 | rx_queue->queue); | ||
1251 | rx_queue->flushed = FLUSH_DONE; | 1273 | rx_queue->flushed = FLUSH_DONE; |
1252 | } | 1274 | } |
1253 | 1275 | ||
@@ -1319,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | |||
1319 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | 1341 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); |
1320 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | 1342 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); |
1321 | 1343 | ||
1322 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " | 1344 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " |
1323 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | 1345 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), |
1324 | EFX_OWORD_VAL(fatal_intr), | 1346 | EFX_OWORD_VAL(fatal_intr), |
1325 | error ? "disabling bus mastering" : "no recognised error"); | 1347 | error ? "disabling bus mastering" : "no recognised error"); |
1326 | 1348 | ||
1327 | /* If this is a memory parity error dump which blocks are offending */ | 1349 | /* If this is a memory parity error dump which blocks are offending */ |
1328 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | 1350 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || |
@@ -1330,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | |||
1330 | if (mem_perr) { | 1352 | if (mem_perr) { |
1331 | efx_oword_t reg; | 1353 | efx_oword_t reg; |
1332 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | 1354 | efx_reado(efx, ®, FR_AZ_MEM_STAT); |
1333 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " | 1355 | netif_err(efx, hw, efx->net_dev, |
1334 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); | 1356 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", |
1357 | EFX_OWORD_VAL(reg)); | ||
1335 | } | 1358 | } |
1336 | 1359 | ||
1337 | /* Disable both devices */ | 1360 | /* Disable both devices */ |
@@ -1348,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | |||
1348 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | 1371 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; |
1349 | } | 1372 | } |
1350 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | 1373 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { |
1351 | EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); | 1374 | netif_err(efx, hw, efx->net_dev, |
1375 | "SYSTEM ERROR - reset scheduled\n"); | ||
1352 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | 1376 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); |
1353 | } else { | 1377 | } else { |
1354 | EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen." | 1378 | netif_err(efx, hw, efx->net_dev, |
1355 | "NIC will be disabled\n"); | 1379 | "SYSTEM ERROR - max number of errors seen." |
1380 | "NIC will be disabled\n"); | ||
1356 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | 1381 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
1357 | } | 1382 | } |
1358 | 1383 | ||
@@ -1415,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | |||
1415 | 1440 | ||
1416 | if (result == IRQ_HANDLED) { | 1441 | if (result == IRQ_HANDLED) { |
1417 | efx->last_irq_cpu = raw_smp_processor_id(); | 1442 | efx->last_irq_cpu = raw_smp_processor_id(); |
1418 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | 1443 | netif_vdbg(efx, intr, efx->net_dev, |
1419 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | 1444 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
1445 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1420 | } | 1446 | } |
1421 | 1447 | ||
1422 | return result; | 1448 | return result; |
@@ -1437,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | |||
1437 | int syserr; | 1463 | int syserr; |
1438 | 1464 | ||
1439 | efx->last_irq_cpu = raw_smp_processor_id(); | 1465 | efx->last_irq_cpu = raw_smp_processor_id(); |
1440 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1466 | netif_vdbg(efx, intr, efx->net_dev, |
1441 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1467 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1468 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1442 | 1469 | ||
1443 | /* Check to see if we have a serious error condition */ | 1470 | /* Check to see if we have a serious error condition */ |
1444 | if (channel->channel == efx->fatal_irq_level) { | 1471 | if (channel->channel == efx->fatal_irq_level) { |
@@ -1494,8 +1521,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx) | |||
1494 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | 1521 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, |
1495 | efx->name, efx); | 1522 | efx->name, efx); |
1496 | if (rc) { | 1523 | if (rc) { |
1497 | EFX_ERR(efx, "failed to hook legacy IRQ %d\n", | 1524 | netif_err(efx, drv, efx->net_dev, |
1498 | efx->pci_dev->irq); | 1525 | "failed to hook legacy IRQ %d\n", |
1526 | efx->pci_dev->irq); | ||
1499 | goto fail1; | 1527 | goto fail1; |
1500 | } | 1528 | } |
1501 | return 0; | 1529 | return 0; |
@@ -1507,7 +1535,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) | |||
1507 | IRQF_PROBE_SHARED, /* Not shared */ | 1535 | IRQF_PROBE_SHARED, /* Not shared */ |
1508 | channel->name, channel); | 1536 | channel->name, channel); |
1509 | if (rc) { | 1537 | if (rc) { |
1510 | EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); | 1538 | netif_err(efx, drv, efx->net_dev, |
1539 | "failed to hook IRQ %d\n", channel->irq); | ||
1511 | goto fail2; | 1540 | goto fail2; |
1512 | } | 1541 | } |
1513 | } | 1542 | } |
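For context on the nic.c conversions above: a call such as netif_err(efx, hw, efx->net_dev, ...) only prints when the NETIF_MSG_HW bit is set in efx->msg_enable. A simplified view of what the macro does, paraphrased from the generic <linux/netdevice.h> helpers rather than taken from this patch:

	/* Roughly equivalent to netif_err(efx, hw, efx->net_dev, fmt, ...):
	 * the category ('hw') selects which msg_enable bit is tested, and
	 * the helper supplies the printk level (KERN_ERR for netif_err). */
	if (efx->msg_enable & NETIF_MSG_HW)	/* i.e. netif_msg_hw(efx) */
		netdev_printk(KERN_ERR, efx->net_dev,
			      "SYSTEM ERROR - reset scheduled\n");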
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index e077bef08a50..68813d1d85f3 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -91,9 +91,10 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx) | |||
91 | if (time_after(jiffies, timeout)) { | 91 | if (time_after(jiffies, timeout)) { |
92 | /* Some cables have EEPROMs that conflict with the | 92 | /* Some cables have EEPROMs that conflict with the |
93 | * PHY's on-board EEPROM so it cannot load firmware */ | 93 | * PHY's on-board EEPROM so it cannot load firmware */ |
94 | EFX_ERR(efx, "If an SFP+ direct attach cable is" | 94 | netif_err(efx, hw, efx->net_dev, |
95 | " connected, please check that it complies" | 95 | "If an SFP+ direct attach cable is" |
96 | " with the SFP+ specification\n"); | 96 | " connected, please check that it complies" |
97 | " with the SFP+ specification\n"); | ||
97 | return -ETIMEDOUT; | 98 | return -ETIMEDOUT; |
98 | } | 99 | } |
99 | msleep(QT2025C_HEARTB_WAIT); | 100 | msleep(QT2025C_HEARTB_WAIT); |
@@ -145,7 +146,8 @@ static int qt2025c_wait_reset(struct efx_nic *efx) | |||
145 | /* Bug 17689: occasionally heartbeat starts but firmware status | 146 | /* Bug 17689: occasionally heartbeat starts but firmware status |
146 | * code never progresses beyond 0x00. Try again, once, after | 147 | * code never progresses beyond 0x00. Try again, once, after |
147 | * restarting execution of the firmware image. */ | 148 | * restarting execution of the firmware image. */ |
148 | EFX_LOG(efx, "bashing QT2025C microcontroller\n"); | 149 | netif_dbg(efx, hw, efx->net_dev, |
150 | "bashing QT2025C microcontroller\n"); | ||
149 | qt2025c_restart_firmware(efx); | 151 | qt2025c_restart_firmware(efx); |
150 | rc = qt2025c_wait_heartbeat(efx); | 152 | rc = qt2025c_wait_heartbeat(efx); |
151 | if (rc != 0) | 153 | if (rc != 0) |
@@ -165,11 +167,12 @@ static void qt2025c_firmware_id(struct efx_nic *efx) | |||
165 | for (i = 0; i < sizeof(firmware_id); i++) | 167 | for (i = 0; i < sizeof(firmware_id); i++) |
166 | firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, | 168 | firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, |
167 | PCS_FW_PRODUCT_CODE_1 + i); | 169 | PCS_FW_PRODUCT_CODE_1 + i); |
168 | EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", | 170 | netif_info(efx, probe, efx->net_dev, |
169 | (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], | 171 | "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", |
170 | firmware_id[3] >> 4, firmware_id[3] & 0xf, | 172 | (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], |
171 | firmware_id[4], firmware_id[5], | 173 | firmware_id[3] >> 4, firmware_id[3] & 0xf, |
172 | firmware_id[6], firmware_id[7], firmware_id[8]); | 174 | firmware_id[4], firmware_id[5], |
175 | firmware_id[6], firmware_id[7], firmware_id[8]); | ||
173 | phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | | 176 | phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | |
174 | ((firmware_id[3] & 0x0f) << 16) | | 177 | ((firmware_id[3] & 0x0f) << 16) | |
175 | (firmware_id[4] << 8) | firmware_id[5]; | 178 | (firmware_id[4] << 8) | firmware_id[5]; |
@@ -198,7 +201,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx) | |||
198 | } | 201 | } |
199 | 202 | ||
200 | if (time_after_eq(jiffies, phy_data->bug17190_timer)) { | 203 | if (time_after_eq(jiffies, phy_data->bug17190_timer)) { |
201 | EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); | 204 | netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n"); |
202 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, | 205 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, |
203 | MDIO_PMA_CTRL1_LOOPBACK, true); | 206 | MDIO_PMA_CTRL1_LOOPBACK, true); |
204 | msleep(100); | 207 | msleep(100); |
@@ -231,7 +234,8 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx) | |||
231 | reg = efx_mdio_read(efx, 1, 0xc319); | 234 | reg = efx_mdio_read(efx, 1, 0xc319); |
232 | if ((reg & 0x0038) == phy_op_mode) | 235 | if ((reg & 0x0038) == phy_op_mode) |
233 | return 0; | 236 | return 0; |
234 | EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); | 237 | netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n", |
238 | phy_op_mode); | ||
235 | 239 | ||
236 | /* This sequence replicates the register writes configured in the boot | 240 | /* This sequence replicates the register writes configured in the boot |
237 | * EEPROM (including the differences between board revisions), except | 241 | * EEPROM (including the differences between board revisions), except |
@@ -287,8 +291,9 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx) | |||
287 | /* Wait for the microcontroller to be ready again */ | 291 | /* Wait for the microcontroller to be ready again */ |
288 | rc = qt2025c_wait_reset(efx); | 292 | rc = qt2025c_wait_reset(efx); |
289 | if (rc < 0) { | 293 | if (rc < 0) { |
290 | EFX_ERR(efx, "PHY microcontroller reset during mode switch " | 294 | netif_err(efx, hw, efx->net_dev, |
291 | "timed out\n"); | 295 | "PHY microcontroller reset during mode switch " |
296 | "timed out\n"); | ||
292 | return rc; | 297 | return rc; |
293 | } | 298 | } |
294 | 299 | ||
@@ -324,7 +329,7 @@ static int qt202x_reset_phy(struct efx_nic *efx) | |||
324 | return 0; | 329 | return 0; |
325 | 330 | ||
326 | fail: | 331 | fail: |
327 | EFX_ERR(efx, "PHY reset timed out\n"); | 332 | netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n"); |
328 | return rc; | 333 | return rc; |
329 | } | 334 | } |
330 | 335 | ||
@@ -353,14 +358,15 @@ static int qt202x_phy_init(struct efx_nic *efx) | |||
353 | 358 | ||
354 | rc = qt202x_reset_phy(efx); | 359 | rc = qt202x_reset_phy(efx); |
355 | if (rc) { | 360 | if (rc) { |
356 | EFX_ERR(efx, "PHY init failed\n"); | 361 | netif_err(efx, probe, efx->net_dev, "PHY init failed\n"); |
357 | return rc; | 362 | return rc; |
358 | } | 363 | } |
359 | 364 | ||
360 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); | 365 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); |
361 | EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", | 366 | netif_info(efx, probe, efx->net_dev, |
362 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), | 367 | "PHY ID reg %x (OUI %06x model %02x revision %x)\n", |
363 | efx_mdio_id_rev(devid)); | 368 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), |
369 | efx_mdio_id_rev(devid)); | ||
364 | 370 | ||
365 | if (efx->phy_type == PHY_TYPE_QT2025C) | 371 | if (efx->phy_type == PHY_TYPE_QT2025C) |
366 | qt2025c_firmware_id(efx); | 372 | qt2025c_firmware_id(efx); |
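One behavioural note on the netif_dbg() calls that replace EFX_LOG() in qt202x_phy.c above (and throughout the patch): debug-level messages are also gated at build time, so setting the msg_enable bit alone is not enough to see them. A paraphrased sketch of the relevant generic netdevice.h definitions of this era; this is an assumption about the core helpers, not part of the patch:

	/* netif_dbg(): emitted via dynamic debug, or only when DEBUG is
	 * defined on non-dynamic-debug kernels.
	 * netif_vdbg() (used for the old EFX_TRACE()/EFX_REGDUMP()):
	 * compiled out entirely unless VERBOSE_DEBUG is defined. */
	#if defined(VERBOSE_DEBUG)
	#define netif_vdbg	netif_dbg
	#else
	#define netif_vdbg(priv, type, dev, format, args...) \
		({ if (0) netif_dbg(priv, type, dev, format, ##args); 0; })
	#endif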
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 9fb698e3519d..d9ed20ee0dc5 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -348,10 +348,11 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | |||
348 | if (space < EFX_RX_BATCH) | 348 | if (space < EFX_RX_BATCH) |
349 | goto out; | 349 | goto out; |
350 | 350 | ||
351 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" | 351 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, |
352 | " level %d to level %d using %s allocation\n", | 352 | "RX queue %d fast-filling descriptor ring from" |
353 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, | 353 | " level %d to level %d using %s allocation\n", |
354 | channel->rx_alloc_push_pages ? "page" : "skb"); | 354 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, |
355 | channel->rx_alloc_push_pages ? "page" : "skb"); | ||
355 | 356 | ||
356 | do { | 357 | do { |
357 | if (channel->rx_alloc_push_pages) | 358 | if (channel->rx_alloc_push_pages) |
@@ -366,9 +367,10 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | |||
366 | } | 367 | } |
367 | } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); | 368 | } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); |
368 | 369 | ||
369 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring " | 370 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, |
370 | "to level %d\n", rx_queue->queue, | 371 | "RX queue %d fast-filled descriptor ring " |
371 | rx_queue->added_count - rx_queue->removed_count); | 372 | "to level %d\n", rx_queue->queue, |
373 | rx_queue->added_count - rx_queue->removed_count); | ||
372 | 374 | ||
373 | out: | 375 | out: |
374 | if (rx_queue->notified_count != rx_queue->added_count) | 376 | if (rx_queue->notified_count != rx_queue->added_count) |
@@ -402,10 +404,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
402 | *discard = true; | 404 | *discard = true; |
403 | 405 | ||
404 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { | 406 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { |
405 | EFX_ERR_RL(efx, " RX queue %d seriously overlength " | 407 | if (net_ratelimit()) |
406 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", | 408 | netif_err(efx, rx_err, efx->net_dev, |
407 | rx_queue->queue, len, max_len, | 409 | " RX queue %d seriously overlength " |
408 | efx->type->rx_buffer_padding); | 410 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", |
411 | rx_queue->queue, len, max_len, | ||
412 | efx->type->rx_buffer_padding); | ||
409 | /* If this buffer was skb-allocated, then the meta | 413 | /* If this buffer was skb-allocated, then the meta |
410 | * data at the end of the skb will be trashed. So | 414 | * data at the end of the skb will be trashed. So |
411 | * we have no choice but to leak the fragment. | 415 | * we have no choice but to leak the fragment. |
@@ -413,8 +417,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
413 | *leak_packet = (rx_buf->skb != NULL); | 417 | *leak_packet = (rx_buf->skb != NULL); |
414 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); | 418 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); |
415 | } else { | 419 | } else { |
416 | EFX_ERR_RL(efx, " RX queue %d overlength RX event " | 420 | if (net_ratelimit()) |
417 | "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len); | 421 | netif_err(efx, rx_err, efx->net_dev, |
422 | " RX queue %d overlength RX event " | ||
423 | "(0x%x > 0x%x)\n", | ||
424 | rx_queue->queue, len, max_len); | ||
418 | } | 425 | } |
419 | 426 | ||
420 | rx_queue->channel->n_rx_overlength++; | 427 | rx_queue->channel->n_rx_overlength++; |
@@ -502,11 +509,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
502 | efx_rx_packet__check_len(rx_queue, rx_buf, len, | 509 | efx_rx_packet__check_len(rx_queue, rx_buf, len, |
503 | &discard, &leak_packet); | 510 | &discard, &leak_packet); |
504 | 511 | ||
505 | EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n", | 512 | netif_vdbg(efx, rx_status, efx->net_dev, |
506 | rx_queue->queue, index, | 513 | "RX queue %d received id %x at %llx+%x %s%s\n", |
507 | (unsigned long long)rx_buf->dma_addr, len, | 514 | rx_queue->queue, index, |
508 | (checksummed ? " [SUMMED]" : ""), | 515 | (unsigned long long)rx_buf->dma_addr, len, |
509 | (discard ? " [DISCARD]" : "")); | 516 | (checksummed ? " [SUMMED]" : ""), |
517 | (discard ? " [DISCARD]" : "")); | ||
510 | 518 | ||
511 | /* Discard packet, if instructed to do so */ | 519 | /* Discard packet, if instructed to do so */ |
512 | if (unlikely(discard)) { | 520 | if (unlikely(discard)) { |
@@ -621,7 +629,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
621 | unsigned int rxq_size; | 629 | unsigned int rxq_size; |
622 | int rc; | 630 | int rc; |
623 | 631 | ||
624 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); | 632 | netif_dbg(efx, probe, efx->net_dev, |
633 | "creating RX queue %d\n", rx_queue->queue); | ||
625 | 634 | ||
626 | /* Allocate RX buffers */ | 635 | /* Allocate RX buffers */ |
627 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); | 636 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); |
@@ -641,7 +650,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | |||
641 | { | 650 | { |
642 | unsigned int max_fill, trigger, limit; | 651 | unsigned int max_fill, trigger, limit; |
643 | 652 | ||
644 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); | 653 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
654 | "initialising RX queue %d\n", rx_queue->queue); | ||
645 | 655 | ||
646 | /* Initialise ptr fields */ | 656 | /* Initialise ptr fields */ |
647 | rx_queue->added_count = 0; | 657 | rx_queue->added_count = 0; |
@@ -668,7 +678,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
668 | int i; | 678 | int i; |
669 | struct efx_rx_buffer *rx_buf; | 679 | struct efx_rx_buffer *rx_buf; |
670 | 680 | ||
671 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); | 681 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
682 | "shutting down RX queue %d\n", rx_queue->queue); | ||
672 | 683 | ||
673 | del_timer_sync(&rx_queue->slow_fill); | 684 | del_timer_sync(&rx_queue->slow_fill); |
674 | efx_nic_fini_rx(rx_queue); | 685 | efx_nic_fini_rx(rx_queue); |
@@ -684,7 +695,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
684 | 695 | ||
685 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | 696 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) |
686 | { | 697 | { |
687 | EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); | 698 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
699 | "destroying RX queue %d\n", rx_queue->queue); | ||
688 | 700 | ||
689 | efx_nic_remove_rx(rx_queue); | 701 | efx_nic_remove_rx(rx_queue); |
690 | 702 | ||
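The rx.c hunks above also show the replacement for the old rate-limited macros (EFX_ERR_RL() and friends): an explicit net_ratelimit() guard wrapped around the netif_err() call. The pattern, pulled out of the diff for clarity (string literals condensed):

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  " RX queue %d overlength RX event (0x%x > 0x%x)\n",
			  rx_queue->queue, len, max_len);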
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 1f83404af63b..85f015f005d5 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -123,7 +123,7 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
123 | { | 123 | { |
124 | struct efx_channel *channel; | 124 | struct efx_channel *channel; |
125 | 125 | ||
126 | EFX_LOG(efx, "testing interrupts\n"); | 126 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); |
127 | tests->interrupt = -1; | 127 | tests->interrupt = -1; |
128 | 128 | ||
129 | /* Reset interrupt flag */ | 129 | /* Reset interrupt flag */ |
@@ -142,16 +142,17 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
142 | efx_nic_generate_interrupt(efx); | 142 | efx_nic_generate_interrupt(efx); |
143 | 143 | ||
144 | /* Wait for arrival of test interrupt. */ | 144 | /* Wait for arrival of test interrupt. */ |
145 | EFX_LOG(efx, "waiting for test interrupt\n"); | 145 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); |
146 | schedule_timeout_uninterruptible(HZ / 10); | 146 | schedule_timeout_uninterruptible(HZ / 10); |
147 | if (efx->last_irq_cpu >= 0) | 147 | if (efx->last_irq_cpu >= 0) |
148 | goto success; | 148 | goto success; |
149 | 149 | ||
150 | EFX_ERR(efx, "timed out waiting for interrupt\n"); | 150 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); |
151 | return -ETIMEDOUT; | 151 | return -ETIMEDOUT; |
152 | 152 | ||
153 | success: | 153 | success: |
154 | EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx), | 154 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", |
155 | INT_MODE(efx), | ||
155 | efx->last_irq_cpu); | 156 | efx->last_irq_cpu); |
156 | tests->interrupt = 1; | 157 | tests->interrupt = 1; |
157 | return 0; | 158 | return 0; |
@@ -161,6 +162,7 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
161 | static int efx_test_eventq_irq(struct efx_channel *channel, | 162 | static int efx_test_eventq_irq(struct efx_channel *channel, |
162 | struct efx_self_tests *tests) | 163 | struct efx_self_tests *tests) |
163 | { | 164 | { |
165 | struct efx_nic *efx = channel->efx; | ||
164 | unsigned int magic_count, count; | 166 | unsigned int magic_count, count; |
165 | 167 | ||
166 | tests->eventq_dma[channel->channel] = -1; | 168 | tests->eventq_dma[channel->channel] = -1; |
@@ -185,29 +187,32 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
185 | goto eventq_ok; | 187 | goto eventq_ok; |
186 | } while (++count < 2); | 188 | } while (++count < 2); |
187 | 189 | ||
188 | EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", | 190 | netif_err(efx, drv, efx->net_dev, |
189 | channel->channel); | 191 | "channel %d timed out waiting for event queue\n", |
192 | channel->channel); | ||
190 | 193 | ||
191 | /* See if interrupt arrived */ | 194 | /* See if interrupt arrived */ |
192 | if (channel->efx->last_irq_cpu >= 0) { | 195 | if (channel->efx->last_irq_cpu >= 0) { |
193 | EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " | 196 | netif_err(efx, drv, efx->net_dev, |
194 | "during event queue test\n", channel->channel, | 197 | "channel %d saw interrupt on CPU%d " |
195 | raw_smp_processor_id()); | 198 | "during event queue test\n", channel->channel, |
199 | raw_smp_processor_id()); | ||
196 | tests->eventq_int[channel->channel] = 1; | 200 | tests->eventq_int[channel->channel] = 1; |
197 | } | 201 | } |
198 | 202 | ||
199 | /* Check to see if event was received even if interrupt wasn't */ | 203 | /* Check to see if event was received even if interrupt wasn't */ |
200 | efx_process_channel_now(channel); | 204 | efx_process_channel_now(channel); |
201 | if (channel->magic_count != magic_count) { | 205 | if (channel->magic_count != magic_count) { |
202 | EFX_ERR(channel->efx, "channel %d event was generated, but " | 206 | netif_err(efx, drv, efx->net_dev, |
203 | "failed to trigger an interrupt\n", channel->channel); | 207 | "channel %d event was generated, but " |
208 | "failed to trigger an interrupt\n", channel->channel); | ||
204 | tests->eventq_dma[channel->channel] = 1; | 209 | tests->eventq_dma[channel->channel] = 1; |
205 | } | 210 | } |
206 | 211 | ||
207 | return -ETIMEDOUT; | 212 | return -ETIMEDOUT; |
208 | eventq_ok: | 213 | eventq_ok: |
209 | EFX_LOG(channel->efx, "channel %d event queue passed\n", | 214 | netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n", |
210 | channel->channel); | 215 | channel->channel); |
211 | tests->eventq_dma[channel->channel] = 1; | 216 | tests->eventq_dma[channel->channel] = 1; |
212 | tests->eventq_int[channel->channel] = 1; | 217 | tests->eventq_int[channel->channel] = 1; |
213 | tests->eventq_poll[channel->channel] = 1; | 218 | tests->eventq_poll[channel->channel] = 1; |
@@ -260,51 +265,57 @@ void efx_loopback_rx_packet(struct efx_nic *efx, | |||
260 | 265 | ||
261 | /* Check that header exists */ | 266 | /* Check that header exists */ |
262 | if (pkt_len < sizeof(received->header)) { | 267 | if (pkt_len < sizeof(received->header)) { |
263 | EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " | 268 | netif_err(efx, drv, efx->net_dev, |
264 | "test\n", pkt_len, LOOPBACK_MODE(efx)); | 269 | "saw runt RX packet (length %d) in %s loopback " |
270 | "test\n", pkt_len, LOOPBACK_MODE(efx)); | ||
265 | goto err; | 271 | goto err; |
266 | } | 272 | } |
267 | 273 | ||
268 | /* Check that the ethernet header exists */ | 274 | /* Check that the ethernet header exists */ |
269 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { | 275 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { |
270 | EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", | 276 | netif_err(efx, drv, efx->net_dev, |
271 | LOOPBACK_MODE(efx)); | 277 | "saw non-loopback RX packet in %s loopback test\n", |
278 | LOOPBACK_MODE(efx)); | ||
272 | goto err; | 279 | goto err; |
273 | } | 280 | } |
274 | 281 | ||
275 | /* Check packet length */ | 282 | /* Check packet length */ |
276 | if (pkt_len != sizeof(*payload)) { | 283 | if (pkt_len != sizeof(*payload)) { |
277 | EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " | 284 | netif_err(efx, drv, efx->net_dev, |
278 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), | 285 | "saw incorrect RX packet length %d (wanted %d) in " |
279 | LOOPBACK_MODE(efx)); | 286 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), |
287 | LOOPBACK_MODE(efx)); | ||
280 | goto err; | 288 | goto err; |
281 | } | 289 | } |
282 | 290 | ||
283 | /* Check that IP header matches */ | 291 | /* Check that IP header matches */ |
284 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { | 292 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { |
285 | EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", | 293 | netif_err(efx, drv, efx->net_dev, |
286 | LOOPBACK_MODE(efx)); | 294 | "saw corrupted IP header in %s loopback test\n", |
295 | LOOPBACK_MODE(efx)); | ||
287 | goto err; | 296 | goto err; |
288 | } | 297 | } |
289 | 298 | ||
290 | /* Check that msg and padding matches */ | 299 | /* Check that msg and padding matches */ |
291 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { | 300 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { |
292 | EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", | 301 | netif_err(efx, drv, efx->net_dev, |
293 | LOOPBACK_MODE(efx)); | 302 | "saw corrupted RX packet in %s loopback test\n", |
303 | LOOPBACK_MODE(efx)); | ||
294 | goto err; | 304 | goto err; |
295 | } | 305 | } |
296 | 306 | ||
297 | /* Check that iteration matches */ | 307 | /* Check that iteration matches */ |
298 | if (received->iteration != payload->iteration) { | 308 | if (received->iteration != payload->iteration) { |
299 | EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " | 309 | netif_err(efx, drv, efx->net_dev, |
300 | "%s loopback test\n", ntohs(received->iteration), | 310 | "saw RX packet from iteration %d (wanted %d) in " |
301 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); | 311 | "%s loopback test\n", ntohs(received->iteration), |
312 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); | ||
302 | goto err; | 313 | goto err; |
303 | } | 314 | } |
304 | 315 | ||
305 | /* Increase correct RX count */ | 316 | /* Increase correct RX count */ |
306 | EFX_TRACE(efx, "got loopback RX in %s loopback test\n", | 317 | netif_vdbg(efx, drv, efx->net_dev, |
307 | LOOPBACK_MODE(efx)); | 318 | "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); |
308 | 319 | ||
309 | atomic_inc(&state->rx_good); | 320 | atomic_inc(&state->rx_good); |
310 | return; | 321 | return; |
@@ -312,10 +323,10 @@ void efx_loopback_rx_packet(struct efx_nic *efx, | |||
312 | err: | 323 | err: |
313 | #ifdef EFX_ENABLE_DEBUG | 324 | #ifdef EFX_ENABLE_DEBUG |
314 | if (atomic_read(&state->rx_bad) == 0) { | 325 | if (atomic_read(&state->rx_bad) == 0) { |
315 | EFX_ERR(efx, "received packet:\n"); | 326 | netif_err(efx, drv, efx->net_dev, "received packet:\n"); |
316 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | 327 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, |
317 | buf_ptr, pkt_len, 0); | 328 | buf_ptr, pkt_len, 0); |
318 | EFX_ERR(efx, "expected packet:\n"); | 329 | netif_err(efx, drv, efx->net_dev, "expected packet:\n"); |
319 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | 330 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, |
320 | &state->payload, sizeof(state->payload), 0); | 331 | &state->payload, sizeof(state->payload), 0); |
321 | } | 332 | } |
@@ -396,9 +407,11 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) | |||
396 | netif_tx_unlock_bh(efx->net_dev); | 407 | netif_tx_unlock_bh(efx->net_dev); |
397 | 408 | ||
398 | if (rc != NETDEV_TX_OK) { | 409 | if (rc != NETDEV_TX_OK) { |
399 | EFX_ERR(efx, "TX queue %d could not transmit packet %d " | 410 | netif_err(efx, drv, efx->net_dev, |
400 | "of %d in %s loopback test\n", tx_queue->queue, | 411 | "TX queue %d could not transmit packet %d of " |
401 | i + 1, state->packet_count, LOOPBACK_MODE(efx)); | 412 | "%d in %s loopback test\n", tx_queue->queue, |
413 | i + 1, state->packet_count, | ||
414 | LOOPBACK_MODE(efx)); | ||
402 | 415 | ||
403 | /* Defer cleaning up the other skbs for the caller */ | 416 | /* Defer cleaning up the other skbs for the caller */ |
404 | kfree_skb(skb); | 417 | kfree_skb(skb); |
@@ -454,20 +467,22 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, | |||
454 | /* Don't free the skbs; they will be picked up on TX | 467 | /* Don't free the skbs; they will be picked up on TX |
455 | * overflow or channel teardown. | 468 | * overflow or channel teardown. |
456 | */ | 469 | */ |
457 | EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " | 470 | netif_err(efx, drv, efx->net_dev, |
458 | "TX completion events in %s loopback test\n", | 471 | "TX queue %d saw only %d out of an expected %d " |
459 | tx_queue->queue, tx_done, state->packet_count, | 472 | "TX completion events in %s loopback test\n", |
460 | LOOPBACK_MODE(efx)); | 473 | tx_queue->queue, tx_done, state->packet_count, |
474 | LOOPBACK_MODE(efx)); | ||
461 | rc = -ETIMEDOUT; | 475 | rc = -ETIMEDOUT; |
462 | /* Allow to fall through so we see the RX errors as well */ | 476 | /* Allow to fall through so we see the RX errors as well */ |
463 | } | 477 | } |
464 | 478 | ||
465 | /* We may always be up to a flush away from our desired packet total */ | 479 | /* We may always be up to a flush away from our desired packet total */ |
466 | if (rx_good != state->packet_count) { | 480 | if (rx_good != state->packet_count) { |
467 | EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " | 481 | netif_dbg(efx, drv, efx->net_dev, |
468 | "received packets in %s loopback test\n", | 482 | "TX queue %d saw only %d out of an expected %d " |
469 | tx_queue->queue, rx_good, state->packet_count, | 483 | "received packets in %s loopback test\n", |
470 | LOOPBACK_MODE(efx)); | 484 | tx_queue->queue, rx_good, state->packet_count, |
485 | LOOPBACK_MODE(efx)); | ||
471 | rc = -ETIMEDOUT; | 486 | rc = -ETIMEDOUT; |
472 | /* Fall through */ | 487 | /* Fall through */ |
473 | } | 488 | } |
@@ -499,9 +514,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
499 | return -ENOMEM; | 514 | return -ENOMEM; |
500 | state->flush = false; | 515 | state->flush = false; |
501 | 516 | ||
502 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | 517 | netif_dbg(efx, drv, efx->net_dev, |
503 | "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | 518 | "TX queue %d testing %s loopback with %d packets\n", |
504 | state->packet_count); | 519 | tx_queue->queue, LOOPBACK_MODE(efx), |
520 | state->packet_count); | ||
505 | 521 | ||
506 | efx_iterate_state(efx); | 522 | efx_iterate_state(efx); |
507 | begin_rc = efx_begin_loopback(tx_queue); | 523 | begin_rc = efx_begin_loopback(tx_queue); |
@@ -525,9 +541,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
525 | } | 541 | } |
526 | } | 542 | } |
527 | 543 | ||
528 | EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " | 544 | netif_dbg(efx, drv, efx->net_dev, |
529 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | 545 | "TX queue %d passed %s loopback test with a burst length " |
530 | state->packet_count); | 546 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), |
547 | state->packet_count); | ||
531 | 548 | ||
532 | return 0; | 549 | return 0; |
533 | } | 550 | } |
@@ -602,15 +619,17 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, | |||
602 | rc = __efx_reconfigure_port(efx); | 619 | rc = __efx_reconfigure_port(efx); |
603 | mutex_unlock(&efx->mac_lock); | 620 | mutex_unlock(&efx->mac_lock); |
604 | if (rc) { | 621 | if (rc) { |
605 | EFX_ERR(efx, "unable to move into %s loopback\n", | 622 | netif_err(efx, drv, efx->net_dev, |
606 | LOOPBACK_MODE(efx)); | 623 | "unable to move into %s loopback\n", |
624 | LOOPBACK_MODE(efx)); | ||
607 | goto out; | 625 | goto out; |
608 | } | 626 | } |
609 | 627 | ||
610 | rc = efx_wait_for_link(efx); | 628 | rc = efx_wait_for_link(efx); |
611 | if (rc) { | 629 | if (rc) { |
612 | EFX_ERR(efx, "loopback %s never came up\n", | 630 | netif_err(efx, drv, efx->net_dev, |
613 | LOOPBACK_MODE(efx)); | 631 | "loopback %s never came up\n", |
632 | LOOPBACK_MODE(efx)); | ||
614 | goto out; | 633 | goto out; |
615 | } | 634 | } |
616 | 635 | ||
@@ -718,7 +737,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
718 | rc_reset = rc; | 737 | rc_reset = rc; |
719 | 738 | ||
720 | if (rc_reset) { | 739 | if (rc_reset) { |
721 | EFX_ERR(efx, "Unable to recover from chip test\n"); | 740 | netif_err(efx, drv, efx->net_dev, |
741 | "Unable to recover from chip test\n"); | ||
722 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | 742 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
723 | return rc_reset; | 743 | return rc_reset; |
724 | } | 744 | } |
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index f2b1e6180753..59d1dc6db1c6 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -118,10 +118,11 @@ static int siena_probe_port(struct efx_nic *efx) | |||
118 | MC_CMD_MAC_NSTATS * sizeof(u64)); | 118 | MC_CMD_MAC_NSTATS * sizeof(u64)); |
119 | if (rc) | 119 | if (rc) |
120 | return rc; | 120 | return rc; |
121 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", | 121 | netif_dbg(efx, probe, efx->net_dev, |
122 | (u64)efx->stats_buffer.dma_addr, | 122 | "stats buffer at %llx (virt %p phys %llx)\n", |
123 | efx->stats_buffer.addr, | 123 | (u64)efx->stats_buffer.dma_addr, |
124 | (u64)virt_to_phys(efx->stats_buffer.addr)); | 124 | efx->stats_buffer.addr, |
125 | (u64)virt_to_phys(efx->stats_buffer.addr)); | ||
125 | 126 | ||
126 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); | 127 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); |
127 | 128 | ||
@@ -216,7 +217,8 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
216 | efx->nic_data = nic_data; | 217 | efx->nic_data = nic_data; |
217 | 218 | ||
218 | if (efx_nic_fpga_ver(efx) != 0) { | 219 | if (efx_nic_fpga_ver(efx) != 0) { |
219 | EFX_ERR(efx, "Siena FPGA not supported\n"); | 220 | netif_err(efx, probe, efx->net_dev, |
221 | "Siena FPGA not supported\n"); | ||
220 | rc = -ENODEV; | 222 | rc = -ENODEV; |
221 | goto fail1; | 223 | goto fail1; |
222 | } | 224 | } |
@@ -233,8 +235,8 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
233 | 235 | ||
234 | rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); | 236 | rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); |
235 | if (rc) { | 237 | if (rc) { |
236 | EFX_ERR(efx, "Failed to read MCPU firmware version - " | 238 | netif_err(efx, probe, efx->net_dev, |
237 | "rc %d\n", rc); | 239 | "Failed to read MCPU firmware version - rc %d\n", rc); |
238 | goto fail1; /* MCPU absent? */ | 240 | goto fail1; /* MCPU absent? */ |
239 | } | 241 | } |
240 | 242 | ||
@@ -242,17 +244,19 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
242 | * filter settings. We must do this before we reset the NIC */ | 244 | * filter settings. We must do this before we reset the NIC */ |
243 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); | 245 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); |
244 | if (rc) { | 246 | if (rc) { |
245 | EFX_ERR(efx, "Unable to register driver with MCPU\n"); | 247 | netif_err(efx, probe, efx->net_dev, |
248 | "Unable to register driver with MCPU\n"); | ||
246 | goto fail2; | 249 | goto fail2; |
247 | } | 250 | } |
248 | if (already_attached) | 251 | if (already_attached) |
249 | /* Not a fatal error */ | 252 | /* Not a fatal error */ |
250 | EFX_ERR(efx, "Host already registered with MCPU\n"); | 253 | netif_err(efx, probe, efx->net_dev, |
254 | "Host already registered with MCPU\n"); | ||
251 | 255 | ||
252 | /* Now we can reset the NIC */ | 256 | /* Now we can reset the NIC */ |
253 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); | 257 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); |
254 | if (rc) { | 258 | if (rc) { |
255 | EFX_ERR(efx, "failed to reset NIC\n"); | 259 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); |
256 | goto fail3; | 260 | goto fail3; |
257 | } | 261 | } |
258 | 262 | ||
@@ -264,15 +268,17 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
264 | goto fail4; | 268 | goto fail4; |
265 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 269 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
266 | 270 | ||
267 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", | 271 | netif_dbg(efx, probe, efx->net_dev, |
268 | (unsigned long long)efx->irq_status.dma_addr, | 272 | "INT_KER at %llx (virt %p phys %llx)\n", |
269 | efx->irq_status.addr, | 273 | (unsigned long long)efx->irq_status.dma_addr, |
270 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | 274 | efx->irq_status.addr, |
275 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | ||
271 | 276 | ||
272 | /* Read in the non-volatile configuration */ | 277 | /* Read in the non-volatile configuration */ |
273 | rc = siena_probe_nvconfig(efx); | 278 | rc = siena_probe_nvconfig(efx); |
274 | if (rc == -EINVAL) { | 279 | if (rc == -EINVAL) { |
275 | EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); | 280 | netif_err(efx, probe, efx->net_dev, |
281 | "NVRAM is invalid therefore using defaults\n"); | ||
276 | efx->phy_type = PHY_TYPE_NONE; | 282 | efx->phy_type = PHY_TYPE_NONE; |
277 | efx->mdio.prtad = MDIO_PRTAD_NONE; | 283 | efx->mdio.prtad = MDIO_PRTAD_NONE; |
278 | } else if (rc) { | 284 | } else if (rc) { |
@@ -344,7 +350,8 @@ static int siena_init_nic(struct efx_nic *efx) | |||
344 | 350 | ||
345 | if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) | 351 | if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) |
346 | /* No MCDI operation has been defined to set thresholds */ | 352 | /* No MCDI operation has been defined to set thresholds */ |
347 | EFX_ERR(efx, "ignoring RX flow control thresholds\n"); | 353 | netif_err(efx, hw, efx->net_dev, |
354 | "ignoring RX flow control thresholds\n"); | ||
348 | 355 | ||
349 | /* Enable event logging */ | 356 | /* Enable event logging */ |
350 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | 357 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); |
@@ -565,7 +572,8 @@ static int siena_set_wol(struct efx_nic *efx, u32 type) | |||
565 | 572 | ||
566 | return 0; | 573 | return 0; |
567 | fail: | 574 | fail: |
568 | EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc); | 575 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", |
576 | __func__, type, rc); | ||
569 | return rc; | 577 | return rc; |
570 | } | 578 | } |
571 | 579 | ||
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f21efe7bd316..6791be90c2fe 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -228,7 +228,8 @@ int sft9001_wait_boot(struct efx_nic *efx) | |||
228 | boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS, | 228 | boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS, |
229 | PCS_BOOT_STATUS_REG); | 229 | PCS_BOOT_STATUS_REG); |
230 | if (boot_stat >= 0) { | 230 | if (boot_stat >= 0) { |
231 | EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat); | 231 | netif_dbg(efx, hw, efx->net_dev, |
232 | "PHY boot status = %#x\n", boot_stat); | ||
232 | switch (boot_stat & | 233 | switch (boot_stat & |
233 | ((1 << PCS_BOOT_FATAL_ERROR_LBN) | | 234 | ((1 << PCS_BOOT_FATAL_ERROR_LBN) | |
234 | (3 << PCS_BOOT_PROGRESS_LBN) | | 235 | (3 << PCS_BOOT_PROGRESS_LBN) | |
@@ -463,10 +464,11 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok) | |||
463 | reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN; | 464 | reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN; |
464 | } else { | 465 | } else { |
465 | reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN; | 466 | reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN; |
466 | EFX_ERR(efx, "appears to be plugged into a port" | 467 | netif_err(efx, link, efx->net_dev, |
467 | " that is not 10GBASE-T capable. The PHY" | 468 | "appears to be plugged into a port" |
468 | " supports 10GBASE-T ONLY, so no link can" | 469 | " that is not 10GBASE-T capable. The PHY" |
469 | " be established\n"); | 470 | " supports 10GBASE-T ONLY, so no link can" |
471 | " be established\n"); | ||
470 | } | 472 | } |
471 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, | 473 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, |
472 | PMA_PMD_LED_OVERR_REG, reg); | 474 | PMA_PMD_LED_OVERR_REG, reg); |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 6bb12a87ef2d..c6942da2c99a 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel) | |||
42 | return; | 42 | return; |
43 | 43 | ||
44 | spin_lock_bh(&channel->tx_stop_lock); | 44 | spin_lock_bh(&channel->tx_stop_lock); |
45 | EFX_TRACE(efx, "stop TX queue\n"); | 45 | netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n"); |
46 | 46 | ||
47 | atomic_inc(&channel->tx_stop_count); | 47 | atomic_inc(&channel->tx_stop_count); |
48 | netif_tx_stop_queue( | 48 | netif_tx_stop_queue( |
@@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel) | |||
64 | local_bh_disable(); | 64 | local_bh_disable(); |
65 | if (atomic_dec_and_lock(&channel->tx_stop_count, | 65 | if (atomic_dec_and_lock(&channel->tx_stop_count, |
66 | &channel->tx_stop_lock)) { | 66 | &channel->tx_stop_lock)) { |
67 | EFX_TRACE(efx, "waking TX queue\n"); | 67 | netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n"); |
68 | netif_tx_wake_queue( | 68 | netif_tx_wake_queue( |
69 | netdev_get_tx_queue( | 69 | netdev_get_tx_queue( |
70 | efx->net_dev, | 70 | efx->net_dev, |
@@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, | |||
94 | if (buffer->skb) { | 94 | if (buffer->skb) { |
95 | dev_kfree_skb_any((struct sk_buff *) buffer->skb); | 95 | dev_kfree_skb_any((struct sk_buff *) buffer->skb); |
96 | buffer->skb = NULL; | 96 | buffer->skb = NULL; |
97 | EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x " | 97 | netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, |
98 | "complete\n", tx_queue->queue, tx_queue->read_count); | 98 | "TX queue %d transmission id %x complete\n", |
99 | tx_queue->queue, tx_queue->read_count); | ||
99 | } | 100 | } |
100 | } | 101 | } |
101 | 102 | ||
@@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
300 | return NETDEV_TX_OK; | 301 | return NETDEV_TX_OK; |
301 | 302 | ||
302 | pci_err: | 303 | pci_err: |
303 | EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d " | 304 | netif_err(efx, tx_err, efx->net_dev, |
304 | "fragments for DMA\n", tx_queue->queue, skb->len, | 305 | " TX queue %d could not map skb with %d bytes %d " |
305 | skb_shinfo(skb)->nr_frags + 1); | 306 | "fragments for DMA\n", tx_queue->queue, skb->len, |
307 | skb_shinfo(skb)->nr_frags + 1); | ||
306 | 308 | ||
307 | /* Mark the packet as transmitted, and free the SKB ourselves */ | 309 | /* Mark the packet as transmitted, and free the SKB ourselves */ |
308 | dev_kfree_skb_any(skb); | 310 | dev_kfree_skb_any(skb); |
@@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, | |||
354 | while (read_ptr != stop_index) { | 356 | while (read_ptr != stop_index) { |
355 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; | 357 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; |
356 | if (unlikely(buffer->len == 0)) { | 358 | if (unlikely(buffer->len == 0)) { |
357 | EFX_ERR(tx_queue->efx, "TX queue %d spurious TX " | 359 | netif_err(efx, tx_err, efx->net_dev, |
358 | "completion id %x\n", tx_queue->queue, | 360 | "TX queue %d spurious TX completion id %x\n", |
359 | read_ptr); | 361 | tx_queue->queue, read_ptr); |
360 | efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); | 362 | efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); |
361 | return; | 363 | return; |
362 | } | 364 | } |
@@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | |||
431 | unsigned int txq_size; | 433 | unsigned int txq_size; |
432 | int i, rc; | 434 | int i, rc; |
433 | 435 | ||
434 | EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); | 436 | netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n", |
437 | tx_queue->queue); | ||
435 | 438 | ||
436 | /* Allocate software ring */ | 439 | /* Allocate software ring */ |
437 | txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); | 440 | txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); |
@@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | |||
456 | 459 | ||
457 | void efx_init_tx_queue(struct efx_tx_queue *tx_queue) | 460 | void efx_init_tx_queue(struct efx_tx_queue *tx_queue) |
458 | { | 461 | { |
459 | EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); | 462 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
463 | "initialising TX queue %d\n", tx_queue->queue); | ||
460 | 464 | ||
461 | tx_queue->insert_count = 0; | 465 | tx_queue->insert_count = 0; |
462 | tx_queue->write_count = 0; | 466 | tx_queue->write_count = 0; |
@@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) | |||
488 | 492 | ||
489 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | 493 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) |
490 | { | 494 | { |
491 | EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); | 495 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
496 | "shutting down TX queue %d\n", tx_queue->queue); | ||
492 | 497 | ||
493 | /* Flush TX queue, remove descriptor ring */ | 498 | /* Flush TX queue, remove descriptor ring */ |
494 | efx_nic_fini_tx(tx_queue); | 499 | efx_nic_fini_tx(tx_queue); |
@@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | |||
507 | 512 | ||
508 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | 513 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) |
509 | { | 514 | { |
510 | EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); | 515 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, |
516 | "destroying TX queue %d\n", tx_queue->queue); | ||
511 | efx_nic_remove_tx(tx_queue); | 517 | efx_nic_remove_tx(tx_queue); |
512 | 518 | ||
513 | kfree(tx_queue->buffer); | 519 | kfree(tx_queue->buffer); |
@@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | |||
639 | 645 | ||
640 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); | 646 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); |
641 | if (base_kva == NULL) { | 647 | if (base_kva == NULL) { |
642 | EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" | 648 | netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, |
643 | " headers\n"); | 649 | "Unable to allocate page for TSO headers\n"); |
644 | return -ENOMEM; | 650 | return -ENOMEM; |
645 | } | 651 | } |
646 | 652 | ||
@@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |||
1124 | return NETDEV_TX_OK; | 1130 | return NETDEV_TX_OK; |
1125 | 1131 | ||
1126 | mem_err: | 1132 | mem_err: |
1127 | EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); | 1133 | netif_err(efx, tx_err, efx->net_dev, |
1134 | "Out of memory for TSO headers, or PCI mapping error\n"); | ||
1128 | dev_kfree_skb_any(skb); | 1135 | dev_kfree_skb_any(skb); |
1129 | goto unwind; | 1136 | goto unwind; |
1130 | 1137 | ||