author | Ben Hutchings <bhutchings@solarflare.com> | 2010-06-23 07:30:07 -0400
committer | David S. Miller <davem@davemloft.net> | 2010-06-25 01:13:22 -0400
commit | 62776d034cc40c49bafdb3551a6ba35f78e3f08d (patch)
tree | 1cd2132940ced266ad53619a0c947e153cc83a5e /drivers/net/sfc/nic.c
parent | 0c605a2061670412d3b5580c92f1e161b1a693d2 (diff)
sfc: Implement message level control
Replace EFX_ERR() with netif_err(), EFX_INFO() with netif_info(),
EFX_LOG() with netif_dbg() and EFX_TRACE() and EFX_REGDUMP() with
netif_vdbg().
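As a rough illustration of the conversion pattern (not taken verbatim from this commit's hunks except where noted), each netif_*() call names a severity, a message-level category such as hw or rx_err, and the net_device, so individual categories can be enabled or disabled at runtime:

    /* Illustrative sketch of the conversion only; the call shown follows the
     * tx-queue-flush hunk in this file.
     * Old form: severity fixed by the driver-private macro, no runtime control.
     */
    EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);

    /* New form: netif_err(priv, type, netdev, fmt, ...) - the 'hw' category is
     * tested against the adapter's msg_enable bits before anything is printed.
     */
    netif_err(efx, hw, efx->net_dev,
              "tx queue %d flush command timed out\n", tx_queue->queue);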
Replace EFX_ERR_RL(), EFX_INFO_RL() and EFX_LOG_RL() with the corresponding
netif_*() calls guarded by explicit net_ratelimit() checks.
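A condensed sketch of that pattern, abridged from the RX-error hunk further down (the full argument list and surrounding #ifdef are in the diff):

    /* Rate limiting is now written out at the call site instead of being
     * hidden inside an *_RL() macro, so it composes with the msg_enable test. */
    if (rx_ev_other_err && net_ratelimit())
            netif_dbg(efx, rx_err, efx->net_dev,
                      " RX queue %d unexpected RX event " EFX_QWORD_FMT "\n",
                      rx_queue->queue, EFX_QWORD_VAL(*event));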
Implement the ethtool operations to get and set message level flags,
and add a 'debug' module parameter for the initial value.
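The ethtool and module-parameter plumbing is not part of this file's diff (it lives outside nic.c), so the following is only a hedged sketch of what such hooks conventionally look like; the function names and default flag set are illustrative assumptions, not code from this commit:

    /* Hedged sketch: standard ethtool msglevel callbacks plus a 'debug'
     * module parameter supplying the initial msg_enable value. */
    static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
    module_param(debug, uint, 0);
    MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

    static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
    {
            struct efx_nic *efx = netdev_priv(net_dev);
            return efx->msg_enable;
    }

    static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
    {
            struct efx_nic *efx = netdev_priv(net_dev);
            efx->msg_enable = msg_enable;   /* bits tested by the netif_*() calls */
    }

    /* Wired up via ethtool_ops:
     *     .get_msglevel = efx_ethtool_get_msglevel,
     *     .set_msglevel = efx_ethtool_set_msglevel,
     * with efx->msg_enable initialised from 'debug' at probe time. */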
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r-- | drivers/net/sfc/nic.c | 219
1 file changed, 124 insertions, 95 deletions
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 67235f1c2550..30836578c1cc 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -179,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx,
         return 0;
 
 fail:
-        EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
-                " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
-                EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+        netif_err(efx, hw, efx->net_dev,
+                  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+                  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+                  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
         return -EIO;
 }
 
@@ -214,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
         for (i = 0; i < buffer->entries; i++) {
                 index = buffer->index + i;
                 dma_addr = buffer->dma_addr + (i * 4096);
-                EFX_LOG(efx, "mapping special buffer %d at %llx\n",
-                        index, (unsigned long long)dma_addr);
+                netif_dbg(efx, probe, efx->net_dev,
+                          "mapping special buffer %d at %llx\n",
+                          index, (unsigned long long)dma_addr);
                 EFX_POPULATE_QWORD_3(buf_desc,
                                      FRF_AZ_BUF_ADR_REGION, 0,
                                      FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
@@ -235,8 +237,8 @@ efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
         if (!buffer->entries)
                 return;
 
-        EFX_LOG(efx, "unmapping special buffers %d-%d\n",
-                buffer->index, buffer->index + buffer->entries - 1);
+        netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+                  buffer->index, buffer->index + buffer->entries - 1);
 
         EFX_POPULATE_OWORD_4(buf_tbl_upd,
                              FRF_AZ_BUF_UPD_CMD, 0,
@@ -276,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
         buffer->index = efx->next_buffer_table;
         efx->next_buffer_table += buffer->entries;
 
-        EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-                "(virt %p phys %llx)\n", buffer->index,
-                buffer->index + buffer->entries - 1,
-                (u64)buffer->dma_addr, len,
-                buffer->addr, (u64)virt_to_phys(buffer->addr));
+        netif_dbg(efx, probe, efx->net_dev,
+                  "allocating special buffers %d-%d at %llx+%x "
+                  "(virt %p phys %llx)\n", buffer->index,
+                  buffer->index + buffer->entries - 1,
+                  (u64)buffer->dma_addr, len,
+                  buffer->addr, (u64)virt_to_phys(buffer->addr));
 
         return 0;
 }
@@ -291,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
         if (!buffer->addr)
                 return;
 
-        EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-                "(virt %p phys %llx)\n", buffer->index,
-                buffer->index + buffer->entries - 1,
-                (u64)buffer->dma_addr, buffer->len,
-                buffer->addr, (u64)virt_to_phys(buffer->addr));
+        netif_dbg(efx, hw, efx->net_dev,
+                  "deallocating special buffers %d-%d at %llx+%x "
+                  "(virt %p phys %llx)\n", buffer->index,
+                  buffer->index + buffer->entries - 1,
+                  (u64)buffer->dma_addr, buffer->len,
+                  buffer->addr, (u64)virt_to_phys(buffer->addr));
 
         pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
                             buffer->dma_addr);
@@ -555,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
         bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
         bool iscsi_digest_en = is_b0;
 
-        EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
-                rx_queue->queue, rx_queue->rxd.index,
-                rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+        netif_dbg(efx, hw, efx->net_dev,
+                  "RX queue %d ring in special buffers %d-%d\n",
+                  rx_queue->queue, rx_queue->rxd.index,
+                  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
         rx_queue->flushed = FLUSH_NONE;
 
@@ -694,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
                    EFX_WORKAROUND_10727(efx)) {
                 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
         } else {
-                EFX_ERR(efx, "channel %d unexpected TX event "
-                        EFX_QWORD_FMT"\n", channel->channel,
-                        EFX_QWORD_VAL(*event));
+                netif_err(efx, tx_err, efx->net_dev,
+                          "channel %d unexpected TX event "
+                          EFX_QWORD_FMT"\n", channel->channel,
+                          EFX_QWORD_VAL(*event));
         }
 
         return tx_packets;
@@ -759,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
          * to a FIFO overflow.
          */
 #ifdef EFX_ENABLE_DEBUG
-        if (rx_ev_other_err) {
-                EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
-                            EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
-                            rx_queue->queue, EFX_QWORD_VAL(*event),
-                            rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
-                            rx_ev_ip_hdr_chksum_err ?
-                            " [IP_HDR_CHKSUM_ERR]" : "",
-                            rx_ev_tcp_udp_chksum_err ?
-                            " [TCP_UDP_CHKSUM_ERR]" : "",
-                            rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
-                            rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
-                            rx_ev_drib_nib ? " [DRIB_NIB]" : "",
-                            rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
-                            rx_ev_pause_frm ? " [PAUSE]" : "");
+        if (rx_ev_other_err && net_ratelimit()) {
+                netif_dbg(efx, rx_err, efx->net_dev,
+                          " RX queue %d unexpected RX event "
+                          EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+                          rx_queue->queue, EFX_QWORD_VAL(*event),
+                          rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+                          rx_ev_ip_hdr_chksum_err ?
+                          " [IP_HDR_CHKSUM_ERR]" : "",
+                          rx_ev_tcp_udp_chksum_err ?
+                          " [TCP_UDP_CHKSUM_ERR]" : "",
+                          rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+                          rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+                          rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+                          rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+                          rx_ev_pause_frm ? " [PAUSE]" : "");
         }
 #endif
 }
@@ -786,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 
         expected = rx_queue->removed_count & EFX_RXQ_MASK;
         dropped = (index - expected) & EFX_RXQ_MASK;
-        EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
-                 dropped, index, expected);
+        netif_info(efx, rx_err, efx->net_dev,
+                   "dropped %d events (index=%d expected=%d)\n",
+                   dropped, index, expected);
 
         efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
                            RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
@@ -873,9 +881,9 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
                  * queue. Refill it here */
                 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
         else
-                EFX_LOG(efx, "channel %d received generated "
-                        "event "EFX_QWORD_FMT"\n", channel->channel,
-                        EFX_QWORD_VAL(*event));
+                netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+                          "generated event "EFX_QWORD_FMT"\n",
+                          channel->channel, EFX_QWORD_VAL(*event));
 }
 
 /* Global events are basically PHY events */
@@ -901,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
         if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
             EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
             EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
-                EFX_ERR(efx, "channel %d seen global RX_RESET "
-                        "event. Resetting.\n", channel->channel);
+                netif_err(efx, rx_err, efx->net_dev,
+                          "channel %d seen global RX_RESET event. Resetting.\n",
+                          channel->channel);
 
                 atomic_inc(&efx->rx_reset);
                 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
@@ -911,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
         }
 
         if (!handled)
-                EFX_ERR(efx, "channel %d unknown global event "
-                        EFX_QWORD_FMT "\n", channel->channel,
-                        EFX_QWORD_VAL(*event));
+                netif_err(efx, hw, efx->net_dev,
+                          "channel %d unknown global event "
+                          EFX_QWORD_FMT "\n", channel->channel,
+                          EFX_QWORD_VAL(*event));
 }
 
 static void
@@ -928,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 
         switch (ev_sub_code) {
         case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
-                EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
-                          channel->channel, ev_sub_data);
+                netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+                           channel->channel, ev_sub_data);
                 break;
         case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
-                EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
-                          channel->channel, ev_sub_data);
+                netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+                           channel->channel, ev_sub_data);
                 break;
         case FSE_AZ_EVQ_INIT_DONE_EV:
-                EFX_LOG(efx, "channel %d EVQ %d initialised\n",
-                        channel->channel, ev_sub_data);
+                netif_dbg(efx, hw, efx->net_dev,
+                          "channel %d EVQ %d initialised\n",
+                          channel->channel, ev_sub_data);
                 break;
         case FSE_AZ_SRM_UPD_DONE_EV:
-                EFX_TRACE(efx, "channel %d SRAM update done\n",
-                          channel->channel);
+                netif_vdbg(efx, hw, efx->net_dev,
+                           "channel %d SRAM update done\n", channel->channel);
                 break;
         case FSE_AZ_WAKE_UP_EV:
-                EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
-                          channel->channel, ev_sub_data);
+                netif_vdbg(efx, hw, efx->net_dev,
+                           "channel %d RXQ %d wakeup event\n",
+                           channel->channel, ev_sub_data);
                 break;
         case FSE_AZ_TIMER_EV:
-                EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
-                          channel->channel, ev_sub_data);
+                netif_vdbg(efx, hw, efx->net_dev,
+                           "channel %d RX queue %d timer expired\n",
+                           channel->channel, ev_sub_data);
                 break;
         case FSE_AA_RX_RECOVER_EV:
-                EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
-                        "Resetting.\n", channel->channel);
+                netif_err(efx, rx_err, efx->net_dev,
+                          "channel %d seen DRIVER RX_RESET event. "
+                          "Resetting.\n", channel->channel);
                 atomic_inc(&efx->rx_reset);
                 efx_schedule_reset(efx,
@@ -961,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
                                    RESET_TYPE_DISABLE);
                 break;
         case FSE_BZ_RX_DSC_ERROR_EV:
-                EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
-                        " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+                netif_err(efx, rx_err, efx->net_dev,
+                          "RX DMA Q %d reports descriptor fetch error."
+                          " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
                 break;
         case FSE_BZ_TX_DSC_ERROR_EV:
-                EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
-                        " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+                netif_err(efx, tx_err, efx->net_dev,
+                          "TX DMA Q %d reports descriptor fetch error."
+                          " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
                 break;
         default:
-                EFX_TRACE(efx, "channel %d unknown driver event code %d "
-                          "data %04x\n", channel->channel, ev_sub_code,
-                          ev_sub_data);
+                netif_vdbg(efx, hw, efx->net_dev,
+                           "channel %d unknown driver event code %d "
+                           "data %04x\n", channel->channel, ev_sub_code,
+                           ev_sub_data);
                 break;
         }
 }
@@ -996,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
                         /* End of events */
                         break;
 
-                EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
-                          channel->channel, EFX_QWORD_VAL(event));
+                netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+                           "channel %d event is "EFX_QWORD_FMT"\n",
+                           channel->channel, EFX_QWORD_VAL(event));
 
                 /* Clear this event by marking it all ones */
                 EFX_SET_QWORD(*p_event);
@@ -1033,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
                         efx_mcdi_process_event(channel, &event);
                         break;
                 default:
-                        EFX_ERR(channel->efx, "channel %d unknown event type %d"
-                                " (data " EFX_QWORD_FMT ")\n", channel->channel,
-                                ev_code, EFX_QWORD_VAL(event));
+                        netif_err(channel->efx, hw, channel->efx->net_dev,
+                                  "channel %d unknown event type %d (data "
+                                  EFX_QWORD_FMT ")\n", channel->channel,
+                                  ev_code, EFX_QWORD_VAL(event));
                 }
         }
 
@@ -1060,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel)
         efx_oword_t reg;
         struct efx_nic *efx = channel->efx;
 
-        EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
-                channel->channel, channel->eventq.index,
-                channel->eventq.index + channel->eventq.entries - 1);
+        netif_dbg(efx, hw, efx->net_dev,
+                  "channel %d event queue in special buffers %d-%d\n",
+                  channel->channel, channel->eventq.index,
+                  channel->eventq.index + channel->eventq.entries - 1);
 
         if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
                 EFX_POPULATE_OWORD_3(reg,
@@ -1240,14 +1260,16 @@ int efx_nic_flush_queues(struct efx_nic *efx)
          * leading to a reset, or fake up success anyway */
         efx_for_each_tx_queue(tx_queue, efx) {
                 if (tx_queue->flushed != FLUSH_DONE)
-                        EFX_ERR(efx, "tx queue %d flush command timed out\n",
-                                tx_queue->queue);
+                        netif_err(efx, hw, efx->net_dev,
+                                  "tx queue %d flush command timed out\n",
+                                  tx_queue->queue);
                 tx_queue->flushed = FLUSH_DONE;
         }
         efx_for_each_rx_queue(rx_queue, efx) {
                 if (rx_queue->flushed != FLUSH_DONE)
-                        EFX_ERR(efx, "rx queue %d flush command timed out\n",
-                                rx_queue->queue);
+                        netif_err(efx, hw, efx->net_dev,
+                                  "rx queue %d flush command timed out\n",
+                                  rx_queue->queue);
                 rx_queue->flushed = FLUSH_DONE;
         }
 
@@ -1319,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
         efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
         error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
 
-        EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
-                EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
-                EFX_OWORD_VAL(fatal_intr),
-                error ? "disabling bus mastering" : "no recognised error");
+        netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+                  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+                  EFX_OWORD_VAL(fatal_intr),
+                  error ? "disabling bus mastering" : "no recognised error");
 
         /* If this is a memory parity error dump which blocks are offending */
         mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
@@ -1330,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
         if (mem_perr) {
                 efx_oword_t reg;
                 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
-                EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
-                        EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+                netif_err(efx, hw, efx->net_dev,
+                          "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+                          EFX_OWORD_VAL(reg));
         }
 
         /* Disable both devices */
@@ -1348,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
                         jiffies + EFX_INT_ERROR_EXPIRE * HZ;
         }
         if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
-                EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+                netif_err(efx, hw, efx->net_dev,
+                          "SYSTEM ERROR - reset scheduled\n");
                 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
         } else {
-                EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
-                        "NIC will be disabled\n");
+                netif_err(efx, hw, efx->net_dev,
+                          "SYSTEM ERROR - max number of errors seen."
+                          "NIC will be disabled\n");
                 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
         }
 
@@ -1415,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
 
         if (result == IRQ_HANDLED) {
                 efx->last_irq_cpu = raw_smp_processor_id();
-                EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
-                          irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+                netif_vdbg(efx, intr, efx->net_dev,
+                           "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+                           irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
         }
 
         return result;
@@ -1437,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
         int syserr;
 
         efx->last_irq_cpu = raw_smp_processor_id();
-        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
-                  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+        netif_vdbg(efx, intr, efx->net_dev,
+                   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+                   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
         /* Check to see if we have a serious error condition */
         if (channel->channel == efx->fatal_irq_level) {
@@ -1494,8 +1521,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
                 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
                                  efx->name, efx);
                 if (rc) {
-                        EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
-                                efx->pci_dev->irq);
+                        netif_err(efx, drv, efx->net_dev,
+                                  "failed to hook legacy IRQ %d\n",
+                                  efx->pci_dev->irq);
                         goto fail1;
                 }
                 return 0;
@@ -1507,7 +1535,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
                                  IRQF_PROBE_SHARED, /* Not shared */
                                  channel->name, channel);
                 if (rc) {
-                        EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+                        netif_err(efx, drv, efx->net_dev,
+                                  "failed to hook IRQ %d\n", channel->irq);
                         goto fail2;
                 }
         }