author     Ben Hutchings <bhutchings@solarflare.com>    2010-06-23 07:30:07 -0400
committer  David S. Miller <davem@davemloft.net>        2010-06-25 01:13:22 -0400
commit     62776d034cc40c49bafdb3551a6ba35f78e3f08d
tree       1cd2132940ced266ad53619a0c947e153cc83a5e  /drivers/net/sfc/rx.c
parent     0c605a2061670412d3b5580c92f1e161b1a693d2
sfc: Implement message level control
Replace EFX_ERR() with netif_err(), EFX_INFO() with netif_info(),
EFX_LOG() with netif_dbg(), and EFX_TRACE() and EFX_REGDUMP() with
netif_vdbg().
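For context (not part of this patch): the netif_*() helpers from <linux/netdevice.h> expect a driver-private structure with a msg_enable field holding a NETIF_MSG_* bitmask, so each message class (rx_err, rx_status, probe, drv, ...) can be switched on and off at run time. A minimal sketch, assuming a hypothetical "foo" driver rather than the sfc code:

#include <linux/netdevice.h>

struct foo_priv {
        struct net_device *net_dev;
        u32 msg_enable;         /* NETIF_MSG_* bitmask tested by the netif_*() macros */
};

static void foo_report_rx_error(struct foo_priv *priv, unsigned int queue)
{
        /* Emitted only when NETIF_MSG_RX_ERR is set in priv->msg_enable;
         * netif_dbg()/netif_vdbg() are gated the same way but additionally
         * depend on DEBUG/VERBOSE_DEBUG or dynamic debug being enabled.
         */
        netif_err(priv, rx_err, priv->net_dev,
                  "RX queue %u reported an error\n", queue);
}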
Replace EFX_ERR_RL(), EFX_INFO_RL() and EFX_LOG_RL() with the
corresponding netif_*() calls guarded by explicit calls to net_ratelimit().
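The netif_*() macros do not rate-limit on their own, so the call sites that used the *_RL() wrappers now open-code the guard, as the hunks below show. Continuing the hypothetical foo driver, with net_ratelimit() declared in <linux/net.h>:

#include <linux/net.h>

static void foo_report_overlength(struct foo_priv *priv, unsigned int queue,
                                  unsigned int len, unsigned int max_len)
{
        /* net_ratelimit() returns nonzero only while the global printk
         * ratelimit allows output, so a flood of bad packets cannot
         * swamp the log.
         */
        if (net_ratelimit())
                netif_err(priv, rx_err, priv->net_dev,
                          "RX queue %u overlength RX event (0x%x > 0x%x)\n",
                          queue, len, max_len);
}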
Implement the ethtool operations to get and set message level flags,
and add a 'debug' module parameter for the initial value.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
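The ethtool hooks and the 'debug' module parameter mentioned above live outside rx.c and are therefore not visible in the diff below. A minimal sketch of what such wiring typically looks like, still using the hypothetical foo driver (FOO_DEFAULT_MSG, foo_ethtool_ops and foo_init_msglevel are illustrative names, not the sfc ones):

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define FOO_DEFAULT_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;  /* -1: let netif_msg_init() pick the default mask */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Initial NETIF_MSG_* message level bitmask");

static u32 foo_get_msglevel(struct net_device *net_dev)
{
        struct foo_priv *priv = netdev_priv(net_dev);

        return priv->msg_enable;
}

static void foo_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
        struct foo_priv *priv = netdev_priv(net_dev);

        priv->msg_enable = msg_enable;
}

static const struct ethtool_ops foo_ethtool_ops = {
        .get_msglevel   = foo_get_msglevel,
        .set_msglevel   = foo_set_msglevel,
        /* ... other operations ... */
};

/* At probe time the module parameter seeds the initial value */
static void foo_init_msglevel(struct foo_priv *priv)
{
        priv->msg_enable = netif_msg_init(debug, FOO_DEFAULT_MSG);
}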
Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--   drivers/net/sfc/rx.c   56
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 9fb698e3519d..d9ed20ee0dc5 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -348,10 +348,11 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
         if (space < EFX_RX_BATCH)
                 goto out;
 
-        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
-                  " level %d to level %d using %s allocation\n",
-                  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
-                  channel->rx_alloc_push_pages ? "page" : "skb");
+        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+                   "RX queue %d fast-filling descriptor ring from"
+                   " level %d to level %d using %s allocation\n",
+                   rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+                   channel->rx_alloc_push_pages ? "page" : "skb");
 
         do {
                 if (channel->rx_alloc_push_pages)
@@ -366,9 +367,10 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
                 }
         } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 
-        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
-                  "to level %d\n", rx_queue->queue,
-                  rx_queue->added_count - rx_queue->removed_count);
+        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+                   "RX queue %d fast-filled descriptor ring "
+                   "to level %d\n", rx_queue->queue,
+                   rx_queue->added_count - rx_queue->removed_count);
 
  out:
         if (rx_queue->notified_count != rx_queue->added_count)
@@ -402,10 +404,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
         *discard = true;
 
         if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
-                EFX_ERR_RL(efx, " RX queue %d seriously overlength "
-                           "RX event (0x%x > 0x%x+0x%x). Leaking\n",
-                           rx_queue->queue, len, max_len,
-                           efx->type->rx_buffer_padding);
+                if (net_ratelimit())
+                        netif_err(efx, rx_err, efx->net_dev,
+                                  " RX queue %d seriously overlength "
+                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+                                  rx_queue->queue, len, max_len,
+                                  efx->type->rx_buffer_padding);
                 /* If this buffer was skb-allocated, then the meta
                  * data at the end of the skb will be trashed. So
                  * we have no choice but to leak the fragment.
@@ -413,8 +417,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                 *leak_packet = (rx_buf->skb != NULL);
                 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
         } else {
-                EFX_ERR_RL(efx, " RX queue %d overlength RX event "
-                           "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+                if (net_ratelimit())
+                        netif_err(efx, rx_err, efx->net_dev,
+                                  " RX queue %d overlength RX event "
+                                  "(0x%x > 0x%x)\n",
+                                  rx_queue->queue, len, max_len);
         }
 
         rx_queue->channel->n_rx_overlength++;
@@ -502,11 +509,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
         efx_rx_packet__check_len(rx_queue, rx_buf, len,
                                  &discard, &leak_packet);
 
-        EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
-                  rx_queue->queue, index,
-                  (unsigned long long)rx_buf->dma_addr, len,
-                  (checksummed ? " [SUMMED]" : ""),
-                  (discard ? " [DISCARD]" : ""));
+        netif_vdbg(efx, rx_status, efx->net_dev,
+                   "RX queue %d received id %x at %llx+%x %s%s\n",
+                   rx_queue->queue, index,
+                   (unsigned long long)rx_buf->dma_addr, len,
+                   (checksummed ? " [SUMMED]" : ""),
+                   (discard ? " [DISCARD]" : ""));
 
         /* Discard packet, if instructed to do so */
         if (unlikely(discard)) {
@@ -621,7 +629,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
         unsigned int rxq_size;
         int rc;
 
-        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+        netif_dbg(efx, probe, efx->net_dev,
+                  "creating RX queue %d\n", rx_queue->queue);
 
         /* Allocate RX buffers */
         rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -641,7 +650,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
         unsigned int max_fill, trigger, limit;
 
-        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+                  "initialising RX queue %d\n", rx_queue->queue);
 
         /* Initialise ptr fields */
         rx_queue->added_count = 0;
@@ -668,7 +678,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
         int i;
         struct efx_rx_buffer *rx_buf;
 
-        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+                  "shutting down RX queue %d\n", rx_queue->queue);
 
         del_timer_sync(&rx_queue->slow_fill);
         efx_nic_fini_rx(rx_queue);
@@ -684,7 +695,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
-        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+                  "destroying RX queue %d\n", rx_queue->queue);
 
         efx_nic_remove_rx(rx_queue);
 