path: root/drivers/net/sfc/tx.c
author	Ben Hutchings <bhutchings@solarflare.com>	2010-06-23 07:30:07 -0400
committer	David S. Miller <davem@davemloft.net>	2010-06-25 01:13:22 -0400
commit	62776d034cc40c49bafdb3551a6ba35f78e3f08d (patch)
tree	1cd2132940ced266ad53619a0c947e153cc83a5e /drivers/net/sfc/tx.c
parent	0c605a2061670412d3b5580c92f1e161b1a693d2 (diff)
sfc: Implement message level control
Replace EFX_ERR() with netif_err(), EFX_INFO() with netif_info(),
EFX_LOG() with netif_dbg(), and EFX_TRACE() and EFX_REGDUMP() with
netif_vdbg().

Replace EFX_ERR_RL(), EFX_INFO_RL() and EFX_LOG_RL() using explicit
calls to net_ratelimit().

Implement the ethtool operations to get and set message level flags,
and add a 'debug' module parameter for the initial value.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
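For context, a minimal sketch of how this kind of message-level plumbing is typically wired up in a netdev driver: a 'debug' module parameter resolved through netif_msg_init(), a msg_enable field consulted by the netif_err()/netif_dbg()/netif_vdbg() macros, and the ethtool get_msglevel/set_msglevel operations. This is illustrative only, not the sfc code from this commit; names such as my_nic, my_get_msglevel and the default NETIF_MSG_* bits are hypothetical.

/*
 * Illustrative sketch -- not part of this patch. Only the standard
 * kernel APIs (module_param, netif_msg_init, ethtool msglevel ops)
 * are assumed; struct and function names are hypothetical.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

static int debug = -1;	/* -1 => fall back to the default NETIF_MSG_* bits */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

struct my_nic {				/* hypothetical private state */
	struct net_device *net_dev;
	u32 msg_enable;			/* read by netif_err()/netif_dbg()/netif_vdbg() */
};

static u32 my_get_msglevel(struct net_device *net_dev)
{
	struct my_nic *priv = netdev_priv(net_dev);

	return priv->msg_enable;
}

static void my_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct my_nic *priv = netdev_priv(net_dev);

	priv->msg_enable = msg_enable;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_msglevel	= my_get_msglevel,
	.set_msglevel	= my_set_msglevel,
};

/* At probe time: turn the module parameter into the initial msg_enable */
static void my_init_msg_enable(struct my_nic *priv)
{
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
}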
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--	drivers/net/sfc/tx.c	41
1 file changed, 24 insertions, 17 deletions
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6bb12a87ef2d..c6942da2c99a 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel)
 		return;
 
 	spin_lock_bh(&channel->tx_stop_lock);
-	EFX_TRACE(efx, "stop TX queue\n");
+	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
 
 	atomic_inc(&channel->tx_stop_count);
 	netif_tx_stop_queue(
@@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel)
 	local_bh_disable();
 	if (atomic_dec_and_lock(&channel->tx_stop_count,
 				&channel->tx_stop_lock)) {
-		EFX_TRACE(efx, "waking TX queue\n");
+		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
 		netif_tx_wake_queue(
 			netdev_get_tx_queue(
 				efx->net_dev,
@@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 	if (buffer->skb) {
 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
 		buffer->skb = NULL;
-		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
-			  "complete\n", tx_queue->queue, read_ptr);
+		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+			   "TX queue %d transmission id %x complete\n",
+			   tx_queue->queue, tx_queue->read_count);
 	}
 }
 
@@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 
  pci_err:
-	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
-		   "fragments for DMA\n", tx_queue->queue, skb->len,
-		   skb_shinfo(skb)->nr_frags + 1);
+	netif_err(efx, tx_err, efx->net_dev,
+		  " TX queue %d could not map skb with %d bytes %d "
+		  "fragments for DMA\n", tx_queue->queue, skb->len,
+		  skb_shinfo(skb)->nr_frags + 1);
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
@@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
 		if (unlikely(buffer->len == 0)) {
-			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
-				"completion id %x\n", tx_queue->queue,
-				read_ptr);
+			netif_err(efx, tx_err, efx->net_dev,
+				  "TX queue %d spurious TX completion id %x\n",
+				  tx_queue->queue, read_ptr);
 			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
 			return;
 		}
@@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 	unsigned int txq_size;
 	int i, rc;
 
-	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
+	netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
+		  tx_queue->queue);
 
 	/* Allocate software ring */
 	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
@@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "initialising TX queue %d\n", tx_queue->queue);
 
 	tx_queue->insert_count = 0;
 	tx_queue->write_count = 0;
@@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "shutting down TX queue %d\n", tx_queue->queue);
 
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
@@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
-	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
 
 	kfree(tx_queue->buffer);
@@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 
 	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
 	if (base_kva == NULL) {
-		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
-			" headers\n");
+		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
+			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}
 
@@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	return NETDEV_TX_OK;
 
  mem_err:
-	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
+	netif_err(efx, tx_err, efx->net_dev,
+		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
 	goto unwind;
 