author     Stephen Hemminger <stephen@networkplumber.org>  2013-01-15 02:28:29 -0500
committer  David S. Miller <davem@davemloft.net>           2013-01-16 14:40:50 -0500
commit     fdcd79b94b24418133248e89887b6d432cf700b3 (patch)
tree       d500f6f70b44f6bb14e38bc470f4757f6cb9826f /drivers/net
parent     4bad25fa7ecedd32424a1a6412c3b3e91c4ae9f1 (diff)
vmxnet3: use netdev_dbg
Use netdev_dbg() rather than dev_dbg() because the former prints the device name, which is more useful than the PCI name.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  18
1 file changed, 9 insertions, 9 deletions
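
For context on the rationale in the commit message: dev_dbg() prefixes its output with the underlying struct device (for a PCI function, the bus address), while netdev_dbg() additionally includes the network interface name. A minimal sketch of the difference follows; the helper function and the messages are hypothetical and not part of this patch, but both debug macros are real kernel APIs and only emit output when DEBUG or dynamic debug is enabled.

#include <linux/netdevice.h>
#include <linux/pci.h>

/* Illustrative only: contrast the prefixes the two helpers produce. */
static void example_debug_prints(struct pci_dev *pdev, struct net_device *netdev)
{
        /* Keyed to the PCI device, e.g. "vmxnet3 0000:0b:00.0: ring refilled" */
        dev_dbg(&pdev->dev, "ring refilled\n");

        /* Also names the interface, e.g. "vmxnet3 0000:0b:00.0 eth0: ring refilled" */
        netdev_dbg(netdev, "ring refilled\n");
}

The second form is what this patch switches the driver to, so debug output can be matched directly to an interface rather than only to a PCI address.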
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7b10d9cb37b3..c566b739594a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -629,7 +629,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                 vmxnet3_cmd_ring_adv_next2fill(ring);
         }
 
-        dev_dbg(&adapter->netdev->dev,
+        netdev_dbg(adapter->netdev,
                 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
                 num_allocated, ring->next2fill, ring->next2comp);
 
@@ -688,7 +688,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
         tbi = tq->buf_info + tq->tx_ring.next2fill;
         tbi->map_type = VMXNET3_MAP_NONE;
 
-        dev_dbg(&adapter->netdev->dev,
+        netdev_dbg(adapter->netdev,
                 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                 tq->tx_ring.next2fill,
                 le64_to_cpu(ctx->sop_txd->txd.addr),
@@ -728,7 +728,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                 gdesc->dword[2] = cpu_to_le32(dw2);
                 gdesc->dword[3] = 0;
 
-                dev_dbg(&adapter->netdev->dev,
+                netdev_dbg(adapter->netdev,
                         "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                         tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                         le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -768,7 +768,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                 gdesc->dword[2] = cpu_to_le32(dw2);
                 gdesc->dword[3] = 0;
 
-                dev_dbg(&adapter->netdev->dev,
+                netdev_dbg(adapter->netdev,
                         "txd[%u]: 0x%llu %u %u\n",
                         tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                         le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -868,7 +868,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
         tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
         memcpy(tdd->data, skb->data, ctx->copy_size);
-        dev_dbg(&adapter->netdev->dev,
+        netdev_dbg(adapter->netdev,
                 "copy %u bytes to dataRing[%u]\n",
                 ctx->copy_size, tq->tx_ring.next2fill);
         return 1;
@@ -974,7 +974,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
         if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                 tq->stats.tx_ring_full++;
-                dev_dbg(&adapter->netdev->dev,
+                netdev_dbg(adapter->netdev,
                         "tx queue stopped on %s, next2comp %u"
                         " next2fill %u\n", adapter->netdev->name,
                         tq->tx_ring.next2comp, tq->tx_ring.next2fill);
@@ -1057,7 +1057,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                            (struct Vmxnet3_TxDesc *)ctx.sop_txd);
         gdesc = ctx.sop_txd;
 #endif
-        dev_dbg(&adapter->netdev->dev,
+        netdev_dbg(adapter->netdev,
                 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                 (u32)(ctx.sop_txd -
                 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
@@ -1210,7 +1210,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                 if (unlikely(rcd->len == 0)) {
                         /* Pretend the rx buffer is skipped. */
                         BUG_ON(!(rcd->sop && rcd->eop));
-                        dev_dbg(&adapter->netdev->dev,
+                        netdev_dbg(adapter->netdev,
                                 "rxRing[%u][%u] 0 length\n",
                                 ring_idx, idx);
                         goto rcd_done;
@@ -2211,7 +2211,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
         u32 ret;
         unsigned long flags;
 
-        dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+        netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
                 " ring sizes %u %u %u\n", adapter->netdev->name,
                 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
                 adapter->tx_queue[0].tx_ring.size,