about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/renesas/sh_eth.c
diff options
context:
space:
mode:
authorSergei Shtylyov <sergei.shtylyov@cogentembedded.com>2014-03-14 20:30:59 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-17 16:06:36 -0400
commit8d5009f6a9d9f4ef62a39bf68b53379b2b766c1c (patch)
treea7da7057ca17df8802556dc8e78a9f13d0745ede /drivers/net/ethernet/renesas/sh_eth.c
parentda2468555643efbde3fb026cd46e5245800cc872 (diff)
sh_eth: fold netif_msg_*() and netdev_*() calls into netif_*() invocations
Now that we call netdev_*() under netif_msg_*() checks, we can fold these into netif_*() macro invocations.

Suggested-by: Joe Perches <joe@perches.com>
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c')
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c33
1 files changed, 11 insertions, 22 deletions
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7ae611fcba53..efaca6d5e85b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1557,8 +1557,7 @@ ignore_link:
1557 /* Unused write back interrupt */ 1557 /* Unused write back interrupt */
1558 if (intr_status & EESR_TABT) { /* Transmit Abort int */ 1558 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1559 ndev->stats.tx_aborted_errors++; 1559 ndev->stats.tx_aborted_errors++;
1560 if (netif_msg_tx_err(mdp)) 1560 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1561 netdev_err(ndev, "Transmit Abort\n");
1562 } 1561 }
1563 } 1562 }
1564 1563
@@ -1567,45 +1566,38 @@ ignore_link:
1567 if (intr_status & EESR_RFRMER) { 1566 if (intr_status & EESR_RFRMER) {
1568 /* Receive Frame Overflow int */ 1567 /* Receive Frame Overflow int */
1569 ndev->stats.rx_frame_errors++; 1568 ndev->stats.rx_frame_errors++;
1570 if (netif_msg_rx_err(mdp)) 1569 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1571 netdev_err(ndev, "Receive Abort\n");
1572 } 1570 }
1573 } 1571 }
1574 1572
1575 if (intr_status & EESR_TDE) { 1573 if (intr_status & EESR_TDE) {
1576 /* Transmit Descriptor Empty int */ 1574 /* Transmit Descriptor Empty int */
1577 ndev->stats.tx_fifo_errors++; 1575 ndev->stats.tx_fifo_errors++;
1578 if (netif_msg_tx_err(mdp)) 1576 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1579 netdev_err(ndev, "Transmit Descriptor Empty\n");
1580 } 1577 }
1581 1578
1582 if (intr_status & EESR_TFE) { 1579 if (intr_status & EESR_TFE) {
1583 /* FIFO under flow */ 1580 /* FIFO under flow */
1584 ndev->stats.tx_fifo_errors++; 1581 ndev->stats.tx_fifo_errors++;
1585 if (netif_msg_tx_err(mdp)) 1582 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1586 netdev_err(ndev, "Transmit FIFO Under flow\n");
1587 } 1583 }
1588 1584
1589 if (intr_status & EESR_RDE) { 1585 if (intr_status & EESR_RDE) {
1590 /* Receive Descriptor Empty int */ 1586 /* Receive Descriptor Empty int */
1591 ndev->stats.rx_over_errors++; 1587 ndev->stats.rx_over_errors++;
1592 1588 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1593 if (netif_msg_rx_err(mdp))
1594 netdev_err(ndev, "Receive Descriptor Empty\n");
1595 } 1589 }
1596 1590
1597 if (intr_status & EESR_RFE) { 1591 if (intr_status & EESR_RFE) {
1598 /* Receive FIFO Overflow int */ 1592 /* Receive FIFO Overflow int */
1599 ndev->stats.rx_fifo_errors++; 1593 ndev->stats.rx_fifo_errors++;
1600 if (netif_msg_rx_err(mdp)) 1594 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1601 netdev_err(ndev, "Receive FIFO Overflow\n");
1602 } 1595 }
1603 1596
1604 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1597 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1605 /* Address Error */ 1598 /* Address Error */
1606 ndev->stats.tx_fifo_errors++; 1599 ndev->stats.tx_fifo_errors++;
1607 if (netif_msg_tx_err(mdp)) 1600 netif_err(mdp, tx_err, ndev, "Address Error\n");
1608 netdev_err(ndev, "Address Error\n");
1609 } 1601 }
1610 1602
1611 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1603 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -2064,11 +2056,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2064 2056
2065 netif_stop_queue(ndev); 2057 netif_stop_queue(ndev);
2066 2058
2067 if (netif_msg_timer(mdp)) { 2059 netif_err(mdp, timer, ndev,
2068 netdev_err(ndev, 2060 "transmit timed out, status %8.8x, resetting...\n",
2069 "transmit timed out, status %8.8x, resetting...\n", 2061 (int)sh_eth_read(ndev, EESR));
2070 (int)sh_eth_read(ndev, EESR));
2071 }
2072 2062
2073 /* tx_errors count up */ 2063 /* tx_errors count up */
2074 ndev->stats.tx_errors++; 2064 ndev->stats.tx_errors++;
@@ -2103,8 +2093,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2103 spin_lock_irqsave(&mdp->lock, flags); 2093 spin_lock_irqsave(&mdp->lock, flags);
2104 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2094 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2105 if (!sh_eth_txfree(ndev)) { 2095 if (!sh_eth_txfree(ndev)) {
2106 if (netif_msg_tx_queued(mdp)) 2096 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2107 netdev_warn(ndev, "TxFD exhausted.\n");
2108 netif_stop_queue(ndev); 2097 netif_stop_queue(ndev);
2109 spin_unlock_irqrestore(&mdp->lock, flags); 2098 spin_unlock_irqrestore(&mdp->lock, flags);
2110 return NETDEV_TX_BUSY; 2099 return NETDEV_TX_BUSY;