aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2012-02-06 17:17:21 -0500
committerDavid S. Miller <davem@davemloft.net>2012-02-07 13:38:57 -0500
commitbb7d92e3e3049e22b5807ac559a72b38fad5f499 (patch)
tree2c0868d909efc6d8c71252382640399192617c34
parentb72061a3cb0f1c5aa3f919e2dadb4a1631773e7a (diff)
sh-eth: use netdev stats structure and fix dma_map_single
No need to maintain a parallel net_device_stats structure in sh_eth_private, since we have a generic one in netdev. Fix two dma_map_single() calls with incorrect parameters, which were passing skb->tail instead of skb->data. It seems that there are no corresponding dma_unmap_single() calls at the moment in this driver. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c62
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h1
2 files changed, 31 insertions, 32 deletions
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1cb5a34d5779..dacd13150b6c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -657,7 +657,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
657 mdp->rx_skbuff[i] = skb; 657 mdp->rx_skbuff[i] = skb;
658 if (skb == NULL) 658 if (skb == NULL)
659 break; 659 break;
660 dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz, 660 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
661 DMA_FROM_DEVICE); 661 DMA_FROM_DEVICE);
662 skb->dev = ndev; /* Mark as being used by this device. */ 662 skb->dev = ndev; /* Mark as being used by this device. */
663 sh_eth_set_receive_align(skb); 663 sh_eth_set_receive_align(skb);
@@ -881,8 +881,8 @@ static int sh_eth_txfree(struct net_device *ndev)
881 if (entry >= TX_RING_SIZE - 1) 881 if (entry >= TX_RING_SIZE - 1)
882 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 882 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
883 883
884 mdp->stats.tx_packets++; 884 ndev->stats.tx_packets++;
885 mdp->stats.tx_bytes += txdesc->buffer_length; 885 ndev->stats.tx_bytes += txdesc->buffer_length;
886 } 886 }
887 return freeNum; 887 return freeNum;
888} 888}
@@ -908,23 +908,23 @@ static int sh_eth_rx(struct net_device *ndev)
908 break; 908 break;
909 909
910 if (!(desc_status & RDFEND)) 910 if (!(desc_status & RDFEND))
911 mdp->stats.rx_length_errors++; 911 ndev->stats.rx_length_errors++;
912 912
913 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 913 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
914 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 914 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
915 mdp->stats.rx_errors++; 915 ndev->stats.rx_errors++;
916 if (desc_status & RD_RFS1) 916 if (desc_status & RD_RFS1)
917 mdp->stats.rx_crc_errors++; 917 ndev->stats.rx_crc_errors++;
918 if (desc_status & RD_RFS2) 918 if (desc_status & RD_RFS2)
919 mdp->stats.rx_frame_errors++; 919 ndev->stats.rx_frame_errors++;
920 if (desc_status & RD_RFS3) 920 if (desc_status & RD_RFS3)
921 mdp->stats.rx_length_errors++; 921 ndev->stats.rx_length_errors++;
922 if (desc_status & RD_RFS4) 922 if (desc_status & RD_RFS4)
923 mdp->stats.rx_length_errors++; 923 ndev->stats.rx_length_errors++;
924 if (desc_status & RD_RFS6) 924 if (desc_status & RD_RFS6)
925 mdp->stats.rx_missed_errors++; 925 ndev->stats.rx_missed_errors++;
926 if (desc_status & RD_RFS10) 926 if (desc_status & RD_RFS10)
927 mdp->stats.rx_over_errors++; 927 ndev->stats.rx_over_errors++;
928 } else { 928 } else {
929 if (!mdp->cd->hw_swap) 929 if (!mdp->cd->hw_swap)
930 sh_eth_soft_swap( 930 sh_eth_soft_swap(
@@ -937,8 +937,8 @@ static int sh_eth_rx(struct net_device *ndev)
937 skb_put(skb, pkt_len); 937 skb_put(skb, pkt_len);
938 skb->protocol = eth_type_trans(skb, ndev); 938 skb->protocol = eth_type_trans(skb, ndev);
939 netif_rx(skb); 939 netif_rx(skb);
940 mdp->stats.rx_packets++; 940 ndev->stats.rx_packets++;
941 mdp->stats.rx_bytes += pkt_len; 941 ndev->stats.rx_bytes += pkt_len;
942 } 942 }
943 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 943 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
944 entry = (++mdp->cur_rx) % RX_RING_SIZE; 944 entry = (++mdp->cur_rx) % RX_RING_SIZE;
@@ -957,7 +957,7 @@ static int sh_eth_rx(struct net_device *ndev)
957 mdp->rx_skbuff[entry] = skb; 957 mdp->rx_skbuff[entry] = skb;
958 if (skb == NULL) 958 if (skb == NULL)
959 break; /* Better luck next round. */ 959 break; /* Better luck next round. */
960 dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz, 960 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
961 DMA_FROM_DEVICE); 961 DMA_FROM_DEVICE);
962 skb->dev = ndev; 962 skb->dev = ndev;
963 sh_eth_set_receive_align(skb); 963 sh_eth_set_receive_align(skb);
@@ -1007,7 +1007,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1007 felic_stat = sh_eth_read(ndev, ECSR); 1007 felic_stat = sh_eth_read(ndev, ECSR);
1008 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1008 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
1009 if (felic_stat & ECSR_ICD) 1009 if (felic_stat & ECSR_ICD)
1010 mdp->stats.tx_carrier_errors++; 1010 ndev->stats.tx_carrier_errors++;
1011 if (felic_stat & ECSR_LCHNG) { 1011 if (felic_stat & ECSR_LCHNG) {
1012 /* Link Changed */ 1012 /* Link Changed */
1013 if (mdp->cd->no_psr || mdp->no_ether_link) { 1013 if (mdp->cd->no_psr || mdp->no_ether_link) {
@@ -1040,7 +1040,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1040 if (intr_status & EESR_TWB) { 1040 if (intr_status & EESR_TWB) {
1041 /* Write buck end. unused write back interrupt */ 1041 /* Write buck end. unused write back interrupt */
1042 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1042 if (intr_status & EESR_TABT) /* Transmit Abort int */
1043 mdp->stats.tx_aborted_errors++; 1043 ndev->stats.tx_aborted_errors++;
1044 if (netif_msg_tx_err(mdp)) 1044 if (netif_msg_tx_err(mdp))
1045 dev_err(&ndev->dev, "Transmit Abort\n"); 1045 dev_err(&ndev->dev, "Transmit Abort\n");
1046 } 1046 }
@@ -1049,7 +1049,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1049 /* Receive Abort int */ 1049 /* Receive Abort int */
1050 if (intr_status & EESR_RFRMER) { 1050 if (intr_status & EESR_RFRMER) {
1051 /* Receive Frame Overflow int */ 1051 /* Receive Frame Overflow int */
1052 mdp->stats.rx_frame_errors++; 1052 ndev->stats.rx_frame_errors++;
1053 if (netif_msg_rx_err(mdp)) 1053 if (netif_msg_rx_err(mdp))
1054 dev_err(&ndev->dev, "Receive Abort\n"); 1054 dev_err(&ndev->dev, "Receive Abort\n");
1055 } 1055 }
@@ -1057,21 +1057,21 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1057 1057
1058 if (intr_status & EESR_TDE) { 1058 if (intr_status & EESR_TDE) {
1059 /* Transmit Descriptor Empty int */ 1059 /* Transmit Descriptor Empty int */
1060 mdp->stats.tx_fifo_errors++; 1060 ndev->stats.tx_fifo_errors++;
1061 if (netif_msg_tx_err(mdp)) 1061 if (netif_msg_tx_err(mdp))
1062 dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 1062 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1063 } 1063 }
1064 1064
1065 if (intr_status & EESR_TFE) { 1065 if (intr_status & EESR_TFE) {
1066 /* FIFO under flow */ 1066 /* FIFO under flow */
1067 mdp->stats.tx_fifo_errors++; 1067 ndev->stats.tx_fifo_errors++;
1068 if (netif_msg_tx_err(mdp)) 1068 if (netif_msg_tx_err(mdp))
1069 dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 1069 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1070 } 1070 }
1071 1071
1072 if (intr_status & EESR_RDE) { 1072 if (intr_status & EESR_RDE) {
1073 /* Receive Descriptor Empty int */ 1073 /* Receive Descriptor Empty int */
1074 mdp->stats.rx_over_errors++; 1074 ndev->stats.rx_over_errors++;
1075 1075
1076 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) 1076 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
1077 sh_eth_write(ndev, EDRRR_R, EDRRR); 1077 sh_eth_write(ndev, EDRRR_R, EDRRR);
@@ -1081,14 +1081,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1081 1081
1082 if (intr_status & EESR_RFE) { 1082 if (intr_status & EESR_RFE) {
1083 /* Receive FIFO Overflow int */ 1083 /* Receive FIFO Overflow int */
1084 mdp->stats.rx_fifo_errors++; 1084 ndev->stats.rx_fifo_errors++;
1085 if (netif_msg_rx_err(mdp)) 1085 if (netif_msg_rx_err(mdp))
1086 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1086 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1087 } 1087 }
1088 1088
1089 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1089 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1090 /* Address Error */ 1090 /* Address Error */
1091 mdp->stats.tx_fifo_errors++; 1091 ndev->stats.tx_fifo_errors++;
1092 if (netif_msg_tx_err(mdp)) 1092 if (netif_msg_tx_err(mdp))
1093 dev_err(&ndev->dev, "Address Error\n"); 1093 dev_err(&ndev->dev, "Address Error\n");
1094 } 1094 }
@@ -1445,7 +1445,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1445 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 1445 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1446 1446
1447 /* tx_errors count up */ 1447 /* tx_errors count up */
1448 mdp->stats.tx_errors++; 1448 ndev->stats.tx_errors++;
1449 1449
1450 /* timer off */ 1450 /* timer off */
1451 del_timer_sync(&mdp->timer); 1451 del_timer_sync(&mdp->timer);
@@ -1567,27 +1567,27 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1567 1567
1568 pm_runtime_get_sync(&mdp->pdev->dev); 1568 pm_runtime_get_sync(&mdp->pdev->dev);
1569 1569
1570 mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR); 1570 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1571 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 1571 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
1572 mdp->stats.collisions += sh_eth_read(ndev, CDCR); 1572 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
1573 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 1573 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
1574 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 1574 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1575 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 1575 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
1576 if (sh_eth_is_gether(mdp)) { 1576 if (sh_eth_is_gether(mdp)) {
1577 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 1577 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1578 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 1578 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
1579 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 1579 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1580 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 1580 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
1581 } else { 1581 } else {
1582 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 1582 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1583 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 1583 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
1584 } 1584 }
1585 pm_runtime_put_sync(&mdp->pdev->dev); 1585 pm_runtime_put_sync(&mdp->pdev->dev);
1586 1586
1587 return &mdp->stats; 1587 return &ndev->stats;
1588} 1588}
1589 1589
1590/* ioctl to device funciotn*/ 1590/* ioctl to device function */
1591static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, 1591static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1592 int cmd) 1592 int cmd)
1593{ 1593{
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 47877b13ffad..ba72976926fd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -762,7 +762,6 @@ struct sh_eth_private {
762 struct sh_eth_txdesc *tx_ring; 762 struct sh_eth_txdesc *tx_ring;
763 struct sk_buff **rx_skbuff; 763 struct sk_buff **rx_skbuff;
764 struct sk_buff **tx_skbuff; 764 struct sk_buff **tx_skbuff;
765 struct net_device_stats stats;
766 struct timer_list timer; 765 struct timer_list timer;
767 spinlock_t lock; 766 spinlock_t lock;
768 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 767 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */