path: root/drivers/net/ethernet/renesas/sh_eth.c
Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c')
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c	96
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2cd051e..b5db6b3f939f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
 	return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
-	int reserve;
+	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
 
-	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 	if (reserve)
-		skb_reserve(skb, reserve);
+		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
 }
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
-	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
 
 
 /* CPU <-> EDMAC endian convert */
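The rewritten helper above computes how far skb->data sits past the previous SH_ETH_RX_ALIGN boundary and reserves only the distance to the next one. This fixes two quirks of the old SH4-only variant: it reserved a full SH4_SKB_RX_ALIGN bytes even when the buffer was already aligned, and its (u32) cast truncated pointers on 64-bit builds, which the uintptr_t cast avoids. A stand-alone sketch of the arithmetic, assuming SH_ETH_RX_ALIGN is 32 (its definition lives in sh_eth.h, not in this file):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SH_ETH_RX_ALIGN 32	/* assumed value, defined in sh_eth.h */

/* Mirrors the new sh_eth_set_receive_align() arithmetic. */
static uintptr_t rx_align(uintptr_t addr)
{
	uintptr_t reserve = addr & (SH_ETH_RX_ALIGN - 1);

	return reserve ? addr + (SH_ETH_RX_ALIGN - reserve) : addr;
}

int main(void)
{
	for (uintptr_t addr = 0x1000; addr < 0x1040; addr++) {
		uintptr_t aligned = rx_align(addr);

		/* At most SH_ETH_RX_ALIGN - 1 bytes are skipped... */
		assert(aligned - addr < SH_ETH_RX_ALIGN);
		/* ...and the result always lands on an aligned address. */
		assert((aligned & (SH_ETH_RX_ALIGN - 1)) == 0);
	}
	printf("alignment arithmetic OK\n");
	return 0;
}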
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sh_eth_txdesc *txdesc = NULL;
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
-		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+		skb = netdev_alloc_skb(ndev, skbuff_size);
 		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-			       DMA_FROM_DEVICE);
 		sh_eth_set_receive_align(skb);
 
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
+		/* The size of the buffer is a multiple of 16 bytes. */
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+			       DMA_FROM_DEVICE);
 		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
-		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 		/* Rx descriptor address set */
 		if (i == 0) {
 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
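Two coordinated changes appear in the ring setup above: the skb is allocated with SH_ETH_RX_ALIGN - 1 bytes of slack so that a full rx_buf_sz buffer remains after sh_eth_set_receive_align() advances skb->data, and dma_map_single() now runs after buffer_length is set, so the mapping covers the full ALIGN(mdp->rx_buf_sz, 16) bytes the EDMAC may write instead of the bare rx_buf_sz the old code mapped. A user-space sketch of the size bounds, borrowing the kernel's ALIGN() definition (the SH_ETH_RX_ALIGN value is again an assumption):

#include <assert.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* as in the kernel */
#define SH_ETH_RX_ALIGN	32	/* assumed value, defined in sh_eth.h */

int main(void)
{
	for (int rx_buf_sz = 64; rx_buf_sz <= 2048; rx_buf_sz++) {
		int skbuff_size = rx_buf_sz + SH_ETH_RX_ALIGN - 1;
		int buffer_length = ALIGN(rx_buf_sz, 16);

		/* The device-writable area can exceed rx_buf_sz by up to
		 * 15 bytes, so mapping only rx_buf_sz could leave its
		 * tail outside the DMA mapping. */
		assert(buffer_length >= rx_buf_sz &&
		       buffer_length - rx_buf_sz <= 15);
		/* Even after a worst-case alignment reserve, the enlarged
		 * allocation still holds rx_buf_sz bytes. */
		assert(skbuff_size - (SH_ETH_RX_ALIGN - 1) >= rx_buf_sz);
	}
	printf("size bounds OK\n");
	return 0;
}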
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
 
 	rxdesc = &mdp->rx_ring[entry];
 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
 			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						mdp->rx_buf_sz,
+						ALIGN(mdp->rx_buf_sz, 16),
 						DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+			skb = netdev_alloc_skb(ndev, skbuff_size);
 			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
-			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-				       DMA_FROM_DEVICE);
 			sh_eth_set_receive_align(skb);
+			dma_map_single(&ndev->dev, skb->data,
+				       rxdesc->buffer_length, DMA_FROM_DEVICE);
 
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;
 
+	mdp->is_opened = 1;
+
 	return ret;
 
 out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	if (sh_eth_is_rz_fast_ether(mdp))
+		return &ndev->stats;
+
+	if (!mdp->is_opened)
+		return &ndev->stats;
+
+	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
+	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
+	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
+
+	if (sh_eth_is_gether(mdp)) {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
+	} else {
+		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
+	}
+
+	return &ndev->stats;
+}
+
 /* device close function */
 static int sh_eth_close(struct net_device *ndev)
 {
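The counters read above (TROCR, CDCR, LCCR, plus CERCR/CEECR on GEther cores or CNDCR otherwise) are write-clear registers, as the driver's own comments note: a read returns the count accumulated since the last clear, and writing zero resets it, so each value must be folded into ndev->stats before the clear. The new !mdp->is_opened early return keeps the function from touching these registers while the device is closed. If one wanted to deduplicate the read-then-clear pattern, a helper along these lines could work (hypothetical sketch, not part of this diff):

/* Hypothetical refactoring sketch: fold one write-clear counter
 * register into a software statistic. */
static void sh_eth_update_stat(struct net_device *ndev,
			       unsigned long *stat, int reg)
{
	*stat += sh_eth_read(ndev, reg);
	sh_eth_write(ndev, 0, reg);	/* (write clear) */
}

/* e.g. sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); */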
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
 	sh_eth_write(ndev, 0, EDTRR);
 	sh_eth_write(ndev, 0, EDRRR);
 
+	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
 
 	pm_runtime_put_sync(&mdp->pdev->dev);
 
-	return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-
-	if (sh_eth_is_rz_fast_ether(mdp))
-		return &ndev->stats;
+	mdp->is_opened = 0;
 
-	pm_runtime_get_sync(&mdp->pdev->dev);
-
-	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
-	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
-	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
-	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
-	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
-	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
-	if (sh_eth_is_gether(mdp)) {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
-		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
-		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
-	} else {
-		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
-		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
-	}
-	pm_runtime_put_sync(&mdp->pdev->dev);
-
-	return &ndev->stats;
+	return 0;
 }
 
 /* ioctl to device function */
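Taken together, the tail of this diff moves sh_eth_get_stats() ahead of sh_eth_close() so that close can call it without a forward declaration, takes a final counter snapshot while the hardware is still powered, and drops the old pm_runtime_get_sync()/pm_runtime_put_sync() bracket in favor of the is_opened flag; pm_runtime_get_sync() may sleep, which is unsafe if the stats hook is reached from atomic context. For orientation only, the function is presumably exposed to the core through the driver's net_device_ops (illustrative fragment; the real initializer in sh_eth.c carries many more hooks):

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open	= sh_eth_open,
	.ndo_stop	= sh_eth_close,
	.ndo_get_stats	= sh_eth_get_stats,	/* may run while the interface is down */
	/* ... remaining hooks elided ... */
};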