author	David S. Miller <davem@davemloft.net>	2015-01-27 03:18:57 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-27 03:18:57 -0500
commit	225776098b36a5aee5211e881d484f48ad61ea8c (patch)
tree	cd61f6cb3d68d8a6a6351ff84dcfc3db3c24b2b5
parent	8d8d67f140dbf055efc488ab656ca96a2345b5c8 (diff)
parent	52b9fa3696c44151a2f1d361a00be7c5513db026 (diff)
Merge branch 'sh_eth'
Ben Hutchings says:

====================
Fixes for sh_eth #3

I'm continuing review and testing of Ethernet support on the R-Car H2
chip. This series fixes the last of the more serious issues I've found.
These are not tested on any of the other supported chips.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c | 80
1 file changed, 59 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 99a838db032e..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
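
[Editor's note: the central pattern of this series is worth spelling out. dma_map_single() can fail (for example when an IOMMU has no space left), and the old code ignored the returned handle entirely, using virt_to_phys() instead, which is only coincidentally correct when no IOMMU is in the way. A minimal sketch of the checked pattern, with placeholder names (example_map_rx_buffer and struct example_desc are illustrative, not this driver's code):

	/* Illustrative only -- placeholder names, not code from this driver */
	static int example_map_rx_buffer(struct device *dev, struct sk_buff *skb,
					 size_t len, struct example_desc *desc)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, addr)) {
			kfree_skb(skb);		/* never publish a bad address */
			return -ENOMEM;
		}
		desc->addr = addr;		/* safe to hand to the hardware */
		return 0;
	}

Note also that the fix above assigns mdp->rx_skbuff[i] only after the mapping succeeds, so the error path cannot leave a dangling pointer in the ring.]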
@@ -1358,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
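
[Editor's note on the msleep(2) bound: a maximum-length Ethernet frame occupies 1538 byte times on the wire (1500 payload + 18 header/FCS + 8 preamble/SFD + 12 inter-frame gap), and 1538 * 8 bits / 10 Mbit/s = 1230.4 us, which is presumably where the "< 1250 us" figure in the comment comes from. A 2 ms sleep therefore comfortably covers one last in-flight frame even at the slowest supported link speed.]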
@@ -1402,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1449,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
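
[Editor's note: the switch from dma_sync_single_for_cpu() to dma_unmap_single() is about buffer ownership. A sync_for_cpu gives the CPU a temporary look at a buffer the device will keep using, and is normally paired with dma_sync_single_for_device(); here the skb is handed up the stack for good and the descriptor is refilled with a freshly mapped buffer, so the old mapping must be torn down. The two idioms, sketched with placeholder names (dev, addr, len):

	/* CPU inspects the data, device keeps the mapping: */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... read the buffer ... */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);

	/* CPU takes the buffer permanently (as in this RX path): */
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	netif_receive_skb(skb);		/* the stack now owns the data */
]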
@@ -1471,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1575,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 
@@ -1594,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1989,9 +2028,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		napi_synchronize(&mdp->napi);
 		sh_eth_write(ndev, 0x0000, EESIPR);
 
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
+		sh_eth_dev_exit(ndev);
 
 		/* Free all the skbuffs in the Rx queue. */
 		sh_eth_ring_free(ndev);
@@ -2149,6 +2186,10 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
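
[Editor's note: returning NETDEV_TX_OK after freeing the skb is the idiomatic way to drop a packet in an ndo_start_xmit handler; NETDEV_TX_BUSY would make the core requeue and retry the same skb, turning a persistent mapping failure into a busy loop. The shape of the error path, sketched (example_start_xmit and its body are illustrative, not this driver's code):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
	{
		dma_addr_t addr;

		addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			kfree_skb(skb);		/* drop the packet... */
			return NETDEV_TX_OK;	/* ...but report it consumed */
		}
		/* queue the descriptor and kick the hardware here */
		return NETDEV_TX_OK;
	}
]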
@@ -2210,11 +2251,8 @@ static int sh_eth_close(struct net_device *ndev)
 	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);