Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c'):

 drivers/net/ethernet/renesas/sh_eth.c | 196 ++++++++++++++++++++------------
 1 file changed, 134 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 37583a9d8853..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -498,6 +501,8 @@ static struct sh_eth_cpu_data r8a779x_data = {
 			  EESR_ECI,
 	.fdr_value	= 0x00000f0f,
 
+	.trscer_err_mask = DESC_I_RINT8,
+
 	.apr		= 1,
 	.mpr		= 1,
 	.tpauser	= 1,
@@ -538,8 +543,6 @@ static struct sh_eth_cpu_data sh7724_data = {
 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
 			  EESR_ECI,
 
-	.trscer_err_mask = DESC_I_RINT8,
-
 	.apr		= 1,
 	.mpr		= 1,
 	.tpauser	= 1,
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
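
Note: the hunk above is the core of the Rx DMA fix: the descriptor address now comes from dma_map_single() rather than virt_to_phys(), the mapping is checked with dma_mapping_error(), and the skb is published into rx_skbuff[] only once the mapping is known good. A minimal sketch of that refill step (names taken from sh_eth_ring_format(); the surrounding ring bookkeeping is elided):

	skb = netdev_alloc_skb(ndev, skbuff_size);
	if (!skb)
		break;			/* slot stays empty; retried on a later refill */
	sh_eth_set_receive_align(skb);

	dma_addr = dma_map_single(&ndev->dev, skb->data,
				  rxdesc->buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);		/* mapping failed: drop, leave the slot empty */
		break;
	}
	mdp->rx_skbuff[i] = skb;	/* publish only after the mapping succeeded */
	rxdesc->addr = dma_addr;	/* DMA address, not virt_to_phys(skb->data) */

Publishing the skb last means the free/unmap paths can never see a slot holding an skb whose mapping failed.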
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	if (start)
+	if (start) {
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	}
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
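
Note: sh_eth_dev_exit() centralises the DMA/MAC shutdown that sh_eth_close() and sh_eth_set_ringparam() previously open-coded, and did incompletely (the old code merely wrote 0 to EDTRR/EDRRR and freed the buffers while DMA could still be running). The teardown ordering its two callers in this patch converge on, as a sketch:

	mdp->irq_enabled = false;		/* ISR/NAPI may no longer re-arm EESIPR */
	synchronize_irq(ndev->irq);		/* wait out a handler already running */
	napi_disable(&mdp->napi);		/* close() path; set_ringparam() uses
						 * napi_synchronize() since NAPI stays up */
	sh_eth_write(ndev, 0x0000, EESIPR);	/* now mask all interrupt sources */

	sh_eth_dev_exit(ndev);			/* stop Tx/Rx DMA, poll stats, reset MAC */

	sh_eth_ring_free(ndev);			/* only now is freeing the buffers safe */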
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
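
Note: dma_sync_single_for_cpu() only hands the buffer to the CPU temporarily and leaves the mapping (plus any bounce buffer or IOMMU entry) alive. Since a completed packet's buffer leaves the driver for good via netif_receive_skb(), the mapping must be torn down with dma_unmap_single() instead. The intended lifecycle of one Rx buffer, sketched:

	/* refill: the device gains ownership of the buffer */
	dma_addr = dma_map_single(&ndev->dev, skb->data,
				  ALIGN(mdp->rx_buf_sz, 16), DMA_FROM_DEVICE);

	/* completion: the CPU takes the buffer back permanently, so the
	 * mapping is destroyed rather than merely synced
	 */
	dma_unmap_single(&ndev->dev, dma_addr,
			 ALIGN(mdp->rx_buf_sz, 16), DMA_FROM_DEVICE);
	netif_receive_skb(skb);		/* skb now belongs to the network stack */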
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 
@@ -1592,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
 		ret = IRQ_HANDLED;
 	else
-		goto other_irq;
+		goto out;
+
+	if (!likely(mdp->irq_enabled)) {
+		sh_eth_write(ndev, 0, EESIPR);
+		goto out;
+	}
 
 	if (intr_status & EESR_RX_CHECK) {
 		if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		sh_eth_error(ndev, intr_status);
 	}
 
-other_irq:
+out:
 	spin_unlock(&mdp->lock);
 
 	return ret;
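
Note: the new mdp->irq_enabled flag closes a race between teardown and the (potentially shared) interrupt line. Teardown clears the flag *before* masking EESIPR, so an interrupt that slips in between finds the flag clear, re-masks the hardware itself, and never schedules NAPI against rings that are about to be freed. The guard pattern in isolation (a sketch; the status read and the Tx/Rx dispatch are elided):

	spin_lock(&mdp->lock);
	/* ... read and ack EESR ... */
	if (!mdp->irq_enabled) {		/* teardown already started */
		sh_eth_write(ndev, 0, EESIPR);	/* re-mask everything and bail out */
		goto out;
	}
	/* normal Tx/Rx/error handling runs only while the flag is set */
out:
	spin_unlock(&mdp->lock);

As an aside, the patch spells the test `!likely(mdp->irq_enabled)`; that evaluates the same as `!mdp->irq_enabled`, though `unlikely(!mdp->irq_enabled)` would be the conventional way to express the branch hint.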
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
 	napi_complete(napi);
 
 	/* Reenable Rx interrupts */
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (mdp->irq_enabled)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
 	return budget - quota;
 }
@@ -1827,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
 	unsigned long flags;
 	int ret;
 
+	if (!mdp->phydev)
+		return -ENODEV;
+
 	spin_lock_irqsave(&mdp->lock, flags);
 	ret = phy_ethtool_gset(mdp->phydev, ecmd);
 	spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1841,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
 	unsigned long flags;
 	int ret;
 
+	if (!mdp->phydev)
+		return -ENODEV;
+
 	spin_lock_irqsave(&mdp->lock, flags);
 
 	/* disable tx and rx */
@@ -1875,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
 	unsigned long flags;
 	int ret;
 
+	if (!mdp->phydev)
+		return -ENODEV;
+
 	spin_lock_irqsave(&mdp->lock, flags);
 	ret = phy_start_aneg(mdp->phydev);
 	spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1959,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		return -EINVAL;
 
 	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
 		netif_tx_disable(ndev);
-		/* Disable interrupts by clearing the interrupt mask. */
-		sh_eth_write(ndev, 0x0000, EESIPR);
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
+
+		/* Serialise with the interrupt handler and NAPI, then
+		 * disable interrupts.  We have to clear the
+		 * irq_enabled flag first to ensure that interrupts
+		 * won't be re-enabled.
+		 */
+		mdp->irq_enabled = false;
 		synchronize_irq(ndev->irq);
-	}
+		napi_synchronize(&mdp->napi);
+		sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Free all the skbuffs in the Rx queue. */
-	sh_eth_ring_free(ndev);
-	/* Free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
+		sh_eth_dev_exit(ndev);
+
+		/* Free all the skbuffs in the Rx queue. */
+		sh_eth_ring_free(ndev);
+		/* Free DMA buffer */
+		sh_eth_free_dma_buffer(mdp);
+	}
 
 	/* Set new parameters */
 	mdp->num_rx_ring = ring->rx_pending;
 	mdp->num_tx_ring = ring->tx_pending;
 
-	ret = sh_eth_ring_init(ndev);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-		return ret;
-	}
-	ret = sh_eth_dev_init(ndev, false);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-		return ret;
-	}
-
 	if (netif_running(ndev)) {
+		ret = sh_eth_ring_init(ndev);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+				   __func__);
+			return ret;
+		}
+		ret = sh_eth_dev_init(ndev, false);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+				   __func__);
+			return ret;
+		}
+
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 		/* Setting the Rx mode will start the Rx process. */
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
-		netif_wake_queue(ndev);
+		netif_device_attach(ndev);
 	}
 
 	return 0;
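
Note: the resize path now brackets the whole operation with netif_device_detach()/netif_device_attach() instead of netif_tx_disable()/netif_wake_queue() alone. Detaching marks the device as not present, so ethtool and the transmit path back off entirely while the rings do not exist, and the ring/device re-initialisation (including re-setting irq_enabled) is done only for a running interface. The resulting skeleton, with the quiesce and re-init details elided:

	if (netif_running(ndev)) {
		netif_device_detach(ndev);	/* userspace entry points bail out */
		netif_tx_disable(ndev);
		/* ... quiesce IRQ/NAPI, sh_eth_dev_exit(), free old rings ... */
	}

	mdp->num_rx_ring = ring->rx_pending;	/* safe: nothing references the rings */
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		/* ... sh_eth_ring_init(), sh_eth_dev_init(), unmask IRQs ... */
		netif_device_attach(ndev);	/* reappear only once consistent */
	}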
@@ -2108,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
@@ -2117,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
-	if (skb->len < ETH_ZLEN)
-		txdesc->buffer_length = ETH_ZLEN;
-	else
-		txdesc->buffer_length = skb->len;
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
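
Note: two transmit fixes land here. skb_padto() pads undersized frames to ETH_ZLEN with real zeroed bytes (it frees the skb and returns non-zero if that allocation fails), replacing the old trick of reporting a buffer_length larger than the skb's actual data, which made the MAC transmit whatever bytes happened to follow the buffer. And the Tx mapping, like the Rx one, is now checked before the descriptor is armed. A sketch of the fixed tail of the xmit path:

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;		/* skb already freed by skb_padto() */

	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
		kfree_skb(skb);			/* drop; NETDEV_TX_OK avoids a requeue loop */
		return NETDEV_TX_OK;
	}
	txdesc->buffer_length = skb->len;	/* honest length: the frame is really padded */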
@@ -2172,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* Disable interrupts by clearing the interrupt mask. */
+	/* Serialise with the interrupt handler and NAPI, then disable
+	 * interrupts.  We have to clear the irq_enabled flag first to
+	 * ensure that interrupts won't be re-enabled.
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
 		phy_disconnect(mdp->phydev);
+		mdp->phydev = NULL;
 	}
 
 	free_irq(ndev->irq, ndev);
 
-	napi_disable(&mdp->napi);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
@@ -2417,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int i, ret;
 
-	if (unlikely(!mdp->cd->tsu))
+	if (!mdp->cd->tsu)
 		return 0;
 
 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2440,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
 	int i;
 
-	if (unlikely(!mdp->cd->tsu))
+	if (!mdp->cd->tsu)
 		return;
 
 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2450,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
 	}
 }
 
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	u32 ecmr_bits;
@@ -2462,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
 	/* Initial condition is MCT = 1, PRM = 0.
 	 * Depending on ndev->flags, set PRM or clear MCT
 	 */
-	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+	if (mdp->cd->tsu)
+		ecmr_bits |= ECMR_MCT;
 
 	if (!(ndev->flags & IFF_MULTICAST)) {
 		sh_eth_tsu_purge_mcast(ndev);
@@ -2491,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
 				}
 			}
 		}
-	} else {
-		/* Normal, unicast/broadcast-only mode. */
-		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
 	}
 
 	/* update the ethernet mode */
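
Note: ECMR_MCT enables the TSU's multicast CAM filter; on chips without a TSU there is no filter behind the bit, so setting it simply cut off all multicast (and hence IPv6 neighbour discovery) reception. Making the bit conditional on mdp->cd->tsu, and wiring sh_eth_set_rx_mode into the non-TSU netdev ops below, lets those chips receive multicast frames unfiltered. The resulting ECMR computation, sketched (the IFF_PROMISC branch shown is the driver's pre-existing behaviour, not part of this hunk):

	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;		/* a hardware filter exists: use it */

	if (ndev->flags & IFF_PROMISC)		/* pre-existing branch, unchanged */
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;

	sh_eth_write(ndev, ecmr_bits, ECMR);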
@@ -2701,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
 	.ndo_stop		= sh_eth_close,
 	.ndo_start_xmit		= sh_eth_start_xmit,
 	.ndo_get_stats		= sh_eth_get_stats,
+	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
 	.ndo_tx_timeout		= sh_eth_tx_timeout,
 	.ndo_do_ioctl		= sh_eth_do_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -2713,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
 	.ndo_stop		= sh_eth_close,
 	.ndo_start_xmit		= sh_eth_start_xmit,
 	.ndo_get_stats		= sh_eth_get_stats,
-	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
+	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
 	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
 	.ndo_tx_timeout		= sh_eth_tx_timeout,