Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c')
 -rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 139
 1 file changed, 106 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 2dd2ff5ecb5a..af0b867a6cf6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -782,7 +782,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
-		for (i = 0; i < RX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
 			if (mdp->rx_skbuff[i])
 				dev_kfree_skb(mdp->rx_skbuff[i]);
 		}
@@ -792,7 +792,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
 	/* Free Tx skb ringbuffer */
 	if (mdp->tx_skbuff) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_tx_ring; i++) {
 			if (mdp->tx_skbuff[i])
 				dev_kfree_skb(mdp->tx_skbuff[i]);
 		}
@@ -809,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sk_buff *skb;
 	struct sh_eth_rxdesc *rxdesc = NULL;
 	struct sh_eth_txdesc *txdesc = NULL;
-	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
-	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 
 	mdp->cur_rx = mdp->cur_tx = 0;
 	mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -818,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	memset(mdp->rx_ring, 0, rx_ringsize);
 
 	/* build Rx ring buffer */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -844,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		}
 	}
 
-	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
 
 	/* Mark the last entry as wrapping the ring. */
 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -852,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	memset(mdp->tx_ring, 0, tx_ringsize);
 
 	/* build Tx ring buffer */
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		mdp->tx_skbuff[i] = NULL;
 		txdesc = &mdp->tx_ring[i];
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -886,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->rx_buf_sz += NET_IP_ALIGN;
 
 	/* Allocate RX and TX skb rings */
-	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
 				GFP_KERNEL);
 	if (!mdp->rx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -894,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 		return ret;
 	}
 
-	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
 				GFP_KERNEL);
 	if (!mdp->tx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -903,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	}
 
 	/* Allocate all Rx descriptors. */
-	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
 					GFP_KERNEL);
 
@@ -917,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->dirty_rx = 0;
 
 	/* Allocate all Tx descriptors. */
-	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 					GFP_KERNEL);
 	if (!mdp->tx_ring) {
@@ -946,21 +946,21 @@ static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
 	int ringsize;
 
 	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
 				  mdp->rx_desc_dma);
 		mdp->rx_ring = NULL;
 	}
 
 	if (mdp->tx_ring) {
-		ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
 }
 
-static int sh_eth_dev_init(struct net_device *ndev)
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
 {
 	int ret = 0;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -1008,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
 		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1023,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
@@ -1036,10 +1038,12 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	if (mdp->cd->tpauser)
 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
-	/* Setting the Rx mode will start the Rx process. */
-	sh_eth_write(ndev, EDRRR_R, EDRRR);
+	if (start) {
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
 
-	netif_start_queue(ndev);
+		netif_start_queue(ndev);
+	}
 
 out:
 	return ret;
@@ -1054,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 	int entry = 0;
 
 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % TX_RING_SIZE;
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
 		txdesc = &mdp->tx_ring[entry];
 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
 			break;
@@ -1067,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 			freeNum++;
 		}
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
-		if (entry >= TX_RING_SIZE - 1)
+		if (entry >= mdp->num_tx_ring - 1)
 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 
 		ndev->stats.tx_packets++;
@@ -1082,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_rxdesc *rxdesc;
 
-	int entry = mdp->cur_rx % RX_RING_SIZE;
-	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+	int entry = mdp->cur_rx % mdp->num_rx_ring;
+	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
@@ -1134,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 			ndev->stats.rx_bytes += pkt_len;
 		}
 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
-		entry = (++mdp->cur_rx) % RX_RING_SIZE;
+		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 	}
 
 	/* Refill the Rx ring buffers. */
 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
-		entry = mdp->dirty_rx % RX_RING_SIZE;
+		entry = mdp->dirty_rx % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 		/* The size of the buffer is 16 byte boundary. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1157,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		}
-		if (entry >= RX_RING_SIZE - 1)
+		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
 		else
@@ -1557,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	}
 }
 
+static void sh_eth_get_ringparam(struct net_device *ndev,
+				 struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	ring->rx_max_pending = RX_RING_MAX;
+	ring->tx_max_pending = TX_RING_MAX;
+	ring->rx_pending = mdp->num_rx_ring;
+	ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	if (ring->tx_pending > TX_RING_MAX ||
+	    ring->rx_pending > RX_RING_MAX ||
+	    ring->tx_pending < TX_RING_MIN ||
+	    ring->rx_pending < RX_RING_MIN)
+		return -EINVAL;
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(ndev)) {
+		netif_tx_disable(ndev);
+		/* Disable interrupts by clearing the interrupt mask. */
+		sh_eth_write(ndev, 0x0000, EESIPR);
+		/* Stop the chip's Tx and Rx processes. */
+		sh_eth_write(ndev, 0, EDTRR);
+		sh_eth_write(ndev, 0, EDRRR);
+		synchronize_irq(ndev->irq);
+	}
+
+	/* Free all the skbuffs in the Rx queue. */
+	sh_eth_ring_free(ndev);
+	/* Free DMA buffer */
+	sh_eth_free_dma_buffer(mdp);
+
+	/* Set new parameters */
+	mdp->num_rx_ring = ring->rx_pending;
+	mdp->num_tx_ring = ring->tx_pending;
+
+	ret = sh_eth_ring_init(ndev);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+		return ret;
+	}
+	ret = sh_eth_dev_init(ndev, false);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+		return ret;
+	}
+
+	if (netif_running(ndev)) {
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
+		netif_wake_queue(ndev);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_settings = sh_eth_get_settings,
 	.set_settings = sh_eth_set_settings,
@@ -1567,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_strings = sh_eth_get_strings,
 	.get_ethtool_stats = sh_eth_get_ethtool_stats,
 	.get_sset_count = sh_eth_get_sset_count,
+	.get_ringparam = sh_eth_get_ringparam,
+	.set_ringparam = sh_eth_set_ringparam,
 };
 
 /* network device open function */
@@ -1597,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
 		goto out_free_irq;
 
 	/* device init */
-	ret = sh_eth_dev_init(ndev);
+	ret = sh_eth_dev_init(ndev, true);
 	if (ret)
 		goto out_free_irq;
 
@@ -1631,7 +1702,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 	ndev->stats.tx_errors++;
 
 	/* Free all the skbuffs in the Rx queue. */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		rxdesc = &mdp->rx_ring[i];
 		rxdesc->status = 0;
 		rxdesc->addr = 0xBADF00D0;
@@ -1639,14 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 			dev_kfree_skb(mdp->rx_skbuff[i]);
 		mdp->rx_skbuff[i] = NULL;
 	}
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		if (mdp->tx_skbuff[i])
 			dev_kfree_skb(mdp->tx_skbuff[i]);
 		mdp->tx_skbuff[i] = NULL;
 	}
 
 	/* device init */
-	sh_eth_dev_init(ndev);
+	sh_eth_dev_init(ndev, true);
 }
 
 /* Packet transmit function */
@@ -1658,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp->lock, flags);
-	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
 		if (!sh_eth_txfree(ndev)) {
 			if (netif_msg_tx_queued(mdp))
 				dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1669,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
-	entry = mdp->cur_tx % TX_RING_SIZE;
+	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
 	/* soft swap. */
@@ -1683,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	else
 		txdesc->buffer_length = skb->len;
 
-	if (entry >= TX_RING_SIZE - 1)
+	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
 	else
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -2313,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	ether_setup(ndev);
 
 	mdp = netdev_priv(ndev);
+	mdp->num_tx_ring = TX_RING_SIZE;
+	mdp->num_rx_ring = RX_RING_SIZE;
 	mdp->addr = ioremap(res->start, resource_size(res));
 	if (mdp->addr == NULL) {
 		ret = -ENOMEM;
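
With the patch applied, the new get/set_ringparam hooks are reached from userspace through the generic ethtool ring interface. The standalone sketch below is an illustration and not part of the patch: the interface name "eth0" and the ring size 128 are assumptions. It reads the current and maximum ring sizes with ETHTOOL_GRINGPARAM and requests new sizes with ETHTOOL_SRINGPARAM, which the kernel dispatches to sh_eth_get_ringparam() and sh_eth_set_ringparam() respectively.

/* Minimal userspace sketch: query and change ring sizes via SIOCETHTOOL.
 * "eth0" and the value 128 are example assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ring;

	/* Read current/maximum ring sizes (served by sh_eth_get_ringparam) */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx %u/%u tx %u/%u\n",
		       ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);

	/* Request new ring sizes (served by sh_eth_set_ringparam) */
	ring.cmd = ETHTOOL_SRINGPARAM;
	ring.rx_pending = 128;
	ring.tx_pending = 128;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRINGPARAM");

	close(fd);
	return 0;
}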