Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--  drivers/net/gianfar.c  238
1 file changed, 184 insertions(+), 54 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c3f061957c04..ea7d5ddb7760 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/in.h>
+#include <linux/net_tstamp.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
 		rctrl |= RCTRL_PADDING(priv->padding);
 	}
 
+	/* Insert receive time stamps into padding alignment bytes */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+		rctrl &= ~RCTRL_PAL_MASK;
+		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+		priv->padding = 8;
+	}
+
 	/* keep vlan related bits if it's enabled */
 	if (priv->vlgrp) {
 		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
-	return priv->vlgrp || priv->rx_csum_enable;
+	return priv->vlgrp || priv->rx_csum_enable ||
+		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 }
 
 static void free_tx_pointers(struct gfar_private *priv)
@@ -549,12 +558,8 @@ static int gfar_parse_group(struct device_node *np,
 		struct gfar_private *priv, const char *model)
 {
 	u32 *queue_mask;
-	u64 addr, size;
-
-	addr = of_translate_address(np,
-			of_get_address(np, 0, &size, NULL));
-	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
 
+	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
 	if (!priv->gfargrp[priv->num_grps].regs)
 		return -ENOMEM;
 
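[Note: of_iomap() collapses the lookup/translate/ioremap sequence deleted
above into a single OF helper. A rough sketch of what it does internally,
assuming <linux/of_address.h>; the real helper also rejects invalid
resources:]

    struct resource res;
    void __iomem *regs = NULL;

    /* of_iomap(np, 0) is approximately: */
    if (of_address_to_resource(np, 0, &res) == 0)
            regs = ioremap(res.start, resource_size(&res));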
@@ -676,7 +681,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 		priv->rx_queue[i] = NULL;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
+		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
 				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
 		if (!priv->tx_queue[i]) {
 			err = -ENOMEM;
@@ -689,7 +694,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
-		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
 				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
 		if (!priv->rx_queue[i]) {
 			err = -ENOMEM;
@@ -742,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 			FSL_GIANFAR_DEV_HAS_CSUM |
 			FSL_GIANFAR_DEV_HAS_VLAN |
 			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+			FSL_GIANFAR_DEV_HAS_TIMER;
 
 	ctype = of_get_property(np, "phy-connection-type", NULL);
 
@@ -772,6 +778,48 @@ err_grp_init:
 	return err;
 }
 
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+			struct ifreq *ifr, int cmd)
+{
+	struct hwtstamp_config config;
+	struct gfar_private *priv = netdev_priv(netdev);
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->hwts_tx_en = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+			return -ERANGE;
+		priv->hwts_tx_en = 1;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		priv->hwts_rx_en = 0;
+		break;
+	default:
+		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+			return -ERANGE;
+		priv->hwts_rx_en = 1;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	}
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
 /* Ioctl MII Interface */
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -780,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	if (!netif_running(dev))
 		return -EINVAL;
 
+	if (cmd == SIOCSHWTSTAMP)
+		return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
 	if (!priv->phydev)
 		return -ENODEV;
 
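[Note: with this dispatch in place, user space drives the handler through
the standard SIOCSHWTSTAMP request. A minimal sketch of the caller side;
the interface name "eth0" and the UDP socket are assumptions, and error
handling is shortened:]

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    static int enable_hwtstamp(int fd)       /* fd: any socket, e.g. UDP */
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;

            memset(&cfg, 0, sizeof(cfg));
            cfg.tx_type = HWTSTAMP_TX_ON;        /* needs FSL_GIANFAR_DEV_HAS_TIMER */
            cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* driver reports FILTER_ALL back */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
            ifr.ifr_data = (void *)&cfg;

            /* fails with errno ERANGE when the eTSEC has no 1588 timer */
            return ioctl(fd, SIOCSHWTSTAMP, &ifr);
    }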
@@ -982,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
 	else
 		priv->padding = 0;
 
-	if (dev->features & NETIF_F_IP_CSUM)
+	if (dev->features & NETIF_F_IP_CSUM ||
+			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 		dev->hard_header_len += GMAC_FCB_LEN;
 
 	/* Program the isrg regs only if number of grps > 1 */
@@ -998,7 +1050,7 @@ static int gfar_probe(struct of_device *ofdev,
 	}
 
 	/* Need to reverse the bit maps as bit_map's MSB is q0
-	 * but, for_each_bit parses from right to left, which
+	 * but, for_each_set_bit parses from right to left, which
 	 * basically reverses the queue numbers */
 	for (i = 0; i< priv->num_grps; i++) {
 		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -1011,7 +1063,7 @@ static int gfar_probe(struct of_device *ofdev,
 	 * also assign queues to groups */
 	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
 		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
 				priv->num_rx_queues) {
 			priv->gfargrp[grp_idx].num_rx_queues++;
 			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1019,7 +1071,7 @@ static int gfar_probe(struct of_device *ofdev,
 			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
 		}
 		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-		for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
 				priv->num_tx_queues) {
 			priv->gfargrp[grp_idx].num_tx_queues++;
 			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1120,10 +1172,10 @@ static int gfar_probe(struct of_device *ofdev,
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	for (i = 0; i < priv->num_rx_queues; i++)
-		printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
 			dev->name, i, priv->rx_queue[i]->rx_ring_size);
 	for(i = 0; i < priv->num_tx_queues; i++)
-		printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
 			dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
 	return 0;
@@ -1515,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
 		gfar_write(&regs->dmactrl, tempval);
 
-		while (!(gfar_read(&regs->ievent) &
-			 (IEVENT_GRSC | IEVENT_GTSC)))
-			cpu_relax();
+		spin_event_timeout(((gfar_read(&regs->ievent) &
+			 (IEVENT_GRSC | IEVENT_GTSC)) ==
+			 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
 	}
 }
 
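[Note: spin_event_timeout() is the powerpc polling helper from
<asm/delay.h>: it spins until the condition is true or the given number of
microseconds has elapsed, so a timeout of -1 with no delay behaves like the
old cpu_relax() loop while also yielding whether the event was seen. A
simplified sketch of the macro's shape, from memory of the powerpc
implementation:]

    /* approximately spin_event_timeout(cond, timeout_us, 0) */
    unsigned long loops = tb_ticks_per_usec * timeout_us;
    unsigned long start = get_tbl();
    typeof(cond) ret;

    while (!(ret = (cond)) && tb_ticks_since(start) <= loops)
            cpu_relax();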
@@ -1638,13 +1690,13 @@ static void free_skb_resources(struct gfar_private *priv)
 	/* Go through all the buffer descriptors and free their data buffers */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
-		if(!tx_queue->tx_skbuff)
+		if(tx_queue->tx_skbuff)
 			free_skb_tx_queue(tx_queue);
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if(!rx_queue->rx_skbuff)
+		if(rx_queue->rx_skbuff)
 			free_skb_rx_queue(rx_queue);
 	}
 
@@ -1653,6 +1705,7 @@ static void free_skb_resources(struct gfar_private *priv)
 			sizeof(struct rxbd8) * priv->total_rx_ring_size,
 			priv->tx_queue[0]->tx_bd_base,
 			priv->tx_queue[0]->tx_bd_dma_base);
+	skb_queue_purge(&priv->rx_recycle);
 }
 
 void gfar_start(struct net_device *dev)
@@ -1686,7 +1739,7 @@ void gfar_start(struct net_device *dev)
 		gfar_write(&regs->imask, IMASK_DEFAULT);
 	}
 
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* prevent tx timeout */
 }
 
 void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1709,7 +1762,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 
 	if (priv->mode == MQ_MG_MODE) {
 		baddr = &regs->txic0;
-		for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
 			if (likely(priv->tx_queue[i]->txcoalescing)) {
 				gfar_write(baddr + i, 0);
 				gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1717,7 +1770,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 		}
 
 		baddr = &regs->rxic0;
-		for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
 			if (likely(priv->rx_queue[i]->rxcoalescing)) {
 				gfar_write(baddr + i, 0);
 				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -1926,23 +1979,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct gfar __iomem *regs = NULL;
 	struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *txbdp_start, *base;
+	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
 	u32 lstatus;
-	int i, rq = 0;
+	int i, rq = 0, do_tstamp = 0;
 	u32 bufaddr;
 	unsigned long flags;
-	unsigned int nr_frags, length;
-
+	unsigned int nr_frags, nr_txbds, length;
+	union skb_shared_tx *shtx;
 
 	rq = skb->queue_mapping;
 	tx_queue = priv->tx_queue[rq];
 	txq = netdev_get_tx_queue(dev, rq);
 	base = tx_queue->tx_bd_base;
 	regs = tx_queue->grp->regs;
+	shtx = skb_tx(skb);
+
+	/* check if time stamp should be generated */
+	if (unlikely(shtx->hardware && priv->hwts_tx_en))
+		do_tstamp = 1;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
+			(priv->vlgrp && vlan_tx_tag_present(skb)) ||
+			unlikely(do_tstamp)) &&
 			(skb_headroom(skb) < GMAC_FCB_LEN)) {
 		struct sk_buff *skb_new;
 
@@ -1959,8 +2018,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* calculate the required number of TxBDs for this skb */
+	if (unlikely(do_tstamp))
+		nr_txbds = nr_frags + 2;
+	else
+		nr_txbds = nr_frags + 1;
+
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > tx_queue->num_txbdfree) {
+	if (nr_txbds > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_tx_stop_queue(txq);
 		dev->stats.tx_fifo_errors++;
@@ -1972,9 +2037,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq->tx_packets ++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
+	lstatus = txbdp->lstatus;
+
+	/* Time stamp insertion requires one additional TxBD */
+	if (unlikely(do_tstamp))
+		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+				tx_queue->tx_ring_size);
 
 	if (nr_frags == 0) {
-		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		if (unlikely(do_tstamp))
+			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+					TXBD_INTERRUPT);
+		else
+			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
@@ -2020,11 +2095,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gfar_tx_vlan(skb, fcb);
 	}
 
-	/* setup the TxBD length and buffer pointer for the first BD */
+	/* Setup tx hardware time stamping if requested */
+	if (unlikely(do_tstamp)) {
+		shtx->in_progress = 1;
+		if (fcb == NULL)
+			fcb = gfar_add_fcb(skb);
+		fcb->ptp = 1;
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
-	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	/*
+	 * If time stamping is requested one additional TxBD must be set up. The
+	 * first TxBD points to the FCB and must have a data length of
+	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+	 * the full frame length.
+	 */
+	if (unlikely(do_tstamp)) {
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+				(skb_headlen(skb) - GMAC_FCB_LEN);
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+	} else {
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	}
 
 	/*
 	 * We can work in parallel with gfar_clean_tx_ring(), except
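[Note: the descriptor layout this produces for a time-stamped,
unfragmented frame, sketched from the lengths and flags set above:]

    /*
     * txbdp_start:  bufPtr -> FCB (fcb->ptp = 1),
     *               length = GMAC_FCB_LEN, TXBD_CRC | TXBD_READY | TXBD_TOE
     * txbdp_tstamp: bufPtr -> skb->data + GMAC_FCB_LEN,
     *               length = skb_headlen(skb) - GMAC_FCB_LEN,
     *               TXBD_READY (plus TXBD_LAST | TXBD_INTERRUPT when
     *               nr_frags == 0)
     */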
@@ -2064,9 +2160,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	tx_queue->num_txbdfree -= (nr_frags + 1);
-
-	dev->trans_start = jiffies;
+	tx_queue->num_txbdfree -= (nr_txbds);
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full. We need to tell the kernel to stop sending us stuff. */
@@ -2092,7 +2186,6 @@ static int gfar_close(struct net_device *dev)
 
 	disable_napi(priv);
 
-	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
 	stop_gfar(dev);
 
@@ -2255,16 +2348,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct txbd8 *bdp;
+	struct txbd8 *bdp, *next = NULL;
 	struct txbd8 *lbdp = NULL;
 	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
 	int tx_ring_size = tx_queue->tx_ring_size;
-	int frags = 0;
+	int frags = 0, nr_txbds = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
+	size_t buflen;
+	union skb_shared_tx *shtx;
 
 	rx_queue = priv->rx_queue[tx_queue->qindex];
 	bdp = tx_queue->dirty_tx;
@@ -2274,7 +2369,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		unsigned long flags;
 
 		frags = skb_shinfo(skb)->nr_frags;
-		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+		/*
+		 * When time stamping, one additional TxBD must be freed.
+		 * Also, we need to dma_unmap_single() the TxPAL.
+		 */
+		shtx = skb_tx(skb);
+		if (unlikely(shtx->in_progress))
+			nr_txbds = frags + 2;
+		else
+			nr_txbds = frags + 1;
+
+		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
 		lstatus = lbdp->lstatus;
 
@@ -2283,10 +2389,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		    (lstatus & BD_LENGTH_MASK))
 			break;
 
-		dma_unmap_single(&priv->ofdev->dev,
-				bdp->bufPtr,
-				bdp->length,
-				DMA_TO_DEVICE);
+		if (unlikely(shtx->in_progress)) {
+			next = next_txbd(bdp, base, tx_ring_size);
+			buflen = next->length + GMAC_FCB_LEN;
+		} else
+			buflen = bdp->length;
+
+		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+				buflen, DMA_TO_DEVICE);
+
+		if (unlikely(shtx->in_progress)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+			skb_tstamp_tx(skb, &shhwtstamps);
+			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			bdp = next;
+		}
 
 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
 		bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2318,7 +2438,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		howmany++;
 		spin_lock_irqsave(&tx_queue->txlock, flags);
-		tx_queue->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += nr_txbds;
 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
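[Note: skb_tstamp_tx() above queues the stamp on the socket's error queue;
user space collects it with SO_TIMESTAMPING plus MSG_ERRQUEUE. A minimal
sketch of the receive side, assuming the socket was set up with
SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE:]

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/errqueue.h>

    static void read_tx_tstamp(int fd)
    {
            char data[256], ctrl[512];
            struct iovec iov = { data, sizeof(data) };
            struct msghdr msg;
            struct cmsghdr *cm;

            memset(&msg, 0, sizeof(msg));
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;
            msg.msg_control = ctrl;
            msg.msg_controllen = sizeof(ctrl);

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                    if (cm->cmsg_level == SOL_SOCKET &&
                        cm->cmsg_type == SO_TIMESTAMPING) {
                            struct scm_timestamping *ts =
                                    (void *)CMSG_DATA(cm);
                            /* ts->ts[2] is the raw hardware time stamp */
                    }
    }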
@@ -2393,6 +2513,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
 	 * as many bytes as needed to align the data properly
 	 */
 	skb_reserve(skb, alignamount);
+	GFAR_CB(skb)->alignamount = alignamount;
 
 	return skb;
 }
@@ -2473,6 +2594,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 		skb_pull(skb, amount_pull);
 	}
 
+	/* Get receive timestamp from the skb */
+	if (priv->hwts_rx_en) {
+		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+		u64 *ns = (u64 *) skb->data;
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+	}
+
+	if (priv->padding)
+		skb_pull(skb, priv->padding);
+
 	if (priv->rx_csum_enable)
 		gfar_rx_checksum(skb, fcb);
 
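[Note: the stamp stored via skb_hwtstamps() here is what the stack hands to
applications as a SO_TIMESTAMPING control message. A minimal sketch of
enabling that on the receive socket:]

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_rx_tstamps(int fd)
    {
            int flags = SOF_TIMESTAMPING_RX_HARDWARE |
                        SOF_TIMESTAMPING_RAW_HARDWARE;

            /* stamps then arrive as SO_TIMESTAMPING cmsgs on recvmsg() */
            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }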
@@ -2509,8 +2641,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 	bdp = rx_queue->cur_rx;
 	base = rx_queue->rx_bd_base;
 
-	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
-		priv->padding;
+	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
 		struct sk_buff *newskb;
@@ -2533,13 +2664,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			newskb = skb;
 		else if (skb) {
 			/*
-			 * We need to reset ->data to what it
+			 * We need to un-reserve() the skb to what it
 			 * was before gfar_new_skb() re-aligned
 			 * it to an RXBUF_ALIGNMENT boundary
 			 * before we put the skb back on the
 			 * recycle list.
 			 */
-			skb->data = skb->head + NET_SKB_PAD;
+			skb_reserve(skb, -GFAR_CB(skb)->alignamount);
 			__skb_queue_head(&priv->rx_recycle, skb);
 		}
 	} else {
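[Note: GFAR_CB(skb)->alignamount records exactly how far gfar_new_skb()
shifted skb->data, so the negative skb_reserve() above undoes that shift
precisely instead of assuming NET_SKB_PAD. The pairing, sketched with the
alignment computation as it appears in gfar_new_skb():]

    /* gfar_new_skb(): align the RX buffer and remember the shift */
    alignamount = RXBUF_ALIGNMENT -
            (((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1));
    skb_reserve(skb, alignamount);
    GFAR_CB(skb)->alignamount = alignamount;

    /* gfar_clean_rx_ring(): undo it before recycling the skb */
    skb_reserve(skb, -GFAR_CB(skb)->alignamount);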
@@ -2610,7 +2741,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 		budget_per_queue = left_over_budget/num_queues;
 		left_over_budget = 0;
 
-		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
 			if (test_bit(i, &serviced_queues))
 				continue;
 			rx_queue = priv->rx_queue[i];
@@ -2797,7 +2928,7 @@ static void adjust_link(struct net_device *dev)
  * whenever dev->flags is changed */
 static void gfar_set_multi(struct net_device *dev)
 {
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
@@ -2870,13 +3001,12 @@ static void gfar_set_multi(struct net_device *dev)
 		return;
 
 	/* Parse the list, and set the appropriate bits */
-	netdev_for_each_mc_addr(mc_ptr, dev) {
+	netdev_for_each_mc_addr(ha, dev) {
 		if (idx < em_num) {
-			gfar_set_mac_for_addr(dev, idx,
-					mc_ptr->dmi_addr);
+			gfar_set_mac_for_addr(dev, idx, ha->addr);
 			idx++;
 		} else
-			gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+			gfar_set_hash_for_addr(dev, ha->addr);
 		}
 	}
 