diff options
| author | Patrick McHardy <kaber@trash.net> | 2010-04-20 10:02:01 -0400 |
|---|---|---|
| committer | Patrick McHardy <kaber@trash.net> | 2010-04-20 10:02:01 -0400 |
| commit | 62910554656cdcd6b6f84a5154c4155aae4ca231 (patch) | |
| tree | dcf14004f6fd2ef7154362ff948bfeba0f3ea92d /drivers/net/gianfar.c | |
| parent | 22265a5c3c103cf8c50be62e6c90d045eb649e6d (diff) | |
| parent | ab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff) | |
Merge branch 'master' of /repos/git/net-next-2.6
Conflicts:
Documentation/feature-removal-schedule.txt
net/ipv6/netfilter/ip6t_REJECT.c
net/netfilter/xt_limit.c
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'drivers/net/gianfar.c')
| -rw-r--r-- | drivers/net/gianfar.c | 208 |
1 file changed, 172 insertions, 36 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b6715553cf17..032073d1e3d2 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
| @@ -82,6 +82,7 @@ | |||
| 82 | #include <linux/tcp.h> | 82 | #include <linux/tcp.h> |
| 83 | #include <linux/udp.h> | 83 | #include <linux/udp.h> |
| 84 | #include <linux/in.h> | 84 | #include <linux/in.h> |
| 85 | #include <linux/net_tstamp.h> | ||
| 85 | 86 | ||
| 86 | #include <asm/io.h> | 87 | #include <asm/io.h> |
| 87 | #include <asm/irq.h> | 88 | #include <asm/irq.h> |
| @@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev) | |||
| 377 | rctrl |= RCTRL_PADDING(priv->padding); | 378 | rctrl |= RCTRL_PADDING(priv->padding); |
| 378 | } | 379 | } |
| 379 | 380 | ||
| 381 | /* Insert receive time stamps into padding alignment bytes */ | ||
| 382 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { | ||
| 383 | rctrl &= ~RCTRL_PAL_MASK; | ||
| 384 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8); | ||
| 385 | priv->padding = 8; | ||
| 386 | } | ||
| 387 | |||
| 380 | /* keep vlan related bits if it's enabled */ | 388 | /* keep vlan related bits if it's enabled */ |
| 381 | if (priv->vlgrp) { | 389 | if (priv->vlgrp) { |
| 382 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | 390 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; |
| @@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv) | |||
| 501 | /* Returns 1 if incoming frames use an FCB */ | 509 | /* Returns 1 if incoming frames use an FCB */ |
| 502 | static inline int gfar_uses_fcb(struct gfar_private *priv) | 510 | static inline int gfar_uses_fcb(struct gfar_private *priv) |
| 503 | { | 511 | { |
| 504 | return priv->vlgrp || priv->rx_csum_enable; | 512 | return priv->vlgrp || priv->rx_csum_enable || |
| 513 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); | ||
| 505 | } | 514 | } |
| 506 | 515 | ||
| 507 | static void free_tx_pointers(struct gfar_private *priv) | 516 | static void free_tx_pointers(struct gfar_private *priv) |
| @@ -676,7 +685,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
| 676 | priv->rx_queue[i] = NULL; | 685 | priv->rx_queue[i] = NULL; |
| 677 | 686 | ||
| 678 | for (i = 0; i < priv->num_tx_queues; i++) { | 687 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 679 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( | 688 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( |
| 680 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | 689 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); |
| 681 | if (!priv->tx_queue[i]) { | 690 | if (!priv->tx_queue[i]) { |
| 682 | err = -ENOMEM; | 691 | err = -ENOMEM; |
| @@ -689,7 +698,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
| 689 | } | 698 | } |
| 690 | 699 | ||
| 691 | for (i = 0; i < priv->num_rx_queues; i++) { | 700 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 692 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( | 701 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( |
| 693 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | 702 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); |
| 694 | if (!priv->rx_queue[i]) { | 703 | if (!priv->rx_queue[i]) { |
| 695 | err = -ENOMEM; | 704 | err = -ENOMEM; |
| @@ -742,7 +751,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
| 742 | FSL_GIANFAR_DEV_HAS_CSUM | | 751 | FSL_GIANFAR_DEV_HAS_CSUM | |
| 743 | FSL_GIANFAR_DEV_HAS_VLAN | | 752 | FSL_GIANFAR_DEV_HAS_VLAN | |
| 744 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | 753 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | |
| 745 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; | 754 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | |
| 755 | FSL_GIANFAR_DEV_HAS_TIMER; | ||
| 746 | 756 | ||
| 747 | ctype = of_get_property(np, "phy-connection-type", NULL); | 757 | ctype = of_get_property(np, "phy-connection-type", NULL); |
| 748 | 758 | ||
| @@ -772,6 +782,48 @@ err_grp_init: | |||
| 772 | return err; | 782 | return err; |
| 773 | } | 783 | } |
| 774 | 784 | ||
| 785 | static int gfar_hwtstamp_ioctl(struct net_device *netdev, | ||
| 786 | struct ifreq *ifr, int cmd) | ||
| 787 | { | ||
| 788 | struct hwtstamp_config config; | ||
| 789 | struct gfar_private *priv = netdev_priv(netdev); | ||
| 790 | |||
| 791 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | ||
| 792 | return -EFAULT; | ||
| 793 | |||
| 794 | /* reserved for future extensions */ | ||
| 795 | if (config.flags) | ||
| 796 | return -EINVAL; | ||
| 797 | |||
| 798 | switch (config.tx_type) { | ||
| 799 | case HWTSTAMP_TX_OFF: | ||
| 800 | priv->hwts_tx_en = 0; | ||
| 801 | break; | ||
| 802 | case HWTSTAMP_TX_ON: | ||
| 803 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
| 804 | return -ERANGE; | ||
| 805 | priv->hwts_tx_en = 1; | ||
| 806 | break; | ||
| 807 | default: | ||
| 808 | return -ERANGE; | ||
| 809 | } | ||
| 810 | |||
| 811 | switch (config.rx_filter) { | ||
| 812 | case HWTSTAMP_FILTER_NONE: | ||
| 813 | priv->hwts_rx_en = 0; | ||
| 814 | break; | ||
| 815 | default: | ||
| 816 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
| 817 | return -ERANGE; | ||
| 818 | priv->hwts_rx_en = 1; | ||
| 819 | config.rx_filter = HWTSTAMP_FILTER_ALL; | ||
| 820 | break; | ||
| 821 | } | ||
| 822 | |||
| 823 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | ||
| 824 | -EFAULT : 0; | ||
| 825 | } | ||
| 826 | |||
| 775 | /* Ioctl MII Interface */ | 827 | /* Ioctl MII Interface */ |
| 776 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 828 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
| 777 | { | 829 | { |
| @@ -780,6 +832,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 780 | if (!netif_running(dev)) | 832 | if (!netif_running(dev)) |
| 781 | return -EINVAL; | 833 | return -EINVAL; |
| 782 | 834 | ||
| 835 | if (cmd == SIOCSHWTSTAMP) | ||
| 836 | return gfar_hwtstamp_ioctl(dev, rq, cmd); | ||
| 837 | |||
| 783 | if (!priv->phydev) | 838 | if (!priv->phydev) |
| 784 | return -ENODEV; | 839 | return -ENODEV; |
| 785 | 840 | ||
| @@ -982,7 +1037,8 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 982 | else | 1037 | else |
| 983 | priv->padding = 0; | 1038 | priv->padding = 0; |
| 984 | 1039 | ||
| 985 | if (dev->features & NETIF_F_IP_CSUM) | 1040 | if (dev->features & NETIF_F_IP_CSUM || |
| 1041 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) | ||
| 986 | dev->hard_header_len += GMAC_FCB_LEN; | 1042 | dev->hard_header_len += GMAC_FCB_LEN; |
| 987 | 1043 | ||
| 988 | /* Program the isrg regs only if number of grps > 1 */ | 1044 | /* Program the isrg regs only if number of grps > 1 */ |
| @@ -1120,10 +1176,10 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 1120 | /* provided which set of benchmarks. */ | 1176 | /* provided which set of benchmarks. */ |
| 1121 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); | 1177 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
| 1122 | for (i = 0; i < priv->num_rx_queues; i++) | 1178 | for (i = 0; i < priv->num_rx_queues; i++) |
| 1123 | printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", | 1179 | printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", |
| 1124 | dev->name, i, priv->rx_queue[i]->rx_ring_size); | 1180 | dev->name, i, priv->rx_queue[i]->rx_ring_size); |
| 1125 | for(i = 0; i < priv->num_tx_queues; i++) | 1181 | for(i = 0; i < priv->num_tx_queues; i++) |
| 1126 | printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", | 1182 | printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", |
| 1127 | dev->name, i, priv->tx_queue[i]->tx_ring_size); | 1183 | dev->name, i, priv->tx_queue[i]->tx_ring_size); |
| 1128 | 1184 | ||
| 1129 | return 0; | 1185 | return 0; |
| @@ -1638,13 +1694,13 @@ static void free_skb_resources(struct gfar_private *priv) | |||
| 1638 | /* Go through all the buffer descriptors and free their data buffers */ | 1694 | /* Go through all the buffer descriptors and free their data buffers */ |
| 1639 | for (i = 0; i < priv->num_tx_queues; i++) { | 1695 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 1640 | tx_queue = priv->tx_queue[i]; | 1696 | tx_queue = priv->tx_queue[i]; |
| 1641 | if(!tx_queue->tx_skbuff) | 1697 | if(tx_queue->tx_skbuff) |
| 1642 | free_skb_tx_queue(tx_queue); | 1698 | free_skb_tx_queue(tx_queue); |
| 1643 | } | 1699 | } |
| 1644 | 1700 | ||
| 1645 | for (i = 0; i < priv->num_rx_queues; i++) { | 1701 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1646 | rx_queue = priv->rx_queue[i]; | 1702 | rx_queue = priv->rx_queue[i]; |
| 1647 | if(!rx_queue->rx_skbuff) | 1703 | if(rx_queue->rx_skbuff) |
| 1648 | free_skb_rx_queue(rx_queue); | 1704 | free_skb_rx_queue(rx_queue); |
| 1649 | } | 1705 | } |
| 1650 | 1706 | ||
| @@ -1926,23 +1982,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1926 | struct netdev_queue *txq; | 1982 | struct netdev_queue *txq; |
| 1927 | struct gfar __iomem *regs = NULL; | 1983 | struct gfar __iomem *regs = NULL; |
| 1928 | struct txfcb *fcb = NULL; | 1984 | struct txfcb *fcb = NULL; |
| 1929 | struct txbd8 *txbdp, *txbdp_start, *base; | 1985 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
| 1930 | u32 lstatus; | 1986 | u32 lstatus; |
| 1931 | int i, rq = 0; | 1987 | int i, rq = 0, do_tstamp = 0; |
| 1932 | u32 bufaddr; | 1988 | u32 bufaddr; |
| 1933 | unsigned long flags; | 1989 | unsigned long flags; |
| 1934 | unsigned int nr_frags, length; | 1990 | unsigned int nr_frags, nr_txbds, length; |
| 1935 | 1991 | union skb_shared_tx *shtx; | |
| 1936 | 1992 | ||
| 1937 | rq = skb->queue_mapping; | 1993 | rq = skb->queue_mapping; |
| 1938 | tx_queue = priv->tx_queue[rq]; | 1994 | tx_queue = priv->tx_queue[rq]; |
| 1939 | txq = netdev_get_tx_queue(dev, rq); | 1995 | txq = netdev_get_tx_queue(dev, rq); |
| 1940 | base = tx_queue->tx_bd_base; | 1996 | base = tx_queue->tx_bd_base; |
| 1941 | regs = tx_queue->grp->regs; | 1997 | regs = tx_queue->grp->regs; |
| 1998 | shtx = skb_tx(skb); | ||
| 1999 | |||
| 2000 | /* check if time stamp should be generated */ | ||
| 2001 | if (unlikely(shtx->hardware && priv->hwts_tx_en)) | ||
| 2002 | do_tstamp = 1; | ||
| 1942 | 2003 | ||
| 1943 | /* make space for additional header when fcb is needed */ | 2004 | /* make space for additional header when fcb is needed */ |
| 1944 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | 2005 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || |
| 1945 | (priv->vlgrp && vlan_tx_tag_present(skb))) && | 2006 | (priv->vlgrp && vlan_tx_tag_present(skb)) || |
| 2007 | unlikely(do_tstamp)) && | ||
| 1946 | (skb_headroom(skb) < GMAC_FCB_LEN)) { | 2008 | (skb_headroom(skb) < GMAC_FCB_LEN)) { |
| 1947 | struct sk_buff *skb_new; | 2009 | struct sk_buff *skb_new; |
| 1948 | 2010 | ||
| @@ -1959,8 +2021,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1959 | /* total number of fragments in the SKB */ | 2021 | /* total number of fragments in the SKB */ |
| 1960 | nr_frags = skb_shinfo(skb)->nr_frags; | 2022 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 1961 | 2023 | ||
| 2024 | /* calculate the required number of TxBDs for this skb */ | ||
| 2025 | if (unlikely(do_tstamp)) | ||
| 2026 | nr_txbds = nr_frags + 2; | ||
| 2027 | else | ||
| 2028 | nr_txbds = nr_frags + 1; | ||
| 2029 | |||
| 1962 | /* check if there is space to queue this packet */ | 2030 | /* check if there is space to queue this packet */ |
| 1963 | if ((nr_frags+1) > tx_queue->num_txbdfree) { | 2031 | if (nr_txbds > tx_queue->num_txbdfree) { |
| 1964 | /* no space, stop the queue */ | 2032 | /* no space, stop the queue */ |
| 1965 | netif_tx_stop_queue(txq); | 2033 | netif_tx_stop_queue(txq); |
| 1966 | dev->stats.tx_fifo_errors++; | 2034 | dev->stats.tx_fifo_errors++; |
| @@ -1972,9 +2040,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1972 | txq->tx_packets ++; | 2040 | txq->tx_packets ++; |
| 1973 | 2041 | ||
| 1974 | txbdp = txbdp_start = tx_queue->cur_tx; | 2042 | txbdp = txbdp_start = tx_queue->cur_tx; |
| 2043 | lstatus = txbdp->lstatus; | ||
| 2044 | |||
| 2045 | /* Time stamp insertion requires one additional TxBD */ | ||
| 2046 | if (unlikely(do_tstamp)) | ||
| 2047 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | ||
| 2048 | tx_queue->tx_ring_size); | ||
| 1975 | 2049 | ||
| 1976 | if (nr_frags == 0) { | 2050 | if (nr_frags == 0) { |
| 1977 | lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 2051 | if (unlikely(do_tstamp)) |
| 2052 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | ||
| 2053 | TXBD_INTERRUPT); | ||
| 2054 | else | ||
| 2055 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
| 1978 | } else { | 2056 | } else { |
| 1979 | /* Place the fragment addresses and lengths into the TxBDs */ | 2057 | /* Place the fragment addresses and lengths into the TxBDs */ |
| 1980 | for (i = 0; i < nr_frags; i++) { | 2058 | for (i = 0; i < nr_frags; i++) { |
| @@ -2020,11 +2098,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2020 | gfar_tx_vlan(skb, fcb); | 2098 | gfar_tx_vlan(skb, fcb); |
| 2021 | } | 2099 | } |
| 2022 | 2100 | ||
| 2023 | /* setup the TxBD length and buffer pointer for the first BD */ | 2101 | /* Setup tx hardware time stamping if requested */ |
| 2102 | if (unlikely(do_tstamp)) { | ||
| 2103 | shtx->in_progress = 1; | ||
| 2104 | if (fcb == NULL) | ||
| 2105 | fcb = gfar_add_fcb(skb); | ||
| 2106 | fcb->ptp = 1; | ||
| 2107 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
| 2108 | } | ||
| 2109 | |||
| 2024 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, | 2110 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
| 2025 | skb_headlen(skb), DMA_TO_DEVICE); | 2111 | skb_headlen(skb), DMA_TO_DEVICE); |
| 2026 | 2112 | ||
| 2027 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | 2113 | /* |
| 2114 | * If time stamping is requested one additional TxBD must be set up. The | ||
| 2115 | * first TxBD points to the FCB and must have a data length of | ||
| 2116 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | ||
| 2117 | * the full frame length. | ||
| 2118 | */ | ||
| 2119 | if (unlikely(do_tstamp)) { | ||
| 2120 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; | ||
| 2121 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | | ||
| 2122 | (skb_headlen(skb) - GMAC_FCB_LEN); | ||
| 2123 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; | ||
| 2124 | } else { | ||
| 2125 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | ||
| 2126 | } | ||
| 2028 | 2127 | ||
| 2029 | /* | 2128 | /* |
| 2030 | * We can work in parallel with gfar_clean_tx_ring(), except | 2129 | * We can work in parallel with gfar_clean_tx_ring(), except |
| @@ -2064,7 +2163,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2064 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); | 2163 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
| 2065 | 2164 | ||
| 2066 | /* reduce TxBD free count */ | 2165 | /* reduce TxBD free count */ |
| 2067 | tx_queue->num_txbdfree -= (nr_frags + 1); | 2166 | tx_queue->num_txbdfree -= (nr_txbds); |
| 2068 | 2167 | ||
| 2069 | dev->trans_start = jiffies; | 2168 | dev->trans_start = jiffies; |
| 2070 | 2169 | ||
| @@ -2255,16 +2354,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
| 2255 | struct net_device *dev = tx_queue->dev; | 2354 | struct net_device *dev = tx_queue->dev; |
| 2256 | struct gfar_private *priv = netdev_priv(dev); | 2355 | struct gfar_private *priv = netdev_priv(dev); |
| 2257 | struct gfar_priv_rx_q *rx_queue = NULL; | 2356 | struct gfar_priv_rx_q *rx_queue = NULL; |
| 2258 | struct txbd8 *bdp; | 2357 | struct txbd8 *bdp, *next = NULL; |
| 2259 | struct txbd8 *lbdp = NULL; | 2358 | struct txbd8 *lbdp = NULL; |
| 2260 | struct txbd8 *base = tx_queue->tx_bd_base; | 2359 | struct txbd8 *base = tx_queue->tx_bd_base; |
| 2261 | struct sk_buff *skb; | 2360 | struct sk_buff *skb; |
| 2262 | int skb_dirtytx; | 2361 | int skb_dirtytx; |
| 2263 | int tx_ring_size = tx_queue->tx_ring_size; | 2362 | int tx_ring_size = tx_queue->tx_ring_size; |
| 2264 | int frags = 0; | 2363 | int frags = 0, nr_txbds = 0; |
| 2265 | int i; | 2364 | int i; |
| 2266 | int howmany = 0; | 2365 | int howmany = 0; |
| 2267 | u32 lstatus; | 2366 | u32 lstatus; |
| 2367 | size_t buflen; | ||
| 2368 | union skb_shared_tx *shtx; | ||
| 2268 | 2369 | ||
| 2269 | rx_queue = priv->rx_queue[tx_queue->qindex]; | 2370 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
| 2270 | bdp = tx_queue->dirty_tx; | 2371 | bdp = tx_queue->dirty_tx; |
| @@ -2274,7 +2375,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
| 2274 | unsigned long flags; | 2375 | unsigned long flags; |
| 2275 | 2376 | ||
| 2276 | frags = skb_shinfo(skb)->nr_frags; | 2377 | frags = skb_shinfo(skb)->nr_frags; |
| 2277 | lbdp = skip_txbd(bdp, frags, base, tx_ring_size); | 2378 | |
| 2379 | /* | ||
| 2380 | * When time stamping, one additional TxBD must be freed. | ||
| 2381 | * Also, we need to dma_unmap_single() the TxPAL. | ||
| 2382 | */ | ||
| 2383 | shtx = skb_tx(skb); | ||
| 2384 | if (unlikely(shtx->in_progress)) | ||
| 2385 | nr_txbds = frags + 2; | ||
| 2386 | else | ||
| 2387 | nr_txbds = frags + 1; | ||
| 2388 | |||
| 2389 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | ||
| 2278 | 2390 | ||
| 2279 | lstatus = lbdp->lstatus; | 2391 | lstatus = lbdp->lstatus; |
| 2280 | 2392 | ||
| @@ -2283,10 +2395,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
| 2283 | (lstatus & BD_LENGTH_MASK)) | 2395 | (lstatus & BD_LENGTH_MASK)) |
| 2284 | break; | 2396 | break; |
| 2285 | 2397 | ||
| 2286 | dma_unmap_single(&priv->ofdev->dev, | 2398 | if (unlikely(shtx->in_progress)) { |
| 2287 | bdp->bufPtr, | 2399 | next = next_txbd(bdp, base, tx_ring_size); |
| 2288 | bdp->length, | 2400 | buflen = next->length + GMAC_FCB_LEN; |
| 2289 | DMA_TO_DEVICE); | 2401 | } else |
| 2402 | buflen = bdp->length; | ||
| 2403 | |||
| 2404 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | ||
| 2405 | buflen, DMA_TO_DEVICE); | ||
| 2406 | |||
| 2407 | if (unlikely(shtx->in_progress)) { | ||
| 2408 | struct skb_shared_hwtstamps shhwtstamps; | ||
| 2409 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); | ||
| 2410 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | ||
| 2411 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | ||
| 2412 | skb_tstamp_tx(skb, &shhwtstamps); | ||
| 2413 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
| 2414 | bdp = next; | ||
| 2415 | } | ||
| 2290 | 2416 | ||
| 2291 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | 2417 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
| 2292 | bdp = next_txbd(bdp, base, tx_ring_size); | 2418 | bdp = next_txbd(bdp, base, tx_ring_size); |
| @@ -2318,7 +2444,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
| 2318 | 2444 | ||
| 2319 | howmany++; | 2445 | howmany++; |
| 2320 | spin_lock_irqsave(&tx_queue->txlock, flags); | 2446 | spin_lock_irqsave(&tx_queue->txlock, flags); |
| 2321 | tx_queue->num_txbdfree += frags + 1; | 2447 | tx_queue->num_txbdfree += nr_txbds; |
| 2322 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 2448 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
| 2323 | } | 2449 | } |
| 2324 | 2450 | ||
| @@ -2393,6 +2519,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev) | |||
| 2393 | * as many bytes as needed to align the data properly | 2519 | * as many bytes as needed to align the data properly |
| 2394 | */ | 2520 | */ |
| 2395 | skb_reserve(skb, alignamount); | 2521 | skb_reserve(skb, alignamount); |
| 2522 | GFAR_CB(skb)->alignamount = alignamount; | ||
| 2396 | 2523 | ||
| 2397 | return skb; | 2524 | return skb; |
| 2398 | } | 2525 | } |
| @@ -2473,6 +2600,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
| 2473 | skb_pull(skb, amount_pull); | 2600 | skb_pull(skb, amount_pull); |
| 2474 | } | 2601 | } |
| 2475 | 2602 | ||
| 2603 | /* Get receive timestamp from the skb */ | ||
| 2604 | if (priv->hwts_rx_en) { | ||
| 2605 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | ||
| 2606 | u64 *ns = (u64 *) skb->data; | ||
| 2607 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | ||
| 2608 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | ||
| 2609 | } | ||
| 2610 | |||
| 2611 | if (priv->padding) | ||
| 2612 | skb_pull(skb, priv->padding); | ||
| 2613 | |||
| 2476 | if (priv->rx_csum_enable) | 2614 | if (priv->rx_csum_enable) |
| 2477 | gfar_rx_checksum(skb, fcb); | 2615 | gfar_rx_checksum(skb, fcb); |
| 2478 | 2616 | ||
| @@ -2509,8 +2647,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2509 | bdp = rx_queue->cur_rx; | 2647 | bdp = rx_queue->cur_rx; |
| 2510 | base = rx_queue->rx_bd_base; | 2648 | base = rx_queue->rx_bd_base; |
| 2511 | 2649 | ||
| 2512 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + | 2650 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); |
| 2513 | priv->padding; | ||
| 2514 | 2651 | ||
| 2515 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { | 2652 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
| 2516 | struct sk_buff *newskb; | 2653 | struct sk_buff *newskb; |
| @@ -2533,13 +2670,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2533 | newskb = skb; | 2670 | newskb = skb; |
| 2534 | else if (skb) { | 2671 | else if (skb) { |
| 2535 | /* | 2672 | /* |
| 2536 | * We need to reset ->data to what it | 2673 | * We need to un-reserve() the skb to what it |
| 2537 | * was before gfar_new_skb() re-aligned | 2674 | * was before gfar_new_skb() re-aligned |
| 2538 | * it to an RXBUF_ALIGNMENT boundary | 2675 | * it to an RXBUF_ALIGNMENT boundary |
| 2539 | * before we put the skb back on the | 2676 | * before we put the skb back on the |
| 2540 | * recycle list. | 2677 | * recycle list. |
| 2541 | */ | 2678 | */ |
| 2542 | skb->data = skb->head + NET_SKB_PAD; | 2679 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); |
| 2543 | __skb_queue_head(&priv->rx_recycle, skb); | 2680 | __skb_queue_head(&priv->rx_recycle, skb); |
| 2544 | } | 2681 | } |
| 2545 | } else { | 2682 | } else { |
| @@ -2797,7 +2934,7 @@ static void adjust_link(struct net_device *dev) | |||
| 2797 | * whenever dev->flags is changed */ | 2934 | * whenever dev->flags is changed */ |
| 2798 | static void gfar_set_multi(struct net_device *dev) | 2935 | static void gfar_set_multi(struct net_device *dev) |
| 2799 | { | 2936 | { |
| 2800 | struct dev_mc_list *mc_ptr; | 2937 | struct netdev_hw_addr *ha; |
| 2801 | struct gfar_private *priv = netdev_priv(dev); | 2938 | struct gfar_private *priv = netdev_priv(dev); |
| 2802 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | 2939 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 2803 | u32 tempval; | 2940 | u32 tempval; |
| @@ -2870,13 +3007,12 @@ static void gfar_set_multi(struct net_device *dev) | |||
| 2870 | return; | 3007 | return; |
| 2871 | 3008 | ||
| 2872 | /* Parse the list, and set the appropriate bits */ | 3009 | /* Parse the list, and set the appropriate bits */ |
| 2873 | netdev_for_each_mc_addr(mc_ptr, dev) { | 3010 | netdev_for_each_mc_addr(ha, dev) { |
| 2874 | if (idx < em_num) { | 3011 | if (idx < em_num) { |
| 2875 | gfar_set_mac_for_addr(dev, idx, | 3012 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
| 2876 | mc_ptr->dmi_addr); | ||
| 2877 | idx++; | 3013 | idx++; |
| 2878 | } else | 3014 | } else |
| 2879 | gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); | 3015 | gfar_set_hash_for_addr(dev, ha->addr); |
| 2880 | } | 3016 | } |
| 2881 | } | 3017 | } |
| 2882 | 3018 | ||
