author    Manfred Rudigier <Manfred.Rudigier@omicron.at>  2010-04-08 19:10:35 -0400
committer David S. Miller <davem@davemloft.net>           2010-04-13 04:41:31 -0400
commit    f0ee7acfcdd4169cee2fefc630de72deb5bc34b9 (patch)
tree      161b16941b8c11023a9fb24324f22463dd16f163
parent    cc772ab7cdcaa24d1fae332d92a1602788644f7a (diff)
gianfar: Add hardware TX timestamping support
If a packet has the skb_shared_tx->hardware flag set, the device is instructed to generate a TX timestamp and write it back to memory after the frame is transmitted. During the clean_tx_ring operation the timestamp is extracted and copied into the skb_shared_hwtstamps struct of the skb. TX timestamping is enabled by setting tx_type to something other than HWTSTAMP_TX_OFF with the SIOCSHWTSTAMP ioctl command. It is only supported by eTSEC devices.

Signed-off-by: Manfred Rudigier <manfred.rudigier@omicron.at>
Signed-off-by: David S. Miller <davem@davemloft.net>
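For context, the TX timestamping added here is switched on from userspace through the standard SIOCSHWTSTAMP request on the interface. The following is a minimal, illustrative sketch only, not part of this patch; the interface name "eth0" is an assumption. It sets tx_type to HWTSTAMP_TX_ON, which the driver code below rejects with -ERANGE on devices without the eTSEC timer:

/*
 * Illustrative userspace sketch: enable hardware TX timestamping via
 * SIOCSHWTSTAMP. The interface name is an assumption; the request
 * requires CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* anything other than HWTSTAMP_TX_OFF */
	cfg.rx_filter = HWTSTAMP_FILTER_NONE;	/* leave RX timestamping disabled */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");	/* -ERANGE if no eTSEC timer is present */
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Once enabled, an application requests timestamps per socket with SO_TIMESTAMPING and reads them back from the socket error queue (MSG_ERRQUEUE); that delivery path is generic kernel code and is not touched by this patch.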
-rw-r--r--  drivers/net/gianfar.c | 118
-rw-r--r--  drivers/net/gianfar.h |   3
2 files changed, 101 insertions, 20 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d102484c4b36..032073d1e3d2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -795,8 +795,18 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
 	if (config.flags)
 		return -EINVAL;
 
-	if (config.tx_type)
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		priv->hwts_tx_en = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+			return -ERANGE;
+		priv->hwts_tx_en = 1;
+		break;
+	default:
 		return -ERANGE;
+	}
 
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
@@ -1972,23 +1982,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct gfar __iomem *regs = NULL;
 	struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *txbdp_start, *base;
+	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
 	u32 lstatus;
-	int i, rq = 0;
+	int i, rq = 0, do_tstamp = 0;
 	u32 bufaddr;
 	unsigned long flags;
-	unsigned int nr_frags, length;
-
+	unsigned int nr_frags, nr_txbds, length;
+	union skb_shared_tx *shtx;
 
 	rq = skb->queue_mapping;
 	tx_queue = priv->tx_queue[rq];
 	txq = netdev_get_tx_queue(dev, rq);
 	base = tx_queue->tx_bd_base;
 	regs = tx_queue->grp->regs;
+	shtx = skb_tx(skb);
+
+	/* check if time stamp should be generated */
+	if (unlikely(shtx->hardware && priv->hwts_tx_en))
+		do_tstamp = 1;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
+			(priv->vlgrp && vlan_tx_tag_present(skb)) ||
+			unlikely(do_tstamp)) &&
 			(skb_headroom(skb) < GMAC_FCB_LEN)) {
 		struct sk_buff *skb_new;
 
@@ -2005,8 +2021,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* calculate the required number of TxBDs for this skb */
+	if (unlikely(do_tstamp))
+		nr_txbds = nr_frags + 2;
+	else
+		nr_txbds = nr_frags + 1;
+
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > tx_queue->num_txbdfree) {
+	if (nr_txbds > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_tx_stop_queue(txq);
 		dev->stats.tx_fifo_errors++;
@@ -2018,9 +2040,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq->tx_packets ++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
+	lstatus = txbdp->lstatus;
+
+	/* Time stamp insertion requires one additional TxBD */
+	if (unlikely(do_tstamp))
+		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+				tx_queue->tx_ring_size);
 
 	if (nr_frags == 0) {
-		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		if (unlikely(do_tstamp))
+			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+					TXBD_INTERRUPT);
+		else
+			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
@@ -2066,11 +2098,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gfar_tx_vlan(skb, fcb);
 	}
 
-	/* setup the TxBD length and buffer pointer for the first BD */
+	/* Setup tx hardware time stamping if requested */
+	if (unlikely(do_tstamp)) {
+		shtx->in_progress = 1;
+		if (fcb == NULL)
+			fcb = gfar_add_fcb(skb);
+		fcb->ptp = 1;
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
-	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	/*
+	 * If time stamping is requested one additional TxBD must be set up. The
+	 * first TxBD points to the FCB and must have a data length of
+	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+	 * the full frame length.
+	 */
+	if (unlikely(do_tstamp)) {
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+				(skb_headlen(skb) - GMAC_FCB_LEN);
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+	} else {
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	}
 
 	/*
 	 * We can work in parallel with gfar_clean_tx_ring(), except
@@ -2110,7 +2163,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	tx_queue->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_txbds);
 
 	dev->trans_start = jiffies;
 
@@ -2301,16 +2354,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct txbd8 *bdp;
+	struct txbd8 *bdp, *next = NULL;
 	struct txbd8 *lbdp = NULL;
 	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
 	int tx_ring_size = tx_queue->tx_ring_size;
-	int frags = 0;
+	int frags = 0, nr_txbds = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
+	size_t buflen;
+	union skb_shared_tx *shtx;
 
 	rx_queue = priv->rx_queue[tx_queue->qindex];
 	bdp = tx_queue->dirty_tx;
@@ -2320,7 +2375,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		unsigned long flags;
 
 		frags = skb_shinfo(skb)->nr_frags;
-		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+		/*
+		 * When time stamping, one additional TxBD must be freed.
+		 * Also, we need to dma_unmap_single() the TxPAL.
+		 */
+		shtx = skb_tx(skb);
+		if (unlikely(shtx->in_progress))
+			nr_txbds = frags + 2;
+		else
+			nr_txbds = frags + 1;
+
+		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
 		lstatus = lbdp->lstatus;
 
@@ -2329,10 +2395,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 				(lstatus & BD_LENGTH_MASK))
 			break;
 
-		dma_unmap_single(&priv->ofdev->dev,
-				bdp->bufPtr,
-				bdp->length,
-				DMA_TO_DEVICE);
+		if (unlikely(shtx->in_progress)) {
+			next = next_txbd(bdp, base, tx_ring_size);
+			buflen = next->length + GMAC_FCB_LEN;
+		} else
+			buflen = bdp->length;
+
+		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+				buflen, DMA_TO_DEVICE);
+
+		if (unlikely(shtx->in_progress)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+			skb_tstamp_tx(skb, &shhwtstamps);
+			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			bdp = next;
+		}
 
 		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
 		bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2364,7 +2444,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		howmany++;
 		spin_lock_irqsave(&tx_queue->txlock, flags);
-		tx_queue->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += nr_txbds;
 		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 1ea287cba231..ac4a92e08c09 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -540,7 +540,7 @@ struct txbd8
 
 struct txfcb {
 	u8 flags;
-	u8 reserved;
+	u8 ptp;		/* Flag to enable tx timestamping */
 	u8 l4os;	/* Level 4 Header Offset */
 	u8 l3os;	/* Level 3 Header Offset */
 	u16 phcs;	/* Pseudo-header Checksum */
@@ -1105,6 +1105,7 @@ struct gfar_private {
 
 	/* HW time stamping enabled flag */
 	int hwts_rx_en;
+	int hwts_tx_en;
 };
 
 extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];