aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/freescale/fec_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 17
1 files changed, 6 insertions, 11 deletions
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4cbebf3d80eb..50bb71c663e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev);
98 * detected as not set during a prior frame transmission, then the
99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
101 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
102 * detected as not set during a prior frame transmission, then the
103 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
104 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
105 * frames not being transmitted until there is a 0-to-1 transition on
106 * ENET_TDAR[TDAR].
107 */
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
385 * data. 381 * data.
386 */ 382 */
387 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, 383 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
388 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 384 skb->len, DMA_TO_DEVICE);
389 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 385 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
390 bdp->cbd_bufaddr = 0; 386 bdp->cbd_bufaddr = 0;
391 fep->tx_skbuff[index] = NULL; 387 fep->tx_skbuff[index] = NULL;
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
432 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
433 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
434 430
431 skb_tx_timestamp(skb);
432
435 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
436 434
437 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
440 /* Trigger transmission start */ 438 /* Trigger transmission start */
441 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
442 440
443 skb_tx_timestamp(skb);
444
445 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
446} 442}
447 443
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev)
779 else 775 else
780 index = bdp - fep->tx_bd_base; 776 index = bdp - fep->tx_bd_base;
781 777
782 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
783 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
784 bdp->cbd_bufaddr = 0;
785
786 skb = fep->tx_skbuff[index]; 778 skb = fep->tx_skbuff[index];
779 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
780 DMA_TO_DEVICE);
781 bdp->cbd_bufaddr = 0;
787 782
788 /* Check for errors. */ 783 /* Check for errors. */
789 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 784 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |