Diffstat (limited to 'drivers/net/ethernet/freescale/fec.c')
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 82
1 file changed, 50 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index dd098ea44d48..cb5783d4b1e0 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct bufdesc *bdp;
+	unsigned int i;
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		if (bdp->cbd_bufaddr)
+			bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		else
+			bdp->cbd_sc = 0;
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* ...and the same for transmit */
+	bdp = fep->tx_bd_base;
+	fep->cur_tx = bdp;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		bdp->cbd_sc = 0;
+		if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+			dev_kfree_skb_any(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+		bdp->cbd_bufaddr = 0;
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp->cbd_sc |= BD_SC_WRAP;
+	fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change. This only happens when switching between half and full
  * duplex.
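The helper above walks both rings through fec_enet_get_nextdesc() and fec_enet_get_prevdesc(), passing fep->bufdesc_ex so each step strides by the right descriptor size. Those helpers are outside this diff; a minimal sketch of the forward variant, assuming struct bufdesc_ex begins with an embedded struct bufdesc, might look like:

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
{
	/* Advance by the extended stride when the ring holds
	 * extended descriptors, by the plain stride otherwise.
	 * Sketch only; the driver's real helper may differ. */
	if (is_ex)
		return (struct bufdesc *)((struct bufdesc_ex *)bdp + 1);
	return bdp + 1;
}

fec_enet_get_prevdesc() would be the mirror image, stepping back one descriptor of the same stride.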
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
 	/* Set maximum receive buffer size. */
 	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
+	fec_enet_bd_init(ndev);
+
 	/* Set receive and transmit descriptor base. */
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
 			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-	fep->cur_rx = fep->rx_bd_base;
 
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
 		if (fep->tx_skbuff[i]) {
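With these hunks, fec_enet_bd_init(ndev) runs inside fec_restart() before the descriptor base registers are programmed, so every restart (for example on a half/full duplex switch) presents the controller with a freshly initialized ring. The fep->bufdesc_ex branch elided above presumably mirrors the visible writel() with the extended descriptor size; a sketch under that assumption:

	/* TX ring follows RX_RING_SIZE RX descriptors in the same
	 * DMA region; the stride depends on the descriptor format
	 * in use (sketch, not this patch's context). */
	if (fep->bufdesc_ex)
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
	else
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);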
@@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
-	struct bufdesc *bdp;
-	unsigned int i;
 
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
+	memset(cbd_base, 0, PAGE_SIZE);
 	spin_lock_init(&fep->hw_lock);
 
 	fep->netdev = ndev;
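The added memset() is what makes the cbd_bufaddr test in fec_enet_bd_init() safe: on the first fec_restart() call the descriptor page has never been populated, and without zeroing it a stale value could be mistaken for a mapped RX buffer and marked BD_ENET_RX_EMPTY. For illustration, the allocation and clear could equally be collapsed into the kernel's zeroing wrapper, assuming it is available in the target tree:

	/* Equivalent to dma_alloc_coherent() followed by
	 * memset(cbd_base, 0, PAGE_SIZE); illustration only. */
	cbd_base = dma_zalloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
				       GFP_KERNEL);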
@@ -1631,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-	/* Initialize the receive buffer descriptors. */
-	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
-
-		/* Initialize the BD for every fragment in the page. */
-		bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-	}
-
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-	bdp->cbd_sc |= BD_SC_WRAP;
-
-	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
-
-		/* Initialize the BD for every fragment in the page. */
-		bdp->cbd_sc = 0;
-		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-	}
-
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-	bdp->cbd_sc |= BD_SC_WRAP;
-	fep->dirty_tx = bdp;
-
 	fec_restart(ndev, 0);
 
 	return 0;
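After this final hunk, fec_enet_init() no longer touches ring state at all; the empty/wrap bits and the cur_rx/cur_tx/dirty_tx pointers are rebuilt by fec_enet_bd_init() on every restart, and in-flight TX skbs are released rather than leaked. A hypothetical debug helper (not part of the patch) that prints the RX-ring post-condition the new code establishes:

static void fec_dump_rx_ring(struct fec_enet_private *fep)
{
	struct bufdesc *bdp = fep->rx_bd_base;
	unsigned int i;

	/* Expected after fec_enet_bd_init(): populated BDs read
	 * BD_ENET_RX_EMPTY, unpopulated BDs read 0, and only the
	 * final BD carries BD_SC_WRAP. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		pr_debug("rx bd %u: sc=0x%04x buf=0x%08lx\n", i,
			 (unsigned int)bdp->cbd_sc,
			 (unsigned long)bdp->cbd_bufaddr);
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
	}
}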