author		Sascha Hauer <s.hauer@pengutronix.de>	2009-04-14 21:32:24 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-16 05:34:53 -0400
commit		f0b3fbeae11a526c3d308b691684589ee37c359b
tree		f6a40339d96ad7921a4969a100e272194e08d0d2 /drivers/net/fec.c
parent		009fda83ee2f38e5deb9d62fc54a904a92645fe4
FEC Buffer rework

Allocate buffers in fec_open and free them again in fec_close. This
makes it possible to use this driver as a module.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
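The pattern adopted here is the usual one for making a driver safe to load and unload as a module: do all per-ring buffer allocation in the ndo_open hook and undo it completely in ndo_stop, so each open/close cycle leaves nothing behind. Below is a minimal sketch of that lifecycle, independent of fec.c; my_priv, my_open, my_stop, MY_RING_SIZE and MY_BUFSIZE are illustrative names, not symbols from this driver.

/* Hedged sketch only: alloc-in-open / free-in-close with streaming
 * DMA mappings, the same shape this commit gives the FEC rings.
 * All my_* names and sizes are hypothetical, not from fec.c. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

#define MY_RING_SIZE	16
#define MY_BUFSIZE	2048

struct my_priv {
	struct sk_buff	*rx_skb[MY_RING_SIZE];
	dma_addr_t	rx_dma[MY_RING_SIZE];
};

static int my_open(struct net_device *dev)	/* ndo_open */
{
	struct my_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MY_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(MY_BUFSIZE);

		if (!skb)
			goto err;	/* unwind the partial ring */
		priv->rx_skb[i] = skb;
		/* Buffer belongs to the device until unmapped */
		priv->rx_dma[i] = dma_map_single(&dev->dev, skb->data,
						 MY_BUFSIZE, DMA_FROM_DEVICE);
	}
	return 0;
err:
	while (--i >= 0) {
		dma_unmap_single(&dev->dev, priv->rx_dma[i],
				 MY_BUFSIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[i]);
	}
	return -ENOMEM;
}

static int my_stop(struct net_device *dev)	/* ndo_stop */
{
	struct my_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MY_RING_SIZE; i++) {
		dma_unmap_single(&dev->dev, priv->rx_dma[i],
				 MY_BUFSIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(priv->rx_skb[i]);
	}
	return 0;
}

The diff below follows the same approach, and because the buffers now come from the skb allocator instead of pages owned permanently by the driver, it also replaces the old dma_sync_single() calls with dma_map_single()/dma_unmap_single() pairs.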
Diffstat (limited to 'drivers/net/fec.c')
-rw-r--r--	drivers/net/fec.c	139
1 file changed, 88 insertions(+), 51 deletions(-)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 54ee493768f1..54d6f86d9f6d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -172,6 +172,7 @@ struct fec_enet_private {
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 	unsigned char *tx_bounce[TX_RING_SIZE];
 	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct	sk_buff* rx_skbuff[RX_RING_SIZE];
 	ushort	skb_cur;
 	ushort	skb_dirty;
 
@@ -335,8 +336,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
-	dma_sync_single(NULL, bdp->cbd_bufaddr,
-			bdp->cbd_datlen, DMA_TO_DEVICE);
+	bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
@@ -429,7 +430,11 @@ fec_enet_tx(struct net_device *dev)
 	bdp = fep->dirty_tx;
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+		if (bdp == fep->cur_tx && fep->tx_full == 0)
+			break;
+
+		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;
 
 		skb = fep->tx_skbuff[fep->skb_dirty];
 		/* Check for errors. */
@@ -553,8 +558,8 @@ fec_enet_rx(struct net_device *dev)
 		dev->stats.rx_bytes += pkt_len;
 		data = (__u8*)__va(bdp->cbd_bufaddr);
 
-		dma_sync_single(NULL, (unsigned long)__pa(data),
-				pkt_len - 4, DMA_FROM_DEVICE);
+		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
+				DMA_FROM_DEVICE);
 
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
@@ -574,6 +579,9 @@ fec_enet_rx(struct net_device *dev)
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 		}
+
+		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
+			DMA_FROM_DEVICE);
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -1398,15 +1406,86 @@ mii_link_interrupt(int irq, void * dev_id)
 }
 #endif
 
+static void fec_enet_free_buffers(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	int i;
+	struct sk_buff *skb;
+	struct bufdesc	*bdp;
+
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = fep->rx_skbuff[i];
+
+		if (bdp->cbd_bufaddr)
+			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+		if (skb)
+			dev_kfree_skb(skb);
+		bdp++;
+	}
+
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++)
+		kfree(fep->tx_bounce[i]);
+}
+
+static int fec_enet_alloc_buffers(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	int i;
+	struct sk_buff *skb;
+	struct bufdesc	*bdp;
+
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+		if (!skb) {
+			fec_enet_free_buffers(dev);
+			return -ENOMEM;
+		}
+		fep->rx_skbuff[i] = skb;
+
+		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	return 0;
+}
+
 static int
 fec_enet_open(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	int ret;
 
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
 
+	ret = fec_enet_alloc_buffers(dev);
+	if (ret)
+		return ret;
+
 	fep->sequence_done = 0;
 	fep->link = 0;
 
@@ -1453,6 +1532,8 @@ fec_enet_close(struct net_device *dev)
 	netif_stop_queue(dev);
 	fec_stop(dev);
 
+	fec_enet_free_buffers(dev);
+
 	return 0;
 }
 
@@ -1575,9 +1656,8 @@ static const struct net_device_ops fec_netdev_ops = {
 int __init fec_enet_init(struct net_device *dev, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
-	unsigned long	mem_addr;
-	struct bufdesc *bdp, *cbd_base;
-	int	i, j;
+	struct bufdesc *cbd_base;
+	int i;
 
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1615,49 +1695,6 @@ int __init fec_enet_init(struct net_device *dev, int index)
 	fep->rx_bd_base = cbd_base;
 	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
-	/* Initialize the receive buffer descriptors. */
-	bdp = fep->rx_bd_base;
-	for (i=0; i<FEC_ENET_RX_PAGES; i++) {
-
-		/* Allocate a page */
-		mem_addr = __get_free_page(GFP_KERNEL);
-		/* XXX: missing check for allocation failure */
-
-		/* Initialize the BD for every fragment in the page */
-		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
-			bdp->cbd_sc = BD_ENET_RX_EMPTY;
-			bdp->cbd_bufaddr = __pa(mem_addr);
-			mem_addr += FEC_ENET_RX_FRSIZE;
-			bdp++;
-		}
-	}
-
-	/* Set the last buffer to wrap */
-	bdp--;
-	bdp->cbd_sc |= BD_SC_WRAP;
-
-	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
-		if (j >= FEC_ENET_TX_FRPPG) {
-			mem_addr = __get_free_page(GFP_KERNEL);
-			j = 1;
-		} else {
-			mem_addr += FEC_ENET_TX_FRSIZE;
-			j++;
-		}
-		fep->tx_bounce[i] = (unsigned char *) mem_addr;
-
-		/* Initialize the BD for every fragment in the page */
-		bdp->cbd_sc = 0;
-		bdp->cbd_bufaddr = 0;
-		bdp++;
-	}
-
-	/* Set the last buffer to wrap */
-	bdp--;
-	bdp->cbd_sc |= BD_SC_WRAP;
-
 #ifdef HAVE_mii_link_interrupt
 	fec_request_mii_intr(dev);
 #endif