Diffstat (limited to 'drivers')

-rw-r--r--  drivers/net/gianfar.c | 100
1 file changed, 65 insertions(+), 35 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b917616bf6f1..99a4b990939f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -98,7 +98,6 @@
 #include "gianfar_mii.h"
 
 #define TX_TIMEOUT (1*HZ)
-#define SKB_ALLOC_TIMEOUT 1000000
 #undef BRIEF_GFAR_ERRORS
 #undef VERBOSE_GFAR_ERRORS
 
@@ -115,7 +114,9 @@ static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+struct sk_buff *gfar_new_skb(struct net_device *dev);
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -783,14 +784,21 @@ int startup_gfar(struct net_device *dev)
 
 	rxbdp = priv->rx_bd_base;
 	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = NULL;
+		struct sk_buff *skb;
 
-		rxbdp->status = 0;
+		skb = gfar_new_skb(dev);
 
-		skb = gfar_new_skb(dev, rxbdp);
+		if (!skb) {
+			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
+					dev->name);
+
+			goto err_rxalloc_fail;
+		}
 
 		priv->rx_skbuff[i] = skb;
 
+		gfar_new_rxbdp(dev, rxbdp, skb);
+
 		rxbdp++;
 	}
 
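
For context on the new setup flow above (an illustration only, not part of the commit): startup_gfar() now allocates every RX buffer first and bails out through the error labels if any allocation fails, instead of arming a descriptor before its buffer exists. The standalone sketch below models that fail-fast loop in plain C; ring_entry, RING_SIZE, arm_bd and the 1536-byte size are invented stand-ins for the skb and buffer-descriptor machinery.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct ring_entry {
	void *buf;	/* stands in for the skb attached to a descriptor */
	int armed;	/* stands in for RXBD_EMPTY being set for hardware */
};

static void arm_bd(struct ring_entry *e)
{
	e->armed = 1;	/* only armed once a buffer actually exists */
}

static int setup_ring(struct ring_entry *ring)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].buf = malloc(1536);
		if (!ring[i].buf)
			goto err_rxalloc_fail;	/* fail fast, no busy retry */
		arm_bd(&ring[i]);
	}
	return 0;

err_rxalloc_fail:
	/* undo only the allocations that already succeeded */
	while (--i >= 0)
		free(ring[i].buf);
	return -1;
}

int main(void)
{
	struct ring_entry ring[RING_SIZE];
	int i;

	if (setup_ring(ring)) {
		printf("setup failed\n");
		return 1;
	}
	printf("setup ok\n");
	for (i = 0; i < RING_SIZE; i++)
		free(ring[i].buf);
	return 0;
}
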
@@ -916,6 +924,7 @@ rx_irq_fail:
 tx_irq_fail:
 	free_irq(priv->interruptError, dev);
 err_irq_fail:
+err_rxalloc_fail:
 rx_skb_fail:
 	free_skb_resources(priv);
 tx_skb_fail:
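
The new err_rxalloc_fail: label slots into the driver's stacked cleanup labels, where each failure point jumps to the label that undoes only what was already set up. A minimal standalone illustration of that pattern follows; the resources and label names here are invented, not the driver's.

#include <stdio.h>
#include <stdlib.h>

static int bring_up(void)
{
	void *ring = NULL, *buffers = NULL;

	ring = malloc(256);
	if (!ring)
		goto err_ring_fail;	/* nothing to undo yet */

	buffers = malloc(4096);
	if (!buffers)
		goto err_buf_fail;	/* ring exists, so fall through frees it */

	printf("up\n");
	free(buffers);
	free(ring);
	return 0;

err_buf_fail:
	free(ring);
err_ring_fail:
	return -1;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}
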
@@ -1328,18 +1337,37 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+		struct sk_buff *skb)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	u32 * status_len = (u32 *)bdp;
+	u16 flags;
+
+	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
+			priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+	flags = RXBD_EMPTY | RXBD_INTERRUPT;
+
+	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+		flags |= RXBD_WRAP;
+
+	eieio();
+
+	*status_len = (u32)flags << 16;
+}
+
+
+struct sk_buff * gfar_new_skb(struct net_device *dev)
 {
 	unsigned int alignamount;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
-	unsigned int timeout = SKB_ALLOC_TIMEOUT;
 
 	/* We have to allocate the skb, so keep trying till we succeed */
-	while ((!skb) && timeout--)
-		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 
-	if (NULL == skb)
+	if (!skb)
 		return NULL;
 
 	alignamount = RXBUF_ALIGNMENT -
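
The new gfar_new_rxbdp() above arms a descriptor with a single 32-bit store: the flags land in the 16-bit status field and the 16-bit length field is cleared in the same write, since the descriptor is big-endian with status in the upper half on this hardware. The standalone sketch below shows only the arithmetic; the flag values and the is_last_bd stand-in are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define RXBD_EMPTY     0x8000u
#define RXBD_WRAP      0x2000u
#define RXBD_INTERRUPT 0x1000u

int main(void)
{
	uint16_t flags = RXBD_EMPTY | RXBD_INTERRUPT;
	int is_last_bd = 1;		/* pretend this is the ring's last entry */
	uint32_t status_len;

	if (is_last_bd)
		flags |= RXBD_WRAP;	/* last descriptor wraps back to entry 0 */

	/* One write covers both 16-bit fields: status gets the flags and
	 * length is cleared to 0 in the same store. */
	status_len = (uint32_t)flags << 16;

	printf("status = 0x%04x, length = %u\n",
	       (unsigned)(status_len >> 16), (unsigned)(status_len & 0xffff));
	return 0;
}
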
@@ -1350,15 +1378,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	 */
 	skb_reserve(skb, alignamount);
 
-	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
-			priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-	bdp->length = 0;
-
-	/* Mark the buffer empty */
-	eieio();
-	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
-
 	return skb;
 }
 
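
gfar_new_skb() still over-allocates by RXBUF_ALIGNMENT and uses skb_reserve() to push skb->data up to the next alignment boundary; only the descriptor bookkeeping moved out into gfar_new_rxbdp(). Below is a standalone sketch of that alignment calculation, with malloc standing in for the skb and 64 as an assumed alignment value.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define RXBUF_ALIGNMENT 64u

int main(void)
{
	size_t want = 1536;
	unsigned char *raw = malloc(want + RXBUF_ALIGNMENT);
	unsigned int alignamount;
	unsigned char *data;

	if (!raw)
		return 1;

	/* How far past the previous boundary does the buffer start?
	 * Reserve the complement so "data" lands on the next boundary. */
	alignamount = RXBUF_ALIGNMENT -
		(((uintptr_t)raw) & (RXBUF_ALIGNMENT - 1));
	data = raw + alignamount;

	printf("raw=%p data=%p (offset %u, aligned: %s)\n",
	       (void *)raw, (void *)data, alignamount,
	       ((uintptr_t)data % RXBUF_ALIGNMENT) ? "no" : "yes");
	free(raw);
	return 0;
}
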
@@ -1544,10 +1563,31 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	bdp = priv->cur_rx;
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+		struct sk_buff *newskb;
 		rmb();
+
+		/* Add another skb for the future */
+		newskb = gfar_new_skb(dev);
+
 		skb = priv->rx_skbuff[priv->skb_currx];
 
-		if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
+		/* We drop the frame if we failed to allocate a new buffer */
+		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
+				bdp->status & RXBD_ERR)) {
+			count_errors(bdp->status, dev);
+
+			if (unlikely(!newskb))
+				newskb = skb;
+
+			if (skb) {
+				dma_unmap_single(&priv->dev->dev,
+						bdp->bufPtr,
+						priv->rx_buffer_size,
+						DMA_FROM_DEVICE);
+
+				dev_kfree_skb_any(skb);
+			}
+		} else {
 			/* Increment the number of packets */
 			dev->stats.rx_packets++;
 			howmany++;
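
The rewritten RX loop above allocates the replacement buffer before the received frame is passed up; if that allocation fails, the frame is dropped and its old buffer is recycled, so the ring never ends up with an empty slot. A standalone sketch of that refill policy follows; rx_slot, fill_slot and deliver are invented names, not driver symbols.

#include <stdio.h>
#include <stdlib.h>

struct rx_slot {
	void *buf;
};

static void deliver(void *buf)
{
	/* stand-in for handing the frame to the network stack */
	printf("delivered frame in buffer %p\n", buf);
	free(buf);
}

static void fill_slot(struct rx_slot *slot, size_t size)
{
	void *newbuf = malloc(size);

	if (!newbuf) {
		/* Allocation failed: keep the old buffer, drop this frame. */
		printf("dropping frame, recycling %p\n", slot->buf);
		return;
	}

	deliver(slot->buf);	/* old buffer goes up the stack */
	slot->buf = newbuf;	/* ring slot immediately has a fresh buffer */
}

int main(void)
{
	struct rx_slot slot = { malloc(1536) };

	if (!slot.buf)
		return 1;
	fill_slot(&slot, 1536);
	free(slot.buf);
	return 0;
}
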
@@ -1558,23 +1598,14 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 			gfar_process_frame(dev, skb, pkt_len);
 
 			dev->stats.rx_bytes += pkt_len;
-		} else {
-			count_errors(bdp->status, dev);
-
-			if (skb)
-				dev_kfree_skb_any(skb);
-
-			priv->rx_skbuff[priv->skb_currx] = NULL;
 		}
 
 		dev->last_rx = jiffies;
 
-		/* Clear the status flags for this buffer */
-		bdp->status &= ~RXBD_STATS;
+		priv->rx_skbuff[priv->skb_currx] = newskb;
 
-		/* Add another skb for the future */
-		skb = gfar_new_skb(dev, bdp);
-		priv->rx_skbuff[priv->skb_currx] = skb;
+		/* Setup the new bdp */
+		gfar_new_rxbdp(dev, bdp, newskb);
 
 		/* Update to the next pointer */
 		if (bdp->status & RXBD_WRAP)
@@ -1584,9 +1615,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		/* update to point at the next skb */
 		priv->skb_currx =
-			(priv->skb_currx +
-			1) & RX_RING_MOD_MASK(priv->rx_ring_size);
-
+			(priv->skb_currx + 1) &
+			RX_RING_MOD_MASK(priv->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
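
The reformatted index update above is the usual power-of-two ring advance: (i + 1) masked with size - 1 both increments and wraps in one step, which is what RX_RING_MOD_MASK() evaluates to for a power-of-two ring size (an assumption of this sketch). A tiny standalone demo:

#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two for the mask to work */

int main(void)
{
	unsigned int i = 0, step;

	for (step = 0; step < 10; step++) {
		printf("%u ", i);
		i = (i + 1) & (RING_SIZE - 1);	/* advance and wrap */
	}
	printf("\n");	/* prints 0..7, then wraps back to 0 1 */
	return 0;
}
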