author     Stephen Hemminger <shemminger@vyatta.com>   2008-05-30 12:49:56 -0400
committer  Jeff Garzik <jgarzik@redhat.com>            2008-05-30 22:18:00 -0400
commit     9ded65a1d79a2ca9aa44ee0989fd7924304c31cc
tree       deb7bd14f58ce7cc95472f588cf654070ceb789b
parent     93e16847c9db0093065c98063cfc639cdfccf19a
tlan: manage rx allocation failure better
Rx allocation failure at runtime is non-fatal: for a normal Rx frame the driver just reuses the existing buffer, and during setup it simply continues with a smaller receive buffer pool.

Compile tested only.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
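For illustration only, a minimal userspace sketch of the two recovery paths the message describes: recycle the current buffer when a runtime allocation fails, and settle for a smaller pool when a setup-time allocation fails. The names rx_slot, rx_ring_fill and rx_refill are hypothetical, and plain malloc() stands in for netdev_alloc_skb(); nothing below is code from tlan.c.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8
#define BUF_SIZE  1536

struct rx_slot {
	void *buf;		/* NULL marks a slot that never got a buffer */
};

/* Setup path: stop early on allocation failure and run with a smaller
 * receive pool instead of treating the failure as fatal.  Returns the
 * number of slots actually filled. */
static int rx_ring_fill(struct rx_slot *ring, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].buf = malloc(BUF_SIZE);
		if (!ring[i].buf)
			break;			/* smaller pool, not an error */
	}
	for (int j = i; j < n; j++)		/* clear the unused tail */
		ring[j].buf = NULL;
	return i;
}

/* Runtime path: hand the received buffer up only if a replacement can be
 * allocated; otherwise keep the old buffer in place and drop this frame. */
static void rx_refill(struct rx_slot *slot, void (*deliver)(void *buf))
{
	void *new_buf = malloc(BUF_SIZE);

	if (!new_buf)
		return;				/* recycle slot->buf as-is */

	deliver(slot->buf);			/* ownership passes up the stack */
	slot->buf = new_buf;
}

static void consume(void *buf)
{
	free(buf);				/* stand-in for netif_rx() */
}

int main(void)
{
	struct rx_slot ring[RING_SIZE];
	int live = rx_ring_fill(ring, RING_SIZE);

	printf("rx ring running with %d of %d buffers\n", live, RING_SIZE);

	for (int i = 0; i < live; i++)
		rx_refill(&ring[i], consume);

	for (int i = 0; i < live; i++)		/* teardown */
		free(ring[i].buf);
	return 0;
}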
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tlan.c  63
1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index aee2a458adc3..afc831002db2 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1539,8 +1539,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 	TLanList	*head_list;
 	struct sk_buff	*skb;
 	TLanList	*tail_list;
-	void		*t;
-	u32		frameSize;
 	u16		tmpCStat;
 	dma_addr_t	head_list_phys;
 
@@ -1549,40 +1547,34 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 	head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
 
 	while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		frameSize = head_list->frameSize;
+		dma_addr_t frameDma = head_list->buffer[0].address;
+		u32 frameSize = head_list->frameSize;
 		ack++;
 		if (tmpCStat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
 		if (bbuf) {
-			skb = dev_alloc_skb(frameSize + 7);
-			if (skb == NULL)
-				printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n");
-			else {
+			skb = netdev_alloc_skb(dev, frameSize + 7);
+			if ( skb ) {
 				head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
 				skb_reserve(skb, 2);
-				t = (void *) skb_put(skb, frameSize);
-
-				dev->stats.rx_bytes += head_list->frameSize;
+				pci_dma_sync_single_for_cpu(priv->pciDev,
+							    frameDma, frameSize,
+							    PCI_DMA_FROMDEVICE);
+				skb_copy_from_linear_data(skb, head_buffer, frameSize);
+				skb_put(skb, frameSize);
+				dev->stats.rx_bytes += frameSize;
 
-				memcpy( t, head_buffer, frameSize );
 				skb->protocol = eth_type_trans( skb, dev );
 				netif_rx( skb );
 			}
 		} else {
 			struct sk_buff *new_skb;
 
-			/*
-			 * I changed the algorithm here. What we now do
-			 * is allocate the new frame. If this fails we
-			 * simply recycle the frame.
-			 */
-
-			new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
-
-			if ( new_skb != NULL ) {
+			new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+			if ( new_skb ) {
 				skb = TLan_GetSKB(head_list);
-				pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+				pci_unmap_single(priv->pciDev, frameDma, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 				skb_put( skb, frameSize );
 
 				dev->stats.rx_bytes += frameSize;
@@ -1590,12 +1582,12 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 				skb->protocol = eth_type_trans( skb, dev );
 				netif_rx( skb );
 
-				skb_reserve( new_skb, 2 );
+				skb_reserve( new_skb, NET_IP_ALIGN );
 				head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
 				TLan_StoreSKB(head_list, new_skb);
-			} else
-				printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
+			}
+
 		}
 
 		head_list->forward = 0;
@@ -1994,24 +1986,27 @@ static void TLan_ResetLists( struct net_device *dev )
 		if ( bbuf ) {
 			list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE );
 		} else {
-			skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
-			if ( skb == NULL ) {
+			skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+			if ( !skb ) {
 				printk( "TLAN: Couldn't allocate memory for received data.\n" );
-				/* If this ever happened it would be a problem */
-			} else {
-				skb->dev = dev;
-				skb_reserve( skb, 2 );
+				break;
 			}
+
+			skb_reserve( skb, NET_IP_ALIGN );
 			list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 			TLan_StoreSKB(list, skb);
 		}
 		list->buffer[1].count = 0;
 		list->buffer[1].address = 0;
-		if ( i < TLAN_NUM_RX_LISTS - 1 )
-			list->forward = list_phys + sizeof(TLanList);
-		else
-			list->forward = 0;
+		list->forward = list_phys + sizeof(TLanList);
+	}
+
+	/* in case ran out of memory early, clear bits */
+	while (i < TLAN_NUM_RX_LISTS) {
+		TLan_StoreSKB(priv->rxList + i, NULL);
+		++i;
 	}
+	list->forward = 0;
 
 } /* TLan_ResetLists */
 