author    Sakari Ailus <sakari.ailus@iki.fi>    2008-12-16 18:24:05 -0500
committer David S. Miller <davem@davemloft.net> 2008-12-16 18:24:05 -0500
commit    5eeabf5150878018d7c7092042f3b681f5b554b5 (patch)
tree      b32a05d76586339b9384f8cb73d3f746d6e55541
parent    354ade9058687fdef8a612c7b298d4c51dae3da7 (diff)
tlan: Remove broken support for big buffers
The big rx/tx buffer support is broken and unlikely to be very useful
as such. Remove it.

Signed-off-by: Sakari Ailus <sakari.ailus@iki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/tlan.c | 211 ++++++++++++------------------
 1 file changed, 77 insertions(+), 134 deletions(-)
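The removed bbuf mode copied every frame between the skb and a slot in one
large block of coherent DMA memory, while the surviving path streaming-maps
each skb's own data. Note also that the copy in the removed RX path
(skb_copy_from_linear_data(skb, head_buffer, frameSize), below) appears to
copy out of the freshly allocated skb rather than into it, consistent with
the changelog's "broken". A sketch of the two TX strategies, condensed from
the hunks that follow; illustration only, not verbatim driver code:

/* Removed "big buffer" TX path: copy into a fixed per-ring-slot buffer
 * inside one coherent allocation, then free the skb immediately.
 */
tail_buffer = priv->txBuffer + (priv->txTail * TLAN_MAX_FRAME_SIZE);
skb_copy_from_linear_data(skb, tail_buffer, txlen);     /* extra memcpy */
dev_kfree_skb_any(skb);

/* Surviving zero-copy TX path: DMA-map the skb data in place and keep
 * the skb alive until TLan_HandleTxEOF() unmaps and frees it.
 */
tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data,
                                              txlen, PCI_DMA_TODEVICE);
TLan_StoreSKB(tail_list, skb);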
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 369eec744188..85ef8b744557 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -163,6 +163,11 @@
  *    v1.15 Apr 4, 2002    - Correct operation when aui=1 to be
  *                           10T half duplex no loopback
  *                           Thanks to Gunnar Eikman
+ *
+ *    Sakari Ailus <sakari.ailus@iki.fi>:
+ *
+ *    v1.15a Dec 15 2008   - Remove bbuf support, it doesn't work anyway.
+ *
  *******************************************************************************/
 
 #include <linux/module.h>
@@ -213,12 +218,8 @@ static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
 
-static int bbuf;
-module_param(bbuf, int, 0);
-MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
-
 static const char TLanSignature[] = "TLAN";
-static const char tlan_banner[] = "ThunderLAN driver v1.15\n";
+static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
 static int tlan_have_pci;
 static int tlan_have_eisa;
 
@@ -859,13 +860,8 @@ static int TLan_Init( struct net_device *dev )
 
         priv = netdev_priv(dev);
 
-        if ( bbuf ) {
-                dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
-                        * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
-        } else {
-                dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
-                        * ( sizeof(TLanList) );
-        }
+        dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+                * ( sizeof(TLanList) );
         priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
                                                 dma_size, &priv->dmaStorageDMA);
         priv->dmaSize = dma_size;
@@ -881,16 +877,6 @@ static int TLan_Init( struct net_device *dev )
         priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
         priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
 
-        if ( bbuf ) {
-                priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
-                priv->rxBufferDMA =priv->txListDMA
-                        + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
-                priv->txBuffer = priv->rxBuffer
-                        + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
-                priv->txBufferDMA = priv->rxBufferDMA
-                        + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
-        }
-
         err = 0;
         for ( i = 0; i < 6 ; i++ )
                 err |= TLan_EeReadByte( dev,
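With the per-frame buffers gone, the coherent allocation holds nothing but
the two descriptor rings. A sketch of the resulting carve-up: the txList
half is visible in the context lines above, while the rxList base
assignment here is a simplification (how the driver aligns that base is
not visible in this patch):

dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS) * sizeof(TLanList);
priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size,
                                        &priv->dmaStorageDMA);

priv->rxList    = (TLanList *) priv->dmaStorage;   /* simplified base */
priv->rxListDMA = priv->dmaStorageDMA;
priv->txList    = priv->rxList + TLAN_NUM_RX_LISTS;
priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;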
@@ -1094,9 +1080,8 @@ static void TLan_tx_timeout_work(struct work_struct *work)
 static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 {
         TLanPrivateInfo *priv = netdev_priv(dev);
-        TLanList *tail_list;
         dma_addr_t tail_list_phys;
-        u8 *tail_buffer;
+        TLanList *tail_list;
         unsigned long flags;
         unsigned int txlen;
 
@@ -1125,15 +1110,10 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
         tail_list->forward = 0;
 
-        if ( bbuf ) {
-                tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
-                skb_copy_from_linear_data(skb, tail_buffer, txlen);
-        } else {
-                tail_list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                skb->data, txlen,
-                                                PCI_DMA_TODEVICE);
-                TLan_StoreSKB(tail_list, skb);
-        }
+        tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+                                                      skb->data, txlen,
+                                                      PCI_DMA_TODEVICE);
+        TLan_StoreSKB(tail_list, skb);
 
         tail_list->frameSize = (u16) txlen;
         tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
@@ -1163,9 +1143,6 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
         CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
 
-        if ( bbuf )
-                dev_kfree_skb_any(skb);
-
         dev->trans_start = jiffies;
         return 0;
 
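TLan_StoreSKB() and TLan_GetSKB() are not part of this patch; they park the
skb pointer in the descriptor's last two, otherwise unused, fragment slots,
which is why the completion paths reset buffer[8].address and
buffer[9].address. Approximately, as a sketch rather than the driver's
verbatim code:

/* Stash an skb pointer in descriptor padding (sketch).  The double
 * shift splits the pointer without invoking undefined behaviour when
 * unsigned long is 32 bits wide.
 */
static void TLan_StoreSKB(TLanList *tag, struct sk_buff *skb)
{
        unsigned long addr = (unsigned long) skb;

        tag->buffer[9].address = addr;
        tag->buffer[8].address = addr >> 31 >> 1;
}

static struct sk_buff *TLan_GetSKB(const TLanList *tag)
{
        unsigned long addr = tag->buffer[8].address;

        return (struct sk_buff *) ((addr << 31 << 1) | tag->buffer[9].address);
}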
@@ -1429,17 +1406,16 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
         head_list = priv->txList + priv->txHead;
 
         while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
+                struct sk_buff *skb = TLan_GetSKB(head_list);
+
                 ack++;
-                if ( ! bbuf ) {
-                        struct sk_buff *skb = TLan_GetSKB(head_list);
-                        pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
-                                         max(skb->len,
-                                             (unsigned int)TLAN_MIN_FRAME_SIZE),
-                                         PCI_DMA_TODEVICE);
-                        dev_kfree_skb_any(skb);
-                        head_list->buffer[8].address = 0;
-                        head_list->buffer[9].address = 0;
-                }
+                pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+                                 max(skb->len,
+                                     (unsigned int)TLAN_MIN_FRAME_SIZE),
+                                 PCI_DMA_TODEVICE);
+                dev_kfree_skb_any(skb);
+                head_list->buffer[8].address = 0;
+                head_list->buffer[9].address = 0;
 
                 if ( tmpCStat & TLAN_CSTAT_EOC )
                         eoc = 1;
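The unmap length must match what was mapped. TLan_StartTx() maps txlen,
which is assumed (it is set outside these hunks) to be clamped to the
minimum Ethernet frame length, so the completion handler can recompute the
same value from the stored skb instead of remembering it. A sketch of the
pairing, with the clamp as an assumption:

/* Assumed clamp in TLan_StartTx(), inferred from the unmap expression
 * above; not visible in this patch.
 */
txlen = max(skb->len, (unsigned int) TLAN_MIN_FRAME_SIZE);
tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data,
                                              txlen, PCI_DMA_TODEVICE);

/* Matching unmap in TLan_HandleTxEOF(), recomputed from skb->len: */
pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
                 max(skb->len, (unsigned int) TLAN_MIN_FRAME_SIZE),
                 PCI_DMA_TODEVICE);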
@@ -1549,7 +1525,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
         TLanPrivateInfo *priv = netdev_priv(dev);
         u32 ack = 0;
         int eoc = 0;
-        u8 *head_buffer;
         TLanList *head_list;
         struct sk_buff *skb;
         TLanList *tail_list;
@@ -1564,53 +1539,33 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
         while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
                 dma_addr_t frameDma = head_list->buffer[0].address;
                 u32 frameSize = head_list->frameSize;
+                struct sk_buff *new_skb;
+
                 ack++;
                 if (tmpCStat & TLAN_CSTAT_EOC)
                         eoc = 1;
 
-                if (bbuf) {
-                        skb = netdev_alloc_skb(dev, frameSize + 7);
-                        if ( !skb )
-                                goto drop_and_reuse;
-
-                        head_buffer = priv->rxBuffer
-                                + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
-                        skb_reserve(skb, 2);
-                        pci_dma_sync_single_for_cpu(priv->pciDev,
-                                                    frameDma, frameSize,
-                                                    PCI_DMA_FROMDEVICE);
-                        skb_copy_from_linear_data(skb, head_buffer, frameSize);
-                        skb_put(skb, frameSize);
-                        dev->stats.rx_bytes += frameSize;
-
-                        skb->protocol = eth_type_trans( skb, dev );
-                        netif_rx( skb );
-                } else {
-                        struct sk_buff *new_skb;
-
-                        new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
-                        if ( !new_skb )
-                                goto drop_and_reuse;
-
-                        skb = TLan_GetSKB(head_list);
-                        pci_unmap_single(priv->pciDev, frameDma,
-                                         TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
-                        skb_put( skb, frameSize );
-
-                        dev->stats.rx_bytes += frameSize;
-
-                        skb->protocol = eth_type_trans( skb, dev );
-                        netif_rx( skb );
-
-                        skb_reserve( new_skb, NET_IP_ALIGN );
-                        head_list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                                      new_skb->data,
-                                                                      TLAN_MAX_FRAME_SIZE,
-                                                                      PCI_DMA_FROMDEVICE);
-
-                        TLan_StoreSKB(head_list, new_skb);
-
-                }
+                new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+                if ( !new_skb )
+                        goto drop_and_reuse;
+
+                skb = TLan_GetSKB(head_list);
+                pci_unmap_single(priv->pciDev, frameDma,
+                                 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+                skb_put( skb, frameSize );
+
+                dev->stats.rx_bytes += frameSize;
+
+                skb->protocol = eth_type_trans( skb, dev );
+                netif_rx( skb );
+
+                skb_reserve( new_skb, NET_IP_ALIGN );
+                head_list->buffer[0].address = pci_map_single(priv->pciDev,
+                                                              new_skb->data,
+                                                              TLAN_MAX_FRAME_SIZE,
+                                                              PCI_DMA_FROMDEVICE);
+
+                TLan_StoreSKB(head_list, new_skb);
 drop_and_reuse:
                 head_list->forward = 0;
                 head_list->cStat = 0;
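The surviving RX path uses the usual allocate-then-swap refill discipline:
the replacement buffer is allocated before the completed one is consumed,
so an allocation failure costs one dropped frame but never leaves a ring
slot without a buffer. Condensed from the hunk above, with the rationale in
comments:

new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7);
if (!new_skb)
        goto drop_and_reuse;    /* keep old skb + mapping, drop frame */

skb = TLan_GetSKB(head_list);   /* completed frame */
pci_unmap_single(priv->pciDev, frameDma,
                 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
skb_put(skb, frameSize);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);                  /* hand the frame to the stack */

skb_reserve(new_skb, NET_IP_ALIGN);
head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data,
                                              TLAN_MAX_FRAME_SIZE,
                                              PCI_DMA_FROMDEVICE);
TLan_StoreSKB(head_list, new_skb);      /* arm the slot again */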
@@ -1993,12 +1948,7 @@ static void TLan_ResetLists( struct net_device *dev )
         for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
                 list = priv->txList + i;
                 list->cStat = TLAN_CSTAT_UNUSED;
-                if ( bbuf ) {
-                        list->buffer[0].address = priv->txBufferDMA
-                                + ( i * TLAN_MAX_FRAME_SIZE );
-                } else {
-                        list->buffer[0].address = 0;
-                }
+                list->buffer[0].address = 0;
                 list->buffer[2].count = 0;
                 list->buffer[2].address = 0;
                 list->buffer[8].address = 0;
@@ -2013,23 +1963,18 @@ static void TLan_ResetLists( struct net_device *dev )
                 list->cStat = TLAN_CSTAT_READY;
                 list->frameSize = TLAN_MAX_FRAME_SIZE;
                 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-                if ( bbuf ) {
-                        list->buffer[0].address = priv->rxBufferDMA
-                                + ( i * TLAN_MAX_FRAME_SIZE );
-                } else {
-                        skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
-                        if ( !skb ) {
-                                pr_err("TLAN: out of memory for received data.\n" );
-                                break;
-                        }
-
-                        skb_reserve( skb, NET_IP_ALIGN );
-                        list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                                 skb->data,
-                                                                 TLAN_MAX_FRAME_SIZE,
-                                                                 PCI_DMA_FROMDEVICE);
-                        TLan_StoreSKB(list, skb);
-                }
+                skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+                if ( !skb ) {
+                        pr_err("TLAN: out of memory for received data.\n" );
+                        break;
+                }
+
+                skb_reserve( skb, NET_IP_ALIGN );
+                list->buffer[0].address = pci_map_single(priv->pciDev,
+                                                         skb->data,
+                                                         TLAN_MAX_FRAME_SIZE,
+                                                         PCI_DMA_FROMDEVICE);
+                TLan_StoreSKB(list, skb);
                 list->buffer[1].count = 0;
                 list->buffer[1].address = 0;
                 list->forward = list_phys + sizeof(TLanList);
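A small detail shared by this hunk and the RX completion path: buffers are
allocated with 7 bytes of slack and then offset by NET_IP_ALIGN, presumably
so that the 14-byte Ethernet header leaves the IP header word-aligned while
a full TLAN_MAX_FRAME_SIZE frame still fits. The arithmetic, as a sketch:

/* NET_IP_ALIGN is 2 on most architectures: shifting the 14-byte
 * Ethernet header by 2 bytes puts the IP header on a 4-byte boundary.
 */
skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7);  /* 7 >= NET_IP_ALIGN */
skb_reserve(skb, NET_IP_ALIGN);                        /* skb->data += 2 */

/* (TLAN_MAX_FRAME_SIZE + 7) - NET_IP_ALIGN >= TLAN_MAX_FRAME_SIZE,
 * so the full-size mapping below stays inside the allocation.
 */
list->buffer[0].address = pci_map_single(priv->pciDev, skb->data,
                                         TLAN_MAX_FRAME_SIZE,
                                         PCI_DMA_FROMDEVICE);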
@@ -2052,35 +1997,33 @@ static void TLan_FreeLists( struct net_device *dev )
         TLanList *list;
         struct sk_buff *skb;
 
-        if ( ! bbuf ) {
-                for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-                        list = priv->txList + i;
-                        skb = TLan_GetSKB(list);
-                        if ( skb ) {
-                                pci_unmap_single(
-                                        priv->pciDev,
-                                        list->buffer[0].address,
-                                        max(skb->len,
-                                            (unsigned int)TLAN_MIN_FRAME_SIZE),
-                                        PCI_DMA_TODEVICE);
-                                dev_kfree_skb_any( skb );
-                                list->buffer[8].address = 0;
-                                list->buffer[9].address = 0;
-                        }
-                }
+        for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+                list = priv->txList + i;
+                skb = TLan_GetSKB(list);
+                if ( skb ) {
+                        pci_unmap_single(
+                                priv->pciDev,
+                                list->buffer[0].address,
+                                max(skb->len,
+                                    (unsigned int)TLAN_MIN_FRAME_SIZE),
+                                PCI_DMA_TODEVICE);
+                        dev_kfree_skb_any( skb );
+                        list->buffer[8].address = 0;
+                        list->buffer[9].address = 0;
+                }
+        }
 
         for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
                 list = priv->rxList + i;
                 skb = TLan_GetSKB(list);
                 if ( skb ) {
                         pci_unmap_single(priv->pciDev,
                                          list->buffer[0].address,
                                          TLAN_MAX_FRAME_SIZE,
                                          PCI_DMA_FROMDEVICE);
                         dev_kfree_skb_any( skb );
                         list->buffer[8].address = 0;
                         list->buffer[9].address = 0;
-                        }
                 }
         }
 } /* TLan_FreeLists */