Diffstat (limited to 'drivers/net/tlan.c')
-rw-r--r--	drivers/net/tlan.c	490
1 files changed, 256 insertions, 234 deletions
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 0166407d7061..85246ed7cb9c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -13,8 +13,6 @@
13 * This software may be used and distributed according to the terms 13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference. 14 * of the GNU General Public License, incorporated herein by reference.
15 * 15 *
16 ** This file is best viewed/edited with columns>=132.
17 *
18 ** Useful (if not required) reading: 16 ** Useful (if not required) reading:
19 * 17 *
20 * Texas Instruments, ThunderLAN Programmer's Guide, 18 * Texas Instruments, ThunderLAN Programmer's Guide,
@@ -218,9 +216,7 @@ static int bbuf;
 module_param(bbuf, int, 0);
 MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
 
-static u8		*TLanPadBuffer;
-static dma_addr_t	TLanPadBufferDMA;
-static char		TLanSignature[] = "TLAN";
+static const char TLanSignature[] = "TLAN";
 static const char tlan_banner[] = "ThunderLAN driver v1.15\n";
 static int tlan_have_pci;
 static int tlan_have_eisa;
@@ -238,9 +234,11 @@ static struct board {
238 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 234 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
239 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 235 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
240 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 236 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 237 { "Compaq NetFlex-3/P",
238 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
242 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 239 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
243 { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 240 { "Compaq Netelligent Integrated 10/100 TX UTP",
241 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 242 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 243 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 244 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
@@ -248,8 +246,9 @@ static struct board {
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 246 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 247 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 248 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 249 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 250 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
251 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
253 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 252 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
254}; 253};
255 254
@@ -294,12 +293,12 @@ static int TLan_Close( struct net_device *);
294static struct net_device_stats *TLan_GetStats( struct net_device *); 293static struct net_device_stats *TLan_GetStats( struct net_device *);
295static void TLan_SetMulticastList( struct net_device *); 294static void TLan_SetMulticastList( struct net_device *);
296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 295static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 296static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
297 int irq, int rev, const struct pci_device_id *ent);
298static void TLan_tx_timeout( struct net_device *dev); 298static void TLan_tx_timeout( struct net_device *dev);
299static void TLan_tx_timeout_work(struct work_struct *work); 299static void TLan_tx_timeout_work(struct work_struct *work);
300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
301 301
302static u32 TLan_HandleInvalid( struct net_device *, u16 );
303static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 302static u32 TLan_HandleTxEOF( struct net_device *, u16 );
304static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 303static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
305static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 304static u32 TLan_HandleRxEOF( struct net_device *, u16 );
@@ -348,29 +347,27 @@ static void TLan_EeReceiveByte( u16, u8 *, int );
 static int TLan_EeReadByte( struct net_device *, u8, u8 * );
 
 
-static void
+static inline void
 TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
 {
 	unsigned long addr = (unsigned long)skb;
-	tag->buffer[9].address = (u32)addr;
-	addr >>= 31;	/* >>= 32 is undefined for 32bit arch, stupid C */
-	addr >>= 1;
-	tag->buffer[8].address = (u32)addr;
+	tag->buffer[9].address = addr;
+	tag->buffer[8].address = upper_32_bits(addr);
 }
 
-static struct sk_buff *
-TLan_GetSKB( struct tlan_list_tag *tag)
+static inline struct sk_buff *
+TLan_GetSKB( const struct tlan_list_tag *tag)
 {
-	unsigned long addr = tag->buffer[8].address;
-	addr <<= 31;
-	addr <<= 1;
-	addr |= tag->buffer[9].address;
+	unsigned long addr;
+
+	addr = tag->buffer[8].address;
+	addr |= (tag->buffer[9].address << 16) << 16;
 	return (struct sk_buff *) addr;
 }
 
 
 static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
-	TLan_HandleInvalid,
+	NULL,
 	TLan_HandleTxEOF,
 	TLan_HandleStatOverflow,
 	TLan_HandleRxEOF,
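The TLan_StoreSKB()/TLan_GetSKB() rewrite above spreads the sk_buff pointer across two 32-bit fields of the hardware list so the driver also works on 64-bit machines: upper_32_bits() yields the high half, and the re-join shifts by 16 twice because a single shift by 32 is undefined behaviour when unsigned long is only 32 bits wide. A stand-alone sketch of that split/re-join idea, written as ordinary user-space C rather than driver code:

#include <stdint.h>
#include <stdio.h>

struct two_words {
	uint32_t lo;	/* low 32 bits of the pointer */
	uint32_t hi;	/* high 32 bits (always 0 on a 32-bit machine) */
};

static void store_ptr(struct two_words *w, void *p)
{
	unsigned long addr = (unsigned long)p;

	w->lo = (uint32_t)addr;
	w->hi = (uint32_t)((addr >> 16) >> 16);	/* what upper_32_bits() boils down to */
}

static void *load_ptr(const struct two_words *w)
{
	unsigned long addr = w->hi;

	addr = (addr << 16) << 16;	/* no-op on 32-bit, restores the high half on 64-bit */
	addr |= w->lo;
	return (void *)addr;
}

int main(void)
{
	int x = 42;
	struct two_words w;

	store_ptr(&w, &x);
	printf("round trip ok: %d\n", *(int *)load_ptr(&w) == 42);
	return 0;
}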
@@ -444,7 +441,9 @@ static void __devexit tlan_remove_one( struct pci_dev *pdev)
444 unregister_netdev( dev ); 441 unregister_netdev( dev );
445 442
446 if ( priv->dmaStorage ) { 443 if ( priv->dmaStorage ) {
447 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 444 pci_free_consistent(priv->pciDev,
445 priv->dmaSize, priv->dmaStorage,
446 priv->dmaStorageDMA );
448 } 447 }
449 448
450#ifdef CONFIG_PCI 449#ifdef CONFIG_PCI
@@ -469,16 +468,6 @@ static int __init tlan_probe(void)
469 468
470 printk(KERN_INFO "%s", tlan_banner); 469 printk(KERN_INFO "%s", tlan_banner);
471 470
472 TLanPadBuffer = (u8 *) pci_alloc_consistent(NULL, TLAN_MIN_FRAME_SIZE, &TLanPadBufferDMA);
473
474 if (TLanPadBuffer == NULL) {
475 printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
476 rc = -ENOMEM;
477 goto err_out;
478 }
479
480 memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
481
482 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 471 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
483 472
484 /* Use new style PCI probing. Now the kernel will 473 /* Use new style PCI probing. Now the kernel will
@@ -506,8 +495,6 @@ static int __init tlan_probe(void)
506err_out_pci_unreg: 495err_out_pci_unreg:
507 pci_unregister_driver(&tlan_driver); 496 pci_unregister_driver(&tlan_driver);
508err_out_pci_free: 497err_out_pci_free:
509 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
510err_out:
511 return rc; 498 return rc;
512} 499}
513 500
@@ -539,7 +526,8 @@ static int __devinit tlan_init_one( struct pci_dev *pdev,
539 **************************************************************/ 526 **************************************************************/
540 527
541static int __devinit TLan_probe1(struct pci_dev *pdev, 528static int __devinit TLan_probe1(struct pci_dev *pdev,
542 long ioaddr, int irq, int rev, const struct pci_device_id *ent ) 529 long ioaddr, int irq, int rev,
530 const struct pci_device_id *ent )
543{ 531{
544 532
545 struct net_device *dev; 533 struct net_device *dev;
@@ -625,8 +613,10 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
625 /* Kernel parameters */ 613 /* Kernel parameters */
626 if (dev->mem_start) { 614 if (dev->mem_start) {
627 priv->aui = dev->mem_start & 0x01; 615 priv->aui = dev->mem_start & 0x01;
628 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 : (dev->mem_start & 0x06) >> 1; 616 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
629 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3; 617 : (dev->mem_start & 0x06) >> 1;
618 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
619 : (dev->mem_start & 0x18) >> 3;
630 620
631 if (priv->speed == 0x1) { 621 if (priv->speed == 0x1) {
632 priv->speed = TLAN_SPEED_10; 622 priv->speed = TLAN_SPEED_10;
@@ -706,7 +696,8 @@ static void TLan_Eisa_Cleanup(void)
706 dev = TLan_Eisa_Devices; 696 dev = TLan_Eisa_Devices;
707 priv = netdev_priv(dev); 697 priv = netdev_priv(dev);
708 if (priv->dmaStorage) { 698 if (priv->dmaStorage) {
709 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 699 pci_free_consistent(priv->pciDev, priv->dmaSize,
700 priv->dmaStorage, priv->dmaStorageDMA );
710 } 701 }
711 release_region( dev->base_addr, 0x10); 702 release_region( dev->base_addr, 0x10);
712 unregister_netdev( dev ); 703 unregister_netdev( dev );
@@ -724,8 +715,6 @@ static void __exit tlan_exit(void)
724 if (tlan_have_eisa) 715 if (tlan_have_eisa)
725 TLan_Eisa_Cleanup(); 716 TLan_Eisa_Cleanup();
726 717
727 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
728
729} 718}
730 719
731 720
@@ -763,8 +752,10 @@ static void __init TLan_EisaProbe (void)
763 /* Loop through all slots of the EISA bus */ 752 /* Loop through all slots of the EISA bus */
764 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 753 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
765 754
766 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 755 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
767 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 756 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
768 759
769 760
770 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 761 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
@@ -874,7 +865,8 @@ static int TLan_Init( struct net_device *dev )
874 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 865 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
875 * ( sizeof(TLanList) ); 866 * ( sizeof(TLanList) );
876 } 867 }
877 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size, &priv->dmaStorageDMA); 868 priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
869 dma_size, &priv->dmaStorageDMA);
878 priv->dmaSize = dma_size; 870 priv->dmaSize = dma_size;
879 871
880 if ( priv->dmaStorage == NULL ) { 872 if ( priv->dmaStorage == NULL ) {
@@ -883,16 +875,19 @@ static int TLan_Init( struct net_device *dev )
883 return -ENOMEM; 875 return -ENOMEM;
884 } 876 }
885 memset( priv->dmaStorage, 0, dma_size ); 877 memset( priv->dmaStorage, 0, dma_size );
886 priv->rxList = (TLanList *) 878 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
887 ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 ); 879 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
888 priv->rxListDMA = ( ( ( (u32) priv->dmaStorageDMA ) + 7 ) & 0xFFFFFFF8 );
889 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 880 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
890 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 881 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
882
891 if ( bbuf ) { 883 if ( bbuf ) {
892 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS ); 884 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
893 priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS; 885 priv->rxBufferDMA =priv->txListDMA
894 priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 886 + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
895 priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 887 priv->txBuffer = priv->rxBuffer
888 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
889 priv->txBufferDMA = priv->rxBufferDMA
890 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
896 } 891 }
897 892
898 err = 0; 893 err = 0;
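TLan_Init() above switches from the open-coded "( addr + 7 ) & 0xFFFFFFF8" to the kernel's ALIGN() macro, which does the same rounding without truncating the upper half of a 64-bit address through the u32 cast. The rounding itself, shown in isolation (a sketch of what ALIGN() amounts to for a power-of-two alignment):

#include <stdio.h>

/* Round x up to the next multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long addr = 0x1003;

	/* prints 0x1003 aligned to 8 -> 0x1008 */
	printf("0x%lx aligned to 8 -> 0x%lx\n", addr, ALIGN_UP(addr, 8));
	return 0;
}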
@@ -952,10 +947,12 @@ static int TLan_Open( struct net_device *dev )
952 int err; 947 int err;
953 948
954 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 949 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
955 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, TLanSignature, dev ); 950 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
951 dev->name, dev );
956 952
957 if ( err ) { 953 if ( err ) {
958 printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq ); 954 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
955 dev->name, dev->irq );
959 return err; 956 return err;
960 } 957 }
961 958
@@ -969,7 +966,8 @@ static int TLan_Open( struct net_device *dev )
969 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 966 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
970 TLan_ResetAdapter( dev ); 967 TLan_ResetAdapter( dev );
971 968
972 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev ); 969 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
970 dev->name, priv->tlanRev );
973 971
974 return 0; 972 return 0;
975 973
@@ -1007,14 +1005,16 @@ static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1007 1005
1008 1006
1009 case SIOCGMIIREG: /* Read MII PHY register. */ 1007 case SIOCGMIIREG: /* Read MII PHY register. */
1010 TLan_MiiReadReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, &data->val_out); 1008 TLan_MiiReadReg(dev, data->phy_id & 0x1f,
1009 data->reg_num & 0x1f, &data->val_out);
1011 return 0; 1010 return 0;
1012 1011
1013 1012
1014 case SIOCSMIIREG: /* Write MII PHY register. */ 1013 case SIOCSMIIREG: /* Write MII PHY register. */
1015 if (!capable(CAP_NET_ADMIN)) 1014 if (!capable(CAP_NET_ADMIN))
1016 return -EPERM; 1015 return -EPERM;
1017 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); 1016 TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
1017 data->reg_num & 0x1f, data->val_in);
1018 return 0; 1018 return 0;
1019 default: 1019 default:
1020 return -EOPNOTSUPP; 1020 return -EOPNOTSUPP;
@@ -1096,20 +1096,25 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1096 TLanList *tail_list; 1096 TLanList *tail_list;
1097 dma_addr_t tail_list_phys; 1097 dma_addr_t tail_list_phys;
1098 u8 *tail_buffer; 1098 u8 *tail_buffer;
1099 int pad;
1100 unsigned long flags; 1099 unsigned long flags;
1101 1100
1102 if ( ! priv->phyOnline ) { 1101 if ( ! priv->phyOnline ) {
1103 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", dev->name ); 1102 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1103 dev->name );
1104 dev_kfree_skb_any(skb); 1104 dev_kfree_skb_any(skb);
1105 return 0; 1105 return 0;
1106 } 1106 }
1107 1107
1108 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
1109 return 0;
1110
1108 tail_list = priv->txList + priv->txTail; 1111 tail_list = priv->txList + priv->txTail;
1109 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1112 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
1110 1113
1111 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1114 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
1112 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail ); 1115 TLAN_DBG( TLAN_DEBUG_TX,
1116 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1117 dev->name, priv->txHead, priv->txTail );
1113 netif_stop_queue(dev); 1118 netif_stop_queue(dev);
1114 priv->txBusyCount++; 1119 priv->txBusyCount++;
1115 return 1; 1120 return 1;
@@ -1121,37 +1126,34 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1121 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1126 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1122 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1127 skb_copy_from_linear_data(skb, tail_buffer, skb->len);
1123 } else { 1128 } else {
1124 tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); 1129 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1130 skb->data, skb->len,
1131 PCI_DMA_TODEVICE);
1125 TLan_StoreSKB(tail_list, skb); 1132 TLan_StoreSKB(tail_list, skb);
1126 } 1133 }
1127 1134
1128 pad = TLAN_MIN_FRAME_SIZE - skb->len; 1135 tail_list->frameSize = (u16) skb->len;
1129 1136 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1130 if ( pad > 0 ) { 1137 tail_list->buffer[1].count = 0;
1131 tail_list->frameSize = (u16) skb->len + pad; 1138 tail_list->buffer[1].address = 0;
1132 tail_list->buffer[0].count = (u32) skb->len;
1133 tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
1134 tail_list->buffer[1].address = TLanPadBufferDMA;
1135 } else {
1136 tail_list->frameSize = (u16) skb->len;
1137 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1138 tail_list->buffer[1].count = 0;
1139 tail_list->buffer[1].address = 0;
1140 }
1141 1139
1142 spin_lock_irqsave(&priv->lock, flags); 1140 spin_lock_irqsave(&priv->lock, flags);
1143 tail_list->cStat = TLAN_CSTAT_READY; 1141 tail_list->cStat = TLAN_CSTAT_READY;
1144 if ( ! priv->txInProgress ) { 1142 if ( ! priv->txInProgress ) {
1145 priv->txInProgress = 1; 1143 priv->txInProgress = 1;
1146 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1144 TLAN_DBG( TLAN_DEBUG_TX,
1145 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
1147 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1146 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
1148 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1147 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
1149 } else { 1148 } else {
1150 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail ); 1149 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
1150 priv->txTail );
1151 if ( priv->txTail == 0 ) { 1151 if ( priv->txTail == 0 ) {
1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; 1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
1153 = tail_list_phys;
1153 } else { 1154 } else {
1154 ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; 1155 ( priv->txList + ( priv->txTail - 1 ) )->forward
1156 = tail_list_phys;
1155 } 1157 }
1156 } 1158 }
1157 spin_unlock_irqrestore(&priv->lock, flags); 1159 spin_unlock_irqrestore(&priv->lock, flags);
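With the global pad buffer gone, TLan_StartTx() above calls skb_padto() to stretch short frames to the minimum Ethernet length before they reach the chip, so every frame fits in a single DMA fragment. Roughly, the pattern in a transmit routine looks like this (a kernel-style sketch only; the TLAN list setup and locking shown in the hunk are omitted, and ETH_ZLEN stands in for the driver's own minimum-frame constant):

	/* skb_padto() extends the frame to at least ETH_ZLEN bytes and
	 * frees the skb itself if that fails, so the caller just returns. */
	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* ... map skb->data with pci_map_single() and queue it as one
	 * buffer, exactly as the hunk above does ... */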
@@ -1191,33 +1193,31 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
 static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
 {
-	u32		ack;
-	struct net_device	*dev;
-	u32		host_cmd;
+	struct net_device	*dev = dev_id;
+	TLanPrivateInfo		*priv = netdev_priv(dev);
 	u16		host_int;
-	int		type;
-	TLanPrivateInfo	*priv;
-
-	dev = dev_id;
-	priv = netdev_priv(dev);
+	u16		type;
 
 	spin_lock(&priv->lock);
 
 	host_int = inw( dev->base_addr + TLAN_HOST_INT );
-	outw( host_int, dev->base_addr + TLAN_HOST_INT );
-
 	type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
+	if ( type ) {
+		u32 ack;
+		u32 host_cmd;
 
-	ack = TLanIntVector[type]( dev, host_int );
+		outw( host_int, dev->base_addr + TLAN_HOST_INT );
+		ack = TLanIntVector[type]( dev, host_int );
 
-	if ( ack ) {
-		host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
-		outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+		if ( ack ) {
+			host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
+			outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+		}
 	}
 
 	spin_unlock(&priv->lock);
 
-	return IRQ_HANDLED;
+	return IRQ_RETVAL(type);
 } /* TLan_HandleInterrupts */
 
 
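The reworked handler above only dispatches through TLanIntVector[] when the interrupt type is non-zero, which is why the TLan_HandleInvalid() stub further down can be dropped, and it returns IRQ_RETVAL(type) so the kernel's spurious-interrupt accounting sees IRQ_NONE when the shared line fires for some other device. The general shape of such a shared-IRQ handler is (a sketch; the helpers named here are hypothetical, used only for illustration):

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int ours;

	ours = example_chip_event_pending(dev);		/* hypothetical helper */
	if (ours)
		example_service_events(dev);		/* hypothetical helper */

	return IRQ_RETVAL(ours);	/* IRQ_HANDLED if ours, IRQ_NONE otherwise */
}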
@@ -1286,8 +1286,10 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1286 /* Should only read stats if open ? */ 1286 /* Should only read stats if open ? */
1287 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1287 TLan_ReadAndClearStats( dev, TLAN_RECORD );
1288 1288
1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount ); 1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1290 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount ); 1290 priv->rxEocCount );
1291 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1292 priv->txBusyCount );
1291 if ( debug & TLAN_DEBUG_GNRL ) { 1293 if ( debug & TLAN_DEBUG_GNRL ) {
1292 TLan_PrintDio( dev->base_addr ); 1294 TLan_PrintDio( dev->base_addr );
1293 TLan_PhyPrint( dev ); 1295 TLan_PhyPrint( dev );
@@ -1299,7 +1301,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1299 TLan_PrintList( priv->txList + i, "TX", i ); 1301 TLan_PrintList( priv->txList + i, "TX", i );
1300 } 1302 }
1301 1303
1302 return ( &( (TLanPrivateInfo *) netdev_priv(dev) )->stats ); 1304 return &dev->stats;
1303 1305
1304} /* TLan_GetStats */ 1306} /* TLan_GetStats */
1305 1307
@@ -1337,10 +1339,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
1337 1339
1338 if ( dev->flags & IFF_PROMISC ) { 1340 if ( dev->flags & IFF_PROMISC ) {
1339 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1341 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1340 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1342 TLan_DioWrite8( dev->base_addr,
1343 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
1341 } else { 1344 } else {
1342 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1345 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1343 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1346 TLan_DioWrite8( dev->base_addr,
1347 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
1344 if ( dev->flags & IFF_ALLMULTI ) { 1348 if ( dev->flags & IFF_ALLMULTI ) {
1345 for ( i = 0; i < 3; i++ ) 1349 for ( i = 0; i < 3; i++ )
1346 TLan_SetMac( dev, i + 1, NULL ); 1350 TLan_SetMac( dev, i + 1, NULL );
@@ -1349,7 +1353,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
1349 } else { 1353 } else {
1350 for ( i = 0; i < dev->mc_count; i++ ) { 1354 for ( i = 0; i < dev->mc_count; i++ ) {
1351 if ( i < 3 ) { 1355 if ( i < 3 ) {
1352 TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr ); 1356 TLan_SetMac( dev, i + 1,
1357 (char *) &dmi->dmi_addr );
1353 } else { 1358 } else {
1354 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1359 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
1355 if ( offset < 32 ) 1360 if ( offset < 32 )
@@ -1383,31 +1388,6 @@ static void TLan_SetMulticastList( struct net_device *dev )
1383*****************************************************************************/ 1388*****************************************************************************/
1384 1389
1385 1390
1386 /***************************************************************
1387 * TLan_HandleInvalid
1388 *
1389 * Returns:
1390 * 0
1391 * Parms:
1392 * dev Device assigned the IRQ that was
1393 * raised.
1394 * host_int The contents of the HOST_INT
1395 * port.
1396 *
1397 * This function handles invalid interrupts. This should
1398 * never happen unless some other adapter is trying to use
1399 * the IRQ line assigned to the device.
1400 *
1401 **************************************************************/
1402
1403static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
1404{
1405 /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
1406 return 0;
1407
1408} /* TLan_HandleInvalid */
1409
1410
1411 1391
1412 1392
1413 /*************************************************************** 1393 /***************************************************************
@@ -1441,14 +1421,16 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1441 u32 ack = 0; 1421 u32 ack = 0;
1442 u16 tmpCStat; 1422 u16 tmpCStat;
1443 1423
1444 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1424 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1425 priv->txHead, priv->txTail );
1445 head_list = priv->txList + priv->txHead; 1426 head_list = priv->txList + priv->txHead;
1446 1427
1447 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1428 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1448 ack++; 1429 ack++;
1449 if ( ! bbuf ) { 1430 if ( ! bbuf ) {
1450 struct sk_buff *skb = TLan_GetSKB(head_list); 1431 struct sk_buff *skb = TLan_GetSKB(head_list);
1451 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 1432 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1433 skb->len, PCI_DMA_TODEVICE);
1452 dev_kfree_skb_any(skb); 1434 dev_kfree_skb_any(skb);
1453 head_list->buffer[8].address = 0; 1435 head_list->buffer[8].address = 0;
1454 head_list->buffer[9].address = 0; 1436 head_list->buffer[9].address = 0;
@@ -1457,7 +1439,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1457 if ( tmpCStat & TLAN_CSTAT_EOC ) 1439 if ( tmpCStat & TLAN_CSTAT_EOC )
1458 eoc = 1; 1440 eoc = 1;
1459 1441
1460 priv->stats.tx_bytes += head_list->frameSize; 1442 dev->stats.tx_bytes += head_list->frameSize;
1461 1443
1462 head_list->cStat = TLAN_CSTAT_UNUSED; 1444 head_list->cStat = TLAN_CSTAT_UNUSED;
1463 netif_start_queue(dev); 1445 netif_start_queue(dev);
@@ -1469,7 +1451,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1469 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1451 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
1470 1452
1471 if ( eoc ) { 1453 if ( eoc ) {
1472 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1454 TLAN_DBG( TLAN_DEBUG_TX,
1455 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
1456 priv->txHead, priv->txTail );
1473 head_list = priv->txList + priv->txHead; 1457 head_list = priv->txList + priv->txHead;
1474 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1458 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1475 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1459 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1481,7 +1465,8 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1481 } 1465 }
1482 1466
1483 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1467 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1484 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1468 TLan_DioWrite8( dev->base_addr,
1469 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1485 if ( priv->timer.function == NULL ) { 1470 if ( priv->timer.function == NULL ) {
1486 priv->timer.function = &TLan_Timer; 1471 priv->timer.function = &TLan_Timer;
1487 priv->timer.data = (unsigned long) dev; 1472 priv->timer.data = (unsigned long) dev;
@@ -1563,66 +1548,65 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1563 TLanList *head_list; 1548 TLanList *head_list;
1564 struct sk_buff *skb; 1549 struct sk_buff *skb;
1565 TLanList *tail_list; 1550 TLanList *tail_list;
1566 void *t;
1567 u32 frameSize;
1568 u16 tmpCStat; 1551 u16 tmpCStat;
1569 dma_addr_t head_list_phys; 1552 dma_addr_t head_list_phys;
1570 1553
1571 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1554 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
1555 priv->rxHead, priv->rxTail );
1572 head_list = priv->rxList + priv->rxHead; 1556 head_list = priv->rxList + priv->rxHead;
1573 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1557 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1574 1558
1575 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1559 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1576 frameSize = head_list->frameSize; 1560 dma_addr_t frameDma = head_list->buffer[0].address;
1561 u32 frameSize = head_list->frameSize;
1577 ack++; 1562 ack++;
1578 if (tmpCStat & TLAN_CSTAT_EOC) 1563 if (tmpCStat & TLAN_CSTAT_EOC)
1579 eoc = 1; 1564 eoc = 1;
1580 1565
1581 if (bbuf) { 1566 if (bbuf) {
1582 skb = dev_alloc_skb(frameSize + 7); 1567 skb = netdev_alloc_skb(dev, frameSize + 7);
1583 if (skb == NULL) 1568 if ( !skb )
1584 printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); 1569 goto drop_and_reuse;
1585 else { 1570
1586 head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1571 head_buffer = priv->rxBuffer
1587 skb_reserve(skb, 2); 1572 + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
1588 t = (void *) skb_put(skb, frameSize); 1573 skb_reserve(skb, 2);
1589 1574 pci_dma_sync_single_for_cpu(priv->pciDev,
1590 priv->stats.rx_bytes += head_list->frameSize; 1575 frameDma, frameSize,
1591 1576 PCI_DMA_FROMDEVICE);
1592 memcpy( t, head_buffer, frameSize ); 1577 skb_copy_from_linear_data(skb, head_buffer, frameSize);
1593 skb->protocol = eth_type_trans( skb, dev ); 1578 skb_put(skb, frameSize);
1594 netif_rx( skb ); 1579 dev->stats.rx_bytes += frameSize;
1595 } 1580
1581 skb->protocol = eth_type_trans( skb, dev );
1582 netif_rx( skb );
1596 } else { 1583 } else {
1597 struct sk_buff *new_skb; 1584 struct sk_buff *new_skb;
1598 1585
1599 /* 1586 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
1600 * I changed the algorithm here. What we now do 1587 if ( !new_skb )
1601 * is allocate the new frame. If this fails we 1588 goto drop_and_reuse;
1602 * simply recycle the frame.
1603 */
1604 1589
1605 new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 1590 skb = TLan_GetSKB(head_list);
1591 pci_unmap_single(priv->pciDev, frameDma,
1592 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1593 skb_put( skb, frameSize );
1606 1594
1607 if ( new_skb != NULL ) { 1595 dev->stats.rx_bytes += frameSize;
1608 skb = TLan_GetSKB(head_list);
1609 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1610 skb_trim( skb, frameSize );
1611 1596
1612 priv->stats.rx_bytes += frameSize; 1597 skb->protocol = eth_type_trans( skb, dev );
1598 netif_rx( skb );
1613 1599
1614 skb->protocol = eth_type_trans( skb, dev ); 1600 skb_reserve( new_skb, NET_IP_ALIGN );
1615 netif_rx( skb ); 1601 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1602 new_skb->data,
1603 TLAN_MAX_FRAME_SIZE,
1604 PCI_DMA_FROMDEVICE);
1616 1605
1617 skb_reserve( new_skb, 2 ); 1606 TLan_StoreSKB(head_list, new_skb);
1618 t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
1619 head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1620 head_list->buffer[8].address = (u32) t;
1621 TLan_StoreSKB(head_list, new_skb);
1622 } else
1623 printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
1624 }
1625 1607
1608 }
1609drop_and_reuse:
1626 head_list->forward = 0; 1610 head_list->forward = 0;
1627 head_list->cStat = 0; 1611 head_list->cStat = 0;
1628 tail_list = priv->rxList + priv->rxTail; 1612 tail_list = priv->rxList + priv->rxTail;
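In the big-buffer (bbuf) receive path above the frame sits in a long-lived streaming DMA area, so the CPU must call pci_dma_sync_single_for_cpu() before copying the data out; in the normal path a replacement skb is allocated first and, if that fails, drop_and_reuse recycles the descriptor rather than leaving the ring with a NULL buffer. The copy-out step on its own might look like this (a sketch using memcpy(); frame_size, frame_dma and rx_buffer stand in for the driver's own descriptor fields):

	skb = netdev_alloc_skb(dev, frame_size + 7);
	if (!skb)
		goto drop_and_reuse;

	skb_reserve(skb, 2);			/* keep the IP header aligned */
	pci_dma_sync_single_for_cpu(priv->pciDev, frame_dma,
				    frame_size, PCI_DMA_FROMDEVICE);
	memcpy(skb_put(skb, frame_size), rx_buffer, frame_size);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);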
@@ -1638,10 +1622,10 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1638 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1622 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
1639 1623
1640 1624
1641
1642
1643 if ( eoc ) { 1625 if ( eoc ) {
1644 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1626 TLAN_DBG( TLAN_DEBUG_RX,
1627 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
1628 priv->rxHead, priv->rxTail );
1645 head_list = priv->rxList + priv->rxHead; 1629 head_list = priv->rxList + priv->rxHead;
1646 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1630 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1647 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1631 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
@@ -1650,7 +1634,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1650 } 1634 }
1651 1635
1652 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1636 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1653 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1637 TLan_DioWrite8( dev->base_addr,
1638 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1654 if ( priv->timer.function == NULL ) { 1639 if ( priv->timer.function == NULL ) {
1655 priv->timer.function = &TLan_Timer; 1640 priv->timer.function = &TLan_Timer;
1656 priv->timer.data = (unsigned long) dev; 1641 priv->timer.data = (unsigned long) dev;
@@ -1728,7 +1713,9 @@ static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
1728 1713
1729 host_int = 0; 1714 host_int = 0;
1730 if ( priv->tlanRev < 0x30 ) { 1715 if ( priv->tlanRev < 0x30 ) {
1731 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail ); 1716 TLAN_DBG( TLAN_DEBUG_TX,
1717 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1718 priv->txHead, priv->txTail );
1732 head_list = priv->txList + priv->txHead; 1719 head_list = priv->txList + priv->txHead;
1733 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1720 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1734 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1721 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1796,15 +1783,18 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1796 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1783 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
1797 if ( net_sts ) { 1784 if ( net_sts ) {
1798 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1785 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
1799 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", dev->name, (unsigned) net_sts ); 1786 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1787 dev->name, (unsigned) net_sts );
1800 } 1788 }
1801 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1789 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
1802 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1790 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
1803 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1791 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
1804 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1792 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
1793 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1805 tlphy_ctl |= TLAN_TC_SWAPOL; 1794 tlphy_ctl |= TLAN_TC_SWAPOL;
1806 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1795 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1807 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1796 } else if ( ( tlphy_sts & TLAN_TS_POLOK )
1797 && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1808 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1798 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1809 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1799 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1810 } 1800 }
@@ -1849,7 +1839,9 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1849 u32 ack = 1; 1839 u32 ack = 1;
1850 1840
1851 if ( priv->tlanRev < 0x30 ) { 1841 if ( priv->tlanRev < 0x30 ) {
1852 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail ); 1842 TLAN_DBG( TLAN_DEBUG_RX,
1843 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
1844 priv->rxHead, priv->rxTail );
1853 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1845 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1854 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1846 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
1855 ack |= TLAN_HC_GO | TLAN_HC_RT; 1847 ack |= TLAN_HC_GO | TLAN_HC_RT;
@@ -1940,10 +1932,12 @@ static void TLan_Timer( unsigned long data )
1940 if ( priv->timer.function == NULL ) { 1932 if ( priv->timer.function == NULL ) {
1941 elapsed = jiffies - priv->timerSetAt; 1933 elapsed = jiffies - priv->timerSetAt;
1942 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1934 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
1943 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 1935 TLan_DioWrite8( dev->base_addr,
1936 TLAN_LED_REG, TLAN_LED_LINK );
1944 } else { 1937 } else {
1945 priv->timer.function = &TLan_Timer; 1938 priv->timer.function = &TLan_Timer;
1946 priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY; 1939 priv->timer.expires = priv->timerSetAt
1940 + TLAN_TIMER_ACT_DELAY;
1947 spin_unlock_irqrestore(&priv->lock, flags); 1941 spin_unlock_irqrestore(&priv->lock, flags);
1948 add_timer( &priv->timer ); 1942 add_timer( &priv->timer );
1949 break; 1943 break;
@@ -1998,7 +1992,8 @@ static void TLan_ResetLists( struct net_device *dev )
1998 list = priv->txList + i; 1992 list = priv->txList + i;
1999 list->cStat = TLAN_CSTAT_UNUSED; 1993 list->cStat = TLAN_CSTAT_UNUSED;
2000 if ( bbuf ) { 1994 if ( bbuf ) {
2001 list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 1995 list->buffer[0].address = priv->txBufferDMA
1996 + ( i * TLAN_MAX_FRAME_SIZE );
2002 } else { 1997 } else {
2003 list->buffer[0].address = 0; 1998 list->buffer[0].address = 0;
2004 } 1999 }
@@ -2017,28 +2012,32 @@ static void TLan_ResetLists( struct net_device *dev )
2017 list->frameSize = TLAN_MAX_FRAME_SIZE; 2012 list->frameSize = TLAN_MAX_FRAME_SIZE;
2018 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 2013 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
2019 if ( bbuf ) { 2014 if ( bbuf ) {
2020 list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 2015 list->buffer[0].address = priv->rxBufferDMA
2016 + ( i * TLAN_MAX_FRAME_SIZE );
2021 } else { 2017 } else {
2022 skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 2018 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
2023 if ( skb == NULL ) { 2019 if ( !skb ) {
2024 printk( "TLAN: Couldn't allocate memory for received data.\n" ); 2020 pr_err("TLAN: out of memory for received data.\n" );
2025 /* If this ever happened it would be a problem */ 2021 break;
2026 } else {
2027 skb->dev = dev;
2028 skb_reserve( skb, 2 );
2029 t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
2030 } 2022 }
2031 list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2023
2032 list->buffer[8].address = (u32) t; 2024 skb_reserve( skb, NET_IP_ALIGN );
2025 list->buffer[0].address = pci_map_single(priv->pciDev, t,
2026 TLAN_MAX_FRAME_SIZE,
2027 PCI_DMA_FROMDEVICE);
2033 TLan_StoreSKB(list, skb); 2028 TLan_StoreSKB(list, skb);
2034 } 2029 }
2035 list->buffer[1].count = 0; 2030 list->buffer[1].count = 0;
2036 list->buffer[1].address = 0; 2031 list->buffer[1].address = 0;
2037 if ( i < TLAN_NUM_RX_LISTS - 1 ) 2032 list->forward = list_phys + sizeof(TLanList);
2038 list->forward = list_phys + sizeof(TLanList); 2033 }
2039 else 2034
2040 list->forward = 0; 2035 /* in case ran out of memory early, clear bits */
2036 while (i < TLAN_NUM_RX_LISTS) {
2037 TLan_StoreSKB(priv->rxList + i, NULL);
2038 ++i;
2041 } 2039 }
2040 list->forward = 0;
2042 2041
2043} /* TLan_ResetLists */ 2042} /* TLan_ResetLists */
2044 2043
@@ -2055,7 +2054,9 @@ static void TLan_FreeLists( struct net_device *dev )
2055 list = priv->txList + i; 2054 list = priv->txList + i;
2056 skb = TLan_GetSKB(list); 2055 skb = TLan_GetSKB(list);
2057 if ( skb ) { 2056 if ( skb ) {
2058 pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 2057 pci_unmap_single(priv->pciDev,
2058 list->buffer[0].address, skb->len,
2059 PCI_DMA_TODEVICE);
2059 dev_kfree_skb_any( skb ); 2060 dev_kfree_skb_any( skb );
2060 list->buffer[8].address = 0; 2061 list->buffer[8].address = 0;
2061 list->buffer[9].address = 0; 2062 list->buffer[9].address = 0;
@@ -2066,7 +2067,10 @@ static void TLan_FreeLists( struct net_device *dev )
2066 list = priv->rxList + i; 2067 list = priv->rxList + i;
2067 skb = TLan_GetSKB(list); 2068 skb = TLan_GetSKB(list);
2068 if ( skb ) { 2069 if ( skb ) {
2069 pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2070 pci_unmap_single(priv->pciDev,
2071 list->buffer[0].address,
2072 TLAN_MAX_FRAME_SIZE,
2073 PCI_DMA_FROMDEVICE);
2070 dev_kfree_skb_any( skb ); 2074 dev_kfree_skb_any( skb );
2071 list->buffer[8].address = 0; 2075 list->buffer[8].address = 0;
2072 list->buffer[9].address = 0; 2076 list->buffer[9].address = 0;
@@ -2097,7 +2101,8 @@ static void TLan_PrintDio( u16 io_base )
2097 u32 data0, data1; 2101 u32 data0, data1;
2098 int i; 2102 int i;
2099 2103
2100 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base ); 2104 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
2105 io_base );
2101 printk( "TLAN: Off. +0 +4\n" ); 2106 printk( "TLAN: Off. +0 +4\n" );
2102 for ( i = 0; i < 0x4C; i+= 8 ) { 2107 for ( i = 0; i < 0x4C; i+= 8 ) {
2103 data0 = TLan_DioRead32( io_base, i ); 2108 data0 = TLan_DioRead32( io_base, i );
@@ -2131,13 +2136,14 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
2131{ 2136{
2132 int i; 2137 int i;
2133 2138
2134 printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list ); 2139 printk( "TLAN: %s List %d at %p\n", type, num, list );
2135 printk( "TLAN: Forward = 0x%08x\n", list->forward ); 2140 printk( "TLAN: Forward = 0x%08x\n", list->forward );
2136 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); 2141 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
2137 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2142 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
2138 /* for ( i = 0; i < 10; i++ ) { */ 2143 /* for ( i = 0; i < 10; i++ ) { */
2139 for ( i = 0; i < 2; i++ ) { 2144 for ( i = 0; i < 2; i++ ) {
2140 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address ); 2145 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2146 i, list->buffer[i].count, list->buffer[i].address );
2141 } 2147 }
2142 2148
2143} /* TLan_PrintList */ 2149} /* TLan_PrintList */
@@ -2165,7 +2171,6 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
2165 2171
2166static void TLan_ReadAndClearStats( struct net_device *dev, int record ) 2172static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2167{ 2173{
2168 TLanPrivateInfo *priv = netdev_priv(dev);
2169 u32 tx_good, tx_under; 2174 u32 tx_good, tx_under;
2170 u32 rx_good, rx_over; 2175 u32 rx_good, rx_over;
2171 u32 def_tx, crc, code; 2176 u32 def_tx, crc, code;
@@ -2202,18 +2207,18 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2202 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2207 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
2203 2208
2204 if ( record ) { 2209 if ( record ) {
2205 priv->stats.rx_packets += rx_good; 2210 dev->stats.rx_packets += rx_good;
2206 priv->stats.rx_errors += rx_over + crc + code; 2211 dev->stats.rx_errors += rx_over + crc + code;
2207 priv->stats.tx_packets += tx_good; 2212 dev->stats.tx_packets += tx_good;
2208 priv->stats.tx_errors += tx_under + loss; 2213 dev->stats.tx_errors += tx_under + loss;
2209 priv->stats.collisions += multi_col + single_col + excess_col + late_col; 2214 dev->stats.collisions += multi_col + single_col + excess_col + late_col;
2210 2215
2211 priv->stats.rx_over_errors += rx_over; 2216 dev->stats.rx_over_errors += rx_over;
2212 priv->stats.rx_crc_errors += crc; 2217 dev->stats.rx_crc_errors += crc;
2213 priv->stats.rx_frame_errors += code; 2218 dev->stats.rx_frame_errors += code;
2214 2219
2215 priv->stats.tx_aborted_errors += tx_under; 2220 dev->stats.tx_aborted_errors += tx_under;
2216 priv->stats.tx_carrier_errors += loss; 2221 dev->stats.tx_carrier_errors += loss;
2217 } 2222 }
2218 2223
2219} /* TLan_ReadAndClearStats */ 2224} /* TLan_ReadAndClearStats */
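TLan_ReadAndClearStats() above now accumulates into dev->stats, the counter block every net_device already carries, so the private copy in TLanPrivateInfo and the cast in TLan_GetStats() become unnecessary. With that, a get_stats hook shrinks to something like (sketch):

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* counters such as dev->stats.rx_packets are bumped directly in
	 * the interrupt and statistics paths */
	return &dev->stats;
}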
@@ -2354,14 +2359,16 @@ TLan_FinishReset( struct net_device *dev )
2354 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2359 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
2355 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 ); 2360 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
2356 2361
2357 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) { 2362 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
2363 ( priv->aui ) ) {
2358 status = MII_GS_LINK; 2364 status = MII_GS_LINK;
2359 printk( "TLAN: %s: Link forced.\n", dev->name ); 2365 printk( "TLAN: %s: Link forced.\n", dev->name );
2360 } else { 2366 } else {
2361 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2367 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
2362 udelay( 1000 ); 2368 udelay( 1000 );
2363 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2369 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
2364 if ( (status & MII_GS_LINK) && /* We only support link info on Nat.Sem. PHY's */ 2370 if ( (status & MII_GS_LINK) &&
2371 /* We only support link info on Nat.Sem. PHY's */
2365 (tlphy_id1 == NAT_SEM_ID1) && 2372 (tlphy_id1 == NAT_SEM_ID1) &&
2366 (tlphy_id2 == NAT_SEM_ID2) ) { 2373 (tlphy_id2 == NAT_SEM_ID2) ) {
2367 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); 2374 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
@@ -2370,12 +2377,12 @@ TLan_FinishReset( struct net_device *dev )
2370 printk( "TLAN: %s: Link active with ", dev->name ); 2377 printk( "TLAN: %s: Link active with ", dev->name );
2371 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2378 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
2372 printk( "forced 10%sMbps %s-Duplex\n", 2379 printk( "forced 10%sMbps %s-Duplex\n",
2373 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2380 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
2374 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2381 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
2375 } else { 2382 } else {
2376 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2383 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
2377 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2384 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
2378 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2385 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
2379 printk("TLAN: Partner capability: "); 2386 printk("TLAN: Partner capability: ");
2380 for (i = 5; i <= 10; i++) 2387 for (i = 5; i <= 10; i++)
2381 if (partner & (1<<i)) 2388 if (partner & (1<<i))
@@ -2416,7 +2423,8 @@ TLan_FinishReset( struct net_device *dev )
2416 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2423 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
2417 netif_carrier_on(dev); 2424 netif_carrier_on(dev);
2418 } else { 2425 } else {
2419 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name ); 2426 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
2427 dev->name );
2420 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); 2428 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
2421 return; 2429 return;
2422 } 2430 }
@@ -2456,10 +2464,12 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2456 2464
2457 if ( mac != NULL ) { 2465 if ( mac != NULL ) {
2458 for ( i = 0; i < 6; i++ ) 2466 for ( i = 0; i < 6; i++ )
2459 TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] ); 2467 TLan_DioWrite8( dev->base_addr,
2468 TLAN_AREG_0 + areg + i, mac[i] );
2460 } else { 2469 } else {
2461 for ( i = 0; i < 6; i++ ) 2470 for ( i = 0; i < 6; i++ )
2462 TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 ); 2471 TLan_DioWrite8( dev->base_addr,
2472 TLAN_AREG_0 + areg + i, 0 );
2463 } 2473 }
2464 2474
2465} /* TLan_SetMac */ 2475} /* TLan_SetMac */
@@ -2565,9 +2575,13 @@ static void TLan_PhyDetect( struct net_device *dev )
2565 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2575 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
2566 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2576 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
2567 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); 2577 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
2568 if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2578 if ( ( control != 0xFFFF ) ||
2569 TLAN_DBG( TLAN_DEBUG_GNRL, "PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo ); 2579 ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
2570 if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) { 2580 TLAN_DBG( TLAN_DEBUG_GNRL,
2581 "PHY found at %02x %04x %04x %04x\n",
2582 phy, control, hi, lo );
2583 if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
2584 ( phy != TLAN_PHY_MAX_ADDR ) ) {
2571 priv->phy[1] = phy; 2585 priv->phy[1] = phy;
2572 } 2586 }
2573 } 2587 }
@@ -2595,7 +2609,9 @@ static void TLan_PhyPowerDown( struct net_device *dev )
2595 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2609 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2596 TLan_MiiSync( dev->base_addr ); 2610 TLan_MiiSync( dev->base_addr );
2597 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2611 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
2598 if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2612 if ( ( priv->phyNum == 0 ) &&
2613 ( priv->phy[1] != TLAN_PHY_NONE ) &&
2614 ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
2599 TLan_MiiSync( dev->base_addr ); 2615 TLan_MiiSync( dev->base_addr );
2600 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2616 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
2601 } 2617 }
@@ -2768,10 +2784,10 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2768 * more time. Perhaps we should fail after a while. 2784 * more time. Perhaps we should fail after a while.
2769 */ 2785 */
2770 if (!priv->neg_be_verbose++) { 2786 if (!priv->neg_be_verbose++) {
2771 printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n"); 2787 pr_info("TLAN: Giving autonegotiation more time.\n");
2772 printk(KERN_INFO "TLAN: Please check that your adapter has\n"); 2788 pr_info("TLAN: Please check that your adapter has\n");
2773 printk(KERN_INFO "TLAN: been properly connected to a HUB or Switch.\n"); 2789 pr_info("TLAN: been properly connected to a HUB or Switch.\n");
2774 printk(KERN_INFO "TLAN: Trying to establish link in the background...\n"); 2790 pr_info("TLAN: Trying to establish link in the background...\n");
2775 } 2791 }
2776 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2792 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
2777 return; 2793 return;
@@ -2787,7 +2803,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2787 priv->tlanFullDuplex = TRUE; 2803 priv->tlanFullDuplex = TRUE;
2788 } 2804 }
2789 2805
2790 if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) { 2806 if ( ( ! ( mode & 0x0180 ) ) &&
2807 ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
2808 ( priv->phyNum != 0 ) ) {
2791 priv->phyNum = 0; 2809 priv->phyNum = 0;
2792 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2810 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
2793 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2811 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
@@ -2796,12 +2814,14 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2796 } 2814 }
2797 2815
2798 if ( priv->phyNum == 0 ) { 2816 if ( priv->phyNum == 0 ) {
2799 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) { 2817 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
2800 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX ); 2818 ( an_adv & an_lpa & 0x0040 ) ) {
2801 printk( "TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2819 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
2820 MII_GC_AUTOENB | MII_GC_DUPLEX );
2821 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
2802 } else { 2822 } else {
2803 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2823 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
2804 printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2824 pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
2805 } 2825 }
2806 } 2826 }
2807 2827
@@ -3209,7 +3229,8 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
3209 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3229 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
3210 3230
3211 if ( ( ! err ) && stop ) { 3231 if ( ( ! err ) && stop ) {
3212 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3232 /* STOP, raise data while clock is high */
3233 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3213 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3234 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3214 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3235 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3215 } 3236 }
@@ -3272,7 +3293,8 @@ static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
3272 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3293 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
3273 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3294 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3274 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3295 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3275 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3296 /* STOP, raise data while clock is high */
3297 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3276 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3298 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3277 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3299 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3278 } 3300 }