Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/8139cp.c            |  14
-rw-r--r--  drivers/net/8139too.c           |  14
-rw-r--r--  drivers/net/epic100.c           |  56
-rw-r--r--  drivers/net/forcedeth.c         |  40
-rw-r--r--  drivers/net/tulip/de2104x.c     |  58
-rw-r--r--  drivers/net/tulip/de4x5.c       | 716
-rw-r--r--  drivers/net/tulip/de4x5.h       |  14
-rw-r--r--  drivers/net/tulip/dmfe.c        |   2
-rw-r--r--  drivers/net/tulip/eeprom.c      |   8
-rw-r--r--  drivers/net/tulip/interrupt.c   | 126
-rw-r--r--  drivers/net/tulip/media.c       |   2
-rw-r--r--  drivers/net/tulip/tulip.h       |   2
-rw-r--r--  drivers/net/tulip/tulip_core.c  |   6
-rw-r--r--  drivers/net/tulip/uli526x.c     |  80
-rw-r--r--  drivers/net/tulip/winbond-840.c |  26
-rw-r--r--  drivers/net/tulip/xircom_cb.c   | 208
16 files changed, 686 insertions, 686 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 066e22b01a94..46d8c01437e9 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,11 +19,11 @@
 See the file COPYING in this distribution for more information.

 Contributors:

 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
 PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
 LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

 TODO:
 * Test Tx checksumming thoroughly
 * Implement dev->tx_timeout
@@ -461,7 +461,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
 unsigned int mtu = cp->dev->mtu;

 if (mtu > ETH_DATA_LEN)
 /* MTU + ethernet header + FCS + optional VLAN tag */
 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -510,7 +510,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
 static inline unsigned int cp_rx_csum_ok (u32 status)
 {
 unsigned int protocol = (status >> 16) & 0x3;

 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
 return 1;
 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -1061,7 +1061,7 @@ static void cp_init_hw (struct cp_private *cp)
 cpw8(Config3, PARMEnable);
 cp->wol_enabled = 0;

 cpw8(Config5, cpr8(Config5) & PMEStatus);

 cpw32_f(HiTxRingAddr, 0);
 cpw32_f(HiTxRingAddr + 4, 0);
@@ -1351,7 +1351,7 @@ static void netdev_get_wol (struct cp_private *cp,
 WAKE_MCAST | WAKE_UCAST;
 /* We don't need to go on if WOL is disabled */
 if (!cp->wol_enabled) return;

 options = cpr8 (Config3);
 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
@@ -1919,7 +1919,7 @@ static int cp_resume (struct pci_dev *pdev)
 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

 spin_unlock_irqrestore (&cp->lock, flags);

 return 0;
 }
 #endif /* CONFIG_PM */
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index feae7832fc84..abd6261465f1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -165,7 +165,7 @@ static int multicast_filter_limit = 32;
 static int debug = -1;

 /*
 * Receive ring size
 * Warning: 64K ring has hardware issues and may lock up.
 */
 #if defined(CONFIG_SH_DREAMCAST)
@@ -257,7 +257,7 @@ static struct pci_device_id rtl8139_pci_tbl[] = {
 {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },

 #ifdef CONFIG_SH_SECUREEDGE5410
 /* Bogus 8139 silicon reports 8129 without external PROM :-( */
@@ -1824,7 +1824,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
 int tmp_work;
 #endif

 if (netif_msg_rx_err (tp))
 printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
 dev->name, rx_status);
 tp->stats.rx_errors++;
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 RTL_R16 (RxBufAddr),
 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));

 while (netif_running(dev) && received < budget
 && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
 u32 ring_offset = cur_rx % RX_BUF_LEN;
 u32 rx_status;
@@ -2031,7 +2031,7 @@ no_early_rx:

 netif_receive_skb (skb);
 } else {
 if (net_ratelimit())
 printk (KERN_WARNING
 "%s: Memory squeeze, dropping packet.\n",
 dev->name);
@@ -2158,13 +2158,13 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
 status = RTL_R16 (IntrStatus);

 /* shared irq? */
 if (unlikely((status & rtl8139_intr_mask) == 0))
 goto out;

 handled = 1;

 /* h/w no longer present (hotplug?) or major error, bail */
 if (unlikely(status == 0xFFFF))
 goto out;

 /* close possible race's with dev_close */
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 2f7b86837fe8..8d680ce600d7 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -21,15 +21,15 @@
 http://www.scyld.com/network/epic100.html

 ---------------------------------------------------------------------

 Linux kernel-specific changes:

 LK1.1.2 (jgarzik):
 * Merge becker version 1.09 (4/08/2000)

 LK1.1.3:
 * Major bugfix to 1.09 driver (Francis Romieu)

 LK1.1.4 (jgarzik):
 * Merge becker test version 1.09 (5/29/2000)

@@ -66,7 +66,7 @@
 LK1.1.14 (Kryzsztof Halasa):
 * fix spurious bad initializations
 * pound phy a la SMSC's app note on the subject

 AC1.1.14ac
 * fix power up/down for ethtool that broke in 1.11

@@ -244,7 +244,7 @@ static struct pci_device_id epic_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);


 #ifndef USE_IO_OPS
 #undef inb
 #undef inw
@@ -370,7 +370,7 @@ static int epic_close(struct net_device *dev);
 static struct net_device_stats *epic_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);



 static int __devinit epic_init_one (struct pci_dev *pdev,
 const struct pci_device_id *ent)
@@ -392,9 +392,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
 version, version2, version3);
 #endif

 card_idx++;

 ret = pci_enable_device(pdev);
 if (ret)
 goto out;
@@ -405,7 +405,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 ret = -ENODEV;
 goto err_out_disable;
 }

 pci_set_master(pdev);

 ret = pci_request_regions(pdev, DRV_NAME);
@@ -498,7 +498,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 ep->pci_dev = pdev;
 ep->chip_id = chip_idx;
 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
 ep->irq_mask =
 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
 | CntFull | TxUnderrun | EpicNapiEvent;

@@ -587,7 +587,7 @@ err_out_disable:
 pci_disable_device(pdev);
 goto out;
 }

 /* Serial EEPROM section. */

 /* EEPROM_Ctrl bits. */
@@ -709,7 +709,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)

 outw(value, ioaddr + MIIData);
 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
 for (i = 10000; i > 0; i--) {
 barrier();
 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
 break;
@@ -717,7 +717,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 return;
 }


 static int epic_open(struct net_device *dev)
 {
 struct epic_private *ep = dev->priv;
@@ -760,7 +760,7 @@ static int epic_open(struct net_device *dev)
 #endif

 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

 for (i = 0; i < 3; i++)
 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

@@ -803,7 +803,7 @@ static int epic_open(struct net_device *dev)

 /* Enable interrupts by setting the interrupt mask. */
 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
 | CntFull | TxUnderrun
 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

 if (debug > 1)
@@ -831,7 +831,7 @@ static void epic_pause(struct net_device *dev)
 struct epic_private *ep = dev->priv;

 netif_stop_queue (dev);

 /* Disable interrupts by clearing the interrupt mask. */
 outl(0x00000000, ioaddr + INTMASK);
 /* Stop the chip's Tx and Rx DMA processes. */
@@ -987,7 +987,7 @@ static void epic_init_ring(struct net_device *dev)
 for (i = 0; i < RX_RING_SIZE; i++) {
 ep->rx_ring[i].rxstatus = 0;
 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
 ep->rx_ring[i].next = ep->rx_ring_dma +
 (i+1)*sizeof(struct epic_rx_desc);
 ep->rx_skbuff[i] = NULL;
 }
@@ -1002,7 +1002,7 @@ static void epic_init_ring(struct net_device *dev)
 break;
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* 16 byte align the IP header. */
 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
 }
@@ -1013,7 +1013,7 @@ static void epic_init_ring(struct net_device *dev)
 for (i = 0; i < TX_RING_SIZE; i++) {
 ep->tx_skbuff[i] = NULL;
 ep->tx_ring[i].txstatus = 0x0000;
 ep->tx_ring[i].next = ep->tx_ring_dma +
 (i+1)*sizeof(struct epic_tx_desc);
 }
 ep->tx_ring[i-1].next = ep->tx_ring_dma;
@@ -1026,7 +1026,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 int entry, free_count;
 u32 ctrl_word;
 unsigned long flags;

 if (skb->len < ETH_ZLEN) {
 skb = skb_padto(skb, ETH_ZLEN);
 if (skb == NULL)
@@ -1042,7 +1042,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 entry = ep->cur_tx % TX_RING_SIZE;

 ep->tx_skbuff[entry] = skb;
 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
 skb->len, PCI_DMA_TODEVICE);
 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
@@ -1126,7 +1126,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)

 /* Free the original skb. */
 skb = ep->tx_skbuff[entry];
 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
 skb->len, PCI_DMA_TODEVICE);
 dev_kfree_skb_irq(skb);
 ep->tx_skbuff[entry] = NULL;
@@ -1281,8 +1281,8 @@ static int epic_rx(struct net_device *dev, int budget)
 ep->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
 } else {
 pci_unmap_single(ep->pci_dev,
 ep->rx_ring[entry].bufaddr,
 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
 ep->rx_skbuff[entry] = NULL;
@@ -1307,7 +1307,7 @@ static int epic_rx(struct net_device *dev, int budget)
 break;
 skb->dev = dev; /* Mark as being used by this device. */
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 work_done++;
 }
@@ -1403,7 +1403,7 @@ static int epic_close(struct net_device *dev)
 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
 ep->rx_ring[i].buflength = 0;
 if (skb) {
 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 dev_kfree_skb(skb);
 }
@@ -1414,7 +1414,7 @@ static int epic_close(struct net_device *dev)
 ep->tx_skbuff[i] = NULL;
 if (!skb)
 continue;
 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
 skb->len, PCI_DMA_TODEVICE);
 dev_kfree_skb(skb);
 }
@@ -1607,7 +1607,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
 {
 struct net_device *dev = pci_get_drvdata(pdev);
 struct epic_private *ep = dev->priv;

 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 unregister_netdev(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cee25fe7e19b..66ea5fc5c2e2 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -458,7 +458,7 @@ typedef union _ring_type {

 #define RX_RING 128
 #define TX_RING 256
 /*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
@@ -480,7 +480,7 @@ typedef union _ring_type {
 #define POLL_WAIT (1+HZ/100)
 #define LINK_TIMEOUT (3*HZ)

 /*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
@@ -619,7 +619,7 @@ static int max_interrupt_work = 5;

 /*
 * Optimization can be either throuput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
@@ -1119,7 +1119,7 @@ static void nv_do_rx_refill(unsigned long data)
 }
 }

 static void nv_init_rx(struct net_device *dev)
 {
 struct fe_priv *np = netdev_priv(dev);
 int i;
@@ -1183,7 +1183,7 @@ static void nv_drain_tx(struct net_device *dev)
 {
 struct fe_priv *np = netdev_priv(dev);
 unsigned int i;

 for (i = 0; i < TX_RING; i++) {
 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 np->tx_ring.orig[i].FlagLen = 0;
@@ -1329,7 +1329,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 } else {
 np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 }

 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
 dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1404,7 +1404,7 @@ static void nv_tx_done(struct net_device *dev)
 } else {
 np->stats.tx_packets++;
 np->stats.tx_bytes += skb->len;
 }
 }
 }
 nv_release_txskb(dev, i);
@@ -1450,7 +1450,7 @@ static void nv_tx_timeout(struct net_device *dev)
 for (i=0;i<TX_RING;i+= 4) {
 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 i,
 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1461,7 +1461,7 @@ static void nv_tx_timeout(struct net_device *dev)
 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
 } else {
 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 i,
 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -2067,7 +2067,7 @@ set_speed:
 if (lpa_pause == LPA_PAUSE_ASYM)
 {
 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 }
 break;
 }
 }
@@ -2086,7 +2086,7 @@ set_speed:
 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
 } else {
 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
 writel(regmisc, base + NvRegMisc1);
 }
 }

@@ -2150,7 +2150,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 spin_lock(&np->lock);
 nv_tx_done(dev);
 spin_unlock(&np->lock);

 nv_rx_process(dev);
 if (nv_alloc_rx(dev)) {
 spin_lock(&np->lock);
@@ -2158,7 +2158,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 spin_unlock(&np->lock);
 }

 if (events & NVREG_IRQ_LINK) {
 spin_lock(&np->lock);
 nv_link_irq(dev);
@@ -2223,7 +2223,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 spin_lock_irq(&np->lock);
 nv_tx_done(dev);
 spin_unlock_irq(&np->lock);

 if (events & (NVREG_IRQ_TX_ERR)) {
 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 dev->name, events);
@@ -2266,7 +2266,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
 if (!(events & np->irqmask))
 break;

 nv_rx_process(dev);
 if (nv_alloc_rx(dev)) {
 spin_lock_irq(&np->lock);
@@ -2274,7 +2274,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 spin_unlock_irq(&np->lock);
 }

 if (i > max_interrupt_work) {
 spin_lock_irq(&np->lock);
 /* disable interrupts on the nic */
@@ -2313,7 +2313,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 if (!(events & np->irqmask))
 break;

 if (events & NVREG_IRQ_LINK) {
 spin_lock_irq(&np->lock);
 nv_link_irq(dev);
@@ -2386,7 +2386,7 @@ static void nv_do_nic_poll(unsigned long data)
 np->nic_poll_irq = 0;

 /* FIXME: Do we need synchronize_irq(dev->irq) here? */

 writel(mask, base + NvRegIrqMask);
 pci_push(base);

@@ -3165,7 +3165,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
 }


 err = -ENOMEM;
 np->base = ioremap(addr, np->register_size);
@@ -3313,7 +3313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 pci_name(pci_dev));
 goto out_freering;
 }

 /* reset it */
 phy_init(dev);

diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e3dd144d326b..5f743b972949 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -227,12 +227,12 @@ enum {
 SROMC0InfoLeaf = 27,
 MediaBlockMask = 0x3f,
 MediaCustomCSRs = (1 << 6),

 /* PCIPM bits */
 PM_Sleep = (1 << 31),
 PM_Snooze = (1 << 30),
 PM_Mask = PM_Sleep | PM_Snooze,

 /* SIAStatus bits */
 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
 NWayRestart = (1 << 12),
@@ -858,7 +858,7 @@ static void de_stop_rxtx (struct de_private *de)
 return;
 cpu_relax();
 }

 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
 }

@@ -931,7 +931,7 @@ static void de_set_media (struct de_private *de)
 macmode |= FullDuplex;
 else
 macmode &= ~FullDuplex;

 if (netif_msg_link(de)) {
 printk(KERN_INFO "%s: set link %s\n"
 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
@@ -966,9 +966,9 @@ static void de21040_media_timer (unsigned long data)
 u32 status = dr32(SIAStatus);
 unsigned int carrier;
 unsigned long flags;

 carrier = (status & NetCxnErr) ? 0 : 1;

 if (carrier) {
 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
 goto no_link_yet;
@@ -985,7 +985,7 @@ static void de21040_media_timer (unsigned long data)
 return;
 }

 de_link_down(de);

 if (de->media_lock)
 return;
@@ -1039,7 +1039,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
 return 0;
 break;
 }

 return 1;
 }

@@ -1050,9 +1050,9 @@ static void de21041_media_timer (unsigned long data)
 u32 status = dr32(SIAStatus);
 unsigned int carrier;
 unsigned long flags;

 carrier = (status & NetCxnErr) ? 0 : 1;

 if (carrier) {
 if ((de->media_type == DE_MEDIA_TP_AUTO ||
 de->media_type == DE_MEDIA_TP ||
@@ -1072,7 +1072,7 @@ static void de21041_media_timer (unsigned long data)
 return;
 }

 de_link_down(de);

 /* if media type locked, don't switch media */
 if (de->media_lock)
@@ -1124,7 +1124,7 @@ static void de21041_media_timer (unsigned long data)
 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
 de_next_media(de, next_states, ARRAY_SIZE(next_states));
 }

 set_media:
 spin_lock_irqsave(&de->lock, flags);
 de_stop_rxtx(de);
@@ -1148,7 +1148,7 @@ static void de_media_interrupt (struct de_private *de, u32 status)
 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
 return;
 }

 BUG_ON(!(status & LinkFail));

 if (netif_carrier_ok(de->dev)) {
@@ -1227,7 +1227,7 @@ static int de_init_hw (struct de_private *de)
 int rc;

 de_adapter_wake(de);

 macmode = dr32(MacMode) & ~MacModeClear;

 rc = de_reset_mac(de);
@@ -1413,7 +1413,7 @@ static int de_close (struct net_device *dev)
 netif_stop_queue(dev);
 netif_carrier_off(dev);
 spin_unlock_irqrestore(&de->lock, flags);

 free_irq(dev->irq, dev);

 de_free_rings(de);
@@ -1441,7 +1441,7 @@ static void de_tx_timeout (struct net_device *dev)

 spin_unlock_irq(&de->lock);
 enable_irq(dev->irq);

 /* Update the error counts. */
 __de_get_stats(de);

@@ -1451,7 +1451,7 @@ static void de_tx_timeout (struct net_device *dev)
 de_init_rings(de);

 de_init_hw(de);

 netif_wake_queue(dev);
 }

@@ -1459,7 +1459,7 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
 {
 int i;
 u32 *rbuf = (u32 *)buf;

 /* read all CSRs */
 for (i = 0; i < DE_NUM_REGS; i++)
 rbuf[i] = dr32(i * 8);
@@ -1474,7 +1474,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 ecmd->transceiver = XCVR_INTERNAL;
 ecmd->phy_address = 0;
 ecmd->advertising = de->media_advertise;

 switch (de->media_type) {
 case DE_MEDIA_AUI:
 ecmd->port = PORT_AUI;
@@ -1489,7 +1489,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 ecmd->speed = SPEED_10;
 break;
 }

 if (dr32(MacMode) & FullDuplex)
 ecmd->duplex = DUPLEX_FULL;
 else
@@ -1529,7 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 if (ecmd->autoneg == AUTONEG_ENABLE &&
 (!(ecmd->advertising & ADVERTISED_Autoneg)))
 return -EINVAL;

 switch (ecmd->port) {
 case PORT_AUI:
 new_media = DE_MEDIA_AUI;
@@ -1554,22 +1554,22 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 return -EINVAL;
 break;
 }

 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

 if ((new_media == de->media_type) &&
 (media_lock == de->media_lock) &&
 (ecmd->advertising == de->media_advertise))
 return 0; /* nothing to change */

 de_link_down(de);
 de_stop_rxtx(de);

 de->media_type = new_media;
 de->media_lock = media_lock;
 de->media_advertise = ecmd->advertising;
 de_set_media(de);

 return 0;
 }

@@ -1817,7 +1817,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
 default: de->media_type = DE_MEDIA_TP_AUTO; break;
 }

 if (netif_msg_probe(de))
 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
 de->board_idx, ofs,
@@ -1886,7 +1886,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
 de->media[idx].csr13,
 de->media[idx].csr14,
 de->media[idx].csr15);

 } else if (netif_msg_probe(de))
 printk("\n");

@@ -2118,7 +2118,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)

 spin_unlock_irq(&de->lock);
 enable_irq(dev->irq);

 /* Update the error counts. */
 __de_get_stats(de);

diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index f56094102042..da8bd0d62a3f 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -41,11 +41,11 @@
 Digital Semiconductor SROM Specification. The driver currently
 recognises the following chips:

 DC21040 (no SROM)
 DC21041[A]
 DC21140[A]
 DC21142
 DC21143

 So far the driver is known to work with the following cards:

@@ -55,7 +55,7 @@
 SMC8432
 SMC9332 (w/new SROM)
 ZNYX31[45]
 ZNYX346 10/100 4 port (can act as a 10/100 bridge!)

 The driver has been tested on a relatively busy network using the DE425,
 DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
@@ -106,7 +106,7 @@
 loading by:

 insmod de4x5 io=0xghh where g = bus number
 hh = device number

 NB: autoprobing for modules is now supported by default. You may just
 use:
@@ -120,11 +120,11 @@
 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
 kernel with the de4x5 configuration turned off and reboot.
 5) insmod de4x5 [io=0xghh]
 6) run the net startup bits for your new eth?? interface(s) manually
 (usually /etc/rc.inet[12] at boot time).
 7) enjoy!

 To unload a module, turn off the associated interface(s)
 'ifconfig eth?? down' then 'rmmod de4x5'.

 Automedia detection is included so that in principal you can disconnect
@@ -135,7 +135,7 @@
 By default, the driver will now autodetect any DECchip based card.
 Should you have a need to restrict the driver to DIGITAL only cards, you
 can compile with a DEC_ONLY define, or if loading as a module, use the
 'dec_only=1' parameter.

 I've changed the timing routines to use the kernel timer and scheduling
 functions so that the hangs and other assorted problems that occurred
@@ -204,7 +204,7 @@
 following parameters are allowed:

 fdx for full duplex
 autosense to set the media/speed; with the following
 sub-parameters:
 TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO

@@ -235,14 +235,14 @@
 this automatically or include #define DE4X5_FORCE_EISA on or before
 line 1040 in the driver.

 TO DO:
 ------

 Revision History
 ----------------

 Version Date Description

 0.1 17-Nov-94 Initial writing. ALPHA code release.
 0.2 13-Jan-95 Added PCI support for DE435's.
 0.21 19-Jan-95 Added auto media detection.
@@ -251,7 +251,7 @@
 Add request/release_region code.
 Add loadable modules support for PCI.
 Clean up loadable modules support.
 0.23 28-Feb-95 Added DC21041 and DC21140 support.
 Fix missed frame counter value and initialisation.
 Fixed EISA probe.
 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
@@ -280,7 +280,7 @@
 Add kernel timer code (h/w is too flaky).
 Add MII based PHY autosense.
 Add new multicasting code.
 Add new autosense algorithms for media/mode
 selection using kernel scheduling/timing.
 Re-formatted.
 Made changes suggested by <jeff@router.patch.net>:
@@ -307,10 +307,10 @@
 Add Accton to the list of broken cards.
 Fix TX under-run bug for non DC21140 chips.
 Fix boot command probe bug in alloc_device() as
 reported by <koen.gadeyne@barco.com> and
 <orava@nether.tky.hut.fi>.
 Add cache locks to prevent a race condition as
 reported by <csd@microplex.com> and
 <baba@beckman.uiuc.edu>.
 Upgraded alloc_device() code.
 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
@@ -322,7 +322,7 @@
 with a loopback packet.
 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
 by <bhat@mundook.cs.mu.OZ.AU>
 0.45 8-Dec-96 Include endian functions for PPC use, from work
 by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
 suggestion from <mjacob@feral.com>.
@@ -346,14 +346,14 @@
 <paubert@iram.es>.
 0.52 26-Apr-97 Some changes may not credit the right people -
 a disk crash meant I lost some mail.
 Change RX interrupt routine to drop rather than
 defer packets to avoid hang reported by
 <g.thomas@opengroup.org>.
 Fix srom_exec() to return for COMPACT and type 1
 infoblocks.
 Added DC21142 and DC21143 functions.
 Added byte counters from <phil@tazenda.demon.co.uk>
 Added SA_INTERRUPT temporary fix from
 <mjacob@feral.com>.
 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
 module load: bug reported by
@@ -363,10 +363,10 @@
 Make above search independent of BIOS device scan
 direction.
 Completed DC2114[23] autosense functions.
 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
 <robin@intercore.com
 Fix type1_infoblock() bug introduced in 0.53, from
 problem reports by
 <parmee@postecss.ncrfran.france.ncr.com> and
 <jo@ice.dillingen.baynet.de>.
 Added argument list to set up each board from either
@@ -374,7 +374,7 @@
 Added generic MII PHY functionality to deal with
 newer PHY chips.
 Fix the mess in 2.1.67.
 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
 <redhat@cococo.net>.
 Fix bug in pci_probe() for 64 bit systems reported
 by <belliott@accessone.com>.
@@ -398,7 +398,7 @@
 version. I hope nothing is broken...
 Add TX done interrupt modification from suggestion
 by <Austin.Donnelly@cl.cam.ac.uk>.
 Fix is_anc_capable() bug reported by
 <Austin.Donnelly@cl.cam.ac.uk>.
 Fix type[13]_infoblock() bug: during MII search, PHY
 lp->rst not run because lp->ibn not initialised -
@@ -413,7 +413,7 @@
 Add an_exception() for old ZYNX346 and fix compile
 warning on PPC & SPARC, from <ecd@skynet.be>.
 Fix lastPCI to correctly work with compiled in
 kernels and modules from bug report by
 <Zlatko.Calusic@CARNet.hr> et al.
 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
 when media is unconnected.
@@ -425,7 +425,7 @@
 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
 a 21143 by <mmporter@home.com>.
 Change PCI/EISA bus probing order.
 0.545 28-Nov-99 Further Moto SROM bug fix from
 <mporter@eng.mcd.mot.com>
 Remove double checking for DEBUG_RX in de4x5_dbg_rx()
 from report by <geert@linux-m68k.org>
@@ -434,8 +434,8 @@
 variable 'pb', on a non de4x5 PCI device, in this
 case a PCI bridge (DEC chip 21152). The value of
 'pb' is now only initialized if a de4x5 chip is
 present.
 <france@handhelds.org>
 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
 generic DMA APIs. Fixed DE425 support on Alpha.
@@ -584,7 +584,7 @@ static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);

 /*
 ** Allow per adapter set up. For modules this is simply a command line
 ** parameter, e.g.:
 ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
 **
 ** For a compiled in driver, place e.g.
@@ -655,7 +655,7 @@ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
 ** Memory Alignment. Each descriptor is 4 longwords long. To force a
 ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
 ** DESC_ALIGN. ALIGN aligns the start address of the private memory area
 ** and hence the RX descriptor ring's first entry.
 */
 #define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
 #define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
@@ -1081,8 +1081,8 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
 mdelay(2); /* Wait for 2ms */\
 }


 static int __devinit
 de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 {
 char name[DE4X5_NAME_LENGTH + 1];
@@ -1102,12 +1102,12 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 mdelay(10);

 RESET_DE4X5;

 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
 return -ENXIO; /* Hardware could not reset */
 }

 /*
 ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
 */
 lp->useSROM = FALSE;
@@ -1116,21 +1116,21 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 } else {
 EISA_signature(name, gendev);
 }

 if (*name == '\0') { /* Not found a board signature */
 return -ENXIO;
 }

 dev->base_addr = iobase;
 printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase);

 printk(", h/w address ");
 status = get_hw_addr(dev);
 for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
 printk("%2.2x:", dev->dev_addr[i]);
 }
 printk("%2.2x,\n", dev->dev_addr[i]);

 if (status != 0) {
 printk(" which has an Ethernet PROM CRC error.\n");
 return -ENXIO;
@@ -1171,10 +1171,10 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 }

 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;

 /*
 ** Set up the RX descriptor ring (Intels)
 ** Allocate contiguous receive buffers, long word aligned (Alphas)
 */
 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
 for (i=0; i<NUM_RX_DESC; i++) {
@@ -1210,7 +1210,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)

 lp->rxRingSize = NUM_RX_DESC;
 lp->txRingSize = NUM_TX_DESC;

 /* Write the end of list marker to the descriptor lists */
 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
@@ -1219,7 +1219,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 outl(lp->dma_rings, DE4X5_RRBA);
 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
 DE4X5_TRBA);

 /* Initialise the IRQ mask and Enable/Disable */
 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
 lp->irq_en = IMR_NIM | IMR_AIM;
@@ -1252,7 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
 mii_get_phy(dev);
 }

 #ifndef __sparc_v9__
 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
 #else
@@ -1260,11 +1260,11 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 #endif
 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
 }

 if (de4x5_debug & DEBUG_VERSION) {
 printk(version);
 }

 /* The DE4X5-specific entries in the device structure. */
 SET_MODULE_OWNER(dev);
 SET_NETDEV_DEV(dev, gendev);
@@ -1274,23 +1274,23 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 dev->get_stats = &de4x5_get_stats;
1275 dev->set_multicast_list = &set_multicast_list; 1275 dev->set_multicast_list = &set_multicast_list;
1276 dev->do_ioctl = &de4x5_ioctl; 1276 dev->do_ioctl = &de4x5_ioctl;
1277 1277
1278 dev->mem_start = 0; 1278 dev->mem_start = 0;
1279 1279
1280 /* Fill in the generic fields of the device structure. */ 1280 /* Fill in the generic fields of the device structure. */
1281 if ((status = register_netdev (dev))) { 1281 if ((status = register_netdev (dev))) {
1282 dma_free_coherent (gendev, lp->dma_size, 1282 dma_free_coherent (gendev, lp->dma_size,
1283 lp->rx_ring, lp->dma_rings); 1283 lp->rx_ring, lp->dma_rings);
1284 return status; 1284 return status;
1285 } 1285 }
1286 1286
1287 /* Let the adapter sleep to save power */ 1287 /* Let the adapter sleep to save power */
1288 yawn(dev, SLEEP); 1288 yawn(dev, SLEEP);
1289 1289
1290 return status; 1290 return status;
1291} 1291}
1292 1292
1293 1293
1294static int 1294static int
1295de4x5_open(struct net_device *dev) 1295de4x5_open(struct net_device *dev)
1296{ 1296{
@@ -1312,15 +1312,15 @@ de4x5_open(struct net_device *dev)
1312 */ 1312 */
1313 yawn(dev, WAKEUP); 1313 yawn(dev, WAKEUP);
1314 1314
1315 /* 1315 /*
1316 ** Re-initialize the DE4X5... 1316 ** Re-initialize the DE4X5...
1317 */ 1317 */
1318 status = de4x5_init(dev); 1318 status = de4x5_init(dev);
1319 spin_lock_init(&lp->lock); 1319 spin_lock_init(&lp->lock);
1320 lp->state = OPEN; 1320 lp->state = OPEN;
1321 de4x5_dbg_open(dev); 1321 de4x5_dbg_open(dev);
1322 1322
1323 if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ, 1323 if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
1324 lp->adapter_name, dev)) { 1324 lp->adapter_name, dev)) {
1325 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); 1325 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1326 if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ, 1326 if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
@@ -1340,11 +1340,11 @@ de4x5_open(struct net_device *dev)
1340 1340
1341 lp->interrupt = UNMASK_INTERRUPTS; 1341 lp->interrupt = UNMASK_INTERRUPTS;
1342 dev->trans_start = jiffies; 1342 dev->trans_start = jiffies;
1343 1343
1344 START_DE4X5; 1344 START_DE4X5;
1345 1345
1346 de4x5_setup_intr(dev); 1346 de4x5_setup_intr(dev);
1347 1347
1348 if (de4x5_debug & DEBUG_OPEN) { 1348 if (de4x5_debug & DEBUG_OPEN) {
1349 printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); 1349 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1350 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); 1350 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
@@ -1355,7 +1355,7 @@ de4x5_open(struct net_device *dev)
1355 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); 1355 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1356 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); 1356 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1357 } 1357 }
1358 1358
1359 return status; 1359 return status;
1360} 1360}
1361 1361
@@ -1369,15 +1369,15 @@ de4x5_open(struct net_device *dev)
1369*/ 1369*/
1370static int 1370static int
1371de4x5_init(struct net_device *dev) 1371de4x5_init(struct net_device *dev)
1372{ 1372{
1373 /* Lock out other processes whilst setting up the hardware */ 1373 /* Lock out other processes whilst setting up the hardware */
1374 netif_stop_queue(dev); 1374 netif_stop_queue(dev);
1375 1375
1376 de4x5_sw_reset(dev); 1376 de4x5_sw_reset(dev);
1377 1377
1378 /* Autoconfigure the connected port */ 1378 /* Autoconfigure the connected port */
1379 autoconf_media(dev); 1379 autoconf_media(dev);
1380 1380
1381 return 0; 1381 return 0;
1382} 1382}
1383 1383
@@ -1388,7 +1388,7 @@ de4x5_sw_reset(struct net_device *dev)
1388 u_long iobase = dev->base_addr; 1388 u_long iobase = dev->base_addr;
1389 int i, j, status = 0; 1389 int i, j, status = 0;
1390 s32 bmr, omr; 1390 s32 bmr, omr;
1391 1391
1392 /* Select the MII or SRL port now and RESET the MAC */ 1392 /* Select the MII or SRL port now and RESET the MAC */
1393 if (!lp->useSROM) { 1393 if (!lp->useSROM) {
1394 if (lp->phy[lp->active].id != 0) { 1394 if (lp->phy[lp->active].id != 0) {
@@ -1399,7 +1399,7 @@ de4x5_sw_reset(struct net_device *dev)
1399 de4x5_switch_mac_port(dev); 1399 de4x5_switch_mac_port(dev);
1400 } 1400 }
1401 1401
1402 /* 1402 /*
1403 ** Set the programmable burst length to 8 longwords for all the DC21140 1403 ** Set the programmable burst length to 8 longwords for all the DC21140
1404 ** Fasternet chips and 4 longwords for all others: DMA errors can result 1404 ** Fasternet chips and 4 longwords for all others: DMA errors can result
1405 ** without these values. Cache align to 16 longwords. 1405 ** without these values. Cache align to 16 longwords.
@@ -1416,23 +1416,23 @@ de4x5_sw_reset(struct net_device *dev)
1416 outl(lp->dma_rings, DE4X5_RRBA); 1416 outl(lp->dma_rings, DE4X5_RRBA);
1417 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), 1417 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1418 DE4X5_TRBA); 1418 DE4X5_TRBA);
1419 1419
1420 lp->rx_new = lp->rx_old = 0; 1420 lp->rx_new = lp->rx_old = 0;
1421 lp->tx_new = lp->tx_old = 0; 1421 lp->tx_new = lp->tx_old = 0;
1422 1422
1423 for (i = 0; i < lp->rxRingSize; i++) { 1423 for (i = 0; i < lp->rxRingSize; i++) {
1424 lp->rx_ring[i].status = cpu_to_le32(R_OWN); 1424 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1425 } 1425 }
1426 1426
1427 for (i = 0; i < lp->txRingSize; i++) { 1427 for (i = 0; i < lp->txRingSize; i++) {
1428 lp->tx_ring[i].status = cpu_to_le32(0); 1428 lp->tx_ring[i].status = cpu_to_le32(0);
1429 } 1429 }
1430 1430
1431 barrier(); 1431 barrier();
1432 1432
1433 /* Build the setup frame depending on filtering mode */ 1433 /* Build the setup frame depending on filtering mode */
1434 SetMulticastFilter(dev); 1434 SetMulticastFilter(dev);
1435 1435
1436 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); 1436 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1437 outl(omr|OMR_ST, DE4X5_OMR); 1437 outl(omr|OMR_ST, DE4X5_OMR);
1438 1438
@@ -1445,18 +1445,18 @@ de4x5_sw_reset(struct net_device *dev)
1445 outl(omr, DE4X5_OMR); /* Stop everything! */ 1445 outl(omr, DE4X5_OMR); /* Stop everything! */
1446 1446
1447 if (j == 0) { 1447 if (j == 0) {
1448 printk("%s: Setup frame timed out, status %08x\n", dev->name, 1448 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1449 inl(DE4X5_STS)); 1449 inl(DE4X5_STS));
1450 status = -EIO; 1450 status = -EIO;
1451 } 1451 }
1452 1452
1453 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; 1453 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1454 lp->tx_old = lp->tx_new; 1454 lp->tx_old = lp->tx_new;
1455 1455
1456 return status; 1456 return status;
1457} 1457}
1458 1458
1459/* 1459/*
1460** Writes a socket buffer address to the next available transmit descriptor. 1460** Writes a socket buffer address to the next available transmit descriptor.
1461*/ 1461*/
1462static int 1462static int
@@ -1469,9 +1469,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1469 1469
1470 netif_stop_queue(dev); 1470 netif_stop_queue(dev);
1471 if (lp->tx_enable == NO) { /* Cannot send for now */ 1471 if (lp->tx_enable == NO) { /* Cannot send for now */
1472 return -1; 1472 return -1;
1473 } 1473 }
1474 1474
1475 /* 1475 /*
1476 ** Clean out the TX ring asynchronously to interrupts - sometimes the 1476 ** Clean out the TX ring asynchronously to interrupts - sometimes the
1477 ** interrupts are lost by delayed descriptor status updates relative to 1477 ** interrupts are lost by delayed descriptor status updates relative to
@@ -1482,7 +1482,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1482 spin_unlock_irqrestore(&lp->lock, flags); 1482 spin_unlock_irqrestore(&lp->lock, flags);
1483 1483
1484 /* Test if cache is already locked - requeue skb if so */ 1484 /* Test if cache is already locked - requeue skb if so */
1485 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) 1485 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1486 return -1; 1486 return -1;
1487 1487
1488 /* Transmit descriptor ring full or stale skb */ 1488 /* Transmit descriptor ring full or stale skb */
@@ -1509,10 +1509,10 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1509 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); 1509 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1510 lp->stats.tx_bytes += skb->len; 1510 lp->stats.tx_bytes += skb->len;
1511 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ 1511 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
1512 1512
1513 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; 1513 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1514 dev->trans_start = jiffies; 1514 dev->trans_start = jiffies;
1515 1515
1516 if (TX_BUFFS_AVAIL) { 1516 if (TX_BUFFS_AVAIL) {
1517 netif_start_queue(dev); /* Another pkt may be queued */ 1517 netif_start_queue(dev); /* Another pkt may be queued */
1518 } 1518 }
@@ -1521,15 +1521,15 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1521 } 1521 }
1522 if (skb) de4x5_putb_cache(dev, skb); 1522 if (skb) de4x5_putb_cache(dev, skb);
1523 } 1523 }
1524 1524
1525 lp->cache.lock = 0; 1525 lp->cache.lock = 0;
1526 1526
1527 return status; 1527 return status;
1528} 1528}
1529 1529
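de4x5_queue_pkt() above stops the queue up front, scrubs the TX ring before loading the new packet, and only restarts the queue while TX_BUFFS_AVAIL says descriptors remain; tx_new always advances modulo txRingSize. A standalone sketch of just that ring-index bookkeeping (userspace C; struct tx_ring, slots_free and the "keep one gap" convention are illustrative and omit the real descriptor/DMA handling):

    #include <stdio.h>

    #define RING_SIZE 8                      /* stand-in for txRingSize */

    struct tx_ring {
        unsigned tx_new;                     /* next slot to fill */
        unsigned tx_old;                     /* oldest slot not yet reclaimed */
    };

    /* Free slots, keeping one gap so tx_new == tx_old always means "empty". */
    static unsigned slots_free(const struct tx_ring *r)
    {
        return (r->tx_old + RING_SIZE - r->tx_new - 1) % RING_SIZE;
    }

    static int enqueue(struct tx_ring *r, int pkt)
    {
        if (!slots_free(r))
            return -1;                       /* caller keeps the queue stopped */
        printf("pkt %d -> slot %u\n", pkt, r->tx_new);
        r->tx_new = (r->tx_new + 1) % RING_SIZE;
        return 0;
    }

    int main(void)
    {
        struct tx_ring r = { 0, 0 };

        for (int i = 0; i < 10; i++)
            if (enqueue(&r, i) < 0)
                printf("ring full at pkt %d\n", i);
        return 0;
    }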
1530/* 1530/*
1531** The DE4X5 interrupt handler. 1531** The DE4X5 interrupt handler.
1532** 1532**
1533** I/O Read/Writes through intermediate PCI bridges are never 'posted', 1533** I/O Read/Writes through intermediate PCI bridges are never 'posted',
1534** so that the asserted interrupt always has some real data to work with - 1534** so that the asserted interrupt always has some real data to work with -
1535** if these I/O accesses are ever changed to memory accesses, ensure the 1535** if these I/O accesses are ever changed to memory accesses, ensure the
@@ -1546,7 +1546,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1546 s32 imr, omr, sts, limit; 1546 s32 imr, omr, sts, limit;
1547 u_long iobase; 1547 u_long iobase;
1548 unsigned int handled = 0; 1548 unsigned int handled = 0;
1549 1549
1550 if (dev == NULL) { 1550 if (dev == NULL) {
1551 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq); 1551 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1552 return IRQ_NONE; 1552 return IRQ_NONE;
@@ -1554,35 +1554,35 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1554 lp = netdev_priv(dev); 1554 lp = netdev_priv(dev);
1555 spin_lock(&lp->lock); 1555 spin_lock(&lp->lock);
1556 iobase = dev->base_addr; 1556 iobase = dev->base_addr;
1557 1557
1558 DISABLE_IRQs; /* Ensure non re-entrancy */ 1558 DISABLE_IRQs; /* Ensure non re-entrancy */
1559 1559
1560 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) 1560 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1561 printk("%s: Re-entering the interrupt handler.\n", dev->name); 1561 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1562 1562
1563 synchronize_irq(dev->irq); 1563 synchronize_irq(dev->irq);
1564 1564
1565 for (limit=0; limit<8; limit++) { 1565 for (limit=0; limit<8; limit++) {
1566 sts = inl(DE4X5_STS); /* Read IRQ status */ 1566 sts = inl(DE4X5_STS); /* Read IRQ status */
1567 outl(sts, DE4X5_STS); /* Reset the board interrupts */ 1567 outl(sts, DE4X5_STS); /* Reset the board interrupts */
1568 1568
1569 if (!(sts & lp->irq_mask)) break;/* All done */ 1569 if (!(sts & lp->irq_mask)) break;/* All done */
1570 handled = 1; 1570 handled = 1;
1571 1571
1572 if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ 1572 if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
1573 de4x5_rx(dev); 1573 de4x5_rx(dev);
1574 1574
1575 if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ 1575 if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
1576 de4x5_tx(dev); 1576 de4x5_tx(dev);
1577 1577
1578 if (sts & STS_LNF) { /* TP Link has failed */ 1578 if (sts & STS_LNF) { /* TP Link has failed */
1579 lp->irq_mask &= ~IMR_LFM; 1579 lp->irq_mask &= ~IMR_LFM;
1580 } 1580 }
1581 1581
1582 if (sts & STS_UNF) { /* Transmit underrun */ 1582 if (sts & STS_UNF) { /* Transmit underrun */
1583 de4x5_txur(dev); 1583 de4x5_txur(dev);
1584 } 1584 }
1585 1585
1586 if (sts & STS_SE) { /* Bus Error */ 1586 if (sts & STS_SE) { /* Bus Error */
1587 STOP_DE4X5; 1587 STOP_DE4X5;
1588 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", 1588 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
@@ -1603,7 +1603,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1603 lp->interrupt = UNMASK_INTERRUPTS; 1603 lp->interrupt = UNMASK_INTERRUPTS;
1604 ENABLE_IRQs; 1604 ENABLE_IRQs;
1605 spin_unlock(&lp->lock); 1605 spin_unlock(&lp->lock);
1606 1606
1607 return IRQ_RETVAL(handled); 1607 return IRQ_RETVAL(handled);
1608} 1608}
1609 1609
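The handler above follows the usual Tulip pattern: read the status register, write the same value straight back to acknowledge it, dispatch on the RX/TX/error bits, and give up after a fixed number of passes so a wedged chip cannot pin the CPU. A standalone sketch of that ack-then-dispatch loop (userspace C; the bit values and the read_status/ack_status helpers are illustrative stand-ins for the inl/outl accesses):

    #include <stdio.h>

    #define STS_RI 0x01u                     /* illustrative bits, not the chip's */
    #define STS_TI 0x02u

    static unsigned pending = STS_RI | STS_TI;

    static unsigned read_status(void)  { return pending; }
    static void ack_status(unsigned v) { pending &= ~v; }    /* write-back-to-clear */

    int main(void)
    {
        int handled = 0;

        for (int limit = 0; limit < 8; limit++) {   /* bounded, as in de4x5_interrupt() */
            unsigned sts = read_status();
            ack_status(sts);
            if (!(sts & (STS_RI | STS_TI)))
                break;                              /* nothing left to service */
            handled = 1;
            if (sts & STS_RI)
                printf("service RX\n");
            if (sts & STS_TI)
                printf("service TX\n");
        }
        return handled ? 0 : 1;
    }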
@@ -1614,11 +1614,11 @@ de4x5_rx(struct net_device *dev)
1614 u_long iobase = dev->base_addr; 1614 u_long iobase = dev->base_addr;
1615 int entry; 1615 int entry;
1616 s32 status; 1616 s32 status;
1617 1617
1618 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; 1618 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1619 entry=lp->rx_new) { 1619 entry=lp->rx_new) {
1620 status = (s32)le32_to_cpu(lp->rx_ring[entry].status); 1620 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1621 1621
1622 if (lp->rx_ovf) { 1622 if (lp->rx_ovf) {
1623 if (inl(DE4X5_MFC) & MFC_FOCM) { 1623 if (inl(DE4X5_MFC) & MFC_FOCM) {
1624 de4x5_rx_ovfc(dev); 1624 de4x5_rx_ovfc(dev);
@@ -1629,7 +1629,7 @@ de4x5_rx(struct net_device *dev)
1629 if (status & RD_FS) { /* Remember the start of frame */ 1629 if (status & RD_FS) { /* Remember the start of frame */
1630 lp->rx_old = entry; 1630 lp->rx_old = entry;
1631 } 1631 }
1632 1632
1633 if (status & RD_LS) { /* Valid frame status */ 1633 if (status & RD_LS) { /* Valid frame status */
1634 if (lp->tx_enable) lp->linkOK++; 1634 if (lp->tx_enable) lp->linkOK++;
1635 if (status & RD_ES) { /* There was an error. */ 1635 if (status & RD_ES) { /* There was an error. */
@@ -1646,9 +1646,9 @@ de4x5_rx(struct net_device *dev)
1646 struct sk_buff *skb; 1646 struct sk_buff *skb;
1647 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) 1647 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1648 >> 16) - 4; 1648 >> 16) - 4;
1649 1649
1650 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { 1650 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1651 printk("%s: Insufficient memory; nuking packet.\n", 1651 printk("%s: Insufficient memory; nuking packet.\n",
1652 dev->name); 1652 dev->name);
1653 lp->stats.rx_dropped++; 1653 lp->stats.rx_dropped++;
1654 } else { 1654 } else {
@@ -1658,14 +1658,14 @@ de4x5_rx(struct net_device *dev)
1658 skb->protocol=eth_type_trans(skb,dev); 1658 skb->protocol=eth_type_trans(skb,dev);
1659 de4x5_local_stats(dev, skb->data, pkt_len); 1659 de4x5_local_stats(dev, skb->data, pkt_len);
1660 netif_rx(skb); 1660 netif_rx(skb);
1661 1661
1662 /* Update stats */ 1662 /* Update stats */
1663 dev->last_rx = jiffies; 1663 dev->last_rx = jiffies;
1664 lp->stats.rx_packets++; 1664 lp->stats.rx_packets++;
1665 lp->stats.rx_bytes += pkt_len; 1665 lp->stats.rx_bytes += pkt_len;
1666 } 1666 }
1667 } 1667 }
1668 1668
1669 /* Change buffer ownership for this frame, back to the adapter */ 1669 /* Change buffer ownership for this frame, back to the adapter */
1670 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) { 1670 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
1671 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); 1671 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
@@ -1674,13 +1674,13 @@ de4x5_rx(struct net_device *dev)
1674 lp->rx_ring[entry].status = cpu_to_le32(R_OWN); 1674 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1675 barrier(); 1675 barrier();
1676 } 1676 }
1677 1677
1678 /* 1678 /*
1679 ** Update entry information 1679 ** Update entry information
1680 */ 1680 */
1681 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; 1681 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1682 } 1682 }
1683 1683
1684 return 0; 1684 return 0;
1685} 1685}
1686 1686
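de4x5_rx() above only touches a descriptor while its status sign bit is clear (host-owned) and hands every consumed descriptor back by setting R_OWN before advancing rx_new. A standalone sketch of that ownership hand-off (userspace C; OWN is simply the sign bit here and the NIC side is simulated by the initial array contents):

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 4
    #define OWN       0x80000000u            /* sign bit: descriptor owned by the NIC */

    int main(void)
    {
        uint32_t status[RING_SIZE] = { 0x100, 0x200, OWN, OWN };  /* two frames ready */
        unsigned rx_new = 0;

        /* Consume while the host owns the descriptor (sign bit clear). */
        while (!(status[rx_new] & OWN)) {
            printf("slot %u: frame status 0x%x\n", rx_new, (unsigned)status[rx_new]);
            status[rx_new] = OWN;            /* give it back to the NIC */
            rx_new = (rx_new + 1) % RING_SIZE;
        }
        return 0;
    }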
@@ -1705,20 +1705,20 @@ de4x5_tx(struct net_device *dev)
1705 u_long iobase = dev->base_addr; 1705 u_long iobase = dev->base_addr;
1706 int entry; 1706 int entry;
1707 s32 status; 1707 s32 status;
1708 1708
1709 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { 1709 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1710 status = (s32)le32_to_cpu(lp->tx_ring[entry].status); 1710 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1711 if (status < 0) { /* Buffer not sent yet */ 1711 if (status < 0) { /* Buffer not sent yet */
1712 break; 1712 break;
1713 } else if (status != 0x7fffffff) { /* Not setup frame */ 1713 } else if (status != 0x7fffffff) { /* Not setup frame */
1714 if (status & TD_ES) { /* An error happened */ 1714 if (status & TD_ES) { /* An error happened */
1715 lp->stats.tx_errors++; 1715 lp->stats.tx_errors++;
1716 if (status & TD_NC) lp->stats.tx_carrier_errors++; 1716 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1717 if (status & TD_LC) lp->stats.tx_window_errors++; 1717 if (status & TD_LC) lp->stats.tx_window_errors++;
1718 if (status & TD_UF) lp->stats.tx_fifo_errors++; 1718 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1719 if (status & TD_EC) lp->pktStats.excessive_collisions++; 1719 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1720 if (status & TD_DE) lp->stats.tx_aborted_errors++; 1720 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1721 1721
1722 if (TX_PKT_PENDING) { 1722 if (TX_PKT_PENDING) {
1723 outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ 1723 outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
1724 } 1724 }
@@ -1727,14 +1727,14 @@ de4x5_tx(struct net_device *dev)
1727 if (lp->tx_enable) lp->linkOK++; 1727 if (lp->tx_enable) lp->linkOK++;
1728 } 1728 }
1729 /* Update the collision counter */ 1729 /* Update the collision counter */
1730 lp->stats.collisions += ((status & TD_EC) ? 16 : 1730 lp->stats.collisions += ((status & TD_EC) ? 16 :
1731 ((status & TD_CC) >> 3)); 1731 ((status & TD_CC) >> 3));
1732 1732
1733 /* Free the buffer. */ 1733 /* Free the buffer. */
1734 if (lp->tx_skb[entry] != NULL) 1734 if (lp->tx_skb[entry] != NULL)
1735 de4x5_free_tx_buff(lp, entry); 1735 de4x5_free_tx_buff(lp, entry);
1736 } 1736 }
1737 1737
1738 /* Update all the pointers */ 1738 /* Update all the pointers */
1739 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize; 1739 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1740 } 1740 }
@@ -1746,7 +1746,7 @@ de4x5_tx(struct net_device *dev)
1746 else 1746 else
1747 netif_start_queue(dev); 1747 netif_start_queue(dev);
1748 } 1748 }
1749 1749
1750 return 0; 1750 return 0;
1751} 1751}
1752 1752
@@ -1755,9 +1755,9 @@ de4x5_ast(struct net_device *dev)
1755{ 1755{
1756 struct de4x5_private *lp = netdev_priv(dev); 1756 struct de4x5_private *lp = netdev_priv(dev);
1757 int next_tick = DE4X5_AUTOSENSE_MS; 1757 int next_tick = DE4X5_AUTOSENSE_MS;
1758 1758
1759 disable_ast(dev); 1759 disable_ast(dev);
1760 1760
1761 if (lp->useSROM) { 1761 if (lp->useSROM) {
1762 next_tick = srom_autoconf(dev); 1762 next_tick = srom_autoconf(dev);
1763 } else if (lp->chipset == DC21140) { 1763 } else if (lp->chipset == DC21140) {
@@ -1769,7 +1769,7 @@ de4x5_ast(struct net_device *dev)
1769 } 1769 }
1770 lp->linkOK = 0; 1770 lp->linkOK = 0;
1771 enable_ast(dev, next_tick); 1771 enable_ast(dev, next_tick);
1772 1772
1773 return 0; 1773 return 0;
1774} 1774}
1775 1775
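de4x5_ast() shows the autosense discipline used throughout this driver: cancel the running timer, step the chip-specific media state machine once, and re-arm the timer with whatever interval the state machine returned. A standalone sketch of that step-and-re-arm shape (userspace C; the two-state machine and the tick values are purely illustrative, not the driver's media logic):

    #include <stdio.h>

    enum media_state { PROBE, LINK_OK };

    /* One autosense step: returns how many ms to wait before the next step. */
    static int media_step(enum media_state *st, int link_up)
    {
        switch (*st) {
        case PROBE:
            if (link_up) {
                *st = LINK_OK;
                return 3000;                 /* settled: drop to slow monitoring */
            }
            return 500;                      /* keep probing quickly */
        case LINK_OK:
        default:
            if (!link_up)
                *st = PROBE;                 /* lost the link: start over */
            return 2000;
        }
    }

    int main(void)
    {
        enum media_state st = PROBE;
        int link[] = { 0, 0, 1, 1, 0, 1 };

        for (unsigned i = 0; i < sizeof(link) / sizeof(link[0]); i++) {
            int next = media_step(&st, link[i]);
            printf("tick %u: next poll in %d ms (state %d)\n", i, next, (int)st);
        }
        return 0;
    }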
@@ -1792,11 +1792,11 @@ de4x5_txur(struct net_device *dev)
1792 } 1792 }
1793 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); 1793 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1794 } 1794 }
1795 1795
1796 return 0; 1796 return 0;
1797} 1797}
1798 1798
1799static int 1799static int
1800de4x5_rx_ovfc(struct net_device *dev) 1800de4x5_rx_ovfc(struct net_device *dev)
1801{ 1801{
1802 struct de4x5_private *lp = netdev_priv(dev); 1802 struct de4x5_private *lp = netdev_priv(dev);
@@ -1813,7 +1813,7 @@ de4x5_rx_ovfc(struct net_device *dev)
1813 } 1813 }
1814 1814
1815 outl(omr, DE4X5_OMR); 1815 outl(omr, DE4X5_OMR);
1816 1816
1817 return 0; 1817 return 0;
1818} 1818}
1819 1819
@@ -1823,22 +1823,22 @@ de4x5_close(struct net_device *dev)
1823 struct de4x5_private *lp = netdev_priv(dev); 1823 struct de4x5_private *lp = netdev_priv(dev);
1824 u_long iobase = dev->base_addr; 1824 u_long iobase = dev->base_addr;
1825 s32 imr, omr; 1825 s32 imr, omr;
1826 1826
1827 disable_ast(dev); 1827 disable_ast(dev);
1828 1828
1829 netif_stop_queue(dev); 1829 netif_stop_queue(dev);
1830 1830
1831 if (de4x5_debug & DEBUG_CLOSE) { 1831 if (de4x5_debug & DEBUG_CLOSE) {
1832 printk("%s: Shutting down ethercard, status was %8.8x.\n", 1832 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1833 dev->name, inl(DE4X5_STS)); 1833 dev->name, inl(DE4X5_STS));
1834 } 1834 }
1835 1835
1836 /* 1836 /*
1837 ** We stop the DE4X5 here... mask interrupts and stop TX & RX 1837 ** We stop the DE4X5 here... mask interrupts and stop TX & RX
1838 */ 1838 */
1839 DISABLE_IRQs; 1839 DISABLE_IRQs;
1840 STOP_DE4X5; 1840 STOP_DE4X5;
1841 1841
1842 /* Free the associated irq */ 1842 /* Free the associated irq */
1843 free_irq(dev->irq, dev); 1843 free_irq(dev->irq, dev);
1844 lp->state = CLOSED; 1844 lp->state = CLOSED;
@@ -1846,10 +1846,10 @@ de4x5_close(struct net_device *dev)
1846 /* Free any socket buffers */ 1846 /* Free any socket buffers */
1847 de4x5_free_rx_buffs(dev); 1847 de4x5_free_rx_buffs(dev);
1848 de4x5_free_tx_buffs(dev); 1848 de4x5_free_tx_buffs(dev);
1849 1849
1850 /* Put the adapter to sleep to save power */ 1850 /* Put the adapter to sleep to save power */
1851 yawn(dev, SLEEP); 1851 yawn(dev, SLEEP);
1852 1852
1853 return 0; 1853 return 0;
1854} 1854}
1855 1855
@@ -1858,9 +1858,9 @@ de4x5_get_stats(struct net_device *dev)
1858{ 1858{
1859 struct de4x5_private *lp = netdev_priv(dev); 1859 struct de4x5_private *lp = netdev_priv(dev);
1860 u_long iobase = dev->base_addr; 1860 u_long iobase = dev->base_addr;
1861 1861
1862 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); 1862 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1863 1863
1864 return &lp->stats; 1864 return &lp->stats;
1865} 1865}
1866 1866
@@ -1886,7 +1886,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1886 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) { 1886 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1887 lp->pktStats.unicast++; 1887 lp->pktStats.unicast++;
1888 } 1888 }
1889 1889
1890 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ 1890 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
1891 if (lp->pktStats.bins[0] == 0) { /* Reset counters */ 1891 if (lp->pktStats.bins[0] == 0) { /* Reset counters */
1892 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); 1892 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
@@ -1937,11 +1937,11 @@ set_multicast_list(struct net_device *dev)
1937 omr = inl(DE4X5_OMR); 1937 omr = inl(DE4X5_OMR);
1938 omr |= OMR_PR; 1938 omr |= OMR_PR;
1939 outl(omr, DE4X5_OMR); 1939 outl(omr, DE4X5_OMR);
1940 } else { 1940 } else {
1941 SetMulticastFilter(dev); 1941 SetMulticastFilter(dev);
1942 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | 1942 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1943 SETUP_FRAME_LEN, (struct sk_buff *)1); 1943 SETUP_FRAME_LEN, (struct sk_buff *)1);
1944 1944
1945 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; 1945 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1946 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ 1946 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
1947 dev->trans_start = jiffies; 1947 dev->trans_start = jiffies;
@@ -1969,20 +1969,20 @@ SetMulticastFilter(struct net_device *dev)
1969 omr = inl(DE4X5_OMR); 1969 omr = inl(DE4X5_OMR);
1970 omr &= ~(OMR_PR | OMR_PM); 1970 omr &= ~(OMR_PR | OMR_PM);
1971 pa = build_setup_frame(dev, ALL); /* Build the basic frame */ 1971 pa = build_setup_frame(dev, ALL); /* Build the basic frame */
1972 1972
1973 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) { 1973 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
1974 omr |= OMR_PM; /* Pass all multicasts */ 1974 omr |= OMR_PM; /* Pass all multicasts */
1975 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ 1975 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1976 for (i=0;i<dev->mc_count;i++) { /* for each address in the list */ 1976 for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
1977 addrs=dmi->dmi_addr; 1977 addrs=dmi->dmi_addr;
1978 dmi=dmi->next; 1978 dmi=dmi->next;
1979 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1979 if ((*addrs & 0x01) == 1) { /* multicast address? */
1980 crc = ether_crc_le(ETH_ALEN, addrs); 1980 crc = ether_crc_le(ETH_ALEN, addrs);
1981 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ 1981 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
1982 1982
1983 byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ 1983 byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
1984 bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ 1984 bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
1985 1985
1986 byte <<= 1; /* calc offset into setup frame */ 1986 byte <<= 1; /* calc offset into setup frame */
1987 if (byte & 0x02) { 1987 if (byte & 0x02) {
1988 byte -= 1; 1988 byte -= 1;
@@ -1994,14 +1994,14 @@ SetMulticastFilter(struct net_device *dev)
1994 for (j=0; j<dev->mc_count; j++) { 1994 for (j=0; j<dev->mc_count; j++) {
1995 addrs=dmi->dmi_addr; 1995 addrs=dmi->dmi_addr;
1996 dmi=dmi->next; 1996 dmi=dmi->next;
1997 for (i=0; i<ETH_ALEN; i++) { 1997 for (i=0; i<ETH_ALEN; i++) {
1998 *(pa + (i&1)) = *addrs++; 1998 *(pa + (i&1)) = *addrs++;
1999 if (i & 0x01) pa += 4; 1999 if (i & 0x01) pa += 4;
2000 } 2000 }
2001 } 2001 }
2002 } 2002 }
2003 outl(omr, DE4X5_OMR); 2003 outl(omr, DE4X5_OMR);
2004 2004
2005 return; 2005 return;
2006} 2006}
2007 2007
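The hash branch of SetMulticastFilter() above maps each multicast address to one bit of a 512-bit table: the 9 low bits of ether_crc_le() over the address select the bit, bits [3:8] pick the byte and bits [0:2] the bit within it; the byte <<= 1 adjustment that follows only re-maps that offset into the setup frame's word layout. A standalone sketch of the index computation, assuming HASH_BITS is the 9-bit mask 0x01ff (consistent with the "9 LSb" comment above) and using a userspace re-implementation of the kernel's little-endian Ethernet CRC:

    #include <stdio.h>
    #include <stdint.h>

    /* Little-endian Ethernet CRC-32, same algorithm as the kernel's ether_crc_le(). */
    static uint32_t crc32_le(int len, const unsigned char *data)
    {
        uint32_t crc = 0xffffffff;

        while (len-- > 0) {
            unsigned char byte = *data++;
            for (int bit = 0; bit < 8; bit++, byte >>= 1) {
                if ((crc ^ byte) & 1)
                    crc = (crc >> 1) ^ 0xedb88320;   /* reflected Ethernet polynomial */
                else
                    crc >>= 1;
            }
        }
        return crc;
    }

    int main(void)
    {
        const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t hashcode = crc32_le(6, mc) & 0x01ff;    /* 9 LSbs, HASH_BITS-style */
        unsigned byte = hashcode >> 3;                   /* which byte of the 64-byte table */
        unsigned bit  = 1u << (hashcode & 0x07);         /* which bit within that byte */

        printf("hashcode %u -> byte %u, bit mask 0x%02x\n",
               (unsigned)hashcode, byte, bit);
        return 0;
    }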
@@ -2031,18 +2031,18 @@ static int __init de4x5_eisa_probe (struct device *gendev)
2031 status = -EBUSY; 2031 status = -EBUSY;
2032 goto release_reg_1; 2032 goto release_reg_1;
2033 } 2033 }
2034 2034
2035 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { 2035 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2036 status = -ENOMEM; 2036 status = -ENOMEM;
2037 goto release_reg_2; 2037 goto release_reg_2;
2038 } 2038 }
2039 lp = netdev_priv(dev); 2039 lp = netdev_priv(dev);
2040 2040
2041 cfid = (u32) inl(PCI_CFID); 2041 cfid = (u32) inl(PCI_CFID);
2042 lp->cfrv = (u_short) inl(PCI_CFRV); 2042 lp->cfrv = (u_short) inl(PCI_CFRV);
2043 device = (cfid >> 8) & 0x00ffff00; 2043 device = (cfid >> 8) & 0x00ffff00;
2044 vendor = (u_short) cfid; 2044 vendor = (u_short) cfid;
2045 2045
2046 /* Read the EISA Configuration Registers */ 2046 /* Read the EISA Configuration Registers */
2047 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); 2047 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2048#ifdef CONFIG_ALPHA 2048#ifdef CONFIG_ALPHA
@@ -2050,7 +2050,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
2050 * care about the EISA configuration, and thus doesn't 2050 * care about the EISA configuration, and thus doesn't
2051 * configure the PLX bridge properly. Oh well... Simply mimic 2051 * configure the PLX bridge properly. Oh well... Simply mimic
2052 * the EISA config file to sort it out. */ 2052 * the EISA config file to sort it out. */
2053 2053
2054 /* EISA REG1: Assert DecChip 21040 HW Reset */ 2054 /* EISA REG1: Assert DecChip 21040 HW Reset */
2055 outb (ER1_IAM | 1, EISA_REG1); 2055 outb (ER1_IAM | 1, EISA_REG1);
2056 mdelay (1); 2056 mdelay (1);
@@ -2061,12 +2061,12 @@ static int __init de4x5_eisa_probe (struct device *gendev)
2061 2061
2062 /* EISA REG3: R/W Burst Transfer Enable */ 2062 /* EISA REG3: R/W Burst Transfer Enable */
2063 outb (ER3_BWE | ER3_BRE, EISA_REG3); 2063 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2064 2064
2065 /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ 2065 /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
2066 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); 2066 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2067#endif 2067#endif
2068 irq = de4x5_irq[(regval >> 1) & 0x03]; 2068 irq = de4x5_irq[(regval >> 1) & 0x03];
2069 2069
2070 if (is_DC2114x) { 2070 if (is_DC2114x) {
2071 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); 2071 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2072 } 2072 }
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
2077 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); 2077 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2078 outl(0x00006000, PCI_CFLT); 2078 outl(0x00006000, PCI_CFLT);
2079 outl(iobase, PCI_CBIO); 2079 outl(iobase, PCI_CBIO);
2080 2080
2081 DevicePresent(dev, EISA_APROM); 2081 DevicePresent(dev, EISA_APROM);
2082 2082
2083 dev->irq = irq; 2083 dev->irq = irq;
@@ -2102,7 +2102,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
2102 2102
2103 dev = device->driver_data; 2103 dev = device->driver_data;
2104 iobase = dev->base_addr; 2104 iobase = dev->base_addr;
2105 2105
2106 unregister_netdev (dev); 2106 unregister_netdev (dev);
2107 free_netdev (dev); 2107 free_netdev (dev);
2108 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); 2108 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
@@ -2131,11 +2131,11 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2131 2131
2132/* 2132/*
2133** This function searches the current bus (which is >0) for a DECchip with an 2133** This function searches the current bus (which is >0) for a DECchip with an
2134** SROM, so that in multiport cards that have one SROM shared between multiple 2134** SROM, so that in multiport cards that have one SROM shared between multiple
2135** DECchips, we can find the base SROM irrespective of the BIOS scan direction. 2135** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
2136** For single port cards this is a time waster... 2136** For single port cards this is a time waster...
2137*/ 2137*/
2138static void __devinit 2138static void __devinit
2139srom_search(struct net_device *dev, struct pci_dev *pdev) 2139srom_search(struct net_device *dev, struct pci_dev *pdev)
2140{ 2140{
2141 u_char pb; 2141 u_char pb;
@@ -2163,7 +2163,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2163 /* Set the device number information */ 2163 /* Set the device number information */
2164 lp->device = PCI_SLOT(this_dev->devfn); 2164 lp->device = PCI_SLOT(this_dev->devfn);
2165 lp->bus_num = pb; 2165 lp->bus_num = pb;
2166 2166
2167 /* Set the chipset information */ 2167 /* Set the chipset information */
2168 if (is_DC2114x) { 2168 if (is_DC2114x) {
2169 device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); 2169 device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2176,7 +2176,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2176 /* Fetch the IRQ to be used */ 2176 /* Fetch the IRQ to be used */
2177 irq = this_dev->irq; 2177 irq = this_dev->irq;
2178 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; 2178 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2179 2179
2180 /* Check if I/O accesses are enabled */ 2180 /* Check if I/O accesses are enabled */
2181 pci_read_config_word(this_dev, PCI_COMMAND, &status); 2181 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2182 if (!(status & PCI_COMMAND_IO)) continue; 2182 if (!(status & PCI_COMMAND_IO)) continue;
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2254 lp = netdev_priv(dev); 2254 lp = netdev_priv(dev);
2255 lp->bus = PCI; 2255 lp->bus = PCI;
2256 lp->bus_num = 0; 2256 lp->bus_num = 0;
2257 2257
2258 /* Search for an SROM on this bus */ 2258 /* Search for an SROM on this bus */
2259 if (lp->bus_num != pb) { 2259 if (lp->bus_num != pb) {
2260 lp->bus_num = pb; 2260 lp->bus_num = pb;
@@ -2267,7 +2267,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2267 /* Set the device number information */ 2267 /* Set the device number information */
2268 lp->device = dev_num; 2268 lp->device = dev_num;
2269 lp->bus_num = pb; 2269 lp->bus_num = pb;
2270 2270
2271 /* Set the chipset information */ 2271 /* Set the chipset information */
2272 if (is_DC2114x) { 2272 if (is_DC2114x) {
2273 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); 2273 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2283,7 +2283,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2283 error = -ENODEV; 2283 error = -ENODEV;
2284 goto free_dev; 2284 goto free_dev;
2285 } 2285 }
2286 2286
2287 /* Check if I/O accesses and Bus Mastering are enabled */ 2287 /* Check if I/O accesses and Bus Mastering are enabled */
2288 pci_read_config_word(pdev, PCI_COMMAND, &status); 2288 pci_read_config_word(pdev, PCI_COMMAND, &status);
2289#ifdef __powerpc__ 2289#ifdef __powerpc__
@@ -2322,7 +2322,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2322 } 2322 }
2323 2323
2324 dev->irq = irq; 2324 dev->irq = irq;
2325 2325
2326 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { 2326 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2327 goto release; 2327 goto release;
2328 } 2328 }
@@ -2377,7 +2377,7 @@ static struct pci_driver de4x5_pci_driver = {
2377** Auto configure the media here rather than setting the port at compile 2377** Auto configure the media here rather than setting the port at compile
2378** time. This routine is called by de4x5_init() and when a loss of media is 2378** time. This routine is called by de4x5_init() and when a loss of media is
2379** detected (excessive collisions, loss of carrier, no carrier or link fail 2379** detected (excessive collisions, loss of carrier, no carrier or link fail
2380** [TP] or no recent receive activity) to check whether the user has been 2380** [TP] or no recent receive activity) to check whether the user has been
2381** sneaky and changed the port on us. 2381** sneaky and changed the port on us.
2382*/ 2382*/
2383static int 2383static int
@@ -2405,7 +2405,7 @@ autoconf_media(struct net_device *dev)
2405 } 2405 }
2406 2406
2407 enable_ast(dev, next_tick); 2407 enable_ast(dev, next_tick);
2408 2408
2409 return (lp->media); 2409 return (lp->media);
2410} 2410}
2411 2411
@@ -2428,7 +2428,7 @@ dc21040_autoconf(struct net_device *dev)
2428 u_long iobase = dev->base_addr; 2428 u_long iobase = dev->base_addr;
2429 int next_tick = DE4X5_AUTOSENSE_MS; 2429 int next_tick = DE4X5_AUTOSENSE_MS;
2430 s32 imr; 2430 s32 imr;
2431 2431
2432 switch (lp->media) { 2432 switch (lp->media) {
2433 case INIT: 2433 case INIT:
2434 DISABLE_IRQs; 2434 DISABLE_IRQs;
@@ -2447,36 +2447,36 @@ dc21040_autoconf(struct net_device *dev)
2447 lp->local_state = 0; 2447 lp->local_state = 0;
2448 next_tick = dc21040_autoconf(dev); 2448 next_tick = dc21040_autoconf(dev);
2449 break; 2449 break;
2450 2450
2451 case TP: 2451 case TP:
2452 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, 2452 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2453 TP_SUSPECT, test_tp); 2453 TP_SUSPECT, test_tp);
2454 break; 2454 break;
2455 2455
2456 case TP_SUSPECT: 2456 case TP_SUSPECT:
2457 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); 2457 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2458 break; 2458 break;
2459 2459
2460 case BNC: 2460 case BNC:
2461 case AUI: 2461 case AUI:
2462 case BNC_AUI: 2462 case BNC_AUI:
2463 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, 2463 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2464 BNC_AUI_SUSPECT, ping_media); 2464 BNC_AUI_SUSPECT, ping_media);
2465 break; 2465 break;
2466 2466
2467 case BNC_AUI_SUSPECT: 2467 case BNC_AUI_SUSPECT:
2468 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); 2468 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2469 break; 2469 break;
2470 2470
2471 case EXT_SIA: 2471 case EXT_SIA:
2472 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, 2472 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2473 NC, EXT_SIA_SUSPECT, ping_media); 2473 NC, EXT_SIA_SUSPECT, ping_media);
2474 break; 2474 break;
2475 2475
2476 case EXT_SIA_SUSPECT: 2476 case EXT_SIA_SUSPECT:
2477 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); 2477 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2478 break; 2478 break;
2479 2479
2480 case NC: 2480 case NC:
2481 /* default to TP for all */ 2481 /* default to TP for all */
2482 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); 2482 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
@@ -2488,13 +2488,13 @@ dc21040_autoconf(struct net_device *dev)
2488 lp->tx_enable = NO; 2488 lp->tx_enable = NO;
2489 break; 2489 break;
2490 } 2490 }
2491 2491
2492 return next_tick; 2492 return next_tick;
2493} 2493}
2494 2494
2495static int 2495static int
2496dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, 2496dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2497 int next_state, int suspect_state, 2497 int next_state, int suspect_state,
2498 int (*fn)(struct net_device *, int)) 2498 int (*fn)(struct net_device *, int))
2499{ 2499{
2500 struct de4x5_private *lp = netdev_priv(dev); 2500 struct de4x5_private *lp = netdev_priv(dev);
@@ -2507,7 +2507,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
2507 lp->local_state++; 2507 lp->local_state++;
2508 next_tick = 500; 2508 next_tick = 500;
2509 break; 2509 break;
2510 2510
2511 case 1: 2511 case 1:
2512 if (!lp->tx_enable) { 2512 if (!lp->tx_enable) {
2513 linkBad = fn(dev, timeout); 2513 linkBad = fn(dev, timeout);
@@ -2527,7 +2527,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
2527 } 2527 }
2528 break; 2528 break;
2529 } 2529 }
2530 2530
2531 return next_tick; 2531 return next_tick;
2532} 2532}
2533 2533
@@ -2582,7 +2582,7 @@ dc21041_autoconf(struct net_device *dev)
2582 u_long iobase = dev->base_addr; 2582 u_long iobase = dev->base_addr;
2583 s32 sts, irqs, irq_mask, imr, omr; 2583 s32 sts, irqs, irq_mask, imr, omr;
2584 int next_tick = DE4X5_AUTOSENSE_MS; 2584 int next_tick = DE4X5_AUTOSENSE_MS;
2585 2585
2586 switch (lp->media) { 2586 switch (lp->media) {
2587 case INIT: 2587 case INIT:
2588 DISABLE_IRQs; 2588 DISABLE_IRQs;
@@ -2603,7 +2603,7 @@ dc21041_autoconf(struct net_device *dev)
2603 lp->local_state = 0; 2603 lp->local_state = 0;
2604 next_tick = dc21041_autoconf(dev); 2604 next_tick = dc21041_autoconf(dev);
2605 break; 2605 break;
2606 2606
2607 case TP_NW: 2607 case TP_NW:
2608 if (lp->timeout < 0) { 2608 if (lp->timeout < 0) {
2609 omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ 2609 omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
@@ -2623,7 +2623,7 @@ dc21041_autoconf(struct net_device *dev)
2623 next_tick = dc21041_autoconf(dev); 2623 next_tick = dc21041_autoconf(dev);
2624 } 2624 }
2625 break; 2625 break;
2626 2626
2627 case ANS: 2627 case ANS:
2628 if (!lp->tx_enable) { 2628 if (!lp->tx_enable) {
2629 irqs = STS_LNP; 2629 irqs = STS_LNP;
@@ -2645,11 +2645,11 @@ dc21041_autoconf(struct net_device *dev)
2645 next_tick = 3000; 2645 next_tick = 3000;
2646 } 2646 }
2647 break; 2647 break;
2648 2648
2649 case ANS_SUSPECT: 2649 case ANS_SUSPECT:
2650 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); 2650 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2651 break; 2651 break;
2652 2652
2653 case TP: 2653 case TP:
2654 if (!lp->tx_enable) { 2654 if (!lp->tx_enable) {
2655 if (lp->timeout < 0) { 2655 if (lp->timeout < 0) {
@@ -2679,11 +2679,11 @@ dc21041_autoconf(struct net_device *dev)
2679 next_tick = 3000; 2679 next_tick = 3000;
2680 } 2680 }
2681 break; 2681 break;
2682 2682
2683 case TP_SUSPECT: 2683 case TP_SUSPECT:
2684 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); 2684 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2685 break; 2685 break;
2686 2686
2687 case AUI: 2687 case AUI:
2688 if (!lp->tx_enable) { 2688 if (!lp->tx_enable) {
2689 if (lp->timeout < 0) { 2689 if (lp->timeout < 0) {
@@ -2709,11 +2709,11 @@ dc21041_autoconf(struct net_device *dev)
2709 next_tick = 3000; 2709 next_tick = 3000;
2710 } 2710 }
2711 break; 2711 break;
2712 2712
2713 case AUI_SUSPECT: 2713 case AUI_SUSPECT:
2714 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); 2714 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2715 break; 2715 break;
2716 2716
2717 case BNC: 2717 case BNC:
2718 switch (lp->local_state) { 2718 switch (lp->local_state) {
2719 case 0: 2719 case 0:
@@ -2731,7 +2731,7 @@ dc21041_autoconf(struct net_device *dev)
2731 next_tick = dc21041_autoconf(dev); 2731 next_tick = dc21041_autoconf(dev);
2732 } 2732 }
2733 break; 2733 break;
2734 2734
2735 case 1: 2735 case 1:
2736 if (!lp->tx_enable) { 2736 if (!lp->tx_enable) {
2737 if ((sts = ping_media(dev, 3000)) < 0) { 2737 if ((sts = ping_media(dev, 3000)) < 0) {
@@ -2751,11 +2751,11 @@ dc21041_autoconf(struct net_device *dev)
2751 break; 2751 break;
2752 } 2752 }
2753 break; 2753 break;
2754 2754
2755 case BNC_SUSPECT: 2755 case BNC_SUSPECT:
2756 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); 2756 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2757 break; 2757 break;
2758 2758
2759 case NC: 2759 case NC:
2760 omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ 2760 omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
2761 outl(omr | OMR_FDX, DE4X5_OMR); 2761 outl(omr | OMR_FDX, DE4X5_OMR);
@@ -2768,7 +2768,7 @@ dc21041_autoconf(struct net_device *dev)
2768 lp->tx_enable = NO; 2768 lp->tx_enable = NO;
2769 break; 2769 break;
2770 } 2770 }
2771 2771
2772 return next_tick; 2772 return next_tick;
2773} 2773}
2774 2774
@@ -2784,9 +2784,9 @@ dc21140m_autoconf(struct net_device *dev)
2784 int ana, anlpa, cap, cr, slnk, sr; 2784 int ana, anlpa, cap, cr, slnk, sr;
2785 int next_tick = DE4X5_AUTOSENSE_MS; 2785 int next_tick = DE4X5_AUTOSENSE_MS;
2786 u_long imr, omr, iobase = dev->base_addr; 2786 u_long imr, omr, iobase = dev->base_addr;
2787 2787
2788 switch(lp->media) { 2788 switch(lp->media) {
2789 case INIT: 2789 case INIT:
2790 if (lp->timeout < 0) { 2790 if (lp->timeout < 0) {
2791 DISABLE_IRQs; 2791 DISABLE_IRQs;
2792 lp->tx_enable = FALSE; 2792 lp->tx_enable = FALSE;
@@ -2813,7 +2813,7 @@ dc21140m_autoconf(struct net_device *dev)
2813 lp->media = _100Mb; 2813 lp->media = _100Mb;
2814 } else if (lp->autosense == _10Mb) { 2814 } else if (lp->autosense == _10Mb) {
2815 lp->media = _10Mb; 2815 lp->media = _10Mb;
2816 } else if ((lp->autosense == AUTO) && 2816 } else if ((lp->autosense == AUTO) &&
2817 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { 2817 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2818 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); 2818 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2819 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); 2819 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -2831,7 +2831,7 @@ dc21140m_autoconf(struct net_device *dev)
2831 next_tick = dc21140m_autoconf(dev); 2831 next_tick = dc21140m_autoconf(dev);
2832 } 2832 }
2833 break; 2833 break;
2834 2834
2835 case ANS: 2835 case ANS:
2836 switch (lp->local_state) { 2836 switch (lp->local_state) {
2837 case 0: 2837 case 0:
@@ -2851,7 +2851,7 @@ dc21140m_autoconf(struct net_device *dev)
2851 next_tick = dc21140m_autoconf(dev); 2851 next_tick = dc21140m_autoconf(dev);
2852 } 2852 }
2853 break; 2853 break;
2854 2854
2855 case 1: 2855 case 1:
2856 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { 2856 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
2857 next_tick = sr & ~TIMER_CB; 2857 next_tick = sr & ~TIMER_CB;
@@ -2862,7 +2862,7 @@ dc21140m_autoconf(struct net_device *dev)
2862 lp->tmp = MII_SR_ASSC; 2862 lp->tmp = MII_SR_ASSC;
2863 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); 2863 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2864 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); 2864 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2865 if (!(anlpa & MII_ANLPA_RF) && 2865 if (!(anlpa & MII_ANLPA_RF) &&
2866 (cap = anlpa & MII_ANLPA_TAF & ana)) { 2866 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2867 if (cap & MII_ANA_100M) { 2867 if (cap & MII_ANA_100M) {
2868 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); 2868 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -2879,10 +2879,10 @@ dc21140m_autoconf(struct net_device *dev)
2879 break; 2879 break;
2880 } 2880 }
2881 break; 2881 break;
2882 2882
2883 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ 2883 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
2884 if (lp->timeout < 0) { 2884 if (lp->timeout < 0) {
2885 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : 2885 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2886 (~gep_rd(dev) & GEP_LNP)); 2886 (~gep_rd(dev) & GEP_LNP));
2887 SET_100Mb_PDET; 2887 SET_100Mb_PDET;
2888 } 2888 }
@@ -2899,7 +2899,7 @@ dc21140m_autoconf(struct net_device *dev)
2899 next_tick = dc21140m_autoconf(dev); 2899 next_tick = dc21140m_autoconf(dev);
2900 } 2900 }
2901 break; 2901 break;
2902 2902
2903 case _100Mb: /* Set 100Mb/s */ 2903 case _100Mb: /* Set 100Mb/s */
2904 next_tick = 3000; 2904 next_tick = 3000;
2905 if (!lp->tx_enable) { 2905 if (!lp->tx_enable) {
@@ -2933,7 +2933,7 @@ dc21140m_autoconf(struct net_device *dev)
2933 } 2933 }
2934 } 2934 }
2935 break; 2935 break;
2936 2936
2937 case NC: 2937 case NC:
2938 if (lp->media != lp->c_media) { 2938 if (lp->media != lp->c_media) {
2939 de4x5_dbg_media(dev); 2939 de4x5_dbg_media(dev);
@@ -2943,7 +2943,7 @@ dc21140m_autoconf(struct net_device *dev)
2943 lp->tx_enable = FALSE; 2943 lp->tx_enable = FALSE;
2944 break; 2944 break;
2945 } 2945 }
2946 2946
2947 return next_tick; 2947 return next_tick;
2948} 2948}
2949 2949
@@ -3002,7 +3002,7 @@ dc2114x_autoconf(struct net_device *dev)
3002 lp->media = AUI; 3002 lp->media = AUI;
3003 } else { 3003 } else {
3004 lp->media = SPD_DET; 3004 lp->media = SPD_DET;
3005 if ((lp->infoblock_media == ANS) && 3005 if ((lp->infoblock_media == ANS) &&
3006 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { 3006 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
3007 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); 3007 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
3008 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); 3008 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -3014,7 +3014,7 @@ dc2114x_autoconf(struct net_device *dev)
3014 next_tick = dc2114x_autoconf(dev); 3014 next_tick = dc2114x_autoconf(dev);
3015 } 3015 }
3016 break; 3016 break;
3017 3017
3018 case ANS: 3018 case ANS:
3019 switch (lp->local_state) { 3019 switch (lp->local_state) {
3020 case 0: 3020 case 0:
@@ -3034,7 +3034,7 @@ dc2114x_autoconf(struct net_device *dev)
3034 next_tick = dc2114x_autoconf(dev); 3034 next_tick = dc2114x_autoconf(dev);
3035 } 3035 }
3036 break; 3036 break;
3037 3037
3038 case 1: 3038 case 1:
3039 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { 3039 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
3040 next_tick = sr & ~TIMER_CB; 3040 next_tick = sr & ~TIMER_CB;
@@ -3045,7 +3045,7 @@ dc2114x_autoconf(struct net_device *dev)
3045 lp->tmp = MII_SR_ASSC; 3045 lp->tmp = MII_SR_ASSC;
3046 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); 3046 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3047 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); 3047 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3048 if (!(anlpa & MII_ANLPA_RF) && 3048 if (!(anlpa & MII_ANLPA_RF) &&
3049 (cap = anlpa & MII_ANLPA_TAF & ana)) { 3049 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3050 if (cap & MII_ANA_100M) { 3050 if (cap & MII_ANA_100M) {
3051 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); 3051 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -3087,11 +3087,11 @@ dc2114x_autoconf(struct net_device *dev)
3087 next_tick = 3000; 3087 next_tick = 3000;
3088 } 3088 }
3089 break; 3089 break;
3090 3090
3091 case AUI_SUSPECT: 3091 case AUI_SUSPECT:
3092 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); 3092 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3093 break; 3093 break;
3094 3094
3095 case BNC: 3095 case BNC:
3096 switch (lp->local_state) { 3096 switch (lp->local_state) {
3097 case 0: 3097 case 0:
@@ -3109,7 +3109,7 @@ dc2114x_autoconf(struct net_device *dev)
3109 next_tick = dc2114x_autoconf(dev); 3109 next_tick = dc2114x_autoconf(dev);
3110 } 3110 }
3111 break; 3111 break;
3112 3112
3113 case 1: 3113 case 1:
3114 if (!lp->tx_enable) { 3114 if (!lp->tx_enable) {
3115 if ((sts = ping_media(dev, 3000)) < 0) { 3115 if ((sts = ping_media(dev, 3000)) < 0) {
@@ -3130,11 +3130,11 @@ dc2114x_autoconf(struct net_device *dev)
3130 break; 3130 break;
3131 } 3131 }
3132 break; 3132 break;
3133 3133
3134 case BNC_SUSPECT: 3134 case BNC_SUSPECT:
3135 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); 3135 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3136 break; 3136 break;
3137 3137
3138 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ 3138 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
3139 if (srom_map_media(dev) < 0) { 3139 if (srom_map_media(dev) < 0) {
3140 lp->tcount++; 3140 lp->tcount++;
@@ -3161,7 +3161,7 @@ dc2114x_autoconf(struct net_device *dev)
3161 next_tick = dc2114x_autoconf(dev); 3161 next_tick = dc2114x_autoconf(dev);
3162 } else if (((lp->media == _100Mb) && is_100_up(dev)) || 3162 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3163 (((lp->media == _10Mb) || (lp->media == TP) || 3163 (((lp->media == _10Mb) || (lp->media == TP) ||
3164 (lp->media == BNC) || (lp->media == AUI)) && 3164 (lp->media == BNC) || (lp->media == AUI)) &&
3165 is_10_up(dev))) { 3165 is_10_up(dev))) {
3166 next_tick = dc2114x_autoconf(dev); 3166 next_tick = dc2114x_autoconf(dev);
3167 } else { 3167 } else {
@@ -3169,7 +3169,7 @@ dc2114x_autoconf(struct net_device *dev)
3169 lp->media = INIT; 3169 lp->media = INIT;
3170 } 3170 }
3171 break; 3171 break;
3172 3172
3173 case _10Mb: 3173 case _10Mb:
3174 next_tick = 3000; 3174 next_tick = 3000;
3175 if (!lp->tx_enable) { 3175 if (!lp->tx_enable) {
@@ -3208,7 +3208,7 @@ printk("Huh?: media:%02x\n", lp->media);
3208 lp->media = INIT; 3208 lp->media = INIT;
3209 break; 3209 break;
3210 } 3210 }
3211 3211
3212 return next_tick; 3212 return next_tick;
3213} 3213}
3214 3214
@@ -3231,7 +3231,7 @@ srom_map_media(struct net_device *dev)
3231 struct de4x5_private *lp = netdev_priv(dev); 3231 struct de4x5_private *lp = netdev_priv(dev);
3232 3232
3233 lp->fdx = 0; 3233 lp->fdx = 0;
3234 if (lp->infoblock_media == lp->media) 3234 if (lp->infoblock_media == lp->media)
3235 return 0; 3235 return 0;
3236 3236
3237 switch(lp->infoblock_media) { 3237 switch(lp->infoblock_media) {
@@ -3270,7 +3270,7 @@ srom_map_media(struct net_device *dev)
3270 case SROM_100BASEFF: 3270 case SROM_100BASEFF:
3271 if (!lp->params.fdx) return -1; 3271 if (!lp->params.fdx) return -1;
3272 lp->fdx = TRUE; 3272 lp->fdx = TRUE;
3273 case SROM_100BASEF: 3273 case SROM_100BASEF:
3274 if (lp->params.fdx && !lp->fdx) return -1; 3274 if (lp->params.fdx && !lp->fdx) return -1;
3275 lp->media = _100Mb; 3275 lp->media = _100Mb;
3276 break; 3276 break;
@@ -3280,8 +3280,8 @@ srom_map_media(struct net_device *dev)
3280 lp->fdx = lp->params.fdx; 3280 lp->fdx = lp->params.fdx;
3281 break; 3281 break;
3282 3282
3283 default: 3283 default:
3284 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, 3284 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3285 lp->infoblock_media); 3285 lp->infoblock_media);
3286 return -1; 3286 return -1;
3287 break; 3287 break;
@@ -3359,7 +3359,7 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
3359 struct de4x5_private *lp = netdev_priv(dev); 3359 struct de4x5_private *lp = netdev_priv(dev);
3360 u_long iobase = dev->base_addr; 3360 u_long iobase = dev->base_addr;
3361 s32 sts, csr12; 3361 s32 sts, csr12;
3362 3362
3363 if (lp->timeout < 0) { 3363 if (lp->timeout < 0) {
3364 lp->timeout = msec/100; 3364 lp->timeout = msec/100;
3365 if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ 3365 if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
@@ -3372,22 +3372,22 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
3372 /* clear all pending interrupts */ 3372 /* clear all pending interrupts */
3373 sts = inl(DE4X5_STS); 3373 sts = inl(DE4X5_STS);
3374 outl(sts, DE4X5_STS); 3374 outl(sts, DE4X5_STS);
3375 3375
3376 /* clear csr12 NRA and SRA bits */ 3376 /* clear csr12 NRA and SRA bits */
3377 if ((lp->chipset == DC21041) || lp->useSROM) { 3377 if ((lp->chipset == DC21041) || lp->useSROM) {
3378 csr12 = inl(DE4X5_SISR); 3378 csr12 = inl(DE4X5_SISR);
3379 outl(csr12, DE4X5_SISR); 3379 outl(csr12, DE4X5_SISR);
3380 } 3380 }
3381 } 3381 }
3382 3382
3383 sts = inl(DE4X5_STS) & ~TIMER_CB; 3383 sts = inl(DE4X5_STS) & ~TIMER_CB;
3384 3384
3385 if (!(sts & irqs) && --lp->timeout) { 3385 if (!(sts & irqs) && --lp->timeout) {
3386 sts = 100 | TIMER_CB; 3386 sts = 100 | TIMER_CB;
3387 } else { 3387 } else {
3388 lp->timeout = -1; 3388 lp->timeout = -1;
3389 } 3389 }
3390 3390
3391 return sts; 3391 return sts;
3392} 3392}
3393 3393
@@ -3397,11 +3397,11 @@ test_tp(struct net_device *dev, s32 msec)
3397 struct de4x5_private *lp = netdev_priv(dev); 3397 struct de4x5_private *lp = netdev_priv(dev);
3398 u_long iobase = dev->base_addr; 3398 u_long iobase = dev->base_addr;
3399 int sisr; 3399 int sisr;
3400 3400
3401 if (lp->timeout < 0) { 3401 if (lp->timeout < 0) {
3402 lp->timeout = msec/100; 3402 lp->timeout = msec/100;
3403 } 3403 }
3404 3404
3405 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); 3405 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3406 3406
3407 if (sisr && --lp->timeout) { 3407 if (sisr && --lp->timeout) {
@@ -3409,7 +3409,7 @@ test_tp(struct net_device *dev, s32 msec)
3409 } else { 3409 } else {
3410 lp->timeout = -1; 3410 lp->timeout = -1;
3411 } 3411 }
3412 3412
3413 return sisr; 3413 return sisr;
3414} 3414}
3415 3415
@@ -3436,7 +3436,7 @@ test_for_100Mb(struct net_device *dev, int msec)
3436 lp->timeout = msec/SAMPLE_INTERVAL; 3436 lp->timeout = msec/SAMPLE_INTERVAL;
3437 } 3437 }
3438 } 3438 }
3439 3439
3440 if (lp->phy[lp->active].id || lp->useSROM) { 3440 if (lp->phy[lp->active].id || lp->useSROM) {
3441 gep = is_100_up(dev) | is_spd_100(dev); 3441 gep = is_100_up(dev) | is_spd_100(dev);
3442 } else { 3442 } else {
@@ -3447,7 +3447,7 @@ test_for_100Mb(struct net_device *dev, int msec)
3447 } else { 3447 } else {
3448 lp->timeout = -1; 3448 lp->timeout = -1;
3449 } 3449 }
3450 3450
3451 return gep; 3451 return gep;
3452} 3452}
3453 3453
@@ -3459,13 +3459,13 @@ wait_for_link(struct net_device *dev)
3459 if (lp->timeout < 0) { 3459 if (lp->timeout < 0) {
3460 lp->timeout = 1; 3460 lp->timeout = 1;
3461 } 3461 }
3462 3462
3463 if (lp->timeout--) { 3463 if (lp->timeout--) {
3464 return TIMER_CB; 3464 return TIMER_CB;
3465 } else { 3465 } else {
3466 lp->timeout = -1; 3466 lp->timeout = -1;
3467 } 3467 }
3468 3468
3469 return 0; 3469 return 0;
3470} 3470}
3471 3471
@@ -3479,21 +3479,21 @@ test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
3479 struct de4x5_private *lp = netdev_priv(dev); 3479 struct de4x5_private *lp = netdev_priv(dev);
3480 int test; 3480 int test;
3481 u_long iobase = dev->base_addr; 3481 u_long iobase = dev->base_addr;
3482 3482
3483 if (lp->timeout < 0) { 3483 if (lp->timeout < 0) {
3484 lp->timeout = msec/100; 3484 lp->timeout = msec/100;
3485 } 3485 }
3486 3486
3487 if (pol) pol = ~0; 3487 if (pol) pol = ~0;
3488 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; 3488 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3489 test = (reg ^ pol) & mask; 3489 test = (reg ^ pol) & mask;
3490 3490
3491 if (test && --lp->timeout) { 3491 if (test && --lp->timeout) {
3492 reg = 100 | TIMER_CB; 3492 reg = 100 | TIMER_CB;
3493 } else { 3493 } else {
3494 lp->timeout = -1; 3494 lp->timeout = -1;
3495 } 3495 }
3496 3496
3497 return reg; 3497 return reg;
3498} 3498}
3499 3499
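The test_media(), test_tp() and test_mii_reg() hunks above all follow the same cooperative polling pattern driven by the autosense timer: arm a countdown on the first call (lp->timeout < 0), return a value with TIMER_CB set while the condition is still pending so the caller reschedules the probe roughly every 100 ms, and disarm by writing -1 back to lp->timeout once the condition resolves or the countdown runs out. A minimal sketch of that shared skeleton; condition_met() is a placeholder invented for illustration, not a driver function:

static int de4x5_poll_skeleton(struct de4x5_private *lp, int msec)
{
    if (lp->timeout < 0)                    /* first entry: arm the countdown */
        lp->timeout = msec / 100;

    if (!condition_met(lp) && --lp->timeout)   /* condition_met() is a placeholder */
        return 100 | TIMER_CB;              /* still pending: retry in ~100 ms */

    lp->timeout = -1;                       /* resolved or expired: disarm */
    return 0;
}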
@@ -3503,7 +3503,7 @@ is_spd_100(struct net_device *dev)
3503 struct de4x5_private *lp = netdev_priv(dev); 3503 struct de4x5_private *lp = netdev_priv(dev);
3504 u_long iobase = dev->base_addr; 3504 u_long iobase = dev->base_addr;
3505 int spd; 3505 int spd;
3506 3506
3507 if (lp->useMII) { 3507 if (lp->useMII) {
3508 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); 3508 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3509 spd = ~(spd ^ lp->phy[lp->active].spd.value); 3509 spd = ~(spd ^ lp->phy[lp->active].spd.value);
@@ -3517,7 +3517,7 @@ is_spd_100(struct net_device *dev)
3517 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | 3517 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3518 (lp->linkOK & ~lp->asBitValid); 3518 (lp->linkOK & ~lp->asBitValid);
3519 } 3519 }
3520 3520
3521 return spd; 3521 return spd;
3522} 3522}
3523 3523
@@ -3526,7 +3526,7 @@ is_100_up(struct net_device *dev)
3526{ 3526{
3527 struct de4x5_private *lp = netdev_priv(dev); 3527 struct de4x5_private *lp = netdev_priv(dev);
3528 u_long iobase = dev->base_addr; 3528 u_long iobase = dev->base_addr;
3529 3529
3530 if (lp->useMII) { 3530 if (lp->useMII) {
3531 /* Double read for sticky bits & temporary drops */ 3531 /* Double read for sticky bits & temporary drops */
3532 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); 3532 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3547,7 +3547,7 @@ is_10_up(struct net_device *dev)
3547{ 3547{
3548 struct de4x5_private *lp = netdev_priv(dev); 3548 struct de4x5_private *lp = netdev_priv(dev);
3549 u_long iobase = dev->base_addr; 3549 u_long iobase = dev->base_addr;
3550 3550
3551 if (lp->useMII) { 3551 if (lp->useMII) {
3552 /* Double read for sticky bits & temporary drops */ 3552 /* Double read for sticky bits & temporary drops */
3553 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); 3553 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3570,7 +3570,7 @@ is_anc_capable(struct net_device *dev)
3570{ 3570{
3571 struct de4x5_private *lp = netdev_priv(dev); 3571 struct de4x5_private *lp = netdev_priv(dev);
3572 u_long iobase = dev->base_addr; 3572 u_long iobase = dev->base_addr;
3573 3573
3574 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { 3574 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3575 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII)); 3575 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
3576 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 3576 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
@@ -3590,24 +3590,24 @@ ping_media(struct net_device *dev, int msec)
3590 struct de4x5_private *lp = netdev_priv(dev); 3590 struct de4x5_private *lp = netdev_priv(dev);
3591 u_long iobase = dev->base_addr; 3591 u_long iobase = dev->base_addr;
3592 int sisr; 3592 int sisr;
3593 3593
3594 if (lp->timeout < 0) { 3594 if (lp->timeout < 0) {
3595 lp->timeout = msec/100; 3595 lp->timeout = msec/100;
3596 3596
3597 lp->tmp = lp->tx_new; /* Remember the ring position */ 3597 lp->tmp = lp->tx_new; /* Remember the ring position */
3598 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); 3598 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3599 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 3599 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
3600 outl(POLL_DEMAND, DE4X5_TPD); 3600 outl(POLL_DEMAND, DE4X5_TPD);
3601 } 3601 }
3602 3602
3603 sisr = inl(DE4X5_SISR); 3603 sisr = inl(DE4X5_SISR);
3604 3604
3605 if ((!(sisr & SISR_NCR)) && 3605 if ((!(sisr & SISR_NCR)) &&
3606 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && 3606 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3607 (--lp->timeout)) { 3607 (--lp->timeout)) {
3608 sisr = 100 | TIMER_CB; 3608 sisr = 100 | TIMER_CB;
3609 } else { 3609 } else {
3610 if ((!(sisr & SISR_NCR)) && 3610 if ((!(sisr & SISR_NCR)) &&
3611 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && 3611 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3612 lp->timeout) { 3612 lp->timeout) {
3613 sisr = 0; 3613 sisr = 0;
@@ -3616,7 +3616,7 @@ ping_media(struct net_device *dev, int msec)
3616 } 3616 }
3617 lp->timeout = -1; 3617 lp->timeout = -1;
3618 } 3618 }
3619 3619
3620 return sisr; 3620 return sisr;
3621} 3621}
3622 3622
@@ -3668,7 +3668,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3668 } else { /* Linear buffer */ 3668 } else { /* Linear buffer */
3669 memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len); 3669 memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
3670 } 3670 }
3671 3671
3672 return p; 3672 return p;
3673#endif 3673#endif
3674} 3674}
@@ -3751,23 +3751,23 @@ de4x5_rst_desc_ring(struct net_device *dev)
3751 outl(lp->dma_rings, DE4X5_RRBA); 3751 outl(lp->dma_rings, DE4X5_RRBA);
3752 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), 3752 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3753 DE4X5_TRBA); 3753 DE4X5_TRBA);
3754 3754
3755 lp->rx_new = lp->rx_old = 0; 3755 lp->rx_new = lp->rx_old = 0;
3756 lp->tx_new = lp->tx_old = 0; 3756 lp->tx_new = lp->tx_old = 0;
3757 3757
3758 for (i = 0; i < lp->rxRingSize; i++) { 3758 for (i = 0; i < lp->rxRingSize; i++) {
3759 lp->rx_ring[i].status = cpu_to_le32(R_OWN); 3759 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3760 } 3760 }
3761 3761
3762 for (i = 0; i < lp->txRingSize; i++) { 3762 for (i = 0; i < lp->txRingSize; i++) {
3763 lp->tx_ring[i].status = cpu_to_le32(0); 3763 lp->tx_ring[i].status = cpu_to_le32(0);
3764 } 3764 }
3765 3765
3766 barrier(); 3766 barrier();
3767 lp->cache.save_cnt--; 3767 lp->cache.save_cnt--;
3768 START_DE4X5; 3768 START_DE4X5;
3769 } 3769 }
3770 3770
3771 return; 3771 return;
3772} 3772}
3773 3773
@@ -3792,7 +3792,7 @@ de4x5_cache_state(struct net_device *dev, int flag)
3792 gep_wr(lp->cache.gepc, dev); 3792 gep_wr(lp->cache.gepc, dev);
3793 gep_wr(lp->cache.gep, dev); 3793 gep_wr(lp->cache.gep, dev);
3794 } else { 3794 } else {
3795 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, 3795 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3796 lp->cache.csr15); 3796 lp->cache.csr15);
3797 } 3797 }
3798 break; 3798 break;
@@ -3854,25 +3854,25 @@ test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3854 struct de4x5_private *lp = netdev_priv(dev); 3854 struct de4x5_private *lp = netdev_priv(dev);
3855 u_long iobase = dev->base_addr; 3855 u_long iobase = dev->base_addr;
3856 s32 sts, ans; 3856 s32 sts, ans;
3857 3857
3858 if (lp->timeout < 0) { 3858 if (lp->timeout < 0) {
3859 lp->timeout = msec/100; 3859 lp->timeout = msec/100;
3860 outl(irq_mask, DE4X5_IMR); 3860 outl(irq_mask, DE4X5_IMR);
3861 3861
3862 /* clear all pending interrupts */ 3862 /* clear all pending interrupts */
3863 sts = inl(DE4X5_STS); 3863 sts = inl(DE4X5_STS);
3864 outl(sts, DE4X5_STS); 3864 outl(sts, DE4X5_STS);
3865 } 3865 }
3866 3866
3867 ans = inl(DE4X5_SISR) & SISR_ANS; 3867 ans = inl(DE4X5_SISR) & SISR_ANS;
3868 sts = inl(DE4X5_STS) & ~TIMER_CB; 3868 sts = inl(DE4X5_STS) & ~TIMER_CB;
3869 3869
3870 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { 3870 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3871 sts = 100 | TIMER_CB; 3871 sts = 100 | TIMER_CB;
3872 } else { 3872 } else {
3873 lp->timeout = -1; 3873 lp->timeout = -1;
3874 } 3874 }
3875 3875
3876 return sts; 3876 return sts;
3877} 3877}
3878 3878
@@ -3882,7 +3882,7 @@ de4x5_setup_intr(struct net_device *dev)
3882 struct de4x5_private *lp = netdev_priv(dev); 3882 struct de4x5_private *lp = netdev_priv(dev);
3883 u_long iobase = dev->base_addr; 3883 u_long iobase = dev->base_addr;
3884 s32 imr, sts; 3884 s32 imr, sts;
3885 3885
3886 if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ 3886 if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
3887 imr = 0; 3887 imr = 0;
3888 UNMASK_IRQs; 3888 UNMASK_IRQs;
@@ -3890,7 +3890,7 @@ de4x5_setup_intr(struct net_device *dev)
3890 outl(sts, DE4X5_STS); 3890 outl(sts, DE4X5_STS);
3891 ENABLE_IRQs; 3891 ENABLE_IRQs;
3892 } 3892 }
3893 3893
3894 return; 3894 return;
3895} 3895}
3896 3896
@@ -3936,17 +3936,17 @@ create_packet(struct net_device *dev, char *frame, int len)
3936{ 3936{
3937 int i; 3937 int i;
3938 char *buf = frame; 3938 char *buf = frame;
3939 3939
3940 for (i=0; i<ETH_ALEN; i++) { /* Use this source address */ 3940 for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
3941 *buf++ = dev->dev_addr[i]; 3941 *buf++ = dev->dev_addr[i];
3942 } 3942 }
3943 for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */ 3943 for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
3944 *buf++ = dev->dev_addr[i]; 3944 *buf++ = dev->dev_addr[i];
3945 } 3945 }
3946 3946
3947 *buf++ = 0; /* Packet length (2 bytes) */ 3947 *buf++ = 0; /* Packet length (2 bytes) */
3948 *buf++ = 1; 3948 *buf++ = 1;
3949 3949
3950 return; 3950 return;
3951} 3951}
3952 3952
@@ -3978,7 +3978,7 @@ static int
3978PCI_signature(char *name, struct de4x5_private *lp) 3978PCI_signature(char *name, struct de4x5_private *lp)
3979{ 3979{
3980 int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); 3980 int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
3981 3981
3982 if (lp->chipset == DC21040) { 3982 if (lp->chipset == DC21040) {
3983 strcpy(name, "DE434/5"); 3983 strcpy(name, "DE434/5");
3984 return status; 3984 return status;
@@ -4007,7 +4007,7 @@ PCI_signature(char *name, struct de4x5_private *lp)
4007 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 4007 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
4008 lp->useSROM = TRUE; 4008 lp->useSROM = TRUE;
4009 } 4009 }
4010 4010
4011 return status; 4011 return status;
4012} 4012}
4013 4013
@@ -4024,7 +4024,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
4024{ 4024{
4025 int i, j=0; 4025 int i, j=0;
4026 struct de4x5_private *lp = netdev_priv(dev); 4026 struct de4x5_private *lp = netdev_priv(dev);
4027 4027
4028 if (lp->chipset == DC21040) { 4028 if (lp->chipset == DC21040) {
4029 if (lp->bus == EISA) { 4029 if (lp->bus == EISA) {
4030 enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ 4030 enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
@@ -4049,7 +4049,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
4049 } 4049 }
4050 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); 4050 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
4051 } 4051 }
4052 4052
4053 return; 4053 return;
4054} 4054}
4055 4055
@@ -4071,11 +4071,11 @@ enet_addr_rst(u_long aprom_addr)
4071 short sigLength=0; 4071 short sigLength=0;
4072 s8 data; 4072 s8 data;
4073 int i, j; 4073 int i, j;
4074 4074
4075 dev.llsig.a = ETH_PROM_SIG; 4075 dev.llsig.a = ETH_PROM_SIG;
4076 dev.llsig.b = ETH_PROM_SIG; 4076 dev.llsig.b = ETH_PROM_SIG;
4077 sigLength = sizeof(u32) << 1; 4077 sigLength = sizeof(u32) << 1;
4078 4078
4079 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) { 4079 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4080 data = inb(aprom_addr); 4080 data = inb(aprom_addr);
4081 if (dev.Sig[j] == data) { /* track signature */ 4081 if (dev.Sig[j] == data) { /* track signature */
@@ -4088,7 +4088,7 @@ enet_addr_rst(u_long aprom_addr)
4088 } 4088 }
4089 } 4089 }
4090 } 4090 }
4091 4091
4092 return; 4092 return;
4093} 4093}
4094 4094
@@ -4111,7 +4111,7 @@ get_hw_addr(struct net_device *dev)
4111 for (i=0,k=0,j=0;j<3;j++) { 4111 for (i=0,k=0,j=0;j<3;j++) {
4112 k <<= 1; 4112 k <<= 1;
4113 if (k > 0xffff) k-=0xffff; 4113 if (k > 0xffff) k-=0xffff;
4114 4114
4115 if (lp->bus == PCI) { 4115 if (lp->bus == PCI) {
4116 if (lp->chipset == DC21040) { 4116 if (lp->chipset == DC21040) {
4117 while ((tmp = inl(DE4X5_APROM)) < 0); 4117 while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4133,11 +4133,11 @@ get_hw_addr(struct net_device *dev)
4133 k += (u_short) ((tmp = inb(EISA_APROM)) << 8); 4133 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4134 dev->dev_addr[i++] = (u_char) tmp; 4134 dev->dev_addr[i++] = (u_char) tmp;
4135 } 4135 }
4136 4136
4137 if (k > 0xffff) k-=0xffff; 4137 if (k > 0xffff) k-=0xffff;
4138 } 4138 }
4139 if (k == 0xffff) k=0; 4139 if (k == 0xffff) k=0;
4140 4140
4141 if (lp->bus == PCI) { 4141 if (lp->bus == PCI) {
4142 if (lp->chipset == DC21040) { 4142 if (lp->chipset == DC21040) {
4143 while ((tmp = inl(DE4X5_APROM)) < 0); 4143 while ((tmp = inl(DE4X5_APROM)) < 0);
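The two get_hw_addr() hunks above fold the station address into a simple rotate-and-wrap checksum: before each 16-bit word is added the accumulator is shifted left one bit and any overflow past 0xffff is wrapped back in (end-around carry), the same wrap is applied after the add, and an all-ones result is finally treated as zero. A sketch of one accumulation step, assuming the two bytes read from the ROM form a single little-endian 16-bit word; the helper is illustrative, not driver code:

static u32 de4x5_csum_step(u32 k, u16 word)
{
    k <<= 1;                /* rotate the accumulator left ... */
    if (k > 0xffff)
        k -= 0xffff;        /* ... with end-around carry */
    k += word;              /* add the next address word */
    if (k > 0xffff)
        k -= 0xffff;        /* wrap the carry from the add as well */
    return k;
}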
@@ -4156,7 +4156,7 @@ get_hw_addr(struct net_device *dev)
4156 srom_repair(dev, broken); 4156 srom_repair(dev, broken);
4157 4157
4158#ifdef CONFIG_PPC_MULTIPLATFORM 4158#ifdef CONFIG_PPC_MULTIPLATFORM
4159 /* 4159 /*
4160 ** If the address starts with 00 a0, we have to bit-reverse 4160 ** If the address starts with 00 a0, we have to bit-reverse
4161 ** each byte of the address. 4161 ** each byte of the address.
4162 */ 4162 */
@@ -4245,7 +4245,7 @@ test_bad_enet(struct net_device *dev, int status)
4245 4245
4246 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i]; 4246 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4247 if ((tmp == 0) || (tmp == 0x5fa)) { 4247 if ((tmp == 0) || (tmp == 0x5fa)) {
4248 if ((lp->chipset == last.chipset) && 4248 if ((lp->chipset == last.chipset) &&
4249 (lp->bus_num == last.bus) && (lp->bus_num > 0)) { 4249 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4250 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; 4250 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4251 for (i=ETH_ALEN-1; i>2; --i) { 4251 for (i=ETH_ALEN-1; i>2; --i) {
@@ -4275,7 +4275,7 @@ test_bad_enet(struct net_device *dev, int status)
4275static int 4275static int
4276an_exception(struct de4x5_private *lp) 4276an_exception(struct de4x5_private *lp)
4277{ 4277{
4278 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && 4278 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4279 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { 4279 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4280 return -1; 4280 return -1;
4281 } 4281 }
@@ -4290,11 +4290,11 @@ static short
4290srom_rd(u_long addr, u_char offset) 4290srom_rd(u_long addr, u_char offset)
4291{ 4291{
4292 sendto_srom(SROM_RD | SROM_SR, addr); 4292 sendto_srom(SROM_RD | SROM_SR, addr);
4293 4293
4294 srom_latch(SROM_RD | SROM_SR | DT_CS, addr); 4294 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4295 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); 4295 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4296 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); 4296 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4297 4297
4298 return srom_data(SROM_RD | SROM_SR | DT_CS, addr); 4298 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4299} 4299}
4300 4300
@@ -4304,7 +4304,7 @@ srom_latch(u_int command, u_long addr)
4304 sendto_srom(command, addr); 4304 sendto_srom(command, addr);
4305 sendto_srom(command | DT_CLK, addr); 4305 sendto_srom(command | DT_CLK, addr);
4306 sendto_srom(command, addr); 4306 sendto_srom(command, addr);
4307 4307
4308 return; 4308 return;
4309} 4309}
4310 4310
@@ -4314,7 +4314,7 @@ srom_command(u_int command, u_long addr)
4314 srom_latch(command, addr); 4314 srom_latch(command, addr);
4315 srom_latch(command, addr); 4315 srom_latch(command, addr);
4316 srom_latch((command & 0x0000ff00) | DT_CS, addr); 4316 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4317 4317
4318 return; 4318 return;
4319} 4319}
4320 4320
@@ -4322,15 +4322,15 @@ static void
4322srom_address(u_int command, u_long addr, u_char offset) 4322srom_address(u_int command, u_long addr, u_char offset)
4323{ 4323{
4324 int i, a; 4324 int i, a;
4325 4325
4326 a = offset << 2; 4326 a = offset << 2;
4327 for (i=0; i<6; i++, a <<= 1) { 4327 for (i=0; i<6; i++, a <<= 1) {
4328 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr); 4328 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4329 } 4329 }
4330 udelay(1); 4330 udelay(1);
4331 4331
4332 i = (getfrom_srom(addr) >> 3) & 0x01; 4332 i = (getfrom_srom(addr) >> 3) & 0x01;
4333 4333
4334 return; 4334 return;
4335} 4335}
4336 4336
@@ -4340,17 +4340,17 @@ srom_data(u_int command, u_long addr)
4340 int i; 4340 int i;
4341 short word = 0; 4341 short word = 0;
4342 s32 tmp; 4342 s32 tmp;
4343 4343
4344 for (i=0; i<16; i++) { 4344 for (i=0; i<16; i++) {
4345 sendto_srom(command | DT_CLK, addr); 4345 sendto_srom(command | DT_CLK, addr);
4346 tmp = getfrom_srom(addr); 4346 tmp = getfrom_srom(addr);
4347 sendto_srom(command, addr); 4347 sendto_srom(command, addr);
4348 4348
4349 word = (word << 1) | ((tmp >> 3) & 0x01); 4349 word = (word << 1) | ((tmp >> 3) & 0x01);
4350 } 4350 }
4351 4351
4352 sendto_srom(command & 0x0000ff00, addr); 4352 sendto_srom(command & 0x0000ff00, addr);
4353 4353
4354 return word; 4354 return word;
4355} 4355}
4356 4356
@@ -4359,13 +4359,13 @@ static void
4359srom_busy(u_int command, u_long addr) 4359srom_busy(u_int command, u_long addr)
4360{ 4360{
4361 sendto_srom((command & 0x0000ff00) | DT_CS, addr); 4361 sendto_srom((command & 0x0000ff00) | DT_CS, addr);
4362 4362
4363 while (!((getfrom_srom(addr) >> 3) & 0x01)) { 4363 while (!((getfrom_srom(addr) >> 3) & 0x01)) {
4364 mdelay(1); 4364 mdelay(1);
4365 } 4365 }
4366 4366
4367 sendto_srom(command & 0x0000ff00, addr); 4367 sendto_srom(command & 0x0000ff00, addr);
4368 4368
4369 return; 4369 return;
4370} 4370}
4371*/ 4371*/
@@ -4375,7 +4375,7 @@ sendto_srom(u_int command, u_long addr)
4375{ 4375{
4376 outl(command, addr); 4376 outl(command, addr);
4377 udelay(1); 4377 udelay(1);
4378 4378
4379 return; 4379 return;
4380} 4380}
4381 4381
@@ -4383,10 +4383,10 @@ static int
4383getfrom_srom(u_long addr) 4383getfrom_srom(u_long addr)
4384{ 4384{
4385 s32 tmp; 4385 s32 tmp;
4386 4386
4387 tmp = inl(addr); 4387 tmp = inl(addr);
4388 udelay(1); 4388 udelay(1);
4389 4389
4390 return tmp; 4390 return tmp;
4391} 4391}
4392 4392
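The srom_*() and sendto_srom()/getfrom_srom() hunks above bit-bang a MicroWire-style serial EEPROM through a single register: each call drives or samples one data bit around a clock edge, and srom_rd() strings the primitives together into a complete read of one 16-bit word. As a hedged usage sketch, this is how a caller could dump the device's SROM contents with srom_rd(); the 64-word size and the helper name are assumptions made for illustration only:

static void de4x5_dump_srom_words(u_long aprom_addr, u16 *buf)
{
    int i;

    for (i = 0; i < 64; i++)            /* 64 x 16-bit words assumed */
        buf[i] = (u16)srom_rd(aprom_addr, (u_char)i);
}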
@@ -4403,7 +4403,7 @@ srom_infoleaf_info(struct net_device *dev)
4403 } 4403 }
4404 if (i == INFOLEAF_SIZE) { 4404 if (i == INFOLEAF_SIZE) {
4405 lp->useSROM = FALSE; 4405 lp->useSROM = FALSE;
4406 printk("%s: Cannot find correct chipset for SROM decoding!\n", 4406 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4407 dev->name); 4407 dev->name);
4408 return -ENXIO; 4408 return -ENXIO;
4409 } 4409 }
@@ -4420,7 +4420,7 @@ srom_infoleaf_info(struct net_device *dev)
4420 } 4420 }
4421 if (i == 0) { 4421 if (i == 0) {
4422 lp->useSROM = FALSE; 4422 lp->useSROM = FALSE;
4423 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", 4423 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4424 dev->name, lp->device); 4424 dev->name, lp->device);
4425 return -ENXIO; 4425 return -ENXIO;
4426 } 4426 }
@@ -4494,9 +4494,9 @@ srom_exec(struct net_device *dev, u_char *p)
4494 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return; 4494 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4495 4495
4496 if (lp->chipset != DC21140) RESET_SIA; 4496 if (lp->chipset != DC21140) RESET_SIA;
4497 4497
4498 while (count--) { 4498 while (count--) {
4499 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? 4499 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4500 *p++ : TWIDDLE(w++)), dev); 4500 *p++ : TWIDDLE(w++)), dev);
4501 mdelay(2); /* 2ms per action */ 4501 mdelay(2); /* 2ms per action */
4502 } 4502 }
@@ -4514,13 +4514,13 @@ srom_exec(struct net_device *dev, u_char *p)
4514** unless I implement the DC21041 SROM functions. There's no need 4514** unless I implement the DC21041 SROM functions. There's no need
4515** since the existing code will be satisfactory for all boards. 4515** since the existing code will be satisfactory for all boards.
4516*/ 4516*/
4517static int 4517static int
4518dc21041_infoleaf(struct net_device *dev) 4518dc21041_infoleaf(struct net_device *dev)
4519{ 4519{
4520 return DE4X5_AUTOSENSE_MS; 4520 return DE4X5_AUTOSENSE_MS;
4521} 4521}
4522 4522
4523static int 4523static int
4524dc21140_infoleaf(struct net_device *dev) 4524dc21140_infoleaf(struct net_device *dev)
4525{ 4525{
4526 struct de4x5_private *lp = netdev_priv(dev); 4526 struct de4x5_private *lp = netdev_priv(dev);
@@ -4558,7 +4558,7 @@ dc21140_infoleaf(struct net_device *dev)
4558 return next_tick & ~TIMER_CB; 4558 return next_tick & ~TIMER_CB;
4559} 4559}
4560 4560
4561static int 4561static int
4562dc21142_infoleaf(struct net_device *dev) 4562dc21142_infoleaf(struct net_device *dev)
4563{ 4563{
4564 struct de4x5_private *lp = netdev_priv(dev); 4564 struct de4x5_private *lp = netdev_priv(dev);
@@ -4593,7 +4593,7 @@ dc21142_infoleaf(struct net_device *dev)
4593 return next_tick & ~TIMER_CB; 4593 return next_tick & ~TIMER_CB;
4594} 4594}
4595 4595
4596static int 4596static int
4597dc21143_infoleaf(struct net_device *dev) 4597dc21143_infoleaf(struct net_device *dev)
4598{ 4598{
4599 struct de4x5_private *lp = netdev_priv(dev); 4599 struct de4x5_private *lp = netdev_priv(dev);
@@ -4631,7 +4631,7 @@ dc21143_infoleaf(struct net_device *dev)
4631** The compact infoblock is only designed for DC21140[A] chips, so 4631** The compact infoblock is only designed for DC21140[A] chips, so
4632** we'll reuse the dc21140m_autoconf function. Non MII media only. 4632** we'll reuse the dc21140m_autoconf function. Non MII media only.
4633*/ 4633*/
4634static int 4634static int
4635compact_infoblock(struct net_device *dev, u_char count, u_char *p) 4635compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4636{ 4636{
4637 struct de4x5_private *lp = netdev_priv(dev); 4637 struct de4x5_private *lp = netdev_priv(dev);
@@ -4671,7 +4671,7 @@ compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4671/* 4671/*
4672** This block describes non MII media for the DC21140[A] only. 4672** This block describes non MII media for the DC21140[A] only.
4673*/ 4673*/
4674static int 4674static int
4675type0_infoblock(struct net_device *dev, u_char count, u_char *p) 4675type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4676{ 4676{
4677 struct de4x5_private *lp = netdev_priv(dev); 4677 struct de4x5_private *lp = netdev_priv(dev);
@@ -4711,7 +4711,7 @@ type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4711 4711
4712/* These functions are under construction! */ 4712/* These functions are under construction! */
4713 4713
4714static int 4714static int
4715type1_infoblock(struct net_device *dev, u_char count, u_char *p) 4715type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4716{ 4716{
4717 struct de4x5_private *lp = netdev_priv(dev); 4717 struct de4x5_private *lp = netdev_priv(dev);
@@ -4750,7 +4750,7 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4750 return dc21140m_autoconf(dev); 4750 return dc21140m_autoconf(dev);
4751} 4751}
4752 4752
4753static int 4753static int
4754type2_infoblock(struct net_device *dev, u_char count, u_char *p) 4754type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4755{ 4755{
4756 struct de4x5_private *lp = netdev_priv(dev); 4756 struct de4x5_private *lp = netdev_priv(dev);
@@ -4791,7 +4791,7 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4791 return dc2114x_autoconf(dev); 4791 return dc2114x_autoconf(dev);
4792} 4792}
4793 4793
4794static int 4794static int
4795type3_infoblock(struct net_device *dev, u_char count, u_char *p) 4795type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4796{ 4796{
4797 struct de4x5_private *lp = netdev_priv(dev); 4797 struct de4x5_private *lp = netdev_priv(dev);
@@ -4833,7 +4833,7 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4833 return dc2114x_autoconf(dev); 4833 return dc2114x_autoconf(dev);
4834} 4834}
4835 4835
4836static int 4836static int
4837type4_infoblock(struct net_device *dev, u_char count, u_char *p) 4837type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4838{ 4838{
4839 struct de4x5_private *lp = netdev_priv(dev); 4839 struct de4x5_private *lp = netdev_priv(dev);
@@ -4878,7 +4878,7 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4878** This block type provides information for resetting external devices 4878** This block type provides information for resetting external devices
4879** (chips) through the General Purpose Register. 4879** (chips) through the General Purpose Register.
4880*/ 4880*/
4881static int 4881static int
4882type5_infoblock(struct net_device *dev, u_char count, u_char *p) 4882type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4883{ 4883{
4884 struct de4x5_private *lp = netdev_priv(dev); 4884 struct de4x5_private *lp = netdev_priv(dev);
@@ -4916,7 +4916,7 @@ mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4916 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ 4916 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
4917 mii_address(phyreg, ioaddr); /* PHY Register to read */ 4917 mii_address(phyreg, ioaddr); /* PHY Register to read */
4918 mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ 4918 mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
4919 4919
4920 return mii_rdata(ioaddr); /* Read data */ 4920 return mii_rdata(ioaddr); /* Read data */
4921} 4921}
4922 4922
@@ -4931,7 +4931,7 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4931 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ 4931 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
4932 data = mii_swap(data, 16); /* Swap data bit ordering */ 4932 data = mii_swap(data, 16); /* Swap data bit ordering */
4933 mii_wdata(data, 16, ioaddr); /* Write data */ 4933 mii_wdata(data, 16, ioaddr); /* Write data */
4934 4934
4935 return; 4935 return;
4936} 4936}
4937 4937
@@ -4940,12 +4940,12 @@ mii_rdata(u_long ioaddr)
4940{ 4940{
4941 int i; 4941 int i;
4942 s32 tmp = 0; 4942 s32 tmp = 0;
4943 4943
4944 for (i=0; i<16; i++) { 4944 for (i=0; i<16; i++) {
4945 tmp <<= 1; 4945 tmp <<= 1;
4946 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr); 4946 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4947 } 4947 }
4948 4948
4949 return tmp; 4949 return tmp;
4950} 4950}
4951 4951
@@ -4953,12 +4953,12 @@ static void
4953mii_wdata(int data, int len, u_long ioaddr) 4953mii_wdata(int data, int len, u_long ioaddr)
4954{ 4954{
4955 int i; 4955 int i;
4956 4956
4957 for (i=0; i<len; i++) { 4957 for (i=0; i<len; i++) {
4958 sendto_mii(MII_MWR | MII_WR, data, ioaddr); 4958 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4959 data >>= 1; 4959 data >>= 1;
4960 } 4960 }
4961 4961
4962 return; 4962 return;
4963} 4963}
4964 4964
@@ -4966,13 +4966,13 @@ static void
4966mii_address(u_char addr, u_long ioaddr) 4966mii_address(u_char addr, u_long ioaddr)
4967{ 4967{
4968 int i; 4968 int i;
4969 4969
4970 addr = mii_swap(addr, 5); 4970 addr = mii_swap(addr, 5);
4971 for (i=0; i<5; i++) { 4971 for (i=0; i<5; i++) {
4972 sendto_mii(MII_MWR | MII_WR, addr, ioaddr); 4972 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4973 addr >>= 1; 4973 addr >>= 1;
4974 } 4974 }
4975 4975
4976 return; 4976 return;
4977} 4977}
4978 4978
@@ -4980,12 +4980,12 @@ static void
4980mii_ta(u_long rw, u_long ioaddr) 4980mii_ta(u_long rw, u_long ioaddr)
4981{ 4981{
4982 if (rw == MII_STWR) { 4982 if (rw == MII_STWR) {
4983 sendto_mii(MII_MWR | MII_WR, 1, ioaddr); 4983 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4984 sendto_mii(MII_MWR | MII_WR, 0, ioaddr); 4984 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4985 } else { 4985 } else {
4986 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ 4986 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
4987 } 4987 }
4988 4988
4989 return; 4989 return;
4990} 4990}
4991 4991
@@ -4993,13 +4993,13 @@ static int
4993mii_swap(int data, int len) 4993mii_swap(int data, int len)
4994{ 4994{
4995 int i, tmp = 0; 4995 int i, tmp = 0;
4996 4996
4997 for (i=0; i<len; i++) { 4997 for (i=0; i<len; i++) {
4998 tmp <<= 1; 4998 tmp <<= 1;
4999 tmp |= (data & 1); 4999 tmp |= (data & 1);
5000 data >>= 1; 5000 data >>= 1;
5001 } 5001 }
5002 5002
5003 return tmp; 5003 return tmp;
5004} 5004}
5005 5005
@@ -5007,13 +5007,13 @@ static void
5007sendto_mii(u32 command, int data, u_long ioaddr) 5007sendto_mii(u32 command, int data, u_long ioaddr)
5008{ 5008{
5009 u32 j; 5009 u32 j;
5010 5010
5011 j = (data & 1) << 17; 5011 j = (data & 1) << 17;
5012 outl(command | j, ioaddr); 5012 outl(command | j, ioaddr);
5013 udelay(1); 5013 udelay(1);
5014 outl(command | MII_MDC | j, ioaddr); 5014 outl(command | MII_MDC | j, ioaddr);
5015 udelay(1); 5015 udelay(1);
5016 5016
5017 return; 5017 return;
5018} 5018}
5019 5019
@@ -5024,7 +5024,7 @@ getfrom_mii(u32 command, u_long ioaddr)
5024 udelay(1); 5024 udelay(1);
5025 outl(command | MII_MDC, ioaddr); 5025 outl(command | MII_MDC, ioaddr);
5026 udelay(1); 5026 udelay(1);
5027 5027
5028 return ((inl(ioaddr) >> 19) & 1); 5028 return ((inl(ioaddr) >> 19) & 1);
5029} 5029}
5030 5030
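sendto_mii() and getfrom_mii() above clock one MDIO bit per call through CSR9, and mii_rd()/mii_wr() layer the preamble, opcode, addressing and turn-around on top of them. A hedged sketch of how those primitives are typically combined, here restarting auto-negotiation on the active PHY; MII_CR appears elsewhere in these hunks, while the MII_CR_ASSE/MII_CR_RSTA bit names are assumptions taken from the usual MII control-register layout:

static void de4x5_restart_autoneg(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;     /* needed by the DE4X5_MII macro */
    int cr;

    cr = mii_rd(MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
    cr |= MII_CR_ASSE | MII_CR_RSTA;    /* enable and restart auto-negotiation */
    mii_wr(cr, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
}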
@@ -5085,7 +5085,7 @@ mii_get_phy(struct net_device *dev)
5085 u_long iobase = dev->base_addr; 5085 u_long iobase = dev->base_addr;
5086 int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table); 5086 int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
5087 int id; 5087 int id;
5088 5088
5089 lp->active = 0; 5089 lp->active = 0;
5090 lp->useMII = TRUE; 5090 lp->useMII = TRUE;
5091 5091
@@ -5094,7 +5094,7 @@ mii_get_phy(struct net_device *dev)
5094 lp->phy[lp->active].addr = i; 5094 lp->phy[lp->active].addr = i;
5095 if (i==0) n++; /* Count cycles */ 5095 if (i==0) n++; /* Count cycles */
5096 while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */ 5096 while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
5097 id = mii_get_oui(i, DE4X5_MII); 5097 id = mii_get_oui(i, DE4X5_MII);
5098 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ 5098 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
5099 for (j=0; j<limit; j++) { /* Search PHY table */ 5099 for (j=0; j<limit; j++) { /* Search PHY table */
5100 if (id != phy_info[j].id) continue; /* ID match? */ 5100 if (id != phy_info[j].id) continue; /* ID match? */
@@ -5133,7 +5133,7 @@ mii_get_phy(struct net_device *dev)
5133 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ 5133 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
5134 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); 5134 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5135 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); 5135 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5136 5136
5137 de4x5_dbg_mii(dev, k); 5137 de4x5_dbg_mii(dev, k);
5138 } 5138 }
5139 } 5139 }
@@ -5148,12 +5148,12 @@ build_setup_frame(struct net_device *dev, int mode)
5148 struct de4x5_private *lp = netdev_priv(dev); 5148 struct de4x5_private *lp = netdev_priv(dev);
5149 int i; 5149 int i;
5150 char *pa = lp->setup_frame; 5150 char *pa = lp->setup_frame;
5151 5151
5152 /* Initialise the setup frame */ 5152 /* Initialise the setup frame */
5153 if (mode == ALL) { 5153 if (mode == ALL) {
5154 memset(lp->setup_frame, 0, SETUP_FRAME_LEN); 5154 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5155 } 5155 }
5156 5156
5157 if (lp->setup_f == HASH_PERF) { 5157 if (lp->setup_f == HASH_PERF) {
5158 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) { 5158 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5159 *(pa + i) = dev->dev_addr[i]; /* Host address */ 5159 *(pa + i) = dev->dev_addr[i]; /* Host address */
@@ -5170,7 +5170,7 @@ build_setup_frame(struct net_device *dev, int mode)
5170 if (i & 0x01) pa += 4; 5170 if (i & 0x01) pa += 4;
5171 } 5171 }
5172 } 5172 }
5173 5173
5174 return pa; /* Points to the next entry */ 5174 return pa; /* Points to the next entry */
5175} 5175}
5176 5176
@@ -5178,7 +5178,7 @@ static void
5178enable_ast(struct net_device *dev, u32 time_out) 5178enable_ast(struct net_device *dev, u32 time_out)
5179{ 5179{
5180 timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out); 5180 timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
5181 5181
5182 return; 5182 return;
5183} 5183}
5184 5184
@@ -5186,9 +5186,9 @@ static void
5186disable_ast(struct net_device *dev) 5186disable_ast(struct net_device *dev)
5187{ 5187{
5188 struct de4x5_private *lp = netdev_priv(dev); 5188 struct de4x5_private *lp = netdev_priv(dev);
5189 5189
5190 del_timer(&lp->timer); 5190 del_timer(&lp->timer);
5191 5191
5192 return; 5192 return;
5193} 5193}
5194 5194
@@ -5207,10 +5207,10 @@ de4x5_switch_mac_port(struct net_device *dev)
5207 omr |= lp->infoblock_csr6; 5207 omr |= lp->infoblock_csr6;
5208 if (omr & OMR_PS) omr |= OMR_HBD; 5208 if (omr & OMR_PS) omr |= OMR_HBD;
5209 outl(omr, DE4X5_OMR); 5209 outl(omr, DE4X5_OMR);
5210 5210
5211 /* Soft Reset */ 5211 /* Soft Reset */
5212 RESET_DE4X5; 5212 RESET_DE4X5;
5213 5213
5214 /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ 5214 /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
5215 if (lp->chipset == DC21140) { 5215 if (lp->chipset == DC21140) {
5216 gep_wr(lp->cache.gepc, dev); 5216 gep_wr(lp->cache.gepc, dev);
@@ -5263,21 +5263,21 @@ timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long mse
5263{ 5263{
5264 struct de4x5_private *lp = netdev_priv(dev); 5264 struct de4x5_private *lp = netdev_priv(dev);
5265 int dt; 5265 int dt;
5266 5266
5267 /* First, cancel any pending timer events */ 5267 /* First, cancel any pending timer events */
5268 del_timer(&lp->timer); 5268 del_timer(&lp->timer);
5269 5269
5270 /* Convert msec to ticks */ 5270 /* Convert msec to ticks */
5271 dt = (msec * HZ) / 1000; 5271 dt = (msec * HZ) / 1000;
5272 if (dt==0) dt=1; 5272 if (dt==0) dt=1;
5273 5273
5274 /* Set up timer */ 5274 /* Set up timer */
5275 init_timer(&lp->timer); 5275 init_timer(&lp->timer);
5276 lp->timer.expires = jiffies + dt; 5276 lp->timer.expires = jiffies + dt;
5277 lp->timer.function = fn; 5277 lp->timer.function = fn;
5278 lp->timer.data = data; 5278 lp->timer.data = data;
5279 add_timer(&lp->timer); 5279 add_timer(&lp->timer);
5280 5280
5281 return; 5281 return;
5282} 5282}
5283 5283
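timeout() above is the one place the driver arms its autosense timer: it cancels any pending timer, converts the requested delay from milliseconds to jiffies (clamping to at least one tick), and re-registers lp->timer with the given callback. A minimal sketch of just the conversion step, assuming the HZ/jiffies conventions of this kernel era; later kernels would simply call msecs_to_jiffies():

static unsigned long de4x5_msec_to_ticks(u_long msec)
{
    unsigned long dt = (msec * HZ) / 1000;  /* milliseconds -> timer ticks */

    return dt ? dt : 1;                     /* never schedule for zero ticks */
}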
@@ -5375,7 +5375,7 @@ de4x5_dbg_open(struct net_device *dev)
5375{ 5375{
5376 struct de4x5_private *lp = netdev_priv(dev); 5376 struct de4x5_private *lp = netdev_priv(dev);
5377 int i; 5377 int i;
5378 5378
5379 if (de4x5_debug & DEBUG_OPEN) { 5379 if (de4x5_debug & DEBUG_OPEN) {
5380 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); 5380 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
5381 printk("\tphysical address: "); 5381 printk("\tphysical address: ");
@@ -5413,11 +5413,11 @@ de4x5_dbg_open(struct net_device *dev)
5413 } 5413 }
5414 } 5414 }
5415 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); 5415 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5416 printk("Ring size: \nRX: %d\nTX: %d\n", 5416 printk("Ring size: \nRX: %d\nTX: %d\n",
5417 (short)lp->rxRingSize, 5417 (short)lp->rxRingSize,
5418 (short)lp->txRingSize); 5418 (short)lp->txRingSize);
5419 } 5419 }
5420 5420
5421 return; 5421 return;
5422} 5422}
5423 5423
@@ -5426,7 +5426,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
5426{ 5426{
5427 struct de4x5_private *lp = netdev_priv(dev); 5427 struct de4x5_private *lp = netdev_priv(dev);
5428 u_long iobase = dev->base_addr; 5428 u_long iobase = dev->base_addr;
5429 5429
5430 if (de4x5_debug & DEBUG_MII) { 5430 if (de4x5_debug & DEBUG_MII) {
5431 printk("\nMII device address: %d\n", lp->phy[k].addr); 5431 printk("\nMII device address: %d\n", lp->phy[k].addr);
5432 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); 5432 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
@@ -5445,7 +5445,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
5445 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); 5445 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5446 } 5446 }
5447 } 5447 }
5448 5448
5449 return; 5449 return;
5450} 5450}
5451 5451
@@ -5453,17 +5453,17 @@ static void
5453de4x5_dbg_media(struct net_device *dev) 5453de4x5_dbg_media(struct net_device *dev)
5454{ 5454{
5455 struct de4x5_private *lp = netdev_priv(dev); 5455 struct de4x5_private *lp = netdev_priv(dev);
5456 5456
5457 if (lp->media != lp->c_media) { 5457 if (lp->media != lp->c_media) {
5458 if (de4x5_debug & DEBUG_MEDIA) { 5458 if (de4x5_debug & DEBUG_MEDIA) {
5459 printk("%s: media is %s%s\n", dev->name, 5459 printk("%s: media is %s%s\n", dev->name,
5460 (lp->media == NC ? "unconnected, link down or incompatible connection" : 5460 (lp->media == NC ? "unconnected, link down or incompatible connection" :
5461 (lp->media == TP ? "TP" : 5461 (lp->media == TP ? "TP" :
5462 (lp->media == ANS ? "TP/Nway" : 5462 (lp->media == ANS ? "TP/Nway" :
5463 (lp->media == BNC ? "BNC" : 5463 (lp->media == BNC ? "BNC" :
5464 (lp->media == AUI ? "AUI" : 5464 (lp->media == AUI ? "AUI" :
5465 (lp->media == BNC_AUI ? "BNC/AUI" : 5465 (lp->media == BNC_AUI ? "BNC/AUI" :
5466 (lp->media == EXT_SIA ? "EXT SIA" : 5466 (lp->media == EXT_SIA ? "EXT SIA" :
5467 (lp->media == _100Mb ? "100Mb/s" : 5467 (lp->media == _100Mb ? "100Mb/s" :
5468 (lp->media == _10Mb ? "10Mb/s" : 5468 (lp->media == _10Mb ? "10Mb/s" :
5469 "???" 5469 "???"
@@ -5471,7 +5471,7 @@ de4x5_dbg_media(struct net_device *dev)
5471 } 5471 }
5472 lp->c_media = lp->media; 5472 lp->c_media = lp->media;
5473 } 5473 }
5474 5474
5475 return; 5475 return;
5476} 5476}
5477 5477
@@ -5554,7 +5554,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5554 u32 lval[36]; 5554 u32 lval[36];
5555 } tmp; 5555 } tmp;
5556 u_long flags = 0; 5556 u_long flags = 0;
5557 5557
5558 switch(ioc->cmd) { 5558 switch(ioc->cmd) {
5559 case DE4X5_GET_HWADDR: /* Get the hardware address */ 5559 case DE4X5_GET_HWADDR: /* Get the hardware address */
5560 ioc->len = ETH_ALEN; 5560 ioc->len = ETH_ALEN;
@@ -5575,7 +5575,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5575 } 5575 }
5576 build_setup_frame(dev, PHYS_ADDR_ONLY); 5576 build_setup_frame(dev, PHYS_ADDR_ONLY);
5577 /* Set up the descriptor and give ownership to the card */ 5577 /* Set up the descriptor and give ownership to the card */
5578 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | 5578 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
5579 SETUP_FRAME_LEN, (struct sk_buff *)1); 5579 SETUP_FRAME_LEN, (struct sk_buff *)1);
5580 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 5580 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
5581 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ 5581 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
@@ -5617,8 +5617,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5617 spin_lock_irqsave(&lp->lock, flags); 5617 spin_lock_irqsave(&lp->lock, flags);
5618 memcpy(&statbuf, &lp->pktStats, ioc->len); 5618 memcpy(&statbuf, &lp->pktStats, ioc->len);
5619 spin_unlock_irqrestore(&lp->lock, flags); 5619 spin_unlock_irqrestore(&lp->lock, flags);
5620 if (copy_to_user(ioc->data, &statbuf, ioc->len)) 5620 if (copy_to_user(ioc->data, &statbuf, ioc->len))
5621 return -EFAULT; 5621 return -EFAULT;
5622 break; 5622 break;
5623 } 5623 }
5624 case DE4X5_CLR_STATS: /* Zero out the driver statistics */ 5624 case DE4X5_CLR_STATS: /* Zero out the driver statistics */
@@ -5652,9 +5652,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5652 ioc->len = j; 5652 ioc->len = j;
5653 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; 5653 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5654 break; 5654 break;
5655 5655
5656#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ 5656#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
5657/* 5657/*
5658 case DE4X5_DUMP: 5658 case DE4X5_DUMP:
5659 j = 0; 5659 j = 0;
5660 tmp.addr[j++] = dev->irq; 5660 tmp.addr[j++] = dev->irq;
@@ -5664,7 +5664,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5664 tmp.addr[j++] = lp->rxRingSize; 5664 tmp.addr[j++] = lp->rxRingSize;
5665 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; 5665 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
5666 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; 5666 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
5667 5667
5668 for (i=0;i<lp->rxRingSize-1;i++){ 5668 for (i=0;i<lp->rxRingSize-1;i++){
5669 if (i < 3) { 5669 if (i < 3) {
5670 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; 5670 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
@@ -5677,7 +5677,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5677 } 5677 }
5678 } 5678 }
5679 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; 5679 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
5680 5680
5681 for (i=0;i<lp->rxRingSize-1;i++){ 5681 for (i=0;i<lp->rxRingSize-1;i++){
5682 if (i < 3) { 5682 if (i < 3) {
5683 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; 5683 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
@@ -5690,14 +5690,14 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5690 } 5690 }
5691 } 5691 }
5692 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; 5692 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
5693 5693
5694 for (i=0;i<lp->rxRingSize;i++){ 5694 for (i=0;i<lp->rxRingSize;i++){
5695 tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; 5695 tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
5696 } 5696 }
5697 for (i=0;i<lp->txRingSize;i++){ 5697 for (i=0;i<lp->txRingSize;i++){
5698 tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; 5698 tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
5699 } 5699 }
5700 5700
5701 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; 5701 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
5702 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; 5702 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
5703 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; 5703 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
@@ -5706,18 +5706,18 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5706 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; 5706 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
5707 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; 5707 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
5708 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; 5708 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
5709 tmp.lval[j>>2] = lp->chipset; j+=4; 5709 tmp.lval[j>>2] = lp->chipset; j+=4;
5710 if (lp->chipset == DC21140) { 5710 if (lp->chipset == DC21140) {
5711 tmp.lval[j>>2] = gep_rd(dev); j+=4; 5711 tmp.lval[j>>2] = gep_rd(dev); j+=4;
5712 } else { 5712 } else {
5713 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; 5713 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
5714 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; 5714 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
5715 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; 5715 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
5716 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; 5716 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
5717 } 5717 }
5718 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; 5718 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
5719 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { 5719 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
5720 tmp.lval[j>>2] = lp->active; j+=4; 5720 tmp.lval[j>>2] = lp->active; j+=4;
5721 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; 5721 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5722 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; 5722 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5723 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; 5723 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
@@ -5734,10 +5734,10 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5734 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; 5734 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5735 } 5735 }
5736 } 5736 }
5737 5737
5738 tmp.addr[j++] = lp->txRingSize; 5738 tmp.addr[j++] = lp->txRingSize;
5739 tmp.addr[j++] = netif_queue_stopped(dev); 5739 tmp.addr[j++] = netif_queue_stopped(dev);
5740 5740
5741 ioc->len = j; 5741 ioc->len = j;
5742 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; 5742 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5743 break; 5743 break;
@@ -5746,7 +5746,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5746 default: 5746 default:
5747 return -EOPNOTSUPP; 5747 return -EOPNOTSUPP;
5748 } 5748 }
5749 5749
5750 return status; 5750 return status;
5751} 5751}
5752 5752
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index ad37a4074302..57226e5eb8a6 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -38,11 +38,11 @@
38/* 38/*
39** EISA Register Address Map 39** EISA Register Address Map
40*/ 40*/
41#define EISA_ID iobase+0x0c80 /* EISA ID Registers */ 41#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
42#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ 42#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
43#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ 43#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
44#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ 44#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
45#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ 45#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
46#define EISA_CR iobase+0x0c84 /* EISA Control Register */ 46#define EISA_CR iobase+0x0c84 /* EISA Control Register */
47#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ 47#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
48#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ 48#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
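The EISA register map above expands each name textually to iobase+offset, so the macros rely on an iobase variable being in scope and on being used where operator precedence cannot split the sum. A hedged illustration of the more defensive, parenthesised form these could take; the offsets are copied from the hunk and this is a style note, not a proposed change to the header:

#define EISA_ID0 (iobase + 0x0c80)   /* EISA ID Register 0 */
#define EISA_CR  (iobase + 0x0c84)   /* EISA Control Register */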
@@ -1008,8 +1008,8 @@ struct de4x5_ioctl {
1008 unsigned char __user *data; /* Pointer to the data buffer */ 1008 unsigned char __user *data; /* Pointer to the data buffer */
1009}; 1009};
1010 1010
1011/* 1011/*
1012** Recognised commands for the driver 1012** Recognised commands for the driver
1013*/ 1013*/
1014#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ 1014#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
1015#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ 1015#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 74e9075d9c48..ba5b112093f4 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -50,7 +50,7 @@
50 forget to unmap PCI mapped skbs. 50 forget to unmap PCI mapped skbs.
51 51
52 Alan Cox <alan@redhat.com> 52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi 53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device. 54 for their 1563 ethernet device.
55 55
56 TODO 56 TODO
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index fbd9ab60b052..5ffbd5b300c0 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -96,11 +96,11 @@ static const char *block_name[] __devinitdata = {
96 * tulip_build_fake_mediatable - Build a fake mediatable entry. 96 * tulip_build_fake_mediatable - Build a fake mediatable entry.
97 * @tp: Ptr to the tulip private data. 97 * @tp: Ptr to the tulip private data.
98 * 98 *
99 * Some cards like the 3x5 HSC cards (J3514A) do not have a standard 99 * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
100 * srom and can not be handled under the fixup routine. These cards 100 * srom and can not be handled under the fixup routine. These cards
101 * still need a valid mediatable entry for correct csr12 setup and 101 * still need a valid mediatable entry for correct csr12 setup and
102 * mii handling. 102 * mii handling.
103 * 103 *
104 * Since this is currently a parisc-linux specific function, the 104 * Since this is currently a parisc-linux specific function, the
105 * #ifdef __hppa__ should completely optimize this function away for 105 * #ifdef __hppa__ should completely optimize this function away for
106 * non-parisc hardware. 106 * non-parisc hardware.
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
140 tp->flags |= HAS_PHY_IRQ; 140 tp->flags |= HAS_PHY_IRQ;
141 tp->csr12_shadow = -1; 141 tp->csr12_shadow = -1;
142 } 142 }
143#endif 143#endif
144} 144}
145 145
146void __devinit tulip_parse_eeprom(struct net_device *dev) 146void __devinit tulip_parse_eeprom(struct net_device *dev)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index bb3558164a5b..da4f7593c50f 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget)
139 } 139 }
140 /* Acknowledge current RX interrupt sources. */ 140 /* Acknowledge current RX interrupt sources. */
141 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); 141 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
142 142
143 143
144 /* If we own the next entry, it is a new packet. Send it up. */ 144 /* If we own the next entry, it is a new packet. Send it up. */
145 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { 145 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
146 s32 status = le32_to_cpu(tp->rx_ring[entry].status); 146 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
147 147
148 148
149 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) 149 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
150 break; 150 break;
151 151
152 if (tulip_debug > 5) 152 if (tulip_debug > 5)
153 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", 153 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
154 dev->name, entry, status); 154 dev->name, entry, status);
155 if (--rx_work_limit < 0) 155 if (--rx_work_limit < 0)
156 goto not_done; 156 goto not_done;
157 157
158 if ((status & 0x38008300) != 0x0300) { 158 if ((status & 0x38008300) != 0x0300) {
159 if ((status & 0x38000300) != 0x0300) { 159 if ((status & 0x38000300) != 0x0300) {
 160 /* Ignore earlier buffers. */ 160 /* Ignore earlier buffers. */
@@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget)
180 /* Omit the four octet CRC from the length. */ 180 /* Omit the four octet CRC from the length. */
181 short pkt_len = ((status >> 16) & 0x7ff) - 4; 181 short pkt_len = ((status >> 16) & 0x7ff) - 4;
182 struct sk_buff *skb; 182 struct sk_buff *skb;
183 183
184#ifndef final_version 184#ifndef final_version
185 if (pkt_len > 1518) { 185 if (pkt_len > 1518) {
186 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n", 186 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget)
213 } else { /* Pass up the skb already on the Rx ring. */ 213 } else { /* Pass up the skb already on the Rx ring. */
214 char *temp = skb_put(skb = tp->rx_buffers[entry].skb, 214 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
215 pkt_len); 215 pkt_len);
216 216
217#ifndef final_version 217#ifndef final_version
218 if (tp->rx_buffers[entry].mapping != 218 if (tp->rx_buffers[entry].mapping !=
219 le32_to_cpu(tp->rx_ring[entry].buffer1)) { 219 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget)
225 skb->head, temp); 225 skb->head, temp);
226 } 226 }
227#endif 227#endif
228 228
229 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, 229 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
230 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 230 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
231 231
232 tp->rx_buffers[entry].skb = NULL; 232 tp->rx_buffers[entry].skb = NULL;
233 tp->rx_buffers[entry].mapping = 0; 233 tp->rx_buffers[entry].mapping = 0;
234 } 234 }
235 skb->protocol = eth_type_trans(skb, dev); 235 skb->protocol = eth_type_trans(skb, dev);
236 236
237 netif_receive_skb(skb); 237 netif_receive_skb(skb);
238 238
239 dev->last_rx = jiffies; 239 dev->last_rx = jiffies;
240 tp->stats.rx_packets++; 240 tp->stats.rx_packets++;
241 tp->stats.rx_bytes += pkt_len; 241 tp->stats.rx_bytes += pkt_len;
@@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget)
245 entry = (++tp->cur_rx) % RX_RING_SIZE; 245 entry = (++tp->cur_rx) % RX_RING_SIZE;
246 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) 246 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
247 tulip_refill_rx(dev); 247 tulip_refill_rx(dev);
248 248
249 } 249 }
250 250
251 /* New ack strategy... irq does not ack Rx any longer 251 /* New ack strategy... irq does not ack Rx any longer
252 hopefully this helps */ 252 hopefully this helps */
253 253
254 /* Really bad things can happen here... If new packet arrives 254 /* Really bad things can happen here... If new packet arrives
255 * and an irq arrives (tx or just due to occasionally unset 255 * and an irq arrives (tx or just due to occasionally unset
256 * mask), it will be acked by irq handler, but new thread 256 * mask), it will be acked by irq handler, but new thread
@@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget)
259 * tomorrow (night 011029). If it does not fail, we have finally 259 * tomorrow (night 011029). If it does not fail, we have finally
260 * won: the amount of IO did not increase at all. */ 260 * won: the amount of IO did not increase at all. */
261 } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); 261 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
262 262
263done: 263done:
264 264
265 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION 265 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
266 266
267 /* We use this simplistic scheme for IM. It's proven by 267 /* We use this simplistic scheme for IM. It's proven by
268 real life installations. We can have IM enabled 268 real life installations. We can have IM enabled
269 continuously but this would cause unnecessary latency. 269 continuously but this would cause unnecessary latency.
270 Unfortunately we can't use all the NET_RX_* feedback here. 270 Unfortunately we can't use all the NET_RX_* feedback here.
271 This would turn on IM for devices that are not contributing 271 This would turn on IM for devices that are not contributing
272 to backlog congestion with unnecessary latency. 272 to backlog congestion with unnecessary latency.
273 273
274 We monitor the device RX-ring and have: 274 We monitor the device RX-ring and have:
275 275
276 HW Interrupt Mitigation either ON or OFF. 276 HW Interrupt Mitigation either ON or OFF.
277 277
278 ON: More than 1 pkt received (per intr.) OR we are dropping 278 ON: More than 1 pkt received (per intr.) OR we are dropping
279 OFF: Only 1 pkt received 279 OFF: Only 1 pkt received
280 280
281 Note. We only use min and max (0, 15) settings from mit_table */ 281 Note. We only use min and max (0, 15) settings from mit_table */
282 282
283 283
284 if( tp->flags & HAS_INTR_MITIGATION) { 284 if( tp->flags & HAS_INTR_MITIGATION) {
285 if( received > 1 ) { 285 if( received > 1 ) {
286 if( ! tp->mit_on ) { 286 if( ! tp->mit_on ) {
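The hunk above ends part-way through this logic, so here is a minimal, self-contained C sketch of the on/off decision the comment describes. The names im_state and im_decide and the table indexing are illustrative assumptions, not the driver's code, and the "or we are dropping" case is left out for brevity.

/* Sketch of the ON/OFF interrupt-mitigation policy described above:
 * more than one packet per poll turns mitigation on at the maximum
 * table setting, a single packet turns it back off.  Only the min
 * and max table entries are ever used. */
struct im_state {
	int mit_on;                       /* is HW mitigation currently on? */
};

static int im_decide(struct im_state *s, int received,
                     const unsigned int *mit_table, unsigned int *csr11)
{
	if (received > 1 && !s->mit_on) {
		s->mit_on = 1;
		*csr11 = mit_table[15];   /* max setting -> fewer interrupts */
		return 1;                 /* caller programs CSR11           */
	}
	if (received <= 1 && s->mit_on) {
		s->mit_on = 0;
		*csr11 = 0;               /* mitigation off -> lower latency */
		return 1;
	}
	return 0;                         /* no register write needed        */
}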
@@ -297,20 +297,20 @@ done:
297 } 297 }
298 298
299#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ 299#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
300 300
301 dev->quota -= received; 301 dev->quota -= received;
302 *budget -= received; 302 *budget -= received;
303 303
304 tulip_refill_rx(dev); 304 tulip_refill_rx(dev);
305 305
306 /* If RX ring is not full we are out of memory. */ 306 /* If RX ring is not full we are out of memory. */
307 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; 307 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
308 308
309 /* Remove us from polling list and enable RX intr. */ 309 /* Remove us from polling list and enable RX intr. */
310 310
311 netif_rx_complete(dev); 311 netif_rx_complete(dev);
312 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); 312 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
313 313
314 /* The last op happens after poll completion. Which means the following: 314 /* The last op happens after poll completion. Which means the following:
315 * 1. it can race with disabling irqs in irq handler 315 * 1. it can race with disabling irqs in irq handler
316 * 2. it can race with disabling/enabling irqs in other poll threads 316 * 2. it can race with disabling/enabling irqs in other poll threads
@@ -321,9 +321,9 @@ done:
321 * due to races in masking and due to too late acking of already 321 * due to races in masking and due to too late acking of already
322 * processed irqs. But it must not result in losing events. 322 * processed irqs. But it must not result in losing events.
323 */ 323 */
324 324
325 return 0; 325 return 0;
326 326
327 not_done: 327 not_done:
328 if (!received) { 328 if (!received) {
329 329
@@ -331,29 +331,29 @@ done:
331 } 331 }
332 dev->quota -= received; 332 dev->quota -= received;
333 *budget -= received; 333 *budget -= received;
334 334
335 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || 335 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
336 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) 336 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
337 tulip_refill_rx(dev); 337 tulip_refill_rx(dev);
338 338
339 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; 339 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
340 340
341 return 1; 341 return 1;
342 342
343 343
344 oom: /* Executed with RX ints disabled */ 344 oom: /* Executed with RX ints disabled */
345 345
346 346
347 /* Start timer, stop polling, but do not enable rx interrupts. */ 347 /* Start timer, stop polling, but do not enable rx interrupts. */
348 mod_timer(&tp->oom_timer, jiffies+1); 348 mod_timer(&tp->oom_timer, jiffies+1);
349 349
350 /* Think: timer_pending() was an explicit signature of a bug. 350 /* Think: timer_pending() was an explicit signature of a bug.
351 * Timer can be pending now but fired and completed 351 * Timer can be pending now but fired and completed
352 * before we did netif_rx_complete(). See? We would lose it. */ 352 * before we did netif_rx_complete(). See? We would lose it. */
353 353
354 /* remove ourselves from the polling list */ 354 /* remove ourselves from the polling list */
355 netif_rx_complete(dev); 355 netif_rx_complete(dev);
356 356
357 return 0; 357 return 0;
358} 358}
359 359
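The three exits above (done, budget exhausted, out of memory) follow the pre-2.6.24 NAPI quota/budget contract. A compressed sketch of just that contract, assuming the driver's tulip.h definitions and omitting the refill details:

/* Condensed sketch of the poll()-exit contract used above (old NAPI):
 * return 0 after netif_rx_complete() when done or when falling back to
 * the oom timer, return 1 while there is still work but the budget is
 * gone.  Assumes the driver's build context; not the verbatim code. */
#include "tulip.h"

static int tulip_poll_exits(struct net_device *dev, int *budget,
                            int received, int done, int ring_empty)
{
	struct tulip_private *tp = netdev_priv(dev);

	dev->quota -= received;
	*budget    -= received;

	if (ring_empty) {                       /* oom: rx irqs stay masked */
		mod_timer(&tp->oom_timer, jiffies + 1);
		netif_rx_complete(dev);
		return 0;
	}
	if (!done)                              /* budget used up           */
		return 1;                       /* stay on the poll list    */

	netif_rx_complete(dev);                 /* done: leave poll list    */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
	return 0;
}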
@@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
521 /* Let's see whether the interrupt really is for us */ 521 /* Let's see whether the interrupt really is for us */
522 csr5 = ioread32(ioaddr + CSR5); 522 csr5 = ioread32(ioaddr + CSR5);
523 523
524 if (tp->flags & HAS_PHY_IRQ) 524 if (tp->flags & HAS_PHY_IRQ)
525 handled = phy_interrupt (dev); 525 handled = phy_interrupt (dev);
526 526
527 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) 527 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
528 return IRQ_RETVAL(handled); 528 return IRQ_RETVAL(handled);
529 529
@@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
538 /* Mask RX intrs and add the device to poll list. */ 538 /* Mask RX intrs and add the device to poll list. */
539 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); 539 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
540 netif_rx_schedule(dev); 540 netif_rx_schedule(dev);
541 541
542 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) 542 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
543 break; 543 break;
544 } 544 }
545 545
546 /* Acknowledge the interrupt sources we handle here ASAP 546 /* Acknowledge the interrupt sources we handle here ASAP
547 the poll function does Rx and RxNoBuf acking */ 547 the poll function does Rx and RxNoBuf acking */
548 548
549 iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); 549 iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
550 550
551#else 551#else
552 /* Acknowledge all of the current interrupt sources ASAP. */ 552 /* Acknowledge all of the current interrupt sources ASAP. */
553 iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); 553 iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
554 554
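This hunk is the interrupt-side half of that handshake: RX sources are masked in CSR7, the device is queued for polling, and only the non-RX bits of CSR5 are acknowledged (tulip_poll() acks Rx and RxNoBuf itself). A short sketch of that sequence, again assuming the driver's tulip.h symbols and leaving out the surrounding loop and error handling:

/* Interrupt-side half of the NAPI handshake shown above, condensed
 * for illustration only. */
#include "tulip.h"

static void tulip_rx_irq_to_poll(struct net_device *dev, u32 csr5)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	/* Mask RX interrupt sources until the poll routine re-enables them. */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);

	/* Hand the device to the softirq poller. */
	netif_rx_schedule(dev);

	/* Ack everything handled here; Rx/RxNoBuf are acked in tulip_poll(). */
	iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
}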
@@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
559 } 559 }
560 560
561#endif /* CONFIG_TULIP_NAPI */ 561#endif /* CONFIG_TULIP_NAPI */
562 562
563 if (tulip_debug > 4) 563 if (tulip_debug > 4)
564 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", 564 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
565 dev->name, csr5, ioread32(ioaddr + CSR5)); 565 dev->name, csr5, ioread32(ioaddr + CSR5));
566 566
567 567
568 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { 568 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
569 unsigned int dirty_tx; 569 unsigned int dirty_tx;
@@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
737#ifdef CONFIG_TULIP_NAPI 737#ifdef CONFIG_TULIP_NAPI
738 if (rxd) 738 if (rxd)
739 csr5 &= ~RxPollInt; 739 csr5 &= ~RxPollInt;
740 } while ((csr5 & (TxNoBuf | 740 } while ((csr5 & (TxNoBuf |
741 TxDied | 741 TxDied |
742 TxIntr | 742 TxIntr |
743 TimerInt | 743 TimerInt |
744 /* Abnormal intr. */ 744 /* Abnormal intr. */
745 RxDied | 745 RxDied |
746 TxFIFOUnderflow | 746 TxFIFOUnderflow |
747 TxJabber | 747 TxJabber |
748 TPLnkFail | 748 TPLnkFail |
749 SytemError )) != 0); 749 SytemError )) != 0);
750#else 750#else
751 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); 751 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
752 752
753 tulip_refill_rx(dev); 753 tulip_refill_rx(dev);
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index f53396fe79c9..e9bc2a958c14 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -140,7 +140,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
140 spin_unlock_irqrestore(&tp->mii_lock, flags); 140 spin_unlock_irqrestore(&tp->mii_lock, flags);
141 return; 141 return;
142 } 142 }
143 143
144 /* Establish sync by sending 32 logic ones. */ 144 /* Establish sync by sending 32 logic ones. */
145 for (i = 32; i >= 0; i--) { 145 for (i = 32; i >= 0; i--) {
146 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); 146 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
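For reference, the 32 ones above are the IEEE 802.3 clause-22 MDIO preamble; the frame that follows is start (01), opcode (01 for write, 10 for read), a 5-bit PHY address, a 5-bit register address, turnaround (10) and 16 data bits. A bit-level sketch of composing such a write frame in generic C, not the driver's bit-banging helpers:

/* Build a clause-22 MDIO write frame as a 32-bit word that is shifted
 * out MSB first after the all-ones preamble.  Illustrative helper only;
 * the driver bit-bangs the same pattern through CSR9. */
static unsigned int mdio_write_frame(unsigned int phy_id,
                                     unsigned int reg, unsigned int val)
{
	return (0x1u << 30) |             /* start:     01            */
	       (0x1u << 28) |             /* opcode:    01 (write)    */
	       ((phy_id & 0x1f) << 23) |  /* PHY address, 5 bits      */
	       ((reg    & 0x1f) << 18) |  /* register address, 5 bits */
	       (0x2u << 16) |             /* turnaround: 10           */
	       (val & 0xffff);            /* 16 data bits             */
}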
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 05d2d96f7be2..d25020da6798 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -259,7 +259,7 @@ enum t21143_csr6_bits {
259 There are no ill effects from too-large receive rings. */ 259 There are no ill effects from too-large receive rings. */
260 260
261#define TX_RING_SIZE 32 261#define TX_RING_SIZE 32
262#define RX_RING_SIZE 128 262#define RX_RING_SIZE 128
263#define MEDIA_MASK 31 263#define MEDIA_MASK 31
264 264
265#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ 265#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index c67c91251d04..b3cf11d32e24 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1224,7 +1224,7 @@ out:
1224 * Chips that have the MRM/reserved bit quirk and the burst quirk. That 1224 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1225 * is the DM910X and the on chip ULi devices 1225 * is the DM910X and the on chip ULi devices
1226 */ 1226 */
1227 1227
1228static int tulip_uli_dm_quirk(struct pci_dev *pdev) 1228static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1229{ 1229{
1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102) 1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
@@ -1297,7 +1297,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1297 */ 1297 */
1298 1298
1299 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache 1299 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1300 aligned. Aries might need this too. The Saturn errata are not 1300 aligned. Aries might need this too. The Saturn errata are not
1301 pretty reading but thankfully it's an old 486 chipset. 1301 pretty reading but thankfully it's an old 486 chipset.
1302 1302
1303 2. The dreaded SiS496 486 chipset. Same workaround as Intel 1303 2. The dreaded SiS496 486 chipset. Same workaround as Intel
@@ -1500,7 +1500,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1500 } 1500 }
1501#endif 1501#endif
1502#ifdef CONFIG_MIPS_COBALT 1502#ifdef CONFIG_MIPS_COBALT
1503 if ((pdev->bus->number == 0) && 1503 if ((pdev->bus->number == 0) &&
1504 ((PCI_SLOT(pdev->devfn) == 7) || 1504 ((PCI_SLOT(pdev->devfn) == 7) ||
1505 (PCI_SLOT(pdev->devfn) == 12))) { 1505 (PCI_SLOT(pdev->devfn) == 12))) {
1506 /* Cobalt MAC address in first EEPROM locations. */ 1506 /* Cobalt MAC address in first EEPROM locations. */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 238e9c72cb3a..8b3a28f53c3d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -9,7 +9,7 @@
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details. 10 GNU General Public License for more details.
11 11
12 12
13*/ 13*/
14 14
15#define DRV_NAME "uli526x" 15#define DRV_NAME "uli526x"
@@ -185,7 +185,7 @@ struct uli526x_board_info {
185 185
186 /* NIC SROM data */ 186 /* NIC SROM data */
187 unsigned char srom[128]; 187 unsigned char srom[128];
188 u8 init; 188 u8 init;
189}; 189};
190 190
191enum uli526x_offsets { 191enum uli526x_offsets {
@@ -258,7 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
258 struct uli526x_board_info *db; /* board information structure */ 258 struct uli526x_board_info *db; /* board information structure */
259 struct net_device *dev; 259 struct net_device *dev;
260 int i, err; 260 int i, err;
261 261
262 ULI526X_DBUG(0, "uli526x_init_one()", 0); 262 ULI526X_DBUG(0, "uli526x_init_one()", 0);
263 263
264 if (!printed_version++) 264 if (!printed_version++)
@@ -316,7 +316,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
316 err = -ENOMEM; 316 err = -ENOMEM;
317 goto err_out_nomem; 317 goto err_out_nomem;
318 } 318 }
319 319
320 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 320 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
321 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 321 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
322 db->buf_pool_start = db->buf_pool_ptr; 322 db->buf_pool_start = db->buf_pool_ptr;
@@ -324,14 +324,14 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
324 324
325 db->chip_id = ent->driver_data; 325 db->chip_id = ent->driver_data;
326 db->ioaddr = pci_resource_start(pdev, 0); 326 db->ioaddr = pci_resource_start(pdev, 0);
327 327
328 db->pdev = pdev; 328 db->pdev = pdev;
329 db->init = 1; 329 db->init = 1;
330 330
331 dev->base_addr = db->ioaddr; 331 dev->base_addr = db->ioaddr;
332 dev->irq = pdev->irq; 332 dev->irq = pdev->irq;
333 pci_set_drvdata(pdev, dev); 333 pci_set_drvdata(pdev, dev);
334 334
335 /* Register some necessary functions */ 335 /* Register some necessary functions */
336 dev->open = &uli526x_open; 336 dev->open = &uli526x_open;
337 dev->hard_start_xmit = &uli526x_start_xmit; 337 dev->hard_start_xmit = &uli526x_start_xmit;
@@ -341,7 +341,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
341 dev->ethtool_ops = &netdev_ethtool_ops; 341 dev->ethtool_ops = &netdev_ethtool_ops;
342 spin_lock_init(&db->lock); 342 spin_lock_init(&db->lock);
343 343
344 344
345 /* read 64 word srom data */ 345 /* read 64 word srom data */
346 for (i = 0; i < 64; i++) 346 for (i = 0; i < 64; i++)
347 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); 347 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
@@ -374,7 +374,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
374 goto err_out_res; 374 goto err_out_res;
375 375
376 printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); 376 printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
377 377
378 for (i = 0; i < 6; i++) 378 for (i = 0; i < 6; i++)
379 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); 379 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
380 printk(", irq %d.\n", dev->irq); 380 printk(", irq %d.\n", dev->irq);
@@ -389,7 +389,7 @@ err_out_nomem:
389 if(db->desc_pool_ptr) 389 if(db->desc_pool_ptr)
390 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, 390 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
391 db->desc_pool_ptr, db->desc_pool_dma_ptr); 391 db->desc_pool_ptr, db->desc_pool_dma_ptr);
392 392
393 if(db->buf_pool_ptr != NULL) 393 if(db->buf_pool_ptr != NULL)
394 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 394 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
395 db->buf_pool_ptr, db->buf_pool_dma_ptr); 395 db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -433,7 +433,7 @@ static int uli526x_open(struct net_device *dev)
433{ 433{
434 int ret; 434 int ret;
435 struct uli526x_board_info *db = netdev_priv(dev); 435 struct uli526x_board_info *db = netdev_priv(dev);
436 436
437 ULI526X_DBUG(0, "uli526x_open", 0); 437 ULI526X_DBUG(0, "uli526x_open", 0);
438 438
439 ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev); 439 ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
@@ -454,7 +454,7 @@ static int uli526x_open(struct net_device *dev)
454 /* CR6 operation mode decision */ 454 /* CR6 operation mode decision */
455 db->cr6_data |= ULI526X_TXTH_256; 455 db->cr6_data |= ULI526X_TXTH_256;
456 db->cr0_data = CR0_DEFAULT; 456 db->cr0_data = CR0_DEFAULT;
457 457
458 /* Initialize ULI526X board */ 458 /* Initialize ULI526X board */
459 uli526x_init(dev); 459 uli526x_init(dev);
460 460
@@ -604,7 +604,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
604 /* Restore CR7 to enable interrupt */ 604 /* Restore CR7 to enable interrupt */
605 spin_unlock_irqrestore(&db->lock, flags); 605 spin_unlock_irqrestore(&db->lock, flags);
606 outl(db->cr7_data, dev->base_addr + DCR7); 606 outl(db->cr7_data, dev->base_addr + DCR7);
607 607
608 /* free this SKB */ 608 /* free this SKB */
609 dev_kfree_skb(skb); 609 dev_kfree_skb(skb);
610 610
@@ -782,7 +782,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
782 struct sk_buff *skb; 782 struct sk_buff *skb;
783 int rxlen; 783 int rxlen;
784 u32 rdes0; 784 u32 rdes0;
785 785
786 rxptr = db->rx_ready_ptr; 786 rxptr = db->rx_ready_ptr;
787 787
788 while(db->rx_avail_cnt) { 788 while(db->rx_avail_cnt) {
@@ -821,7 +821,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
821 if ( !(rdes0 & 0x8000) || 821 if ( !(rdes0 & 0x8000) ||
822 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { 822 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
823 skb = rxptr->rx_skb_ptr; 823 skb = rxptr->rx_skb_ptr;
824 824
825 /* Good packet, send to upper layer */ 825 /* Good packet, send to upper layer */
826 /* Short packet uses a new SKB */ 826 /* Short packet uses a new SKB */
827 if ( (rxlen < RX_COPY_SIZE) && 827 if ( (rxlen < RX_COPY_SIZE) &&
@@ -841,7 +841,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
841 dev->last_rx = jiffies; 841 dev->last_rx = jiffies;
842 db->stats.rx_packets++; 842 db->stats.rx_packets++;
843 db->stats.rx_bytes += rxlen; 843 db->stats.rx_bytes += rxlen;
844 844
845 } else { 845 } else {
846 /* Reuse SKB buffer when the packet is error */ 846 /* Reuse SKB buffer when the packet is error */
847 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); 847 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
@@ -911,7 +911,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
911 SUPPORTED_100baseT_Full | 911 SUPPORTED_100baseT_Full |
912 SUPPORTED_Autoneg | 912 SUPPORTED_Autoneg |
913 SUPPORTED_MII); 913 SUPPORTED_MII);
914 914
915 ecmd->advertising = (ADVERTISED_10baseT_Half | 915 ecmd->advertising = (ADVERTISED_10baseT_Half |
916 ADVERTISED_10baseT_Full | 916 ADVERTISED_10baseT_Full |
917 ADVERTISED_100baseT_Half | 917 ADVERTISED_100baseT_Half |
@@ -924,13 +924,13 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
924 ecmd->phy_address = db->phy_addr; 924 ecmd->phy_address = db->phy_addr;
925 925
926 ecmd->transceiver = XCVR_EXTERNAL; 926 ecmd->transceiver = XCVR_EXTERNAL;
927 927
928 ecmd->speed = 10; 928 ecmd->speed = 10;
929 ecmd->duplex = DUPLEX_HALF; 929 ecmd->duplex = DUPLEX_HALF;
930 930
931 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) 931 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
932 { 932 {
933 ecmd->speed = 100; 933 ecmd->speed = 100;
934 } 934 }
935 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) 935 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
936 { 936 {
@@ -939,11 +939,11 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
939 if(db->link_failed) 939 if(db->link_failed)
940 { 940 {
941 ecmd->speed = -1; 941 ecmd->speed = -1;
942 ecmd->duplex = -1; 942 ecmd->duplex = -1;
943 } 943 }
944 944
945 if (db->media_mode & ULI526X_AUTO) 945 if (db->media_mode & ULI526X_AUTO)
946 { 946 {
947 ecmd->autoneg = AUTONEG_ENABLE; 947 ecmd->autoneg = AUTONEG_ENABLE;
948 } 948 }
949} 949}
@@ -964,15 +964,15 @@ static void netdev_get_drvinfo(struct net_device *dev,
964 964
965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
966 struct uli526x_board_info *np = netdev_priv(dev); 966 struct uli526x_board_info *np = netdev_priv(dev);
967 967
968 ULi_ethtool_gset(np, cmd); 968 ULi_ethtool_gset(np, cmd);
969 969
970 return 0; 970 return 0;
971} 971}
972 972
973static u32 netdev_get_link(struct net_device *dev) { 973static u32 netdev_get_link(struct net_device *dev) {
974 struct uli526x_board_info *np = netdev_priv(dev); 974 struct uli526x_board_info *np = netdev_priv(dev);
975 975
976 if(np->link_failed) 976 if(np->link_failed)
977 return 0; 977 return 0;
978 else 978 else
@@ -1005,11 +1005,11 @@ static void uli526x_timer(unsigned long data)
1005 struct uli526x_board_info *db = netdev_priv(dev); 1005 struct uli526x_board_info *db = netdev_priv(dev);
1006 unsigned long flags; 1006 unsigned long flags;
1007 u8 TmpSpeed=10; 1007 u8 TmpSpeed=10;
1008 1008
1009 //ULI526X_DBUG(0, "uli526x_timer()", 0); 1009 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1010 spin_lock_irqsave(&db->lock, flags); 1010 spin_lock_irqsave(&db->lock, flags);
1011 1011
1012 1012
1013 /* Dynamic reset ULI526X : system error or transmit time-out */ 1013 /* Dynamic reset ULI526X : system error or transmit time-out */
1014 tmp_cr8 = inl(db->ioaddr + DCR8); 1014 tmp_cr8 = inl(db->ioaddr + DCR8);
1015 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1015 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
@@ -1021,9 +1021,9 @@ static void uli526x_timer(unsigned long data)
1021 /* TX polling kick monitor */ 1021 /* TX polling kick monitor */
1022 if ( db->tx_packet_cnt && 1022 if ( db->tx_packet_cnt &&
1023 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) { 1023 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
1024 outl(0x1, dev->base_addr + DCR1); // Tx polling again 1024 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1025 1025
1026 // TX Timeout 1026 // TX Timeout
1027 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) { 1027 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
1028 db->reset_TXtimeout++; 1028 db->reset_TXtimeout++;
1029 db->wait_reset = 1; 1029 db->wait_reset = 1;
@@ -1073,7 +1073,7 @@ static void uli526x_timer(unsigned long data)
1073 uli526x_sense_speed(db) ) 1073 uli526x_sense_speed(db) )
1074 db->link_failed = 1; 1074 db->link_failed = 1;
1075 uli526x_process_mode(db); 1075 uli526x_process_mode(db);
1076 1076
1077 if(db->link_failed==0) 1077 if(db->link_failed==0)
1078 { 1078 {
1079 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) 1079 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
@@ -1404,7 +1404,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1404 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1404 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1405 1405
1406 if ( (phy_mode & 0x24) == 0x24 ) { 1406 if ( (phy_mode & 0x24) == 0x24 ) {
1407 1407
1408 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); 1408 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
1409 if(phy_mode&0x8000) 1409 if(phy_mode&0x8000)
1410 phy_mode = 0x8000; 1410 phy_mode = 0x8000;
@@ -1414,7 +1414,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1414 phy_mode = 0x2000; 1414 phy_mode = 0x2000;
1415 else 1415 else
1416 phy_mode = 0x1000; 1416 phy_mode = 0x1000;
1417 1417
1418 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ 1418 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1419 switch (phy_mode) { 1419 switch (phy_mode) {
1420 case 0x1000: db->op_mode = ULI526X_10MHF; break; 1420 case 0x1000: db->op_mode = ULI526X_10MHF; break;
@@ -1442,7 +1442,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1442static void uli526x_set_phyxcer(struct uli526x_board_info *db) 1442static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1443{ 1443{
1444 u16 phy_reg; 1444 u16 phy_reg;
1445 1445
1446 /* Phyxcer capability setting */ 1446 /* Phyxcer capability setting */
1447 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1447 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1448 1448
@@ -1457,7 +1457,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1457 case ULI526X_100MHF: phy_reg |= 0x80; break; 1457 case ULI526X_100MHF: phy_reg |= 0x80; break;
1458 case ULI526X_100MFD: phy_reg |= 0x100; break; 1458 case ULI526X_100MFD: phy_reg |= 0x100; break;
1459 } 1459 }
1460 1460
1461 } 1461 }
1462 1462
1463 /* Write new capability to Phyxcer Reg4 */ 1463 /* Write new capability to Phyxcer Reg4 */
@@ -1556,7 +1556,7 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
1556 /* Write a word data to PHY controller */ 1556 /* Write a word data to PHY controller */
1557 for ( i = 0x8000; i > 0; i >>= 1) 1557 for ( i = 0x8000; i > 0; i >>= 1)
1558 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1558 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1559 1559
1560} 1560}
1561 1561
1562 1562
@@ -1574,7 +1574,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1574 return phy_readby_cr10(iobase, phy_addr, offset); 1574 return phy_readby_cr10(iobase, phy_addr, offset);
1575 /* M5261/M5263 Chip */ 1575 /* M5261/M5263 Chip */
1576 ioaddr = iobase + DCR9; 1576 ioaddr = iobase + DCR9;
1577 1577
1578 /* Send 33 synchronization clock to Phy controller */ 1578 /* Send 33 synchronization clock to Phy controller */
1579 for (i = 0; i < 35; i++) 1579 for (i = 0; i < 35; i++)
1580 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1580 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
@@ -1610,7 +1610,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1610static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) 1610static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1611{ 1611{
1612 unsigned long ioaddr,cr10_value; 1612 unsigned long ioaddr,cr10_value;
1613 1613
1614 ioaddr = iobase + DCR10; 1614 ioaddr = iobase + DCR10;
1615 cr10_value = phy_addr; 1615 cr10_value = phy_addr;
1616 cr10_value = (cr10_value<<5) + offset; 1616 cr10_value = (cr10_value<<5) + offset;
@@ -1629,7 +1629,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1629static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1629static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
1630{ 1630{
1631 unsigned long ioaddr,cr10_value; 1631 unsigned long ioaddr,cr10_value;
1632 1632
1633 ioaddr = iobase + DCR10; 1633 ioaddr = iobase + DCR10;
1634 cr10_value = phy_addr; 1634 cr10_value = phy_addr;
1635 cr10_value = (cr10_value<<5) + offset; 1635 cr10_value = (cr10_value<<5) + offset;
@@ -1659,7 +1659,7 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1659static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) 1659static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1660{ 1660{
1661 u16 phy_data; 1661 u16 phy_data;
1662 1662
1663 outl(0x50000 , ioaddr); 1663 outl(0x50000 , ioaddr);
1664 udelay(1); 1664 udelay(1);
1665 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1665 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c4d5e4..64ecf929d2ac 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -38,12 +38,12 @@
38 Copyright (C) 2001 Manfred Spraul 38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik) 39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik) 40 * Replace some MII-related magic numbers with constants (jgarzik)
41 41
42 TODO: 42 TODO:
43 * enable pci_power_off 43 * enable pci_power_off
44 * Wake-On-LAN 44 * Wake-On-LAN
45*/ 45*/
46 46
47#define DRV_NAME "winbond-840" 47#define DRV_NAME "winbond-840"
48#define DRV_VERSION "1.01-d" 48#define DRV_VERSION "1.01-d"
49#define DRV_RELDATE "Nov-17-2001" 49#define DRV_RELDATE "Nov-17-2001"
@@ -57,7 +57,7 @@ c-help-name: Winbond W89c840 PCI Ethernet support
57c-help-symbol: CONFIG_WINBOND_840 57c-help-symbol: CONFIG_WINBOND_840
58c-help: This driver is for the Winbond W89c840 chip. It also works with 58c-help: This driver is for the Winbond W89c840 chip. It also works with
59c-help: the TX9882 chip on the Compex RL100-ATX board. 59c-help: the TX9882 chip on the Compex RL100-ATX board.
60c-help: More specific information and updates are available from 60c-help: More specific information and updates are available from
61c-help: http://www.scyld.com/network/drivers.html 61c-help: http://www.scyld.com/network/drivers.html
62*/ 62*/
63 63
@@ -207,7 +207,7 @@ Test with 'ping -s 10000' on a fast computer.
207 207
208*/ 208*/
209 209
210 210
211 211
212/* 212/*
213 PCI probe table. 213 PCI probe table.
@@ -374,7 +374,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
374static struct ethtool_ops netdev_ethtool_ops; 374static struct ethtool_ops netdev_ethtool_ops;
375static int netdev_close(struct net_device *dev); 375static int netdev_close(struct net_device *dev);
376 376
377 377
378 378
379static int __devinit w840_probe1 (struct pci_dev *pdev, 379static int __devinit w840_probe1 (struct pci_dev *pdev,
380 const struct pci_device_id *ent) 380 const struct pci_device_id *ent)
@@ -434,7 +434,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
434 np->mii_if.mdio_read = mdio_read; 434 np->mii_if.mdio_read = mdio_read;
435 np->mii_if.mdio_write = mdio_write; 435 np->mii_if.mdio_write = mdio_write;
436 np->base_addr = ioaddr; 436 np->base_addr = ioaddr;
437 437
438 pci_set_drvdata(pdev, dev); 438 pci_set_drvdata(pdev, dev);
439 439
440 if (dev->mem_start) 440 if (dev->mem_start)
@@ -510,7 +510,7 @@ err_out_netdev:
510 return -ENODEV; 510 return -ENODEV;
511} 511}
512 512
513 513
514/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are 514/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
515 often serial bit streams generated by the host processor. 515 often serial bit streams generated by the host processor.
516 The example below is for the common 93c46 EEPROM, 64 16 bit words. */ 516 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
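The "example below" referenced in that comment is not part of this hunk, so here is a hedged sketch of a 93C46 read in generic C: assert chip select, clock out the start bit, the READ opcode (10) and a 6-bit address, then clock in 16 data bits. The set_cs and clock_bit helpers are placeholders, not the driver's EEPROM accessors.

/* Read one 16-bit word from a 93C46-style EEPROM (64 x 16 organisation).
 * set_cs() and clock_bit() are hypothetical stand-ins for the register
 * wiggling; the bit sequence is the part that matters. */
static unsigned short eeprom_93c46_read(void (*set_cs)(int on),
                                        int (*clock_bit)(int out),
                                        unsigned int addr)
{
	/* start bit (1) + READ opcode (10) + 6 address bits, MSB first */
	unsigned int cmd = (0x6u << 6) | (addr & 0x3f);
	unsigned short val = 0;
	int i;

	set_cs(1);
	for (i = 8; i >= 0; i--)              /* 9 command bits          */
		clock_bit((cmd >> i) & 1);
	for (i = 15; i >= 0; i--)             /* 16 data bits, MSB first */
		val = (val << 1) | (clock_bit(0) & 1);
	set_cs(0);

	return val;
}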
@@ -660,7 +660,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
660 return; 660 return;
661} 661}
662 662
663 663
664static int netdev_open(struct net_device *dev) 664static int netdev_open(struct net_device *dev)
665{ 665{
666 struct netdev_private *np = netdev_priv(dev); 666 struct netdev_private *np = netdev_priv(dev);
@@ -731,7 +731,7 @@ static int update_link(struct net_device *dev)
731 dev->name, np->phys[0]); 731 dev->name, np->phys[0]);
732 netif_carrier_on(dev); 732 netif_carrier_on(dev);
733 } 733 }
734 734
735 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { 735 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
736 /* If the link partner doesn't support autonegotiation 736 /* If the link partner doesn't support autonegotiation
737 * the MII detects its abilities with the "parallel detection". 737 * the MII detects its abilities with the "parallel detection".
@@ -761,7 +761,7 @@ static int update_link(struct net_device *dev)
761 result |= 0x20000000; 761 result |= 0x20000000;
762 if (result != np->csr6 && debug) 762 if (result != np->csr6 && debug)
763 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n", 763 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
764 dev->name, fasteth ? 100 : 10, 764 dev->name, fasteth ? 100 : 10,
765 duplex ? "full" : "half", np->phys[0]); 765 duplex ? "full" : "half", np->phys[0]);
766 return result; 766 return result;
767} 767}
@@ -947,7 +947,7 @@ static void init_registers(struct net_device *dev)
947 iowrite32(i, ioaddr + PCIBusCfg); 947 iowrite32(i, ioaddr + PCIBusCfg);
948 948
949 np->csr6 = 0; 949 np->csr6 = 0;
950 /* 128 byte Tx threshold; 950 /* 128 byte Tx threshold;
951 Transmit on; Receive on; */ 951 Transmit on; Receive on; */
952 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); 952 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
953 953
@@ -1584,7 +1584,7 @@ static int netdev_close(struct net_device *dev)
1584static void __devexit w840_remove1 (struct pci_dev *pdev) 1584static void __devexit w840_remove1 (struct pci_dev *pdev)
1585{ 1585{
1586 struct net_device *dev = pci_get_drvdata(pdev); 1586 struct net_device *dev = pci_get_drvdata(pdev);
1587 1587
1588 if (dev) { 1588 if (dev) {
1589 struct netdev_private *np = netdev_priv(dev); 1589 struct netdev_private *np = netdev_priv(dev);
1590 unregister_netdev(dev); 1590 unregister_netdev(dev);
@@ -1640,7 +1640,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1640 1640
1641 spin_unlock_wait(&dev->xmit_lock); 1641 spin_unlock_wait(&dev->xmit_lock);
1642 synchronize_irq(dev->irq); 1642 synchronize_irq(dev->irq);
1643 1643
1644 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; 1644 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1645 1645
1646 /* no more hardware accesses behind this line. */ 1646 /* no more hardware accesses behind this line. */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 56344103ac23..63c2175ed138 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards 2 * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
3 * 3 *
4 * This software is (C) by the respective authors, and licensed under the GPL 4 * This software is (C) by the respective authors, and licensed under the GPL
5 * License. 5 * License.
6 * 6 *
7 * Written by Arjan van de Ven for Red Hat, Inc. 7 * Written by Arjan van de Ven for Red Hat, Inc.
8 * Based on work by Jeff Garzik, Doug Ledford and Donald Becker 8 * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
9 * 9 *
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference. 11 * of the GNU General Public License, incorporated herein by reference.
@@ -93,7 +93,7 @@ struct xircom_private {
93 93
94 unsigned long io_port; 94 unsigned long io_port;
95 int open; 95 int open;
96 96
97 /* transmit_used is the rotating counter that indicates which transmit 97 /* transmit_used is the rotating counter that indicates which transmit
98 descriptor has to be used next */ 98 descriptor has to be used next */
99 int transmit_used; 99 int transmit_used;
@@ -153,10 +153,10 @@ static struct pci_device_id xircom_pci_table[] = {
153MODULE_DEVICE_TABLE(pci, xircom_pci_table); 153MODULE_DEVICE_TABLE(pci, xircom_pci_table);
154 154
155static struct pci_driver xircom_ops = { 155static struct pci_driver xircom_ops = {
156 .name = "xircom_cb", 156 .name = "xircom_cb",
157 .id_table = xircom_pci_table, 157 .id_table = xircom_pci_table,
158 .probe = xircom_probe, 158 .probe = xircom_probe,
159 .remove = xircom_remove, 159 .remove = xircom_remove,
160 .suspend =NULL, 160 .suspend =NULL,
161 .resume =NULL 161 .resume =NULL
162}; 162};
@@ -174,7 +174,7 @@ static void print_binary(unsigned int number)
174 buffer[i2++]='1'; 174 buffer[i2++]='1';
175 else 175 else
176 buffer[i2++]='0'; 176 buffer[i2++]='0';
177 if ((i&3)==0) 177 if ((i&3)==0)
178 buffer[i2++]=' '; 178 buffer[i2++]=' ';
179 } 179 }
180 printk("%s\n",buffer); 180 printk("%s\n",buffer);
@@ -196,10 +196,10 @@ static struct ethtool_ops netdev_ethtool_ops = {
196 196
197/* xircom_probe is the code that gets called on device insertion. 197/* xircom_probe is the code that gets called on device insertion.
198 it sets up the hardware and registers the device with the network layer. 198 it sets up the hardware and registers the device with the network layer.
199 199
200 TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the 200 TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
201 first two packets that get sent, and pump hates that. 201 first two packets that get sent, and pump hates that.
202 202
203 */ 203 */
204static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) 204static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
205{ 205{
@@ -209,7 +209,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
209 unsigned long flags; 209 unsigned long flags;
210 unsigned short tmp16; 210 unsigned short tmp16;
211 enter("xircom_probe"); 211 enter("xircom_probe");
212 212
213 /* First do the PCI initialisation */ 213 /* First do the PCI initialisation */
214 214
215 if (pci_enable_device(pdev)) 215 if (pci_enable_device(pdev))
@@ -217,24 +217,24 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
217 217
218 /* disable all powermanagement */ 218 /* disable all powermanagement */
219 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); 219 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
220 220
221 pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ 221 pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
222 222
223 /* clear PCI status, if any */ 223 /* clear PCI status, if any */
224 pci_read_config_word (pdev,PCI_STATUS, &tmp16); 224 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
225 pci_write_config_word (pdev, PCI_STATUS,tmp16); 225 pci_write_config_word (pdev, PCI_STATUS,tmp16);
226 226
227 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev); 227 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
228 228
229 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 229 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
230 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); 230 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
231 return -ENODEV; 231 return -ENODEV;
232 } 232 }
233 233
234 /* 234 /*
235 Before changing the hardware, allocate the memory. 235 Before changing the hardware, allocate the memory.
236 This way, we can fail gracefully if not enough memory 236 This way, we can fail gracefully if not enough memory
237 is available. 237 is available.
238 */ 238 */
239 dev = alloc_etherdev(sizeof(struct xircom_private)); 239 dev = alloc_etherdev(sizeof(struct xircom_private));
240 if (!dev) { 240 if (!dev) {
@@ -242,13 +242,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
242 goto device_fail; 242 goto device_fail;
243 } 243 }
244 private = netdev_priv(dev); 244 private = netdev_priv(dev);
245 245
246 /* Allocate the send/receive buffers */ 246 /* Allocate the send/receive buffers */
247 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 247 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
248 if (private->rx_buffer == NULL) { 248 if (private->rx_buffer == NULL) {
249 printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); 249 printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
250 goto rx_buf_fail; 250 goto rx_buf_fail;
251 } 251 }
252 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 252 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
253 if (private->tx_buffer == NULL) { 253 if (private->tx_buffer == NULL) {
254 printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); 254 printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
@@ -265,11 +265,11 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
265 spin_lock_init(&private->lock); 265 spin_lock_init(&private->lock);
266 dev->irq = pdev->irq; 266 dev->irq = pdev->irq;
267 dev->base_addr = private->io_port; 267 dev->base_addr = private->io_port;
268 268
269 initialize_card(private); 269 initialize_card(private);
270 read_mac_address(private); 270 read_mac_address(private);
271 setup_descriptors(private); 271 setup_descriptors(private);
272 272
273 dev->open = &xircom_open; 273 dev->open = &xircom_open;
274 dev->hard_start_xmit = &xircom_start_xmit; 274 dev->hard_start_xmit = &xircom_start_xmit;
275 dev->stop = &xircom_close; 275 dev->stop = &xircom_close;
@@ -285,19 +285,19 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
285 printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); 285 printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
286 goto reg_fail; 286 goto reg_fail;
287 } 287 }
288 288
289 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq); 289 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
290 /* start the transmitter to get a heartbeat */ 290 /* start the transmitter to get a heartbeat */
291 /* TODO: send 2 dummy packets here */ 291 /* TODO: send 2 dummy packets here */
292 transceiver_voodoo(private); 292 transceiver_voodoo(private);
293 293
294 spin_lock_irqsave(&private->lock,flags); 294 spin_lock_irqsave(&private->lock,flags);
295 activate_transmitter(private); 295 activate_transmitter(private);
296 activate_receiver(private); 296 activate_receiver(private);
297 spin_unlock_irqrestore(&private->lock,flags); 297 spin_unlock_irqrestore(&private->lock,flags);
298 298
299 trigger_receive(private); 299 trigger_receive(private);
300 300
301 leave("xircom_probe"); 301 leave("xircom_probe");
302 return 0; 302 return 0;
303 303
@@ -332,7 +332,7 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
332 free_netdev(dev); 332 free_netdev(dev);
333 pci_set_drvdata(pdev, NULL); 333 pci_set_drvdata(pdev, NULL);
334 leave("xircom_remove"); 334 leave("xircom_remove");
335} 335}
336 336
337static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 337static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
338{ 338{
@@ -346,11 +346,11 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
346 spin_lock(&card->lock); 346 spin_lock(&card->lock);
347 status = inl(card->io_port+CSR5); 347 status = inl(card->io_port+CSR5);
348 348
349#ifdef DEBUG 349#ifdef DEBUG
350 print_binary(status); 350 print_binary(status);
351 printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); 351 printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
352 printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); 352 printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
353#endif 353#endif
354 /* Handle shared irq and hotplug */ 354 /* Handle shared irq and hotplug */
355 if (status == 0 || status == 0xffffffff) { 355 if (status == 0 || status == 0xffffffff) {
356 spin_unlock(&card->lock); 356 spin_unlock(&card->lock);
@@ -366,21 +366,21 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
366 netif_carrier_on(dev); 366 netif_carrier_on(dev);
367 else 367 else
368 netif_carrier_off(dev); 368 netif_carrier_off(dev);
369 369
370 } 370 }
371 371
372 /* Clear all remaining interrupts */ 372 /* Clear all remaining interrupts */
373 status |= 0xffffffff; /* FIXME: make this clear only the 373 status |= 0xffffffff; /* FIXME: make this clear only the
374 real existing bits */ 374 real existing bits */
375 outl(status,card->io_port+CSR5); 375 outl(status,card->io_port+CSR5);
376
377 376
378 for (i=0;i<NUMDESCRIPTORS;i++) 377
378 for (i=0;i<NUMDESCRIPTORS;i++)
379 investigate_write_descriptor(dev,card,i,bufferoffsets[i]); 379 investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
380 for (i=0;i<NUMDESCRIPTORS;i++) 380 for (i=0;i<NUMDESCRIPTORS;i++)
381 investigate_read_descriptor(dev,card,i,bufferoffsets[i]); 381 investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
382 382
383 383
384 spin_unlock(&card->lock); 384 spin_unlock(&card->lock);
385 leave("xircom_interrupt"); 385 leave("xircom_interrupt");
386 return IRQ_HANDLED; 386 return IRQ_HANDLED;
@@ -393,38 +393,38 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
393 int nextdescriptor; 393 int nextdescriptor;
394 int desc; 394 int desc;
395 enter("xircom_start_xmit"); 395 enter("xircom_start_xmit");
396 396
397 card = netdev_priv(dev); 397 card = netdev_priv(dev);
398 spin_lock_irqsave(&card->lock,flags); 398 spin_lock_irqsave(&card->lock,flags);
399 399
400 /* First see if we can free some descriptors */ 400 /* First see if we can free some descriptors */
401 for (desc=0;desc<NUMDESCRIPTORS;desc++) 401 for (desc=0;desc<NUMDESCRIPTORS;desc++)
402 investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); 402 investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
403 403
404 404
405 nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); 405 nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
406 desc = card->transmit_used; 406 desc = card->transmit_used;
407 407
408 /* only send the packet if the descriptor is free */ 408 /* only send the packet if the descriptor is free */
409 if (card->tx_buffer[4*desc]==0) { 409 if (card->tx_buffer[4*desc]==0) {
410 /* Copy the packet data; zero the memory first as the card 410 /* Copy the packet data; zero the memory first as the card
411 sometimes sends more than you ask it to. */ 411 sometimes sends more than you ask it to. */
412 412
413 memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); 413 memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
414 memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); 414 memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
415 415
416 416
417 /* FIXME: The specification tells us that the length we send HAS to be a multiple of 417 /* FIXME: The specification tells us that the length we send HAS to be a multiple of
418 4 bytes. */ 418 4 bytes. */
419 419
420 card->tx_buffer[4*desc+1] = skb->len; 420 card->tx_buffer[4*desc+1] = skb->len;
421 if (desc == NUMDESCRIPTORS-1) 421 if (desc == NUMDESCRIPTORS-1)
422 card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */ 422 card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */
423 423
424 card->tx_buffer[4*desc+1] |= 0xF0000000; 424 card->tx_buffer[4*desc+1] |= 0xF0000000;
425 /* 0xF0... means want interrupts*/ 425 /* 0xF0... means want interrupts*/
426 card->tx_skb[desc] = skb; 426 card->tx_skb[desc] = skb;
427 427
428 wmb(); 428 wmb();
429 /* This gives the descriptor to the card */ 429 /* This gives the descriptor to the card */
430 card->tx_buffer[4*desc] = 0x80000000; 430 card->tx_buffer[4*desc] = 0x80000000;
@@ -433,18 +433,18 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
433 netif_stop_queue(dev); 433 netif_stop_queue(dev);
434 } 434 }
435 card->transmit_used = nextdescriptor; 435 card->transmit_used = nextdescriptor;
436 leave("xircom-start_xmit - sent"); 436 leave("xircom-start_xmit - sent");
437 spin_unlock_irqrestore(&card->lock,flags); 437 spin_unlock_irqrestore(&card->lock,flags);
438 return 0; 438 return 0;
439 } 439 }
440 440
441 441
442 442
443 /* Uh oh... no free descriptor... drop the packet */ 443 /* Uh oh... no free descriptor... drop the packet */
444 netif_stop_queue(dev); 444 netif_stop_queue(dev);
445 spin_unlock_irqrestore(&card->lock,flags); 445 spin_unlock_irqrestore(&card->lock,flags);
446 trigger_transmit(card); 446 trigger_transmit(card);
447 447
448 return -EIO; 448 return -EIO;
449} 449}
450 450
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
462 leave("xircom_open - No IRQ"); 462 leave("xircom_open - No IRQ");
463 return retval; 463 return retval;
464 } 464 }
465 465
466 xircom_up(xp); 466 xircom_up(xp);
467 xp->open = 1; 467 xp->open = 1;
468 leave("xircom_open"); 468 leave("xircom_open");
@@ -473,31 +473,31 @@ static int xircom_close(struct net_device *dev)
473{ 473{
474 struct xircom_private *card; 474 struct xircom_private *card;
475 unsigned long flags; 475 unsigned long flags;
476 476
477 enter("xircom_close"); 477 enter("xircom_close");
478 card = netdev_priv(dev); 478 card = netdev_priv(dev);
479 netif_stop_queue(dev); /* we don't want new packets */ 479 netif_stop_queue(dev); /* we don't want new packets */
480 480
481 481
482 spin_lock_irqsave(&card->lock,flags); 482 spin_lock_irqsave(&card->lock,flags);
483 483
484 disable_all_interrupts(card); 484 disable_all_interrupts(card);
485#if 0 485#if 0
486 /* We can enable this again once we send dummy packets on ifconfig ethX up */ 486 /* We can enable this again once we send dummy packets on ifconfig ethX up */
487 deactivate_receiver(card); 487 deactivate_receiver(card);
488 deactivate_transmitter(card); 488 deactivate_transmitter(card);
489#endif 489#endif
490 remove_descriptors(card); 490 remove_descriptors(card);
491 491
492 spin_unlock_irqrestore(&card->lock,flags); 492 spin_unlock_irqrestore(&card->lock,flags);
493 493
494 card->open = 0; 494 card->open = 0;
495 free_irq(dev->irq,dev); 495 free_irq(dev->irq,dev);
496 496
497 leave("xircom_close"); 497 leave("xircom_close");
498 498
499 return 0; 499 return 0;
500 500
501} 501}
502 502
503 503
@@ -506,8 +506,8 @@ static struct net_device_stats *xircom_get_stats(struct net_device *dev)
506{ 506{
507 struct xircom_private *card = netdev_priv(dev); 507 struct xircom_private *card = netdev_priv(dev);
508 return &card->stats; 508 return &card->stats;
509} 509}
510 510
511 511
512#ifdef CONFIG_NET_POLL_CONTROLLER 512#ifdef CONFIG_NET_POLL_CONTROLLER
513static void xircom_poll_controller(struct net_device *dev) 513static void xircom_poll_controller(struct net_device *dev)
@@ -540,7 +540,7 @@ static void initialize_card(struct xircom_private *card)
540 outl(val, card->io_port + CSR0); 540 outl(val, card->io_port + CSR0);
541 541
542 542
543 val = 0; /* Value 0x00 is a safe and conservative value 543 val = 0; /* Value 0x00 is a safe and conservative value
544 for the PCI configuration settings */ 544 for the PCI configuration settings */
545 outl(val, card->io_port + CSR0); 545 outl(val, card->io_port + CSR0);
546 546
@@ -617,23 +617,23 @@ static void setup_descriptors(struct xircom_private *card)
617 617
618 /* Rx Descr2: address of the buffer 618 /* Rx Descr2: address of the buffer
619 we store the buffer at the 2nd half of the page */ 619 we store the buffer at the 2nd half of the page */
620 620
621 address = (unsigned long) card->rx_dma_handle; 621 address = (unsigned long) card->rx_dma_handle;
622 card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); 622 card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
623 /* Rx Desc3: address of 2nd buffer -> 0 */ 623 /* Rx Desc3: address of 2nd buffer -> 0 */
624 card->rx_buffer[i*4 + 3] = 0; 624 card->rx_buffer[i*4 + 3] = 0;
625 } 625 }
626 626
627 wmb(); 627 wmb();
628 /* Write the receive descriptor ring address to the card */ 628 /* Write the receive descriptor ring address to the card */
629 address = (unsigned long) card->rx_dma_handle; 629 address = (unsigned long) card->rx_dma_handle;
630 val = cpu_to_le32(address); 630 val = cpu_to_le32(address);
631 outl(val, card->io_port + CSR3); /* Receive descr list address */ 631 outl(val, card->io_port + CSR3); /* Receive descr list address */
632 632
633 633
634 /* transmit descriptors */ 634 /* transmit descriptors */
635 memset(card->tx_buffer, 0, 128); /* clear the descriptors */ 635 memset(card->tx_buffer, 0, 128); /* clear the descriptors */
636 636
637 for (i=0;i<NUMDESCRIPTORS;i++ ) { 637 for (i=0;i<NUMDESCRIPTORS;i++ ) {
638 /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ 638 /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
639 card->tx_buffer[i*4 + 0] = 0x00000000; 639 card->tx_buffer[i*4 + 0] = 0x00000000;
@@ -641,7 +641,7 @@ static void setup_descriptors(struct xircom_private *card)
641 card->tx_buffer[i*4 + 1] = 1536; 641 card->tx_buffer[i*4 + 1] = 1536;
642 if (i==NUMDESCRIPTORS-1) 642 if (i==NUMDESCRIPTORS-1)
643 card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */ 643 card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
644 644
645 /* Tx Descr2: address of the buffer 645 /* Tx Descr2: address of the buffer
646 we store the buffer at the 2nd half of the page */ 646 we store the buffer at the 2nd half of the page */
647 address = (unsigned long) card->tx_dma_handle; 647 address = (unsigned long) card->tx_dma_handle;
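Each descriptor filled in above is four little-endian 32-bit words: status (ownership bit 31), control/length (bit 25 marking the last descriptor of the ring), the DMA address of the buffer, and a second buffer address that this driver leaves at zero. A struct-style view of that layout, for illustration only since the driver indexes a flat u32 array instead:

/* Illustrative view of the 4-word tulip-style descriptor the code
 * above builds through card->rx_buffer/tx_buffer.  Field names are
 * descriptive, not taken from the driver. */
#include <linux/types.h>

struct xircom_desc {
	u32 status;     /* bit 31: owned by the card (0x80000000)         */
	u32 control;    /* buffer length; bit 25: last descriptor of ring */
	u32 buffer1;    /* DMA address of the data buffer                 */
	u32 buffer2;    /* second buffer, unused here -> 0                */
};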
@@ -748,7 +748,7 @@ static int receive_active(struct xircom_private *card)
748activate_receiver enables the receiver on the card. 748activate_receiver enables the receiver on the card.
749Before being allowed to activate the receiver, the receiver 749Before being allowed to activate the receiver, the receiver
750must be completely de-activated. To achieve this, 750must be completely de-activated. To achieve this,
751this code actually disables the receiver first; then it waits for the 751this code actually disables the receiver first; then it waits for the
752receiver to become inactive, then it activates the receiver and then 752receiver to become inactive, then it activates the receiver and then
753it waits for the receiver to be active. 753it waits for the receiver to be active.
754 754
@@ -762,13 +762,13 @@ static void activate_receiver(struct xircom_private *card)
762 762
763 763
764 val = inl(card->io_port + CSR6); /* Operation mode */ 764 val = inl(card->io_port + CSR6); /* Operation mode */
765 765
766 /* If the "active" bit is set and the receiver is already 766 /* If the "active" bit is set and the receiver is already
767 active, no need to do the expensive thing */ 767 active, no need to do the expensive thing */
768 if ((val&2) && (receive_active(card))) 768 if ((val&2) && (receive_active(card)))
769 return; 769 return;
770 770
771 771
772 val = val & ~2; /* disable the receiver */ 772 val = val & ~2; /* disable the receiver */
773 outl(val, card->io_port + CSR6); 773 outl(val, card->io_port + CSR6);
774 774
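The hunk above stops right after the disable step, so a compact sketch of the whole sequence the comment describes (disable, wait for inactive, enable, wait for active) may help. The read_csr6/write_csr6/rx_active helpers are hypothetical stand-ins; the real code bounds each wait with a udelay() loop and prints an error if the state never settles.

/* Sketch of the activate_receiver() sequence described above. */
static void activate_receiver_sketch(unsigned int (*read_csr6)(void),
                                     void (*write_csr6)(unsigned int),
                                     int (*rx_active)(void))
{
	unsigned int val = read_csr6();

	if ((val & 2) && rx_active())      /* already enabled and running */
		return;

	write_csr6(val & ~2);              /* 1. disable the receiver     */
	while (rx_active())                /* 2. wait until it is idle    */
		;
	write_csr6(read_csr6() | 2);       /* 3. enable the receiver      */
	while (!rx_active())               /* 4. wait until it is running */
		;
}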
@@ -805,7 +805,7 @@ static void activate_receiver(struct xircom_private *card)
805 805
806/* 806/*
807deactivate_receiver disables the receiver on the card. 807deactivate_receiver disables the receiver on the card.
808To achieve this, this code disables the receiver first; 808To achieve this, this code disables the receiver first;
809then it waits for the receiver to become inactive. 809then it waits for the receiver to become inactive.
810 810
811must be called with the lock held and interrupts disabled. 811must be called with the lock held and interrupts disabled.
@@ -840,7 +840,7 @@ static void deactivate_receiver(struct xircom_private *card)
840activate_transmitter enables the transmitter on the card. 840activate_transmitter enables the transmitter on the card.
841Before being allowed to activate the transmitter, the transmitter 841Before being allowed to activate the transmitter, the transmitter
842must be completely de-activated. To achieve this, 842must be completely de-activated. To achieve this,
843this code actually disables the transmitter first; then it waits for the 843this code actually disables the transmitter first; then it waits for the
844transmitter to become inactive, then it activates the transmitter and then 844transmitter to become inactive, then it activates the transmitter and then
845it waits for the transmitter to be active again. 845it waits for the transmitter to be active again.
846 846
@@ -856,7 +856,7 @@ static void activate_transmitter(struct xircom_private *card)
856 val = inl(card->io_port + CSR6); /* Operation mode */ 856 val = inl(card->io_port + CSR6); /* Operation mode */
857 857
858 /* If the "active" bit is set and the transmitter is already 858 /* If the "active" bit is set and the transmitter is already
859 active, no need to do the expensive thing */ 859 active, no need to do the expensive thing */
860 if ((val&(1<<13)) && (transmit_active(card))) 860 if ((val&(1<<13)) && (transmit_active(card)))
861 return; 861 return;
862 862
@@ -896,7 +896,7 @@ static void activate_transmitter(struct xircom_private *card)
896 896
897/* 897/*
898deactivate_transmitter disables the transmitter on the card. 898deactivate_transmitter disables the transmitter on the card.
899To achieve this, this code disables the transmitter first; 899To achieve this, this code disables the transmitter first;
900then it waits for the transmitter to become inactive. 900then it waits for the transmitter to become inactive.
901 901
902must be called with the lock held and interrupts disabled. 902must be called with the lock held and interrupts disabled.
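Deactivation, for both the receiver (CSR6 bit 1) and the transmitter (CSR6 bit 13), is just the first half of that sequence: clear the enable bit, then poll until the engine reports idle. A hedged sketch with an assumed mask/callback interface and timeout, not the driver's literal code:

static void deactivate_engine_sketch(struct xircom_private *card,
                                     unsigned int enable_mask,
                                     int (*still_active)(struct xircom_private *))
{
        unsigned int val = inl(card->io_port + CSR6);
        int timeout;

        outl(val & ~enable_mask, card->io_port + CSR6); /* clear the enable bit */
        for (timeout = 50; still_active(card) && timeout; timeout--)
                udelay(50);                             /* wait for the engine to wind down */
}

/* e.g. deactivate_engine_sketch(card, 2, receive_active);
 *      deactivate_engine_sketch(card, 1 << 13, transmit_active); */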
@@ -990,7 +990,7 @@ static void disable_all_interrupts(struct xircom_private *card)
990{ 990{
991 unsigned int val; 991 unsigned int val;
992 enter("disable_all_interrupts"); 992 enter("disable_all_interrupts");
993 993
994 val = 0; /* disable all interrupts */ 994 val = 0; /* disable all interrupts */
995 outl(val, card->io_port + CSR7); 995 outl(val, card->io_port + CSR7);
996 996
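As the hunk shows, masking every interrupt source is a single write of 0 to CSR7. Going the other way, the various enable_*_interrupt() helpers presumably read-modify-write the same register; a sketch of that shape, where the mask argument is a placeholder rather than a documented CSR7 bit assignment:

static void enable_interrupt_bits_sketch(struct xircom_private *card,
                                         unsigned int mask)
{
        unsigned int val;

        val = inl(card->io_port + CSR7);        /* current interrupt enables */
        outl(val | mask, card->io_port + CSR7); /* add the requested sources */
}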
@@ -1031,8 +1031,8 @@ static int enable_promisc(struct xircom_private *card)
1031 unsigned int val; 1031 unsigned int val;
1032 enter("enable_promisc"); 1032 enter("enable_promisc");
1033 1033
1034 val = inl(card->io_port + CSR6); 1034 val = inl(card->io_port + CSR6);
1035 val = val | (1 << 6); 1035 val = val | (1 << 6);
1036 outl(val, card->io_port + CSR6); 1036 outl(val, card->io_port + CSR6);
1037 1037
1038 leave("enable_promisc"); 1038 leave("enable_promisc");
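enable_promisc() only ever sets bit 6 of CSR6. A hypothetical counterpart that clears the bit again, shown purely to illustrate the read-modify-write convention used for CSR6; the driver has no such function in this hunk:

static void disable_promisc_sketch(struct xircom_private *card)
{
        unsigned int val;

        val = inl(card->io_port + CSR6);
        val = val & ~(1 << 6);                  /* clear the promiscuous-mode bit */
        outl(val, card->io_port + CSR6);
}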
@@ -1042,7 +1042,7 @@ static int enable_promisc(struct xircom_private *card)
1042 1042
1043 1043
1044 1044
1045/* 1045/*
1046link_status() checks the link status and returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link. 1046link_status() checks the link status and returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link.
1047 1047
1048Must be called in locked state with interrupts disabled 1048Must be called in locked state with interrupts disabled
@@ -1051,15 +1051,15 @@ static int link_status(struct xircom_private *card)
1051{ 1051{
1052 unsigned int val; 1052 unsigned int val;
1053 enter("link_status"); 1053 enter("link_status");
1054 1054
1055 val = inb(card->io_port + CSR12); 1055 val = inb(card->io_port + CSR12);
1056 1056
1057 if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not a 10mbit link */ 1057 if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not a 10mbit link */
1058 return 10; 1058 return 10;
1059 if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not a 100mbit link */ 1059 if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not a 100mbit link */
1060 return 100; 1060 return 100;
1061 1061
1062 /* If we get here -> no link at all */ 1062 /* If we get here -> no link at all */
1063 1063
1064 leave("link_status"); 1064 leave("link_status");
1065 return 0; 1065 return 0;
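A sketch of how a caller might consume the 0/10/100 return value; the log strings are invented, and as the comment above notes the caller must already hold card->lock with interrupts disabled:

static void report_link_sketch(struct xircom_private *card)
{
        int speed = link_status(card);          /* 0, 10 or 100 */

        if (speed == 0)
                printk(KERN_INFO "xircom_cb: no link\n");
        else
                printk(KERN_INFO "xircom_cb: %d mbit link detected\n", speed);
}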
@@ -1071,7 +1071,7 @@ static int link_status(struct xircom_private *card)
1071 1071
1072/* 1072/*
1073 read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. 1073 read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
1074 1074
1075 This function takes the spinlock itself and must therefore not be called with the lock already held. 1075 This function takes the spinlock itself and must therefore not be called with the lock already held.
1076 */ 1076 */
1077static void read_mac_address(struct xircom_private *card) 1077static void read_mac_address(struct xircom_private *card)
@@ -1081,7 +1081,7 @@ static void read_mac_address(struct xircom_private *card)
1081 int i; 1081 int i;
1082 1082
1083 enter("read_mac_address"); 1083 enter("read_mac_address");
1084 1084
1085 spin_lock_irqsave(&card->lock, flags); 1085 spin_lock_irqsave(&card->lock, flags);
1086 1086
1087 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ 1087 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
@@ -1095,7 +1095,7 @@ static void read_mac_address(struct xircom_private *card)
1095 outl(i + 3, card->io_port + CSR10); 1095 outl(i + 3, card->io_port + CSR10);
1096 data_count = inl(card->io_port + CSR9) & 0xff; 1096 data_count = inl(card->io_port + CSR9) & 0xff;
1097 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { 1097 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
1098 /* 1098 /*
1099 * This is it. We have the data we want. 1099 * This is it. We have the data we want.
1100 */ 1100 */
1101 for (j = 0; j < 6; j++) { 1101 for (j = 0; j < 6; j++) {
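The tuple walk above uses a simple address/data window: an offset is written to CSR10 and the corresponding byte comes back in the low eight bits of CSR9. A small helper sketch (not present in the driver) that captures just that access pattern:

static unsigned char read_rom_byte_sketch(struct xircom_private *card, int offset)
{
        outl(offset, card->io_port + CSR10);    /* select the byte in the boot ROM/CIS */
        return inl(card->io_port + CSR9) & 0xff;
}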
@@ -1136,12 +1136,12 @@ static void transceiver_voodoo(struct xircom_private *card)
1136 spin_lock_irqsave(&card->lock, flags); 1136 spin_lock_irqsave(&card->lock, flags);
1137 1137
1138 outl(0x0008, card->io_port + CSR15); 1138 outl(0x0008, card->io_port + CSR15);
1139 udelay(25); 1139 udelay(25);
1140 outl(0xa8050000, card->io_port + CSR15); 1140 outl(0xa8050000, card->io_port + CSR15);
1141 udelay(25); 1141 udelay(25);
1142 outl(0xa00f0000, card->io_port + CSR15); 1142 outl(0xa00f0000, card->io_port + CSR15);
1143 udelay(25); 1143 udelay(25);
1144 1144
1145 spin_unlock_irqrestore(&card->lock, flags); 1145 spin_unlock_irqrestore(&card->lock, flags);
1146 1146
1147 netif_start_queue(card->dev); 1147 netif_start_queue(card->dev);
@@ -1163,15 +1163,15 @@ static void xircom_up(struct xircom_private *card)
1163 1163
1164 spin_lock_irqsave(&card->lock, flags); 1164 spin_lock_irqsave(&card->lock, flags);
1165 1165
1166 1166
1167 enable_link_interrupt(card); 1167 enable_link_interrupt(card);
1168 enable_transmit_interrupt(card); 1168 enable_transmit_interrupt(card);
1169 enable_receive_interrupt(card); 1169 enable_receive_interrupt(card);
1170 enable_common_interrupts(card); 1170 enable_common_interrupts(card);
1171 enable_promisc(card); 1171 enable_promisc(card);
1172 1172
1173 /* The card may already have received packets; process them now */ 1173 /* The card may already have received packets; process them now */
1174 for (i=0;i<NUMDESCRIPTORS;i++) 1174 for (i=0;i<NUMDESCRIPTORS;i++)
1175 investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); 1175 investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
1176 1176
1177 1177
@@ -1185,15 +1185,15 @@ static void xircom_up(struct xircom_private *card)
1185/* Bufferoffset is in BYTES */ 1185/* Bufferoffset is in BYTES */
1186static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) 1186static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
1187{ 1187{
1188 int status; 1188 int status;
1189 1189
1190 enter("investigate_read_descriptor"); 1190 enter("investigate_read_descriptor");
1191 status = card->rx_buffer[4*descnr]; 1191 status = card->rx_buffer[4*descnr];
1192 1192
1193 if ((status > 0)) { /* packet received */ 1193 if ((status > 0)) { /* packet received */
1194 1194
1195 /* TODO: discard error packets */ 1195 /* TODO: discard error packets */
1196 1196
1197 short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ 1197 short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
1198 struct sk_buff *skb; 1198 struct sk_buff *skb;
1199 1199
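The length calculation above implies the RX descriptor status layout this driver relies on: bits 26:16 hold the frame length, CRC included. A one-line helper restating that decode; the name is mine, not the driver's:

static int rx_frame_length_sketch(int status)
{
        return ((status >> 16) & 0x7ff) - 4;    /* drop the trailing 4-byte CRC */
}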
@@ -1216,7 +1216,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1216 dev->last_rx = jiffies; 1216 dev->last_rx = jiffies;
1217 card->stats.rx_packets++; 1217 card->stats.rx_packets++;
1218 card->stats.rx_bytes += pkt_len; 1218 card->stats.rx_bytes += pkt_len;
1219 1219
1220 out: 1220 out:
1221 /* give the buffer back to the card */ 1221 /* give the buffer back to the card */
1222 card->rx_buffer[4*descnr] = 0x80000000; 1222 card->rx_buffer[4*descnr] = 0x80000000;
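Writing 0x80000000 sets bit 31 of the status word, which the comment above describes as giving the buffer back to the card, i.e. returning descriptor ownership to the NIC. A tiny sketch of that re-arm step; the helper name is assumed:

static void rearm_rx_descriptor_sketch(struct xircom_private *card, int descnr)
{
        card->rx_buffer[4 * descnr] = 0x80000000;       /* descriptor owned by the NIC again */
}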
@@ -1234,9 +1234,9 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1234 int status; 1234 int status;
1235 1235
1236 enter("investigate_write_descriptor"); 1236 enter("investigate_write_descriptor");
1237 1237
1238 status = card->tx_buffer[4*descnr]; 1238 status = card->tx_buffer[4*descnr];
1239#if 0 1239#if 0
1240 if (status & 0x8000) { /* Major error */ 1240 if (status & 0x8000) { /* Major error */
1241 printk(KERN_ERR "Major transmit error status %x \n", status); 1241 printk(KERN_ERR "Major transmit error status %x \n", status);
1242 card->tx_buffer[4*descnr] = 0; 1242 card->tx_buffer[4*descnr] = 0;
@@ -1258,7 +1258,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1258 } 1258 }
1259 1259
1260 leave("investigate_write_descriptor"); 1260 leave("investigate_write_descriptor");
1261 1261
1262} 1262}
1263 1263
1264 1264
@@ -1271,8 +1271,8 @@ static int __init xircom_init(void)
1271static void __exit xircom_exit(void) 1271static void __exit xircom_exit(void)
1272{ 1272{
1273 pci_unregister_driver(&xircom_ops); 1273 pci_unregister_driver(&xircom_ops);
1274} 1274}
1275 1275
1276module_init(xircom_init) 1276module_init(xircom_init)
1277module_exit(xircom_exit) 1277module_exit(xircom_exit)
1278 1278