about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/arm/am79c961a.c4
-rw-r--r--drivers/net/au1000_eth.c18
-rw-r--r--drivers/net/bnx2.c579
-rw-r--r--drivers/net/bnx2.h97
-rw-r--r--drivers/net/cassini.c40
-rw-r--r--drivers/net/cassini.h2
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/gianfar.c4
-rw-r--r--drivers/net/irda/Kconfig8
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/ep7211_ir.c11
-rw-r--r--drivers/net/irda/irda-usb.c5
-rw-r--r--drivers/net/irda/irtty-sir.c19
-rw-r--r--drivers/net/irda/nsc-ircc.c320
-rw-r--r--drivers/net/irda/nsc-ircc.h2
-rw-r--r--drivers/net/irda/sir_dongle.c19
-rw-r--r--drivers/net/irda/toim3232-sir.c375
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixp2000/enp2611.c2
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/mv643xx_eth.h18
-rw-r--r--drivers/net/pcnet32.c4143
-rw-r--r--drivers/net/ppp_generic.c29
-rw-r--r--drivers/net/pppoe.c3
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/skfp/fplustm.c12
-rw-r--r--drivers/net/skge.c346
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/sky2.c616
-rw-r--r--drivers/net/sky2.h93
-rw-r--r--drivers/net/smc91x.c57
-rw-r--r--drivers/net/smc91x.h474
-rw-r--r--drivers/net/sungem.c37
-rw-r--r--drivers/net/sungem.h6
-rw-r--r--drivers/net/tg3.c744
-rw-r--r--drivers/net/tg3.h26
-rw-r--r--drivers/net/tulip/de2104x.c2
-rw-r--r--drivers/net/wan/sbni.c3
-rw-r--r--drivers/net/wireless/Kconfig9
-rw-r--r--drivers/net/wireless/airo.c455
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c13
55 files changed, 5067 insertions, 3577 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index e58d4c50c2e1..f5ee064ab6b2 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1605,7 +1605,7 @@ static void rtl8139_thread (void *_data)
1605 if (tp->watchdog_fired) { 1605 if (tp->watchdog_fired) {
1606 tp->watchdog_fired = 0; 1606 tp->watchdog_fired = 0;
1607 rtl8139_tx_timeout_task(_data); 1607 rtl8139_tx_timeout_task(_data);
1608 } else if (rtnl_shlock_nowait() == 0) { 1608 } else if (rtnl_trylock()) {
1609 rtl8139_thread_iter (dev, tp, tp->mmio_addr); 1609 rtl8139_thread_iter (dev, tp, tp->mmio_addr);
1610 rtnl_unlock (); 1610 rtnl_unlock ();
1611 } else { 1611 } else {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e0b11095b9da..e20b849a22e8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1914,7 +1914,7 @@ config E1000_DISABLE_PACKET_SPLIT
1914 depends on E1000 1914 depends on E1000
1915 help 1915 help
1916 Say Y here if you want to use the legacy receive path for PCI express 1916 Say Y here if you want to use the legacy receive path for PCI express
1917 hadware. 1917 hardware.
1918 1918
1919 If in doubt, say N. 1919 If in doubt, say N.
1920 1920
@@ -2172,6 +2172,7 @@ config BNX2
2172config SPIDER_NET 2172config SPIDER_NET
2173 tristate "Spider Gigabit Ethernet driver" 2173 tristate "Spider Gigabit Ethernet driver"
2174 depends on PCI && PPC_CELL 2174 depends on PCI && PPC_CELL
2175 select FW_LOADER
2175 help 2176 help
2176 This driver supports the Gigabit Ethernet chips present on the 2177 This driver supports the Gigabit Ethernet chips present on the
2177 Cell Processor-Based Blades from IBM. 2178 Cell Processor-Based Blades from IBM.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 00e72b12fb92..b90468aea077 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -58,8 +58,8 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
58obj-$(CONFIG_FEALNX) += fealnx.o 58obj-$(CONFIG_FEALNX) += fealnx.o
59obj-$(CONFIG_TIGON3) += tg3.o 59obj-$(CONFIG_TIGON3) += tg3.o
60obj-$(CONFIG_BNX2) += bnx2.o 60obj-$(CONFIG_BNX2) += bnx2.o
61spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o 61spidernet-y += spider_net.o spider_net_ethtool.o
62obj-$(CONFIG_SPIDER_NET) += spidernet.o 62obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
63obj-$(CONFIG_TC35815) += tc35815.o 63obj-$(CONFIG_TC35815) += tc35815.o
64obj-$(CONFIG_SKGE) += skge.o 64obj-$(CONFIG_SKGE) += skge.o
65obj-$(CONFIG_SKY2) += sky2.o 65obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 53e3afc1b7b7..09d5c3f26985 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -696,7 +696,9 @@ static int __init am79c961_probe(struct platform_device *pdev)
696 dev->base_addr = res->start; 696 dev->base_addr = res->start;
697 dev->irq = platform_get_irq(pdev, 0); 697 dev->irq = platform_get_irq(pdev, 0);
698 698
699 ret = -ENODEV; 699 ret = -ENODEV;
700 if (dev->irq < 0)
701 goto nodev;
700 if (!request_region(dev->base_addr, 0x18, dev->name)) 702 if (!request_region(dev->base_addr, 0x18, dev->name))
701 goto nodev; 703 goto nodev;
702 704
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index cd0b1dccfb61..1363083b4d83 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -90,8 +90,6 @@ static void au1000_tx_timeout(struct net_device *);
90static int au1000_set_config(struct net_device *dev, struct ifmap *map); 90static int au1000_set_config(struct net_device *dev, struct ifmap *map);
91static void set_rx_mode(struct net_device *); 91static void set_rx_mode(struct net_device *);
92static struct net_device_stats *au1000_get_stats(struct net_device *); 92static struct net_device_stats *au1000_get_stats(struct net_device *);
93static inline void update_tx_stats(struct net_device *, u32, u32);
94static inline void update_rx_stats(struct net_device *, u32);
95static void au1000_timer(unsigned long); 93static void au1000_timer(unsigned long);
96static int au1000_ioctl(struct net_device *, struct ifreq *, int); 94static int au1000_ioctl(struct net_device *, struct ifreq *, int);
97static int mdio_read(struct net_device *, int, int); 95static int mdio_read(struct net_device *, int, int);
@@ -1825,16 +1823,11 @@ static void __exit au1000_cleanup_module(void)
1825 } 1823 }
1826} 1824}
1827 1825
1828 1826static void update_tx_stats(struct net_device *dev, u32 status)
1829static inline void
1830update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
1831{ 1827{
1832 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1828 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1833 struct net_device_stats *ps = &aup->stats; 1829 struct net_device_stats *ps = &aup->stats;
1834 1830
1835 ps->tx_packets++;
1836 ps->tx_bytes += pkt_len;
1837
1838 if (status & TX_FRAME_ABORTED) { 1831 if (status & TX_FRAME_ABORTED) {
1839 if (dev->if_port == IF_PORT_100BASEFX) { 1832 if (dev->if_port == IF_PORT_100BASEFX) {
1840 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { 1833 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
@@ -1867,7 +1860,7 @@ static void au1000_tx_ack(struct net_device *dev)
1867 ptxd = aup->tx_dma_ring[aup->tx_tail]; 1860 ptxd = aup->tx_dma_ring[aup->tx_tail];
1868 1861
1869 while (ptxd->buff_stat & TX_T_DONE) { 1862 while (ptxd->buff_stat & TX_T_DONE) {
1870 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff); 1863 update_tx_stats(dev, ptxd->status);
1871 ptxd->buff_stat &= ~TX_T_DONE; 1864 ptxd->buff_stat &= ~TX_T_DONE;
1872 ptxd->len = 0; 1865 ptxd->len = 0;
1873 au_sync(); 1866 au_sync();
@@ -1889,6 +1882,7 @@ static void au1000_tx_ack(struct net_device *dev)
1889static int au1000_tx(struct sk_buff *skb, struct net_device *dev) 1882static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1890{ 1883{
1891 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1884 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1885 struct net_device_stats *ps = &aup->stats;
1892 volatile tx_dma_t *ptxd; 1886 volatile tx_dma_t *ptxd;
1893 u32 buff_stat; 1887 u32 buff_stat;
1894 db_dest_t *pDB; 1888 db_dest_t *pDB;
@@ -1908,7 +1902,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1908 return 1; 1902 return 1;
1909 } 1903 }
1910 else if (buff_stat & TX_T_DONE) { 1904 else if (buff_stat & TX_T_DONE) {
1911 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff); 1905 update_tx_stats(dev, ptxd->status);
1912 ptxd->len = 0; 1906 ptxd->len = 0;
1913 } 1907 }
1914 1908
@@ -1928,6 +1922,9 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1928 else 1922 else
1929 ptxd->len = skb->len; 1923 ptxd->len = skb->len;
1930 1924
1925 ps->tx_packets++;
1926 ps->tx_bytes += ptxd->len;
1927
1931 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; 1928 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1932 au_sync(); 1929 au_sync();
1933 dev_kfree_skb(skb); 1930 dev_kfree_skb(skb);
@@ -1936,7 +1933,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1936 return 0; 1933 return 0;
1937} 1934}
1938 1935
1939
1940static inline void update_rx_stats(struct net_device *dev, u32 status) 1936static inline void update_rx_stats(struct net_device *dev, u32 status)
1941{ 1937{
1942 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1938 struct au1000_private *aup = (struct au1000_private *) dev->priv;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b787b6582e50..2671da20a496 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -9,13 +9,54 @@
9 * Written by: Michael Chan (mchan@broadcom.com) 9 * Written by: Michael Chan (mchan@broadcom.com)
10 */ 10 */
11 11
12#include <linux/config.h>
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
30#include <asm/bitops.h>
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
51#include <linux/cache.h>
52
12#include "bnx2.h" 53#include "bnx2.h"
13#include "bnx2_fw.h" 54#include "bnx2_fw.h"
14 55
15#define DRV_MODULE_NAME "bnx2" 56#define DRV_MODULE_NAME "bnx2"
16#define PFX DRV_MODULE_NAME ": " 57#define PFX DRV_MODULE_NAME ": "
17#define DRV_MODULE_VERSION "1.4.31" 58#define DRV_MODULE_VERSION "1.4.39"
18#define DRV_MODULE_RELDATE "January 19, 2006" 59#define DRV_MODULE_RELDATE "March 22, 2006"
19 60
20#define RUN_AT(x) (jiffies + (x)) 61#define RUN_AT(x) (jiffies + (x))
21 62
@@ -313,8 +354,6 @@ bnx2_disable_int(struct bnx2 *bp)
313static void 354static void
314bnx2_enable_int(struct bnx2 *bp) 355bnx2_enable_int(struct bnx2 *bp)
315{ 356{
316 u32 val;
317
318 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 357 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
319 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 358 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
320 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); 359 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
@@ -322,8 +361,7 @@ bnx2_enable_int(struct bnx2 *bp)
322 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 361 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
323 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx); 362 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
324 363
325 val = REG_RD(bp, BNX2_HC_COMMAND); 364 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
326 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
327} 365}
328 366
329static void 367static void
@@ -360,15 +398,13 @@ bnx2_netif_start(struct bnx2 *bp)
360static void 398static void
361bnx2_free_mem(struct bnx2 *bp) 399bnx2_free_mem(struct bnx2 *bp)
362{ 400{
363 if (bp->stats_blk) { 401 int i;
364 pci_free_consistent(bp->pdev, sizeof(struct statistics_block), 402
365 bp->stats_blk, bp->stats_blk_mapping);
366 bp->stats_blk = NULL;
367 }
368 if (bp->status_blk) { 403 if (bp->status_blk) {
369 pci_free_consistent(bp->pdev, sizeof(struct status_block), 404 pci_free_consistent(bp->pdev, bp->status_stats_size,
370 bp->status_blk, bp->status_blk_mapping); 405 bp->status_blk, bp->status_blk_mapping);
371 bp->status_blk = NULL; 406 bp->status_blk = NULL;
407 bp->stats_blk = NULL;
372 } 408 }
373 if (bp->tx_desc_ring) { 409 if (bp->tx_desc_ring) {
374 pci_free_consistent(bp->pdev, 410 pci_free_consistent(bp->pdev,
@@ -378,25 +414,28 @@ bnx2_free_mem(struct bnx2 *bp)
378 } 414 }
379 kfree(bp->tx_buf_ring); 415 kfree(bp->tx_buf_ring);
380 bp->tx_buf_ring = NULL; 416 bp->tx_buf_ring = NULL;
381 if (bp->rx_desc_ring) { 417 for (i = 0; i < bp->rx_max_ring; i++) {
382 pci_free_consistent(bp->pdev, 418 if (bp->rx_desc_ring[i])
383 sizeof(struct rx_bd) * RX_DESC_CNT, 419 pci_free_consistent(bp->pdev,
384 bp->rx_desc_ring, bp->rx_desc_mapping); 420 sizeof(struct rx_bd) * RX_DESC_CNT,
385 bp->rx_desc_ring = NULL; 421 bp->rx_desc_ring[i],
386 } 422 bp->rx_desc_mapping[i]);
387 kfree(bp->rx_buf_ring); 423 bp->rx_desc_ring[i] = NULL;
424 }
425 vfree(bp->rx_buf_ring);
388 bp->rx_buf_ring = NULL; 426 bp->rx_buf_ring = NULL;
389} 427}
390 428
391static int 429static int
392bnx2_alloc_mem(struct bnx2 *bp) 430bnx2_alloc_mem(struct bnx2 *bp)
393{ 431{
394 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, 432 int i, status_blk_size;
395 GFP_KERNEL); 433
434 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
435 GFP_KERNEL);
396 if (bp->tx_buf_ring == NULL) 436 if (bp->tx_buf_ring == NULL)
397 return -ENOMEM; 437 return -ENOMEM;
398 438
399 memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
400 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, 439 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
401 sizeof(struct tx_bd) * 440 sizeof(struct tx_bd) *
402 TX_DESC_CNT, 441 TX_DESC_CNT,
@@ -404,34 +443,40 @@ bnx2_alloc_mem(struct bnx2 *bp)
404 if (bp->tx_desc_ring == NULL) 443 if (bp->tx_desc_ring == NULL)
405 goto alloc_mem_err; 444 goto alloc_mem_err;
406 445
407 bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT, 446 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
408 GFP_KERNEL); 447 bp->rx_max_ring);
409 if (bp->rx_buf_ring == NULL) 448 if (bp->rx_buf_ring == NULL)
410 goto alloc_mem_err; 449 goto alloc_mem_err;
411 450
412 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT); 451 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
413 bp->rx_desc_ring = pci_alloc_consistent(bp->pdev, 452 bp->rx_max_ring);
414 sizeof(struct rx_bd) * 453
415 RX_DESC_CNT, 454 for (i = 0; i < bp->rx_max_ring; i++) {
416 &bp->rx_desc_mapping); 455 bp->rx_desc_ring[i] =
417 if (bp->rx_desc_ring == NULL) 456 pci_alloc_consistent(bp->pdev,
418 goto alloc_mem_err; 457 sizeof(struct rx_bd) * RX_DESC_CNT,
458 &bp->rx_desc_mapping[i]);
459 if (bp->rx_desc_ring[i] == NULL)
460 goto alloc_mem_err;
461
462 }
463
464 /* Combine status and statistics blocks into one allocation. */
465 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
466 bp->status_stats_size = status_blk_size +
467 sizeof(struct statistics_block);
419 468
420 bp->status_blk = pci_alloc_consistent(bp->pdev, 469 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
421 sizeof(struct status_block),
422 &bp->status_blk_mapping); 470 &bp->status_blk_mapping);
423 if (bp->status_blk == NULL) 471 if (bp->status_blk == NULL)
424 goto alloc_mem_err; 472 goto alloc_mem_err;
425 473
426 memset(bp->status_blk, 0, sizeof(struct status_block)); 474 memset(bp->status_blk, 0, bp->status_stats_size);
427 475
428 bp->stats_blk = pci_alloc_consistent(bp->pdev, 476 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
429 sizeof(struct statistics_block), 477 status_blk_size);
430 &bp->stats_blk_mapping);
431 if (bp->stats_blk == NULL)
432 goto alloc_mem_err;
433 478
434 memset(bp->stats_blk, 0, sizeof(struct statistics_block)); 479 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
435 480
436 return 0; 481 return 0;
437 482
@@ -1520,7 +1565,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1520 struct sk_buff *skb; 1565 struct sk_buff *skb;
1521 struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; 1566 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1522 dma_addr_t mapping; 1567 dma_addr_t mapping;
1523 struct rx_bd *rxbd = &bp->rx_desc_ring[index]; 1568 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1524 unsigned long align; 1569 unsigned long align;
1525 1570
1526 skb = dev_alloc_skb(bp->rx_buf_size); 1571 skb = dev_alloc_skb(bp->rx_buf_size);
@@ -1656,23 +1701,30 @@ static inline void
1656bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, 1701bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1657 u16 cons, u16 prod) 1702 u16 cons, u16 prod)
1658{ 1703{
1659 struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons]; 1704 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1660 struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod]; 1705 struct rx_bd *cons_bd, *prod_bd;
1661 struct rx_bd *cons_bd = &bp->rx_desc_ring[cons]; 1706
1662 struct rx_bd *prod_bd = &bp->rx_desc_ring[prod]; 1707 cons_rx_buf = &bp->rx_buf_ring[cons];
1708 prod_rx_buf = &bp->rx_buf_ring[prod];
1663 1709
1664 pci_dma_sync_single_for_device(bp->pdev, 1710 pci_dma_sync_single_for_device(bp->pdev,
1665 pci_unmap_addr(cons_rx_buf, mapping), 1711 pci_unmap_addr(cons_rx_buf, mapping),
1666 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 1712 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1667 1713
1668 prod_rx_buf->skb = cons_rx_buf->skb; 1714 bp->rx_prod_bseq += bp->rx_buf_use_size;
1669 pci_unmap_addr_set(prod_rx_buf, mapping,
1670 pci_unmap_addr(cons_rx_buf, mapping));
1671 1715
1672 memcpy(prod_bd, cons_bd, 8); 1716 prod_rx_buf->skb = skb;
1673 1717
1674 bp->rx_prod_bseq += bp->rx_buf_use_size; 1718 if (cons == prod)
1719 return;
1675 1720
1721 pci_unmap_addr_set(prod_rx_buf, mapping,
1722 pci_unmap_addr(cons_rx_buf, mapping));
1723
1724 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1725 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1726 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1727 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1676} 1728}
1677 1729
1678static int 1730static int
@@ -1699,14 +1751,19 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
1699 u32 status; 1751 u32 status;
1700 struct sw_bd *rx_buf; 1752 struct sw_bd *rx_buf;
1701 struct sk_buff *skb; 1753 struct sk_buff *skb;
1754 dma_addr_t dma_addr;
1702 1755
1703 sw_ring_cons = RX_RING_IDX(sw_cons); 1756 sw_ring_cons = RX_RING_IDX(sw_cons);
1704 sw_ring_prod = RX_RING_IDX(sw_prod); 1757 sw_ring_prod = RX_RING_IDX(sw_prod);
1705 1758
1706 rx_buf = &bp->rx_buf_ring[sw_ring_cons]; 1759 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1707 skb = rx_buf->skb; 1760 skb = rx_buf->skb;
1708 pci_dma_sync_single_for_cpu(bp->pdev, 1761
1709 pci_unmap_addr(rx_buf, mapping), 1762 rx_buf->skb = NULL;
1763
1764 dma_addr = pci_unmap_addr(rx_buf, mapping);
1765
1766 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1710 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 1767 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1711 1768
1712 rx_hdr = (struct l2_fhdr *) skb->data; 1769 rx_hdr = (struct l2_fhdr *) skb->data;
@@ -1747,8 +1804,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
1747 skb = new_skb; 1804 skb = new_skb;
1748 } 1805 }
1749 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { 1806 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1750 pci_unmap_single(bp->pdev, 1807 pci_unmap_single(bp->pdev, dma_addr,
1751 pci_unmap_addr(rx_buf, mapping),
1752 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1808 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1753 1809
1754 skb_reserve(skb, bp->rx_offset); 1810 skb_reserve(skb, bp->rx_offset);
@@ -1794,8 +1850,6 @@ reuse_rx:
1794 rx_pkt++; 1850 rx_pkt++;
1795 1851
1796next_rx: 1852next_rx:
1797 rx_buf->skb = NULL;
1798
1799 sw_cons = NEXT_RX_BD(sw_cons); 1853 sw_cons = NEXT_RX_BD(sw_cons);
1800 sw_prod = NEXT_RX_BD(sw_prod); 1854 sw_prod = NEXT_RX_BD(sw_prod);
1801 1855
@@ -1906,6 +1960,13 @@ bnx2_poll(struct net_device *dev, int *budget)
1906 spin_lock(&bp->phy_lock); 1960 spin_lock(&bp->phy_lock);
1907 bnx2_phy_int(bp); 1961 bnx2_phy_int(bp);
1908 spin_unlock(&bp->phy_lock); 1962 spin_unlock(&bp->phy_lock);
1963
1964 /* This is needed to take care of transient status
1965 * during link changes.
1966 */
1967 REG_WR(bp, BNX2_HC_COMMAND,
1968 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1969 REG_RD(bp, BNX2_HC_COMMAND);
1909 } 1970 }
1910 1971
1911 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) 1972 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
@@ -3287,6 +3348,8 @@ bnx2_init_chip(struct bnx2 *bp)
3287 3348
3288 udelay(20); 3349 udelay(20);
3289 3350
3351 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3352
3290 return rc; 3353 return rc;
3291} 3354}
3292 3355
@@ -3340,27 +3403,35 @@ bnx2_init_rx_ring(struct bnx2 *bp)
3340 bp->hw_rx_cons = 0; 3403 bp->hw_rx_cons = 0;
3341 bp->rx_prod_bseq = 0; 3404 bp->rx_prod_bseq = 0;
3342 3405
3343 rxbd = &bp->rx_desc_ring[0]; 3406 for (i = 0; i < bp->rx_max_ring; i++) {
3344 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3407 int j;
3345 rxbd->rx_bd_len = bp->rx_buf_use_size;
3346 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3347 }
3348 3408
3349 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32; 3409 rxbd = &bp->rx_desc_ring[i][0];
3350 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff; 3410 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3411 rxbd->rx_bd_len = bp->rx_buf_use_size;
3412 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3413 }
3414 if (i == (bp->rx_max_ring - 1))
3415 j = 0;
3416 else
3417 j = i + 1;
3418 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3419 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3420 0xffffffff;
3421 }
3351 3422
3352 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; 3423 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3353 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; 3424 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3354 val |= 0x02 << 8; 3425 val |= 0x02 << 8;
3355 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); 3426 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3356 3427
3357 val = (u64) bp->rx_desc_mapping >> 32; 3428 val = (u64) bp->rx_desc_mapping[0] >> 32;
3358 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); 3429 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3359 3430
3360 val = (u64) bp->rx_desc_mapping & 0xffffffff; 3431 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3361 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); 3432 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3362 3433
3363 for ( ;ring_prod < bp->rx_ring_size; ) { 3434 for (i = 0; i < bp->rx_ring_size; i++) {
3364 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { 3435 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3365 break; 3436 break;
3366 } 3437 }
@@ -3375,6 +3446,29 @@ bnx2_init_rx_ring(struct bnx2 *bp)
3375} 3446}
3376 3447
3377static void 3448static void
3449bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3450{
3451 u32 num_rings, max;
3452
3453 bp->rx_ring_size = size;
3454 num_rings = 1;
3455 while (size > MAX_RX_DESC_CNT) {
3456 size -= MAX_RX_DESC_CNT;
3457 num_rings++;
3458 }
3459 /* round to next power of 2 */
3460 max = MAX_RX_RINGS;
3461 while ((max & num_rings) == 0)
3462 max >>= 1;
3463
3464 if (num_rings != max)
3465 max <<= 1;
3466
3467 bp->rx_max_ring = max;
3468 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3469}
3470
3471static void
3378bnx2_free_tx_skbs(struct bnx2 *bp) 3472bnx2_free_tx_skbs(struct bnx2 *bp)
3379{ 3473{
3380 int i; 3474 int i;
@@ -3419,7 +3513,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
3419 if (bp->rx_buf_ring == NULL) 3513 if (bp->rx_buf_ring == NULL)
3420 return; 3514 return;
3421 3515
3422 for (i = 0; i < RX_DESC_CNT; i++) { 3516 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3423 struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; 3517 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3424 struct sk_buff *skb = rx_buf->skb; 3518 struct sk_buff *skb = rx_buf->skb;
3425 3519
@@ -3506,74 +3600,9 @@ bnx2_test_registers(struct bnx2 *bp)
3506 { 0x0c00, 0, 0x00000000, 0x00000001 }, 3600 { 0x0c00, 0, 0x00000000, 0x00000001 },
3507 { 0x0c04, 0, 0x00000000, 0x03ff0001 }, 3601 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3508 { 0x0c08, 0, 0x0f0ff073, 0x00000000 }, 3602 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3509 { 0x0c0c, 0, 0x00ffffff, 0x00000000 },
3510 { 0x0c30, 0, 0x00000000, 0xffffffff },
3511 { 0x0c34, 0, 0x00000000, 0xffffffff },
3512 { 0x0c38, 0, 0x00000000, 0xffffffff },
3513 { 0x0c3c, 0, 0x00000000, 0xffffffff },
3514 { 0x0c40, 0, 0x00000000, 0xffffffff },
3515 { 0x0c44, 0, 0x00000000, 0xffffffff },
3516 { 0x0c48, 0, 0x00000000, 0x0007ffff },
3517 { 0x0c4c, 0, 0x00000000, 0xffffffff },
3518 { 0x0c50, 0, 0x00000000, 0xffffffff },
3519 { 0x0c54, 0, 0x00000000, 0xffffffff },
3520 { 0x0c58, 0, 0x00000000, 0xffffffff },
3521 { 0x0c5c, 0, 0x00000000, 0xffffffff },
3522 { 0x0c60, 0, 0x00000000, 0xffffffff },
3523 { 0x0c64, 0, 0x00000000, 0xffffffff },
3524 { 0x0c68, 0, 0x00000000, 0xffffffff },
3525 { 0x0c6c, 0, 0x00000000, 0xffffffff },
3526 { 0x0c70, 0, 0x00000000, 0xffffffff },
3527 { 0x0c74, 0, 0x00000000, 0xffffffff },
3528 { 0x0c78, 0, 0x00000000, 0xffffffff },
3529 { 0x0c7c, 0, 0x00000000, 0xffffffff },
3530 { 0x0c80, 0, 0x00000000, 0xffffffff },
3531 { 0x0c84, 0, 0x00000000, 0xffffffff },
3532 { 0x0c88, 0, 0x00000000, 0xffffffff },
3533 { 0x0c8c, 0, 0x00000000, 0xffffffff },
3534 { 0x0c90, 0, 0x00000000, 0xffffffff },
3535 { 0x0c94, 0, 0x00000000, 0xffffffff },
3536 { 0x0c98, 0, 0x00000000, 0xffffffff },
3537 { 0x0c9c, 0, 0x00000000, 0xffffffff },
3538 { 0x0ca0, 0, 0x00000000, 0xffffffff },
3539 { 0x0ca4, 0, 0x00000000, 0xffffffff },
3540 { 0x0ca8, 0, 0x00000000, 0x0007ffff },
3541 { 0x0cac, 0, 0x00000000, 0xffffffff },
3542 { 0x0cb0, 0, 0x00000000, 0xffffffff },
3543 { 0x0cb4, 0, 0x00000000, 0xffffffff },
3544 { 0x0cb8, 0, 0x00000000, 0xffffffff },
3545 { 0x0cbc, 0, 0x00000000, 0xffffffff },
3546 { 0x0cc0, 0, 0x00000000, 0xffffffff },
3547 { 0x0cc4, 0, 0x00000000, 0xffffffff },
3548 { 0x0cc8, 0, 0x00000000, 0xffffffff },
3549 { 0x0ccc, 0, 0x00000000, 0xffffffff },
3550 { 0x0cd0, 0, 0x00000000, 0xffffffff },
3551 { 0x0cd4, 0, 0x00000000, 0xffffffff },
3552 { 0x0cd8, 0, 0x00000000, 0xffffffff },
3553 { 0x0cdc, 0, 0x00000000, 0xffffffff },
3554 { 0x0ce0, 0, 0x00000000, 0xffffffff },
3555 { 0x0ce4, 0, 0x00000000, 0xffffffff },
3556 { 0x0ce8, 0, 0x00000000, 0xffffffff },
3557 { 0x0cec, 0, 0x00000000, 0xffffffff },
3558 { 0x0cf0, 0, 0x00000000, 0xffffffff },
3559 { 0x0cf4, 0, 0x00000000, 0xffffffff },
3560 { 0x0cf8, 0, 0x00000000, 0xffffffff },
3561 { 0x0cfc, 0, 0x00000000, 0xffffffff },
3562 { 0x0d00, 0, 0x00000000, 0xffffffff },
3563 { 0x0d04, 0, 0x00000000, 0xffffffff },
3564 3603
3565 { 0x1000, 0, 0x00000000, 0x00000001 }, 3604 { 0x1000, 0, 0x00000000, 0x00000001 },
3566 { 0x1004, 0, 0x00000000, 0x000f0001 }, 3605 { 0x1004, 0, 0x00000000, 0x000f0001 },
3567 { 0x1044, 0, 0x00000000, 0xffc003ff },
3568 { 0x1080, 0, 0x00000000, 0x0001ffff },
3569 { 0x1084, 0, 0x00000000, 0xffffffff },
3570 { 0x1088, 0, 0x00000000, 0xffffffff },
3571 { 0x108c, 0, 0x00000000, 0xffffffff },
3572 { 0x1090, 0, 0x00000000, 0xffffffff },
3573 { 0x1094, 0, 0x00000000, 0xffffffff },
3574 { 0x1098, 0, 0x00000000, 0xffffffff },
3575 { 0x109c, 0, 0x00000000, 0xffffffff },
3576 { 0x10a0, 0, 0x00000000, 0xffffffff },
3577 3606
3578 { 0x1408, 0, 0x01c00800, 0x00000000 }, 3607 { 0x1408, 0, 0x01c00800, 0x00000000 },
3579 { 0x149c, 0, 0x8000ffff, 0x00000000 }, 3608 { 0x149c, 0, 0x8000ffff, 0x00000000 },
@@ -3585,111 +3614,9 @@ bnx2_test_registers(struct bnx2 *bp)
3585 { 0x14c4, 0, 0x00003fff, 0x00000000 }, 3614 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3586 { 0x14cc, 0, 0x00000000, 0x00000001 }, 3615 { 0x14cc, 0, 0x00000000, 0x00000001 },
3587 { 0x14d0, 0, 0xffffffff, 0x00000000 }, 3616 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3588 { 0x1500, 0, 0x00000000, 0xffffffff },
3589 { 0x1504, 0, 0x00000000, 0xffffffff },
3590 { 0x1508, 0, 0x00000000, 0xffffffff },
3591 { 0x150c, 0, 0x00000000, 0xffffffff },
3592 { 0x1510, 0, 0x00000000, 0xffffffff },
3593 { 0x1514, 0, 0x00000000, 0xffffffff },
3594 { 0x1518, 0, 0x00000000, 0xffffffff },
3595 { 0x151c, 0, 0x00000000, 0xffffffff },
3596 { 0x1520, 0, 0x00000000, 0xffffffff },
3597 { 0x1524, 0, 0x00000000, 0xffffffff },
3598 { 0x1528, 0, 0x00000000, 0xffffffff },
3599 { 0x152c, 0, 0x00000000, 0xffffffff },
3600 { 0x1530, 0, 0x00000000, 0xffffffff },
3601 { 0x1534, 0, 0x00000000, 0xffffffff },
3602 { 0x1538, 0, 0x00000000, 0xffffffff },
3603 { 0x153c, 0, 0x00000000, 0xffffffff },
3604 { 0x1540, 0, 0x00000000, 0xffffffff },
3605 { 0x1544, 0, 0x00000000, 0xffffffff },
3606 { 0x1548, 0, 0x00000000, 0xffffffff },
3607 { 0x154c, 0, 0x00000000, 0xffffffff },
3608 { 0x1550, 0, 0x00000000, 0xffffffff },
3609 { 0x1554, 0, 0x00000000, 0xffffffff },
3610 { 0x1558, 0, 0x00000000, 0xffffffff },
3611 { 0x1600, 0, 0x00000000, 0xffffffff },
3612 { 0x1604, 0, 0x00000000, 0xffffffff },
3613 { 0x1608, 0, 0x00000000, 0xffffffff },
3614 { 0x160c, 0, 0x00000000, 0xffffffff },
3615 { 0x1610, 0, 0x00000000, 0xffffffff },
3616 { 0x1614, 0, 0x00000000, 0xffffffff },
3617 { 0x1618, 0, 0x00000000, 0xffffffff },
3618 { 0x161c, 0, 0x00000000, 0xffffffff },
3619 { 0x1620, 0, 0x00000000, 0xffffffff },
3620 { 0x1624, 0, 0x00000000, 0xffffffff },
3621 { 0x1628, 0, 0x00000000, 0xffffffff },
3622 { 0x162c, 0, 0x00000000, 0xffffffff },
3623 { 0x1630, 0, 0x00000000, 0xffffffff },
3624 { 0x1634, 0, 0x00000000, 0xffffffff },
3625 { 0x1638, 0, 0x00000000, 0xffffffff },
3626 { 0x163c, 0, 0x00000000, 0xffffffff },
3627 { 0x1640, 0, 0x00000000, 0xffffffff },
3628 { 0x1644, 0, 0x00000000, 0xffffffff },
3629 { 0x1648, 0, 0x00000000, 0xffffffff },
3630 { 0x164c, 0, 0x00000000, 0xffffffff },
3631 { 0x1650, 0, 0x00000000, 0xffffffff },
3632 { 0x1654, 0, 0x00000000, 0xffffffff },
3633 3617
3634 { 0x1800, 0, 0x00000000, 0x00000001 }, 3618 { 0x1800, 0, 0x00000000, 0x00000001 },
3635 { 0x1804, 0, 0x00000000, 0x00000003 }, 3619 { 0x1804, 0, 0x00000000, 0x00000003 },
3636 { 0x1840, 0, 0x00000000, 0xffffffff },
3637 { 0x1844, 0, 0x00000000, 0xffffffff },
3638 { 0x1848, 0, 0x00000000, 0xffffffff },
3639 { 0x184c, 0, 0x00000000, 0xffffffff },
3640 { 0x1850, 0, 0x00000000, 0xffffffff },
3641 { 0x1900, 0, 0x7ffbffff, 0x00000000 },
3642 { 0x1904, 0, 0xffffffff, 0x00000000 },
3643 { 0x190c, 0, 0xffffffff, 0x00000000 },
3644 { 0x1914, 0, 0xffffffff, 0x00000000 },
3645 { 0x191c, 0, 0xffffffff, 0x00000000 },
3646 { 0x1924, 0, 0xffffffff, 0x00000000 },
3647 { 0x192c, 0, 0xffffffff, 0x00000000 },
3648 { 0x1934, 0, 0xffffffff, 0x00000000 },
3649 { 0x193c, 0, 0xffffffff, 0x00000000 },
3650 { 0x1944, 0, 0xffffffff, 0x00000000 },
3651 { 0x194c, 0, 0xffffffff, 0x00000000 },
3652 { 0x1954, 0, 0xffffffff, 0x00000000 },
3653 { 0x195c, 0, 0xffffffff, 0x00000000 },
3654 { 0x1964, 0, 0xffffffff, 0x00000000 },
3655 { 0x196c, 0, 0xffffffff, 0x00000000 },
3656 { 0x1974, 0, 0xffffffff, 0x00000000 },
3657 { 0x197c, 0, 0xffffffff, 0x00000000 },
3658 { 0x1980, 0, 0x0700ffff, 0x00000000 },
3659
3660 { 0x1c00, 0, 0x00000000, 0x00000001 },
3661 { 0x1c04, 0, 0x00000000, 0x00000003 },
3662 { 0x1c08, 0, 0x0000000f, 0x00000000 },
3663 { 0x1c40, 0, 0x00000000, 0xffffffff },
3664 { 0x1c44, 0, 0x00000000, 0xffffffff },
3665 { 0x1c48, 0, 0x00000000, 0xffffffff },
3666 { 0x1c4c, 0, 0x00000000, 0xffffffff },
3667 { 0x1c50, 0, 0x00000000, 0xffffffff },
3668 { 0x1d00, 0, 0x7ffbffff, 0x00000000 },
3669 { 0x1d04, 0, 0xffffffff, 0x00000000 },
3670 { 0x1d0c, 0, 0xffffffff, 0x00000000 },
3671 { 0x1d14, 0, 0xffffffff, 0x00000000 },
3672 { 0x1d1c, 0, 0xffffffff, 0x00000000 },
3673 { 0x1d24, 0, 0xffffffff, 0x00000000 },
3674 { 0x1d2c, 0, 0xffffffff, 0x00000000 },
3675 { 0x1d34, 0, 0xffffffff, 0x00000000 },
3676 { 0x1d3c, 0, 0xffffffff, 0x00000000 },
3677 { 0x1d44, 0, 0xffffffff, 0x00000000 },
3678 { 0x1d4c, 0, 0xffffffff, 0x00000000 },
3679 { 0x1d54, 0, 0xffffffff, 0x00000000 },
3680 { 0x1d5c, 0, 0xffffffff, 0x00000000 },
3681 { 0x1d64, 0, 0xffffffff, 0x00000000 },
3682 { 0x1d6c, 0, 0xffffffff, 0x00000000 },
3683 { 0x1d74, 0, 0xffffffff, 0x00000000 },
3684 { 0x1d7c, 0, 0xffffffff, 0x00000000 },
3685 { 0x1d80, 0, 0x0700ffff, 0x00000000 },
3686
3687 { 0x2004, 0, 0x00000000, 0x0337000f },
3688 { 0x2008, 0, 0xffffffff, 0x00000000 },
3689 { 0x200c, 0, 0xffffffff, 0x00000000 },
3690 { 0x2010, 0, 0xffffffff, 0x00000000 },
3691 { 0x2014, 0, 0x801fff80, 0x00000000 },
3692 { 0x2018, 0, 0x000003ff, 0x00000000 },
3693 3620
3694 { 0x2800, 0, 0x00000000, 0x00000001 }, 3621 { 0x2800, 0, 0x00000000, 0x00000001 },
3695 { 0x2804, 0, 0x00000000, 0x00003f01 }, 3622 { 0x2804, 0, 0x00000000, 0x00003f01 },
@@ -3707,16 +3634,6 @@ bnx2_test_registers(struct bnx2 *bp)
3707 { 0x2c00, 0, 0x00000000, 0x00000011 }, 3634 { 0x2c00, 0, 0x00000000, 0x00000011 },
3708 { 0x2c04, 0, 0x00000000, 0x00030007 }, 3635 { 0x2c04, 0, 0x00000000, 0x00030007 },
3709 3636
3710 { 0x3000, 0, 0x00000000, 0x00000001 },
3711 { 0x3004, 0, 0x00000000, 0x007007ff },
3712 { 0x3008, 0, 0x00000003, 0x00000000 },
3713 { 0x300c, 0, 0xffffffff, 0x00000000 },
3714 { 0x3010, 0, 0xffffffff, 0x00000000 },
3715 { 0x3014, 0, 0xffffffff, 0x00000000 },
3716 { 0x3034, 0, 0xffffffff, 0x00000000 },
3717 { 0x3038, 0, 0xffffffff, 0x00000000 },
3718 { 0x3050, 0, 0x00000001, 0x00000000 },
3719
3720 { 0x3c00, 0, 0x00000000, 0x00000001 }, 3637 { 0x3c00, 0, 0x00000000, 0x00000001 },
3721 { 0x3c04, 0, 0x00000000, 0x00070000 }, 3638 { 0x3c04, 0, 0x00000000, 0x00070000 },
3722 { 0x3c08, 0, 0x00007f71, 0x07f00000 }, 3639 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
@@ -3726,88 +3643,11 @@ bnx2_test_registers(struct bnx2 *bp)
3726 { 0x3c18, 0, 0x00000000, 0xffffffff }, 3643 { 0x3c18, 0, 0x00000000, 0xffffffff },
3727 { 0x3c1c, 0, 0xfffff000, 0x00000000 }, 3644 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3728 { 0x3c20, 0, 0xffffff00, 0x00000000 }, 3645 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3729 { 0x3c24, 0, 0xffffffff, 0x00000000 },
3730 { 0x3c28, 0, 0xffffffff, 0x00000000 },
3731 { 0x3c2c, 0, 0xffffffff, 0x00000000 },
3732 { 0x3c30, 0, 0xffffffff, 0x00000000 },
3733 { 0x3c34, 0, 0xffffffff, 0x00000000 },
3734 { 0x3c38, 0, 0xffffffff, 0x00000000 },
3735 { 0x3c3c, 0, 0xffffffff, 0x00000000 },
3736 { 0x3c40, 0, 0xffffffff, 0x00000000 },
3737 { 0x3c44, 0, 0xffffffff, 0x00000000 },
3738 { 0x3c48, 0, 0xffffffff, 0x00000000 },
3739 { 0x3c4c, 0, 0xffffffff, 0x00000000 },
3740 { 0x3c50, 0, 0xffffffff, 0x00000000 },
3741 { 0x3c54, 0, 0xffffffff, 0x00000000 },
3742 { 0x3c58, 0, 0xffffffff, 0x00000000 },
3743 { 0x3c5c, 0, 0xffffffff, 0x00000000 },
3744 { 0x3c60, 0, 0xffffffff, 0x00000000 },
3745 { 0x3c64, 0, 0xffffffff, 0x00000000 },
3746 { 0x3c68, 0, 0xffffffff, 0x00000000 },
3747 { 0x3c6c, 0, 0xffffffff, 0x00000000 },
3748 { 0x3c70, 0, 0xffffffff, 0x00000000 },
3749 { 0x3c74, 0, 0x0000003f, 0x00000000 },
3750 { 0x3c78, 0, 0x00000000, 0x00000000 },
3751 { 0x3c7c, 0, 0x00000000, 0x00000000 },
3752 { 0x3c80, 0, 0x3fffffff, 0x00000000 },
3753 { 0x3c84, 0, 0x0000003f, 0x00000000 },
3754 { 0x3c88, 0, 0x00000000, 0xffffffff },
3755 { 0x3c8c, 0, 0x00000000, 0xffffffff },
3756
3757 { 0x4000, 0, 0x00000000, 0x00000001 },
3758 { 0x4004, 0, 0x00000000, 0x00030000 },
3759 { 0x4008, 0, 0x00000ff0, 0x00000000 },
3760 { 0x400c, 0, 0xffffffff, 0x00000000 },
3761 { 0x4088, 0, 0x00000000, 0x00070303 },
3762
3763 { 0x4400, 0, 0x00000000, 0x00000001 },
3764 { 0x4404, 0, 0x00000000, 0x00003f01 },
3765 { 0x4408, 0, 0x7fff00ff, 0x00000000 },
3766 { 0x440c, 0, 0xffffffff, 0x00000000 },
3767 { 0x4410, 0, 0xffff, 0x0000 },
3768 { 0x4414, 0, 0xffff, 0x0000 },
3769 { 0x4418, 0, 0xffff, 0x0000 },
3770 { 0x441c, 0, 0xffff, 0x0000 },
3771 { 0x4428, 0, 0xffffffff, 0x00000000 },
3772 { 0x442c, 0, 0xffffffff, 0x00000000 },
3773 { 0x4430, 0, 0xffffffff, 0x00000000 },
3774 { 0x4434, 0, 0xffffffff, 0x00000000 },
3775 { 0x4438, 0, 0xffffffff, 0x00000000 },
3776 { 0x443c, 0, 0xffffffff, 0x00000000 },
3777 { 0x4440, 0, 0xffffffff, 0x00000000 },
3778 { 0x4444, 0, 0xffffffff, 0x00000000 },
3779
3780 { 0x4c00, 0, 0x00000000, 0x00000001 },
3781 { 0x4c04, 0, 0x00000000, 0x0000003f },
3782 { 0x4c08, 0, 0xffffffff, 0x00000000 },
3783 { 0x4c0c, 0, 0x0007fc00, 0x00000000 },
3784 { 0x4c10, 0, 0x80003fe0, 0x00000000 },
3785 { 0x4c14, 0, 0xffffffff, 0x00000000 },
3786 { 0x4c44, 0, 0x00000000, 0x9fff9fff },
3787 { 0x4c48, 0, 0x00000000, 0xb3009fff },
3788 { 0x4c4c, 0, 0x00000000, 0x77f33b30 },
3789 { 0x4c50, 0, 0x00000000, 0xffffffff },
3790 3646
3791 { 0x5004, 0, 0x00000000, 0x0000007f }, 3647 { 0x5004, 0, 0x00000000, 0x0000007f },
3792 { 0x5008, 0, 0x0f0007ff, 0x00000000 }, 3648 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3793 { 0x500c, 0, 0xf800f800, 0x07ff07ff }, 3649 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3794 3650
3795 { 0x5400, 0, 0x00000008, 0x00000001 },
3796 { 0x5404, 0, 0x00000000, 0x0000003f },
3797 { 0x5408, 0, 0x0000001f, 0x00000000 },
3798 { 0x540c, 0, 0xffffffff, 0x00000000 },
3799 { 0x5410, 0, 0xffffffff, 0x00000000 },
3800 { 0x5414, 0, 0x0000ffff, 0x00000000 },
3801 { 0x5418, 0, 0x0000ffff, 0x00000000 },
3802 { 0x541c, 0, 0x0000ffff, 0x00000000 },
3803 { 0x5420, 0, 0x0000ffff, 0x00000000 },
3804 { 0x5428, 0, 0x000000ff, 0x00000000 },
3805 { 0x542c, 0, 0xff00ffff, 0x00000000 },
3806 { 0x5430, 0, 0x001fff80, 0x00000000 },
3807 { 0x5438, 0, 0xffffffff, 0x00000000 },
3808 { 0x543c, 0, 0xffffffff, 0x00000000 },
3809 { 0x5440, 0, 0xf800f800, 0x07ff07ff },
3810
3811 { 0x5c00, 0, 0x00000000, 0x00000001 }, 3651 { 0x5c00, 0, 0x00000000, 0x00000001 },
3812 { 0x5c04, 0, 0x00000000, 0x0003000f }, 3652 { 0x5c04, 0, 0x00000000, 0x0003000f },
3813 { 0x5c08, 0, 0x00000003, 0x00000000 }, 3653 { 0x5c08, 0, 0x00000003, 0x00000000 },
@@ -3949,7 +3789,6 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3949 struct sk_buff *skb, *rx_skb; 3789 struct sk_buff *skb, *rx_skb;
3950 unsigned char *packet; 3790 unsigned char *packet;
3951 u16 rx_start_idx, rx_idx; 3791 u16 rx_start_idx, rx_idx;
3952 u32 val;
3953 dma_addr_t map; 3792 dma_addr_t map;
3954 struct tx_bd *txbd; 3793 struct tx_bd *txbd;
3955 struct sw_bd *rx_buf; 3794 struct sw_bd *rx_buf;
@@ -3980,8 +3819,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3980 map = pci_map_single(bp->pdev, skb->data, pkt_size, 3819 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3981 PCI_DMA_TODEVICE); 3820 PCI_DMA_TODEVICE);
3982 3821
3983 val = REG_RD(bp, BNX2_HC_COMMAND); 3822 REG_WR(bp, BNX2_HC_COMMAND,
3984 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3823 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3824
3985 REG_RD(bp, BNX2_HC_COMMAND); 3825 REG_RD(bp, BNX2_HC_COMMAND);
3986 3826
3987 udelay(5); 3827 udelay(5);
@@ -4005,8 +3845,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4005 3845
4006 udelay(100); 3846 udelay(100);
4007 3847
4008 val = REG_RD(bp, BNX2_HC_COMMAND); 3848 REG_WR(bp, BNX2_HC_COMMAND,
4009 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3849 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3850
4010 REG_RD(bp, BNX2_HC_COMMAND); 3851 REG_RD(bp, BNX2_HC_COMMAND);
4011 3852
4012 udelay(5); 3853 udelay(5);
@@ -4142,7 +3983,6 @@ static int
4142bnx2_test_intr(struct bnx2 *bp) 3983bnx2_test_intr(struct bnx2 *bp)
4143{ 3984{
4144 int i; 3985 int i;
4145 u32 val;
4146 u16 status_idx; 3986 u16 status_idx;
4147 3987
4148 if (!netif_running(bp->dev)) 3988 if (!netif_running(bp->dev))
@@ -4151,8 +3991,7 @@ bnx2_test_intr(struct bnx2 *bp)
4151 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; 3991 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4152 3992
4153 /* This register is not touched during run-time. */ 3993 /* This register is not touched during run-time. */
4154 val = REG_RD(bp, BNX2_HC_COMMAND); 3994 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4155 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
4156 REG_RD(bp, BNX2_HC_COMMAND); 3995 REG_RD(bp, BNX2_HC_COMMAND);
4157 3996
4158 for (i = 0; i < 10; i++) { 3997 for (i = 0; i < 10; i++) {
@@ -4794,6 +4633,64 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4794 info->fw_version[5] = 0; 4633 info->fw_version[5] = 0;
4795} 4634}
4796 4635
4636#define BNX2_REGDUMP_LEN (32 * 1024)
4637
4638static int
4639bnx2_get_regs_len(struct net_device *dev)
4640{
4641 return BNX2_REGDUMP_LEN;
4642}
4643
4644static void
4645bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4646{
4647 u32 *p = _p, i, offset;
4648 u8 *orig_p = _p;
4649 struct bnx2 *bp = netdev_priv(dev);
4650 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4651 0x0800, 0x0880, 0x0c00, 0x0c10,
4652 0x0c30, 0x0d08, 0x1000, 0x101c,
4653 0x1040, 0x1048, 0x1080, 0x10a4,
4654 0x1400, 0x1490, 0x1498, 0x14f0,
4655 0x1500, 0x155c, 0x1580, 0x15dc,
4656 0x1600, 0x1658, 0x1680, 0x16d8,
4657 0x1800, 0x1820, 0x1840, 0x1854,
4658 0x1880, 0x1894, 0x1900, 0x1984,
4659 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4660 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4661 0x2000, 0x2030, 0x23c0, 0x2400,
4662 0x2800, 0x2820, 0x2830, 0x2850,
4663 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4664 0x3c00, 0x3c94, 0x4000, 0x4010,
4665 0x4080, 0x4090, 0x43c0, 0x4458,
4666 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4667 0x4fc0, 0x5010, 0x53c0, 0x5444,
4668 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4669 0x5fc0, 0x6000, 0x6400, 0x6428,
4670 0x6800, 0x6848, 0x684c, 0x6860,
4671 0x6888, 0x6910, 0x8000 };
4672
4673 regs->version = 0;
4674
4675 memset(p, 0, BNX2_REGDUMP_LEN);
4676
4677 if (!netif_running(bp->dev))
4678 return;
4679
4680 i = 0;
4681 offset = reg_boundaries[0];
4682 p += offset;
4683 while (offset < BNX2_REGDUMP_LEN) {
4684 *p++ = REG_RD(bp, offset);
4685 offset += 4;
4686 if (offset == reg_boundaries[i + 1]) {
4687 offset = reg_boundaries[i + 2];
4688 p = (u32 *) (orig_p + offset);
4689 i += 2;
4690 }
4691 }
4692}
4693
4797static void 4694static void
4798bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 4695bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4799{ 4696{
@@ -4979,7 +4876,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4979{ 4876{
4980 struct bnx2 *bp = netdev_priv(dev); 4877 struct bnx2 *bp = netdev_priv(dev);
4981 4878
4982 ering->rx_max_pending = MAX_RX_DESC_CNT; 4879 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4983 ering->rx_mini_max_pending = 0; 4880 ering->rx_mini_max_pending = 0;
4984 ering->rx_jumbo_max_pending = 0; 4881 ering->rx_jumbo_max_pending = 0;
4985 4882
@@ -4996,17 +4893,28 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4996{ 4893{
4997 struct bnx2 *bp = netdev_priv(dev); 4894 struct bnx2 *bp = netdev_priv(dev);
4998 4895
4999 if ((ering->rx_pending > MAX_RX_DESC_CNT) || 4896 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5000 (ering->tx_pending > MAX_TX_DESC_CNT) || 4897 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5001 (ering->tx_pending <= MAX_SKB_FRAGS)) { 4898 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5002 4899
5003 return -EINVAL; 4900 return -EINVAL;
5004 } 4901 }
5005 bp->rx_ring_size = ering->rx_pending; 4902 if (netif_running(bp->dev)) {
4903 bnx2_netif_stop(bp);
4904 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4905 bnx2_free_skbs(bp);
4906 bnx2_free_mem(bp);
4907 }
4908
4909 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5006 bp->tx_ring_size = ering->tx_pending; 4910 bp->tx_ring_size = ering->tx_pending;
5007 4911
5008 if (netif_running(bp->dev)) { 4912 if (netif_running(bp->dev)) {
5009 bnx2_netif_stop(bp); 4913 int rc;
4914
4915 rc = bnx2_alloc_mem(bp);
4916 if (rc)
4917 return rc;
5010 bnx2_init_nic(bp); 4918 bnx2_init_nic(bp);
5011 bnx2_netif_start(bp); 4919 bnx2_netif_start(bp);
5012 } 4920 }
@@ -5360,6 +5268,8 @@ static struct ethtool_ops bnx2_ethtool_ops = {
5360 .get_settings = bnx2_get_settings, 5268 .get_settings = bnx2_get_settings,
5361 .set_settings = bnx2_set_settings, 5269 .set_settings = bnx2_set_settings,
5362 .get_drvinfo = bnx2_get_drvinfo, 5270 .get_drvinfo = bnx2_get_drvinfo,
5271 .get_regs_len = bnx2_get_regs_len,
5272 .get_regs = bnx2_get_regs,
5363 .get_wol = bnx2_get_wol, 5273 .get_wol = bnx2_get_wol,
5364 .set_wol = bnx2_set_wol, 5274 .set_wol = bnx2_set_wol,
5365 .nway_reset = bnx2_nway_reset, 5275 .nway_reset = bnx2_nway_reset,
@@ -5678,7 +5588,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5678 bp->mac_addr[5] = (u8) reg; 5588 bp->mac_addr[5] = (u8) reg;
5679 5589
5680 bp->tx_ring_size = MAX_TX_DESC_CNT; 5590 bp->tx_ring_size = MAX_TX_DESC_CNT;
5681 bp->rx_ring_size = 100; 5591 bnx2_set_rx_ring_size(bp, 100);
5682 5592
5683 bp->rx_csum = 1; 5593 bp->rx_csum = 1;
5684 5594
@@ -5897,6 +5807,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5897 if (!netif_running(dev)) 5807 if (!netif_running(dev))
5898 return 0; 5808 return 0;
5899 5809
5810 flush_scheduled_work();
5900 bnx2_netif_stop(bp); 5811 bnx2_netif_stop(bp);
5901 netif_device_detach(dev); 5812 netif_device_detach(dev);
5902 del_timer_sync(&bp->timer); 5813 del_timer_sync(&bp->timer);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 9f691cbd666b..b87925f6a228 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -13,45 +13,6 @@
13#ifndef BNX2_H 13#ifndef BNX2_H
14#define BNX2_H 14#define BNX2_H
15 15
16#include <linux/config.h>
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20
21#include <linux/kernel.h>
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <asm/bitops.h>
34#include <asm/io.h>
35#include <asm/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42#include <linux/if_vlan.h>
43#define BCM_VLAN 1
44#endif
45#ifdef NETIF_F_TSO
46#include <net/ip.h>
47#include <net/tcp.h>
48#include <net/checksum.h>
49#define BCM_TSO 1
50#endif
51#include <linux/workqueue.h>
52#include <linux/crc32.h>
53#include <linux/prefetch.h>
54
55/* Hardware data structures and register definitions automatically 16/* Hardware data structures and register definitions automatically
56 * generated from RTL code. Do not modify. 17 * generated from RTL code. Do not modify.
57 */ 18 */
@@ -3792,8 +3753,10 @@ struct l2_fhdr {
3792#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) 3753#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
3793#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 3754#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
3794 3755
3756#define MAX_RX_RINGS 4
3795#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) 3757#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
3796#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) 3758#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
3759#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
3797 3760
3798#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ 3761#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \
3799 (MAX_TX_DESC_CNT - 1)) ? \ 3762 (MAX_TX_DESC_CNT - 1)) ? \
@@ -3805,8 +3768,10 @@ struct l2_fhdr {
3805 (MAX_RX_DESC_CNT - 1)) ? \ 3768 (MAX_RX_DESC_CNT - 1)) ? \
3806 (x) + 2 : (x) + 1 3769 (x) + 2 : (x) + 1
3807 3770
3808#define RX_RING_IDX(x) ((x) & MAX_RX_DESC_CNT) 3771#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
3809 3772
3773#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8)
3774#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
3810 3775
3811/* Context size. */ 3776/* Context size. */
3812#define CTX_SHIFT 7 3777#define CTX_SHIFT 7
@@ -3903,15 +3868,26 @@ struct bnx2 {
3903 struct status_block *status_blk; 3868 struct status_block *status_blk;
3904 u32 last_status_idx; 3869 u32 last_status_idx;
3905 3870
3906 struct tx_bd *tx_desc_ring; 3871 u32 flags;
3907 struct sw_bd *tx_buf_ring; 3872#define PCIX_FLAG 1
3908 u32 tx_prod_bseq; 3873#define PCI_32BIT_FLAG 2
3909 u16 tx_prod; 3874#define ONE_TDMA_FLAG 4 /* no longer used */
3910 u16 tx_cons; 3875#define NO_WOL_FLAG 8
3911 int tx_ring_size; 3876#define USING_DAC_FLAG 0x10
3877#define USING_MSI_FLAG 0x20
3878#define ASF_ENABLE_FLAG 0x40
3912 3879
3913 u16 hw_tx_cons; 3880 /* Put tx producer and consumer fields in separate cache lines. */
3914 u16 hw_rx_cons; 3881
3882 u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
3883 u16 tx_prod;
3884
3885 struct tx_bd *tx_desc_ring;
3886 struct sw_bd *tx_buf_ring;
3887 int tx_ring_size;
3888
3889 u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
3890 u16 hw_tx_cons;
3915 3891
3916#ifdef BCM_VLAN 3892#ifdef BCM_VLAN
3917 struct vlan_group *vlgrp; 3893 struct vlan_group *vlgrp;
@@ -3920,19 +3896,23 @@ struct bnx2 {
3920 u32 rx_offset; 3896 u32 rx_offset;
3921 u32 rx_buf_use_size; /* useable size */ 3897 u32 rx_buf_use_size; /* useable size */
3922 u32 rx_buf_size; /* with alignment */ 3898 u32 rx_buf_size; /* with alignment */
3923 struct rx_bd *rx_desc_ring; 3899 u32 rx_max_ring_idx;
3924 struct sw_bd *rx_buf_ring; 3900
3925 u32 rx_prod_bseq; 3901 u32 rx_prod_bseq;
3926 u16 rx_prod; 3902 u16 rx_prod;
3927 u16 rx_cons; 3903 u16 rx_cons;
3904 u16 hw_rx_cons;
3928 3905
3929 u32 rx_csum; 3906 u32 rx_csum;
3930 3907
3908 struct sw_bd *rx_buf_ring;
3909 struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
3910
3931 /* Only used to synchronize netif_stop_queue/wake_queue when tx */ 3911 /* Only used to synchronize netif_stop_queue/wake_queue when tx */
3932 /* ring is full */ 3912 /* ring is full */
3933 spinlock_t tx_lock; 3913 spinlock_t tx_lock;
3934 3914
3935 /* End of fileds used in the performance code paths. */ 3915 /* End of fields used in the performance code paths. */
3936 3916
3937 char *name; 3917 char *name;
3938 3918
@@ -3945,15 +3925,6 @@ struct bnx2 {
3945 /* Used to synchronize phy accesses. */ 3925 /* Used to synchronize phy accesses. */
3946 spinlock_t phy_lock; 3926 spinlock_t phy_lock;
3947 3927
3948 u32 flags;
3949#define PCIX_FLAG 1
3950#define PCI_32BIT_FLAG 2
3951#define ONE_TDMA_FLAG 4 /* no longer used */
3952#define NO_WOL_FLAG 8
3953#define USING_DAC_FLAG 0x10
3954#define USING_MSI_FLAG 0x20
3955#define ASF_ENABLE_FLAG 0x40
3956
3957 u32 phy_flags; 3928 u32 phy_flags;
3958#define PHY_SERDES_FLAG 1 3929#define PHY_SERDES_FLAG 1
3959#define PHY_CRC_FIX_FLAG 2 3930#define PHY_CRC_FIX_FLAG 2
@@ -4004,8 +3975,9 @@ struct bnx2 {
4004 dma_addr_t tx_desc_mapping; 3975 dma_addr_t tx_desc_mapping;
4005 3976
4006 3977
3978 int rx_max_ring;
4007 int rx_ring_size; 3979 int rx_ring_size;
4008 dma_addr_t rx_desc_mapping; 3980 dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
4009 3981
4010 u16 tx_quick_cons_trip; 3982 u16 tx_quick_cons_trip;
4011 u16 tx_quick_cons_trip_int; 3983 u16 tx_quick_cons_trip_int;
@@ -4029,6 +4001,7 @@ struct bnx2 {
4029 struct statistics_block *stats_blk; 4001 struct statistics_block *stats_blk;
4030 dma_addr_t stats_blk_mapping; 4002 dma_addr_t stats_blk_mapping;
4031 4003
4004 u32 hc_cmd;
4032 u32 rx_mode; 4005 u32 rx_mode;
4033 4006
4034 u16 req_line_speed; 4007 u16 req_line_speed;
@@ -4073,6 +4046,8 @@ struct bnx2 {
4073 4046
4074 struct flash_spec *flash_info; 4047 struct flash_spec *flash_info;
4075 u32 flash_size; 4048 u32 flash_size;
4049
4050 int status_stats_size;
4076}; 4051};
4077 4052
4078static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset); 4053static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 6e295fce5c6f..8f1573e658a5 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -91,6 +91,7 @@
91#include <linux/mii.h> 91#include <linux/mii.h>
92#include <linux/ip.h> 92#include <linux/ip.h>
93#include <linux/tcp.h> 93#include <linux/tcp.h>
94#include <linux/mutex.h>
94 95
95#include <net/checksum.h> 96#include <net/checksum.h>
96 97
@@ -3892,7 +3893,7 @@ static void cas_reset(struct cas *cp, int blkflag)
3892 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3893 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3893} 3894}
3894 3895
3895/* Shut down the chip, must be called with pm_sem held. */ 3896/* Shut down the chip, must be called with pm_mutex held. */
3896static void cas_shutdown(struct cas *cp) 3897static void cas_shutdown(struct cas *cp)
3897{ 3898{
3898 unsigned long flags; 3899 unsigned long flags;
@@ -4311,11 +4312,11 @@ static int cas_open(struct net_device *dev)
4311 int hw_was_up, err; 4312 int hw_was_up, err;
4312 unsigned long flags; 4313 unsigned long flags;
4313 4314
4314 down(&cp->pm_sem); 4315 mutex_lock(&cp->pm_mutex);
4315 4316
4316 hw_was_up = cp->hw_running; 4317 hw_was_up = cp->hw_running;
4317 4318
4318 /* The power-management semaphore protects the hw_running 4319 /* The power-management mutex protects the hw_running
4319 * etc. state so it is safe to do this bit without cp->lock 4320 * etc. state so it is safe to do this bit without cp->lock
4320 */ 4321 */
4321 if (!cp->hw_running) { 4322 if (!cp->hw_running) {
@@ -4364,7 +4365,7 @@ static int cas_open(struct net_device *dev)
4364 cas_unlock_all_restore(cp, flags); 4365 cas_unlock_all_restore(cp, flags);
4365 4366
4366 netif_start_queue(dev); 4367 netif_start_queue(dev);
4367 up(&cp->pm_sem); 4368 mutex_unlock(&cp->pm_mutex);
4368 return 0; 4369 return 0;
4369 4370
4370err_spare: 4371err_spare:
@@ -4372,7 +4373,7 @@ err_spare:
4372 cas_free_rxds(cp); 4373 cas_free_rxds(cp);
4373err_tx_tiny: 4374err_tx_tiny:
4374 cas_tx_tiny_free(cp); 4375 cas_tx_tiny_free(cp);
4375 up(&cp->pm_sem); 4376 mutex_unlock(&cp->pm_mutex);
4376 return err; 4377 return err;
4377} 4378}
4378 4379
@@ -4382,7 +4383,7 @@ static int cas_close(struct net_device *dev)
4382 struct cas *cp = netdev_priv(dev); 4383 struct cas *cp = netdev_priv(dev);
4383 4384
4384 /* Make sure we don't get distracted by suspend/resume */ 4385 /* Make sure we don't get distracted by suspend/resume */
4385 down(&cp->pm_sem); 4386 mutex_lock(&cp->pm_mutex);
4386 4387
4387 netif_stop_queue(dev); 4388 netif_stop_queue(dev);
4388 4389
@@ -4399,7 +4400,7 @@ static int cas_close(struct net_device *dev)
4399 cas_spare_free(cp); 4400 cas_spare_free(cp);
4400 cas_free_rxds(cp); 4401 cas_free_rxds(cp);
4401 cas_tx_tiny_free(cp); 4402 cas_tx_tiny_free(cp);
4402 up(&cp->pm_sem); 4403 mutex_unlock(&cp->pm_mutex);
4403 return 0; 4404 return 0;
4404} 4405}
4405 4406
@@ -4834,10 +4835,10 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4834 unsigned long flags; 4835 unsigned long flags;
4835 int rc = -EOPNOTSUPP; 4836 int rc = -EOPNOTSUPP;
4836 4837
4837 /* Hold the PM semaphore while doing ioctl's or we may collide 4838 /* Hold the PM mutex while doing ioctl's or we may collide
4838 * with open/close and power management and oops. 4839 * with open/close and power management and oops.
4839 */ 4840 */
4840 down(&cp->pm_sem); 4841 mutex_lock(&cp->pm_mutex);
4841 switch (cmd) { 4842 switch (cmd) {
4842 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4843 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4843 data->phy_id = cp->phy_addr; 4844 data->phy_id = cp->phy_addr;
@@ -4867,7 +4868,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4867 break; 4868 break;
4868 }; 4869 };
4869 4870
4870 up(&cp->pm_sem); 4871 mutex_unlock(&cp->pm_mutex);
4871 return rc; 4872 return rc;
4872} 4873}
4873 4874
@@ -4994,7 +4995,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4994 spin_lock_init(&cp->tx_lock[i]); 4995 spin_lock_init(&cp->tx_lock[i]);
4995 } 4996 }
4996 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); 4997 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4997 init_MUTEX(&cp->pm_sem); 4998 mutex_init(&cp->pm_mutex);
4998 4999
4999 init_timer(&cp->link_timer); 5000 init_timer(&cp->link_timer);
5000 cp->link_timer.function = cas_link_timer; 5001 cp->link_timer.function = cas_link_timer;
@@ -5116,10 +5117,10 @@ err_out_free_consistent:
5116 cp->init_block, cp->block_dvma); 5117 cp->init_block, cp->block_dvma);
5117 5118
5118err_out_iounmap: 5119err_out_iounmap:
5119 down(&cp->pm_sem); 5120 mutex_lock(&cp->pm_mutex);
5120 if (cp->hw_running) 5121 if (cp->hw_running)
5121 cas_shutdown(cp); 5122 cas_shutdown(cp);
5122 up(&cp->pm_sem); 5123 mutex_unlock(&cp->pm_mutex);
5123 5124
5124 iounmap(cp->regs); 5125 iounmap(cp->regs);
5125 5126
@@ -5152,11 +5153,11 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5152 cp = netdev_priv(dev); 5153 cp = netdev_priv(dev);
5153 unregister_netdev(dev); 5154 unregister_netdev(dev);
5154 5155
5155 down(&cp->pm_sem); 5156 mutex_lock(&cp->pm_mutex);
5156 flush_scheduled_work(); 5157 flush_scheduled_work();
5157 if (cp->hw_running) 5158 if (cp->hw_running)
5158 cas_shutdown(cp); 5159 cas_shutdown(cp);
5159 up(&cp->pm_sem); 5160 mutex_unlock(&cp->pm_mutex);
5160 5161
5161#if 1 5162#if 1
5162 if (cp->orig_cacheline_size) { 5163 if (cp->orig_cacheline_size) {
@@ -5183,10 +5184,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5183 struct cas *cp = netdev_priv(dev); 5184 struct cas *cp = netdev_priv(dev);
5184 unsigned long flags; 5185 unsigned long flags;
5185 5186
5186 /* We hold the PM semaphore during entire driver 5187 mutex_lock(&cp->pm_mutex);
5187 * sleep time
5188 */
5189 down(&cp->pm_sem);
5190 5188
5191 /* If the driver is opened, we stop the DMA */ 5189 /* If the driver is opened, we stop the DMA */
5192 if (cp->opened) { 5190 if (cp->opened) {
@@ -5206,6 +5204,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5206 5204
5207 if (cp->hw_running) 5205 if (cp->hw_running)
5208 cas_shutdown(cp); 5206 cas_shutdown(cp);
5207 mutex_unlock(&cp->pm_mutex);
5209 5208
5210 return 0; 5209 return 0;
5211} 5210}
@@ -5217,6 +5216,7 @@ static int cas_resume(struct pci_dev *pdev)
5217 5216
5218 printk(KERN_INFO "%s: resuming\n", dev->name); 5217 printk(KERN_INFO "%s: resuming\n", dev->name);
5219 5218
5219 mutex_lock(&cp->pm_mutex);
5220 cas_hard_reset(cp); 5220 cas_hard_reset(cp);
5221 if (cp->opened) { 5221 if (cp->opened) {
5222 unsigned long flags; 5222 unsigned long flags;
@@ -5229,7 +5229,7 @@ static int cas_resume(struct pci_dev *pdev)
5229 5229
5230 netif_device_attach(dev); 5230 netif_device_attach(dev);
5231 } 5231 }
5232 up(&cp->pm_sem); 5232 mutex_unlock(&cp->pm_mutex);
5233 return 0; 5233 return 0;
5234} 5234}
5235#endif /* CONFIG_PM */ 5235#endif /* CONFIG_PM */
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index 88063ef16cf6..ab55c7ee1012 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -4284,7 +4284,7 @@ struct cas {
4284 * (ie. not power managed) */ 4284 * (ie. not power managed) */
4285 int hw_running; 4285 int hw_running;
4286 int opened; 4286 int opened;
4287 struct semaphore pm_sem; /* open/close/suspend/resume */ 4287 struct mutex pm_mutex; /* open/close/suspend/resume */
4288 4288
4289 struct cas_init_block *init_block; 4289 struct cas_init_block *init_block;
4290 struct cas_tx_desc *init_txds[MAX_TX_RINGS]; 4290 struct cas_tx_desc *init_txds[MAX_TX_RINGS];
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 03804cc38be0..0941d40f046f 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1412,7 +1412,7 @@ static int __init depca_mca_probe(struct device *device)
1412 irq = 11; 1412 irq = 11;
1413 break; 1413 break;
1414 default: 1414 default:
1415 printk("%s: mca_probe IRQ error. You should never get here (%d).\n", dev->name, where); 1415 printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where);
1416 return -EINVAL; 1416 return -EINVAL;
1417 } 1417 }
1418 1418
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f39de16e6b97..49cd096a3c3d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -920,7 +920,7 @@ e1000_remove(struct pci_dev *pdev)
920 unregister_netdev(netdev); 920 unregister_netdev(netdev);
921#ifdef CONFIG_E1000_NAPI 921#ifdef CONFIG_E1000_NAPI
922 for (i = 0; i < adapter->num_rx_queues; i++) 922 for (i = 0; i < adapter->num_rx_queues; i++)
923 __dev_put(&adapter->polling_netdev[i]); 923 dev_put(&adapter->polling_netdev[i]);
924#endif 924#endif
925 925
926 if (!e1000_check_phy_reset_block(&adapter->hw)) 926 if (!e1000_check_phy_reset_block(&adapter->hw))
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index e67b1d06611c..95e2bb8dd7b4 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
118 118
119 /* Fill out IRQ field */ 119 /* Fill out IRQ field */
120 fep->interrupt = platform_get_irq(pdev, 0); 120 fep->interrupt = platform_get_irq(pdev, 0);
121 if (fep->interrupt < 0)
122 return -EINVAL;
121 123
122 /* Attach the memory for the FCC Parameter RAM */ 124 /* Attach the memory for the FCC Parameter RAM */
123 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); 125 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 2e8f44469699..3dad69dfdb2c 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -144,6 +144,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
144 144
145 /* Fill out IRQ field */ 145 /* Fill out IRQ field */
146 fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); 146 fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
147 if (fep->interrupt < 0)
148 return -EINVAL;
147 149
148 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 150 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
149 fep->fec.fecp =(void*)r->start; 151 fep->fec.fecp =(void*)r->start;
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index a3897fda71fa..a772b286f96d 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
118 118
119 /* Fill out IRQ field */ 119 /* Fill out IRQ field */
120 fep->interrupt = platform_get_irq_byname(pdev, "interrupt"); 120 fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
121 if (fep->interrupt < 0)
122 return -EINVAL;
121 123
122 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 124 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
123 fep->scc.sccp = (void *)r->start; 125 fep->scc.sccp = (void *)r->start;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0e8e3fcde9ff..771e25d8c417 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -193,8 +193,12 @@ static int gfar_probe(struct platform_device *pdev)
193 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); 193 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
194 priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); 194 priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
195 priv->interruptError = platform_get_irq_byname(pdev, "error"); 195 priv->interruptError = platform_get_irq_byname(pdev, "error");
196 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
197 goto regs_fail;
196 } else { 198 } else {
197 priv->interruptTransmit = platform_get_irq(pdev, 0); 199 priv->interruptTransmit = platform_get_irq(pdev, 0);
200 if (priv->interruptTransmit < 0)
201 goto regs_fail;
198 } 202 }
199 203
200 /* get a pointer to the register memory */ 204 /* get a pointer to the register memory */
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index c81fe1c382d5..5e6d00752990 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,14 @@ config TEKRAM_DONGLE
64 dongles you will have to start irattach like this: 64 dongles you will have to start irattach like this:
65 "irattach -d tekram". 65 "irattach -d tekram".
66 66
67config TOIM3232_DONGLE
68 tristate "TOIM3232 IrDa dongle"
69 depends on DONGLE && IRDA
70 help
71 Say Y here if you want to build support for the Vishay/Temic
72 TOIM3232 and TOIM4232 based dongles.
73 To compile it as a module, choose M here.
74
67config LITELINK_DONGLE 75config LITELINK_DONGLE
68 tristate "Parallax LiteLink dongle" 76 tristate "Parallax LiteLink dongle"
69 depends on DONGLE && IRDA 77 depends on DONGLE && IRDA
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 72cbfdc9cfcc..27ab75f20799 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
43obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o 43obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
44obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o 44obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
45obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o 45obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
46 47
47# The SIR helper module 48# The SIR helper module
48sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o 49sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 3137592d60c0..910c0cab35b0 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1778,7 +1778,7 @@ static struct pci_driver donauboe_pci_driver = {
1778static int __init 1778static int __init
1779donauboe_init (void) 1779donauboe_init (void)
1780{ 1780{
1781 return pci_module_init(&donauboe_pci_driver); 1781 return pci_register_driver(&donauboe_pci_driver);
1782} 1782}
1783 1783
1784static void __exit 1784static void __exit
diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c
index 31896262d21c..4cba38f7e4a8 100644
--- a/drivers/net/irda/ep7211_ir.c
+++ b/drivers/net/irda/ep7211_ir.c
@@ -8,6 +8,7 @@
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/tty.h> 9#include <linux/tty.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/spinlock.h>
11 12
12#include <net/irda/irda.h> 13#include <net/irda/irda.h>
13#include <net/irda/irda_device.h> 14#include <net/irda/irda_device.h>
@@ -23,6 +24,8 @@ static void ep7211_ir_close(dongle_t *self);
23static int ep7211_ir_change_speed(struct irda_task *task); 24static int ep7211_ir_change_speed(struct irda_task *task);
24static int ep7211_ir_reset(struct irda_task *task); 25static int ep7211_ir_reset(struct irda_task *task);
25 26
27static DEFINE_SPINLOCK(ep7211_lock);
28
26static struct dongle_reg dongle = { 29static struct dongle_reg dongle = {
27 .type = IRDA_EP7211_IR, 30 .type = IRDA_EP7211_IR,
28 .open = ep7211_ir_open, 31 .open = ep7211_ir_open,
@@ -36,7 +39,7 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
36{ 39{
37 unsigned int syscon1, flags; 40 unsigned int syscon1, flags;
38 41
39 save_flags(flags); cli(); 42 spin_lock_irqsave(&ep7211_lock, flags);
40 43
41 /* Turn on the SIR encoder. */ 44 /* Turn on the SIR encoder. */
42 syscon1 = clps_readl(SYSCON1); 45 syscon1 = clps_readl(SYSCON1);
@@ -46,14 +49,14 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
46 /* XXX: We should disable modem status interrupts on the first 49 /* XXX: We should disable modem status interrupts on the first
47 UART (interrupt #14). */ 50 UART (interrupt #14). */
48 51
49 restore_flags(flags); 52 spin_unlock_irqrestore(&ep7211_lock, flags);
50} 53}
51 54
52static void ep7211_ir_close(dongle_t *self) 55static void ep7211_ir_close(dongle_t *self)
53{ 56{
54 unsigned int syscon1, flags; 57 unsigned int syscon1, flags;
55 58
56 save_flags(flags); cli(); 59 spin_lock_irqsave(&ep7211_lock, flags);
57 60
58 /* Turn off the SIR encoder. */ 61 /* Turn off the SIR encoder. */
59 syscon1 = clps_readl(SYSCON1); 62 syscon1 = clps_readl(SYSCON1);
@@ -63,7 +66,7 @@ static void ep7211_ir_close(dongle_t *self)
63 /* XXX: If we've disabled the modem status interrupts, we should 66 /* XXX: If we've disabled the modem status interrupts, we should
64 reset them back to their original state. */ 67 reset them back to their original state. */
65 68
66 restore_flags(flags); 69 spin_unlock_irqrestore(&ep7211_lock, flags);
67} 70}
68 71
69/* 72/*
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8936058a3cce..6e2ec56cde0b 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -740,7 +740,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
740 struct sk_buff *newskb; 740 struct sk_buff *newskb;
741 struct sk_buff *dataskb; 741 struct sk_buff *dataskb;
742 struct urb *next_urb; 742 struct urb *next_urb;
743 int docopy; 743 unsigned int len, docopy;
744 744
745 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); 745 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
746 746
@@ -851,10 +851,11 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
851 dataskb->dev = self->netdev; 851 dataskb->dev = self->netdev;
852 dataskb->mac.raw = dataskb->data; 852 dataskb->mac.raw = dataskb->data;
853 dataskb->protocol = htons(ETH_P_IRDA); 853 dataskb->protocol = htons(ETH_P_IRDA);
854 len = dataskb->len;
854 netif_rx(dataskb); 855 netif_rx(dataskb);
855 856
856 /* Keep stats up to date */ 857 /* Keep stats up to date */
857 self->stats.rx_bytes += dataskb->len; 858 self->stats.rx_bytes += len;
858 self->stats.rx_packets++; 859 self->stats.rx_packets++;
859 self->netdev->last_rx = jiffies; 860 self->netdev->last_rx = jiffies;
860 861
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 101750bf210f..6a98b7ae4975 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -33,6 +33,7 @@
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/mutex.h>
36 37
37#include <net/irda/irda.h> 38#include <net/irda/irda.h>
38#include <net/irda/irda_device.h> 39#include <net/irda/irda_device.h>
@@ -338,7 +339,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
338/*****************************************************************/ 339/*****************************************************************/
339 340
340/* serialize ldisc open/close with sir_dev */ 341/* serialize ldisc open/close with sir_dev */
341static DECLARE_MUTEX(irtty_sem); 342static DEFINE_MUTEX(irtty_mutex);
342 343
343/* notifier from sir_dev when irda% device gets opened (ifup) */ 344/* notifier from sir_dev when irda% device gets opened (ifup) */
344 345
@@ -348,11 +349,11 @@ static int irtty_start_dev(struct sir_dev *dev)
348 struct tty_struct *tty; 349 struct tty_struct *tty;
349 350
350 /* serialize with ldisc open/close */ 351 /* serialize with ldisc open/close */
351 down(&irtty_sem); 352 mutex_lock(&irtty_mutex);
352 353
353 priv = dev->priv; 354 priv = dev->priv;
354 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { 355 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
355 up(&irtty_sem); 356 mutex_unlock(&irtty_mutex);
356 return -ESTALE; 357 return -ESTALE;
357 } 358 }
358 359
@@ -363,7 +364,7 @@ static int irtty_start_dev(struct sir_dev *dev)
363 /* Make sure we can receive more data */ 364 /* Make sure we can receive more data */
364 irtty_stop_receiver(tty, FALSE); 365 irtty_stop_receiver(tty, FALSE);
365 366
366 up(&irtty_sem); 367 mutex_unlock(&irtty_mutex);
367 return 0; 368 return 0;
368} 369}
369 370
@@ -375,11 +376,11 @@ static int irtty_stop_dev(struct sir_dev *dev)
375 struct tty_struct *tty; 376 struct tty_struct *tty;
376 377
377 /* serialize with ldisc open/close */ 378 /* serialize with ldisc open/close */
378 down(&irtty_sem); 379 mutex_lock(&irtty_mutex);
379 380
380 priv = dev->priv; 381 priv = dev->priv;
381 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { 382 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
382 up(&irtty_sem); 383 mutex_unlock(&irtty_mutex);
383 return -ESTALE; 384 return -ESTALE;
384 } 385 }
385 386
@@ -390,7 +391,7 @@ static int irtty_stop_dev(struct sir_dev *dev)
390 if (tty->driver->stop) 391 if (tty->driver->stop)
391 tty->driver->stop(tty); 392 tty->driver->stop(tty);
392 393
393 up(&irtty_sem); 394 mutex_unlock(&irtty_mutex);
394 395
395 return 0; 396 return 0;
396} 397}
@@ -514,13 +515,13 @@ static int irtty_open(struct tty_struct *tty)
514 priv->dev = dev; 515 priv->dev = dev;
515 516
516 /* serialize with start_dev - in case we were racing with ifup */ 517 /* serialize with start_dev - in case we were racing with ifup */
517 down(&irtty_sem); 518 mutex_lock(&irtty_mutex);
518 519
519 dev->priv = priv; 520 dev->priv = priv;
520 tty->disc_data = priv; 521 tty->disc_data = priv;
521 tty->receive_room = 65536; 522 tty->receive_room = 65536;
522 523
523 up(&irtty_sem); 524 mutex_unlock(&irtty_mutex);
524 525
525 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); 526 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name);
526 527
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index ee717d0e939e..83141a3ff546 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -12,6 +12,7 @@
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> 12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> 13 * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
14 * Copyright (c) 1998 Actisys Corp., www.actisys.com 14 * Copyright (c) 1998 Actisys Corp., www.actisys.com
15 * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
15 * All Rights Reserved 16 * All Rights Reserved
16 * 17 *
17 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
@@ -53,14 +54,13 @@
53#include <linux/init.h> 54#include <linux/init.h>
54#include <linux/rtnetlink.h> 55#include <linux/rtnetlink.h>
55#include <linux/dma-mapping.h> 56#include <linux/dma-mapping.h>
57#include <linux/pnp.h>
58#include <linux/platform_device.h>
56 59
57#include <asm/io.h> 60#include <asm/io.h>
58#include <asm/dma.h> 61#include <asm/dma.h>
59#include <asm/byteorder.h> 62#include <asm/byteorder.h>
60 63
61#include <linux/pm.h>
62#include <linux/pm_legacy.h>
63
64#include <net/irda/wrapper.h> 64#include <net/irda/wrapper.h>
65#include <net/irda/irda.h> 65#include <net/irda/irda.h>
66#include <net/irda/irda_device.h> 66#include <net/irda/irda_device.h>
@@ -72,14 +72,27 @@
72 72
73static char *driver_name = "nsc-ircc"; 73static char *driver_name = "nsc-ircc";
74 74
75/* Power Management */
76#define NSC_IRCC_DRIVER_NAME "nsc-ircc"
77static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
78static int nsc_ircc_resume(struct platform_device *dev);
79
80static struct platform_driver nsc_ircc_driver = {
81 .suspend = nsc_ircc_suspend,
82 .resume = nsc_ircc_resume,
83 .driver = {
84 .name = NSC_IRCC_DRIVER_NAME,
85 },
86};
87
75/* Module parameters */ 88/* Module parameters */
76static int qos_mtt_bits = 0x07; /* 1 ms or more */ 89static int qos_mtt_bits = 0x07; /* 1 ms or more */
77static int dongle_id; 90static int dongle_id;
78 91
79/* Use BIOS settions by default, but user may supply module parameters */ 92/* Use BIOS settions by default, but user may supply module parameters */
80static unsigned int io[] = { ~0, ~0, ~0, ~0 }; 93static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 };
81static unsigned int irq[] = { 0, 0, 0, 0, 0 }; 94static unsigned int irq[] = { 0, 0, 0, 0, 0 };
82static unsigned int dma[] = { 0, 0, 0, 0, 0 }; 95static unsigned int dma[] = { 0, 0, 0, 0, 0 };
83 96
84static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info); 97static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
85static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info); 98static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
@@ -87,6 +100,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
87static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); 100static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
88static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); 101static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
89static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); 102static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
103static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
90 104
91/* These are the known NSC chips */ 105/* These are the known NSC chips */
92static nsc_chip_t chips[] = { 106static nsc_chip_t chips[] = {
@@ -101,11 +115,12 @@ static nsc_chip_t chips[] = {
101 /* Contributed by Jan Frey - IBM A30/A31 */ 115 /* Contributed by Jan Frey - IBM A30/A31 */
102 { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, 116 { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
103 nsc_ircc_probe_39x, nsc_ircc_init_39x }, 117 nsc_ircc_probe_39x, nsc_ircc_init_39x },
118 { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
119 nsc_ircc_probe_39x, nsc_ircc_init_39x },
104 { NULL } 120 { NULL }
105}; 121};
106 122
107/* Max 4 instances for now */ 123static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL };
108static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
109 124
110static char *dongle_types[] = { 125static char *dongle_types[] = {
111 "Differential serial interface", 126 "Differential serial interface",
@@ -126,8 +141,24 @@ static char *dongle_types[] = {
126 "No dongle connected", 141 "No dongle connected",
127}; 142};
128 143
144/* PNP probing */
145static chipio_t pnp_info;
146static const struct pnp_device_id nsc_ircc_pnp_table[] = {
147 { .id = "NSC6001", .driver_data = 0 },
148 { .id = "IBM0071", .driver_data = 0 },
149 { }
150};
151
152MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
153
154static struct pnp_driver nsc_ircc_pnp_driver = {
155 .name = "nsc-ircc",
156 .id_table = nsc_ircc_pnp_table,
157 .probe = nsc_ircc_pnp_probe,
158};
159
129/* Some prototypes */ 160/* Some prototypes */
130static int nsc_ircc_open(int i, chipio_t *info); 161static int nsc_ircc_open(chipio_t *info);
131static int nsc_ircc_close(struct nsc_ircc_cb *self); 162static int nsc_ircc_close(struct nsc_ircc_cb *self);
132static int nsc_ircc_setup(chipio_t *info); 163static int nsc_ircc_setup(chipio_t *info);
133static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self); 164static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
@@ -146,7 +177,10 @@ static int nsc_ircc_net_open(struct net_device *dev);
146static int nsc_ircc_net_close(struct net_device *dev); 177static int nsc_ircc_net_close(struct net_device *dev);
147static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 178static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
148static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev); 179static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
149static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data); 180
181/* Globals */
182static int pnp_registered;
183static int pnp_succeeded;
150 184
151/* 185/*
152 * Function nsc_ircc_init () 186 * Function nsc_ircc_init ()
@@ -158,28 +192,36 @@ static int __init nsc_ircc_init(void)
158{ 192{
159 chipio_t info; 193 chipio_t info;
160 nsc_chip_t *chip; 194 nsc_chip_t *chip;
161 int ret = -ENODEV; 195 int ret;
162 int cfg_base; 196 int cfg_base;
163 int cfg, id; 197 int cfg, id;
164 int reg; 198 int reg;
165 int i = 0; 199 int i = 0;
166 200
201 ret = platform_driver_register(&nsc_ircc_driver);
202 if (ret) {
203 IRDA_ERROR("%s, Can't register driver!\n", driver_name);
204 return ret;
205 }
206
207	/* Register with PnP subsystem to detect disabled ports */
208 ret = pnp_register_driver(&nsc_ircc_pnp_driver);
209
210 if (ret >= 0)
211 pnp_registered = 1;
212
213 ret = -ENODEV;
214
167 /* Probe for all the NSC chipsets we know about */ 215 /* Probe for all the NSC chipsets we know about */
168 for (chip=chips; chip->name ; chip++) { 216 for (chip = chips; chip->name ; chip++) {
169 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, 217 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
170 chip->name); 218 chip->name);
171 219
172 /* Try all config registers for this chip */ 220 /* Try all config registers for this chip */
173 for (cfg=0; cfg<3; cfg++) { 221 for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
174 cfg_base = chip->cfg[cfg]; 222 cfg_base = chip->cfg[cfg];
175 if (!cfg_base) 223 if (!cfg_base)
176 continue; 224 continue;
177
178 memset(&info, 0, sizeof(chipio_t));
179 info.cfg_base = cfg_base;
180 info.fir_base = io[i];
181 info.dma = dma[i];
182 info.irq = irq[i];
183 225
184 /* Read index register */ 226 /* Read index register */
185 reg = inb(cfg_base); 227 reg = inb(cfg_base);
@@ -194,24 +236,65 @@ static int __init nsc_ircc_init(void)
194 if ((id & chip->cid_mask) == chip->cid_value) { 236 if ((id & chip->cid_mask) == chip->cid_value) {
195 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", 237 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
196 __FUNCTION__, chip->name, id & ~chip->cid_mask); 238 __FUNCTION__, chip->name, id & ~chip->cid_mask);
197 /*
198 * If the user supplies the base address, then
199 * we init the chip, if not we probe the values
200 * set by the BIOS
201 */
202 if (io[i] < 0x2000) {
203 chip->init(chip, &info);
204 } else
205 chip->probe(chip, &info);
206 239
207 if (nsc_ircc_open(i, &info) == 0) 240 /*
208 ret = 0; 241 * If we found a correct PnP setting,
242 * we first try it.
243 */
244 if (pnp_succeeded) {
245 memset(&info, 0, sizeof(chipio_t));
246 info.cfg_base = cfg_base;
247 info.fir_base = pnp_info.fir_base;
248 info.dma = pnp_info.dma;
249 info.irq = pnp_info.irq;
250
251 if (info.fir_base < 0x2000) {
252 IRDA_MESSAGE("%s, chip->init\n", driver_name);
253 chip->init(chip, &info);
254 } else
255 chip->probe(chip, &info);
256
257 if (nsc_ircc_open(&info) >= 0)
258 ret = 0;
259 }
260
261 /*
262 * Opening based on PnP values failed.
263 * Let's fallback to user values, or probe
264 * the chip.
265 */
266 if (ret) {
267 IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name);
268 memset(&info, 0, sizeof(chipio_t));
269 info.cfg_base = cfg_base;
270 info.fir_base = io[i];
271 info.dma = dma[i];
272 info.irq = irq[i];
273
274 /*
275 * If the user supplies the base address, then
276 * we init the chip, if not we probe the values
277 * set by the BIOS
278 */
279 if (io[i] < 0x2000) {
280 chip->init(chip, &info);
281 } else
282 chip->probe(chip, &info);
283
284 if (nsc_ircc_open(&info) >= 0)
285 ret = 0;
286 }
209 i++; 287 i++;
210 } else { 288 } else {
211 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); 289 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
212 } 290 }
213 } 291 }
214 292 }
293
294 if (ret) {
295 platform_driver_unregister(&nsc_ircc_driver);
296 pnp_unregister_driver(&nsc_ircc_pnp_driver);
297 pnp_registered = 0;
215 } 298 }
216 299
217 return ret; 300 return ret;
@@ -227,12 +310,17 @@ static void __exit nsc_ircc_cleanup(void)
227{ 310{
228 int i; 311 int i;
229 312
230 pm_unregister_all(nsc_ircc_pmproc); 313 for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
231
232 for (i=0; i < 4; i++) {
233 if (dev_self[i]) 314 if (dev_self[i])
234 nsc_ircc_close(dev_self[i]); 315 nsc_ircc_close(dev_self[i]);
235 } 316 }
317
318 platform_driver_unregister(&nsc_ircc_driver);
319
320 if (pnp_registered)
321 pnp_unregister_driver(&nsc_ircc_pnp_driver);
322
323 pnp_registered = 0;
236} 324}
237 325
238/* 326/*
@@ -241,16 +329,26 @@ static void __exit nsc_ircc_cleanup(void)
241 * Open driver instance 329 * Open driver instance
242 * 330 *
243 */ 331 */
244static int __init nsc_ircc_open(int i, chipio_t *info) 332static int __init nsc_ircc_open(chipio_t *info)
245{ 333{
246 struct net_device *dev; 334 struct net_device *dev;
247 struct nsc_ircc_cb *self; 335 struct nsc_ircc_cb *self;
248 struct pm_dev *pmdev;
249 void *ret; 336 void *ret;
250 int err; 337 int err, chip_index;
251 338
252 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 339 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
253 340
341
342 for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
343 if (!dev_self[chip_index])
344 break;
345 }
346
347 if (chip_index == ARRAY_SIZE(dev_self)) {
348 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__);
349 return -ENOMEM;
350 }
351
254 IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, 352 IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
255 info->cfg_base); 353 info->cfg_base);
256 354
@@ -271,8 +369,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
271 spin_lock_init(&self->lock); 369 spin_lock_init(&self->lock);
272 370
273 /* Need to store self somewhere */ 371 /* Need to store self somewhere */
274 dev_self[i] = self; 372 dev_self[chip_index] = self;
275 self->index = i; 373 self->index = chip_index;
276 374
277 /* Initialize IO */ 375 /* Initialize IO */
278 self->io.cfg_base = info->cfg_base; 376 self->io.cfg_base = info->cfg_base;
@@ -351,7 +449,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
351 449
352 /* Check if user has supplied a valid dongle id or not */ 450 /* Check if user has supplied a valid dongle id or not */
353 if ((dongle_id <= 0) || 451 if ((dongle_id <= 0) ||
354 (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) { 452 (dongle_id >= ARRAY_SIZE(dongle_types))) {
355 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); 453 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
356 454
357 IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, 455 IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
@@ -364,11 +462,18 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
364 self->io.dongle_id = dongle_id; 462 self->io.dongle_id = dongle_id;
365 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); 463 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
366 464
367 pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc); 465 self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
368 if (pmdev) 466 self->index, NULL, 0);
369 pmdev->data = self; 467 if (IS_ERR(self->pldev)) {
468 err = PTR_ERR(self->pldev);
469 goto out5;
470 }
471 platform_set_drvdata(self->pldev, self);
370 472
371 return 0; 473 return chip_index;
474
475 out5:
476 unregister_netdev(dev);
372 out4: 477 out4:
373 dma_free_coherent(NULL, self->tx_buff.truesize, 478 dma_free_coherent(NULL, self->tx_buff.truesize,
374 self->tx_buff.head, self->tx_buff_dma); 479 self->tx_buff.head, self->tx_buff_dma);
@@ -379,7 +484,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
379 release_region(self->io.fir_base, self->io.fir_ext); 484 release_region(self->io.fir_base, self->io.fir_ext);
380 out1: 485 out1:
381 free_netdev(dev); 486 free_netdev(dev);
382 dev_self[i] = NULL; 487 dev_self[chip_index] = NULL;
383 return err; 488 return err;
384} 489}
385 490
@@ -399,6 +504,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
399 504
400 iobase = self->io.fir_base; 505 iobase = self->io.fir_base;
401 506
507 platform_device_unregister(self->pldev);
508
402 /* Remove netdevice */ 509 /* Remove netdevice */
403 unregister_netdev(self->netdev); 510 unregister_netdev(self->netdev);
404 511
@@ -806,6 +913,43 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
806 return 0; 913 return 0;
807} 914}
808 915
916/* PNP probing */
917static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
918{
919 memset(&pnp_info, 0, sizeof(chipio_t));
920 pnp_info.irq = -1;
921 pnp_info.dma = -1;
922 pnp_succeeded = 1;
923
924	/* There doesn't seem to be any way to get the cfg_base.
925 * On my box, cfg_base is in the PnP descriptor of the
926 * motherboard. Oh well... Jean II */
927
928 if (pnp_port_valid(dev, 0) &&
929 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
930 pnp_info.fir_base = pnp_port_start(dev, 0);
931
932 if (pnp_irq_valid(dev, 0) &&
933 !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
934 pnp_info.irq = pnp_irq(dev, 0);
935
936 if (pnp_dma_valid(dev, 0) &&
937 !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
938 pnp_info.dma = pnp_dma(dev, 0);
939
940 IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
941 __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
942
943 if((pnp_info.fir_base == 0) ||
944 (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
945 /* Returning an error will disable the device. Yuck ! */
946 //return -EINVAL;
947 pnp_succeeded = 0;
948 }
949
950 return 0;
951}
952
809/* 953/*
810 * Function nsc_ircc_setup (info) 954 * Function nsc_ircc_setup (info)
811 * 955 *
@@ -2161,45 +2305,83 @@ static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
2161 return &self->stats; 2305 return &self->stats;
2162} 2306}
2163 2307
2164static void nsc_ircc_suspend(struct nsc_ircc_cb *self) 2308static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
2165{ 2309{
2166 IRDA_MESSAGE("%s, Suspending\n", driver_name); 2310 struct nsc_ircc_cb *self = platform_get_drvdata(dev);
2311 int bank;
2312 unsigned long flags;
2313 int iobase = self->io.fir_base;
2167 2314
2168 if (self->io.suspended) 2315 if (self->io.suspended)
2169 return; 2316 return 0;
2170 2317
2171 nsc_ircc_net_close(self->netdev); 2318 IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
2172 2319
2320 rtnl_lock();
2321 if (netif_running(self->netdev)) {
2322 netif_device_detach(self->netdev);
2323 spin_lock_irqsave(&self->lock, flags);
2324 /* Save current bank */
2325 bank = inb(iobase+BSR);
2326
2327 /* Disable interrupts */
2328 switch_bank(iobase, BANK0);
2329 outb(0, iobase+IER);
2330
2331 /* Restore bank register */
2332 outb(bank, iobase+BSR);
2333
2334 spin_unlock_irqrestore(&self->lock, flags);
2335 free_irq(self->io.irq, self->netdev);
2336 disable_dma(self->io.dma);
2337 }
2173 self->io.suspended = 1; 2338 self->io.suspended = 1;
2339 rtnl_unlock();
2340
2341 return 0;
2174} 2342}
2175 2343
2176static void nsc_ircc_wakeup(struct nsc_ircc_cb *self) 2344static int nsc_ircc_resume(struct platform_device *dev)
2177{ 2345{
2346 struct nsc_ircc_cb *self = platform_get_drvdata(dev);
2347 unsigned long flags;
2348
2178 if (!self->io.suspended) 2349 if (!self->io.suspended)
2179 return; 2350 return 0;
2180 2351
2352 IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
2353
2354 rtnl_lock();
2181 nsc_ircc_setup(&self->io); 2355 nsc_ircc_setup(&self->io);
2182 nsc_ircc_net_open(self->netdev); 2356 nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);
2183
2184 IRDA_MESSAGE("%s, Waking up\n", driver_name);
2185 2357
2358 if (netif_running(self->netdev)) {
2359 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
2360 self->netdev->name, self->netdev)) {
2361 IRDA_WARNING("%s, unable to allocate irq=%d\n",
2362 driver_name, self->io.irq);
2363
2364 /*
2365 * Don't fail resume process, just kill this
2366 * network interface
2367 */
2368 unregister_netdevice(self->netdev);
2369 } else {
2370 spin_lock_irqsave(&self->lock, flags);
2371 nsc_ircc_change_speed(self, self->io.speed);
2372 spin_unlock_irqrestore(&self->lock, flags);
2373 netif_device_attach(self->netdev);
2374 }
2375
2376 } else {
2377 spin_lock_irqsave(&self->lock, flags);
2378 nsc_ircc_change_speed(self, 9600);
2379 spin_unlock_irqrestore(&self->lock, flags);
2380 }
2186 self->io.suspended = 0; 2381 self->io.suspended = 0;
2187} 2382 rtnl_unlock();
2188 2383
2189static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data) 2384 return 0;
2190{
2191 struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data;
2192 if (self) {
2193 switch (rqst) {
2194 case PM_SUSPEND:
2195 nsc_ircc_suspend(self);
2196 break;
2197 case PM_RESUME:
2198 nsc_ircc_wakeup(self);
2199 break;
2200 }
2201 }
2202 return 0;
2203} 2385}
2204 2386
2205MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); 2387MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 6edf7e514624..dacf671abcd6 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -269,7 +269,7 @@ struct nsc_ircc_cb {
269 __u32 new_speed; 269 __u32 new_speed;
270 int index; /* Instance index */ 270 int index; /* Instance index */
271 271
272 struct pm_dev *dev; 272 struct platform_device *pldev;
273}; 273};
274 274
275static inline void switch_bank(int iobase, int bank) 275static inline void switch_bank(int iobase, int bank)
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 8d225921ae7b..d7e32d9554fc 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/smp_lock.h> 17#include <linux/smp_lock.h>
18#include <linux/kmod.h> 18#include <linux/kmod.h>
19#include <linux/mutex.h>
19 20
20#include <net/irda/irda.h> 21#include <net/irda/irda.h>
21 22
@@ -28,7 +29,7 @@
28 */ 29 */
29 30
30static LIST_HEAD(dongle_list); /* list of registered dongle drivers */ 31static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
31static DECLARE_MUTEX(dongle_list_lock); /* protects the list */ 32static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
32 33
33int irda_register_dongle(struct dongle_driver *new) 34int irda_register_dongle(struct dongle_driver *new)
34{ 35{
@@ -38,25 +39,25 @@ int irda_register_dongle(struct dongle_driver *new)
38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", 39 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
39 __FUNCTION__, new->driver_name, new->type); 40 __FUNCTION__, new->driver_name, new->type);
40 41
41 down(&dongle_list_lock); 42 mutex_lock(&dongle_list_lock);
42 list_for_each(entry, &dongle_list) { 43 list_for_each(entry, &dongle_list) {
43 drv = list_entry(entry, struct dongle_driver, dongle_list); 44 drv = list_entry(entry, struct dongle_driver, dongle_list);
44 if (new->type == drv->type) { 45 if (new->type == drv->type) {
45 up(&dongle_list_lock); 46 mutex_unlock(&dongle_list_lock);
46 return -EEXIST; 47 return -EEXIST;
47 } 48 }
48 } 49 }
49 list_add(&new->dongle_list, &dongle_list); 50 list_add(&new->dongle_list, &dongle_list);
50 up(&dongle_list_lock); 51 mutex_unlock(&dongle_list_lock);
51 return 0; 52 return 0;
52} 53}
53EXPORT_SYMBOL(irda_register_dongle); 54EXPORT_SYMBOL(irda_register_dongle);
54 55
55int irda_unregister_dongle(struct dongle_driver *drv) 56int irda_unregister_dongle(struct dongle_driver *drv)
56{ 57{
57 down(&dongle_list_lock); 58 mutex_lock(&dongle_list_lock);
58 list_del(&drv->dongle_list); 59 list_del(&drv->dongle_list);
59 up(&dongle_list_lock); 60 mutex_unlock(&dongle_list_lock);
60 return 0; 61 return 0;
61} 62}
62EXPORT_SYMBOL(irda_unregister_dongle); 63EXPORT_SYMBOL(irda_unregister_dongle);
@@ -75,7 +76,7 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
75 return -EBUSY; 76 return -EBUSY;
76 77
77 /* serialize access to the list of registered dongles */ 78 /* serialize access to the list of registered dongles */
78 down(&dongle_list_lock); 79 mutex_lock(&dongle_list_lock);
79 80
80 list_for_each(entry, &dongle_list) { 81 list_for_each(entry, &dongle_list) {
81 drv = list_entry(entry, struct dongle_driver, dongle_list); 82 drv = list_entry(entry, struct dongle_driver, dongle_list);
@@ -109,14 +110,14 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
109 if (!drv->open || (err=drv->open(dev))!=0) 110 if (!drv->open || (err=drv->open(dev))!=0)
110 goto out_reject; /* failed to open driver */ 111 goto out_reject; /* failed to open driver */
111 112
112 up(&dongle_list_lock); 113 mutex_unlock(&dongle_list_lock);
113 return 0; 114 return 0;
114 115
115out_reject: 116out_reject:
116 dev->dongle_drv = NULL; 117 dev->dongle_drv = NULL;
117 module_put(drv->owner); 118 module_put(drv->owner);
118out_unlock: 119out_unlock:
119 up(&dongle_list_lock); 120 mutex_unlock(&dongle_list_lock);
120 return err; 121 return err;
121} 122}
122 123
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
new file mode 100644
index 000000000000..aa1a9b0ed83e
--- /dev/null
+++ b/drivers/net/irda/toim3232-sir.c
@@ -0,0 +1,375 @@
1/*********************************************************************
2 *
3 * Filename: toim3232-sir.c
4 * Version: 1.0
5 * Description: Implementation of dongles based on the Vishay/Temic
6 * TOIM3232 SIR Endec chipset. Currently only the
7 * IRWave IR320ST-2 is tested, although it should work
8 * with any TOIM3232 or TOIM4232 chipset based RS232
9 * dongle with minimal modification.
10 * Based heavily on the Tekram driver (tekram.c),
11 * with thanks to Dag Brattli and Martin Diehl.
12 * Status: Experimental.
13 * Author: David Basden <davidb-irda@rcpt.to>
14 * Created at: Thu Feb 09 23:47:32 2006
15 *
16 * Copyright (c) 2006 David Basden.
17 * Copyright (c) 1998-1999 Dag Brattli,
18 * Copyright (c) 2002 Martin Diehl,
19 * All Rights Reserved.
20 *
21 * This program is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU General Public License as
23 * published by the Free Software Foundation; either version 2 of
24 * the License, or (at your option) any later version.
25 *
26 * Neither Dag Brattli nor University of Tromsø admit liability nor
27 * provide warranty for any of this software. This material is
28 * provided "AS-IS" and at no charge.
29 *
30 ********************************************************************/
31
32/*
33 * This driver has currently only been tested on the IRWave IR320ST-2
34 *
35 * PROTOCOL:
36 *
37 * The protocol for talking to the TOIM3232 is quite easy, and is
38 * designed to interface with RS232 with only level convertors. The
39 * BR/~D line on the chip is brought high to signal 'command mode',
40 * where a command byte is sent to select the baudrate of the RS232
41 * interface and the pulse length of the IRDA output. When BR/~D
42 * is brought low, the dongle then changes to the selected baudrate,
43 * and the RS232 interface is used for data until BR/~D is brought
44 * high again. The initial speed for the TOIMx323 after RESET is
45 * 9600 baud. The baudrate for command-mode is the last selected
46 * baud-rate, or 9600 after a RESET.
47 *
48 * The dongle I have (below) adds some extra hardware on the front end,
 49 * but this is mostly directed towards parasitic power from the RS232
50 * line rather than changing very much about how to communicate with
51 * the TOIM3232.
52 *
53 * The protocol to talk to the TOIM4232 chipset seems to be almost
54 * identical to the TOIM3232 (and the 4232 datasheet is more detailed)
55 * so this code will probably work on that as well, although I haven't
56 * tested it on that hardware.
57 *
58 * Target dongle variations that might be common:
59 *
60 * DTR and RTS function:
61 * The data sheet for the 4232 has a sample implementation that hooks the
62 * DTR and RTS lines to the RESET and BaudRate/~Data lines of the
63 * chip (through line-converters). Given both DTR and RTS would have to
64 * be held low in normal operation, and the TOIMx232 requires +5V to
65 * signal ground, most dongle designers would almost certainly choose
66 * an implementation that kept at least one of DTR or RTS high in
67 * normal operation to provide power to the dongle, but will likely
68 * vary between designs.
69 *
70 * User specified command bits:
71 * There are two user-controllable output lines from the TOIMx232 that
72 * can be set low or high by setting the appropriate bits in the
73 * high-nibble of the command byte (when setting speed and pulse length).
74 * These might be used to switch on and off added hardware or extra
75 * dongle features.
76 *
77 *
78 * Target hardware: IRWave IR320ST-2
79 *
80 * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic
 81 * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver.
82 * It uses a hex inverter and some discrete components to buffer and
83 * line convert the RS232 down to 5V.
84 *
85 * The dongle is powered through a voltage regulator, fed by a large
86 * capacitor. To switch the dongle on, DTR is brought high to charge
87 * the capacitor and drive the voltage regulator. DTR isn't associated
 88 * with any control lines on the TOIM3232. Parasitic power is also taken
89 * from the RTS, TD and RD lines when brought high, but through resistors.
90 * When DTR is low, the circuit might lose power even with RTS high.
91 *
92 * RTS is inverted and attached to the BR/~D input pin. When RTS
93 * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode.
94 * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command
95 * mode'.
96 *
97 * For some unknown reason, the RESET line isn't actually connected
98 * to anything. This means to reset the dongle to get it to a known
99 * state (9600 baud) you must drop DTR and RTS low, wait for the power
100 * capacitor to discharge, and then bring DTR (and RTS for data mode)
101 * high again, and wait for the capacitor to charge, the power supply
102 * to stabilise, and the oscillator clock to stabilise.
103 *
104 * Fortunately, if the current baudrate is known, the chipset can
105 * easily change speed by entering command mode without having to
106 * reset the dongle first.
107 *
108 * Major Components:
109 *
110 * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings
111 * to IRDA pulse timings
112 * - 3.6864MHz crystal to drive TOIM3232 clock oscillator
113 * - DM74lS04M Inverting Hex line buffer for RS232 input buffering
114 * and level conversion
115 * - PJ2951AC 150mA voltage regulator
116 * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver
117 *
118 */
119
120#include <linux/module.h>
121#include <linux/delay.h>
122#include <linux/init.h>
123
124#include <net/irda/irda.h>
125
126#include "sir-dev.h"
127
128static int toim3232delay = 150; /* default is 150 ms */
129module_param(toim3232delay, int, 0);
130MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
131
132#if 0
133static int toim3232flipdtr = 0; /* default is DTR high to reset */
134module_param(toim3232flipdtr, int, 0);
135MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)");
136
137static int toim3232fliprts = 0; /* default is RTS high for baud change */
138module_param(toim3232fliptrs, int, 0);
139MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)");
140#endif
141
142static int toim3232_open(struct sir_dev *);
143static int toim3232_close(struct sir_dev *);
144static int toim3232_change_speed(struct sir_dev *, unsigned);
145static int toim3232_reset(struct sir_dev *);
146
147#define TOIM3232_115200 0x00
148#define TOIM3232_57600 0x01
149#define TOIM3232_38400 0x02
150#define TOIM3232_19200 0x03
151#define TOIM3232_9600 0x06
152#define TOIM3232_2400 0x0A
153
154#define TOIM3232_PW 0x10 /* Pulse select bit */
155
/* Registration record hooking this dongle type into the generic
 * sir-dev dongle framework; matched against IRDA_TOIM3232_DONGLE. */
static struct dongle_driver toim3232 = {
	.owner		= THIS_MODULE,
	.driver_name	= "Vishay TOIM3232",
	.type		= IRDA_TOIM3232_DONGLE,
	.open		= toim3232_open,
	.close		= toim3232_close,
	.reset		= toim3232_reset,
	.set_speed	= toim3232_change_speed,
};
165
166static int __init toim3232_sir_init(void)
167{
168 if (toim3232delay < 1 || toim3232delay > 500)
169 toim3232delay = 200;
170 IRDA_DEBUG(1, "%s - using %d ms delay\n",
171 toim3232.driver_name, toim3232delay);
172 return irda_register_dongle(&toim3232);
173}
174
/* Module exit: unhook the dongle driver from the IrDA SIR core. */
static void __exit toim3232_sir_cleanup(void)
{
	irda_unregister_dongle(&toim3232);
}
179
180static int toim3232_open(struct sir_dev *dev)
181{
182 struct qos_info *qos = &dev->qos;
183
184 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
185
186 /* Pull the lines high to start with.
187 *
188 * For the IR320ST-2, we need to charge the main supply capacitor to
189 * switch the device on. We keep DTR high throughout to do this.
190 * When RTS, TD and RD are high, they will also trickle-charge the
191 * cap. RTS is high for data transmission, and low for baud rate select.
192 * -- DGB
193 */
194 sirdev_set_dtr_rts(dev, TRUE, TRUE);
195
196 /* The TOI3232 supports many speeds between 1200bps and 115000bps.
197 * We really only care about those supported by the IRDA spec, but
198 * 38400 seems to be implemented in many places */
199 qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
200
201 /* From the tekram driver. Not sure what a reasonable value is -- DGB */
202 qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
203 irda_qos_bits_to_value(qos);
204
205 /* irda thread waits 50 msec for power settling */
206
207 return 0;
208}
209
/* Power the dongle down: with DTR and RTS both low the dongle loses
 * its parasitic supply and switches off.  Always returns 0. */
static int toim3232_close(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	/* Power off dongle */
	sirdev_set_dtr_rts(dev, FALSE, FALSE);

	return 0;
}
219
220/*
 221 * Function toim3232_change_speed (dev, speed)
222 *
223 * Set the speed for the TOIM3232 based dongle. Warning, this
224 * function must be called with a process context!
225 *
226 * Algorithm
227 * 1. keep DTR high but clear RTS to bring into baud programming mode
228 * 2. wait at least 7us to enter programming mode
229 * 3. send control word to set baud rate and timing
230 * 4. wait at least 1us
231 * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver)
232 * 6. should take effect immediately (although probably worth waiting)
233 */
234
235#define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
236
237static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
238{
239 unsigned state = dev->fsm.substate;
240 unsigned delay = 0;
241 u8 byte;
242 static int ret = 0;
243
244 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
245
246 switch(state) {
247 case SIRDEV_STATE_DONGLE_SPEED:
248
249 /* Figure out what we are going to send as a control byte */
250 switch (speed) {
251 case 2400:
252 byte = TOIM3232_PW|TOIM3232_2400;
253 break;
254 default:
255 speed = 9600;
256 ret = -EINVAL;
257 /* fall thru */
258 case 9600:
259 byte = TOIM3232_PW|TOIM3232_9600;
260 break;
261 case 19200:
262 byte = TOIM3232_PW|TOIM3232_19200;
263 break;
264 case 38400:
265 byte = TOIM3232_PW|TOIM3232_38400;
266 break;
267 case 57600:
268 byte = TOIM3232_PW|TOIM3232_57600;
269 break;
270 case 115200:
271 byte = TOIM3232_115200;
272 break;
273 }
274
275 /* Set DTR, Clear RTS: Go into baud programming mode */
276 sirdev_set_dtr_rts(dev, TRUE, FALSE);
277
278 /* Wait at least 7us */
279 udelay(14);
280
281 /* Write control byte */
282 sirdev_raw_write(dev, &byte, 1);
283
284 dev->speed = speed;
285
286 state = TOIM3232_STATE_WAIT_SPEED;
287 delay = toim3232delay;
288 break;
289
290 case TOIM3232_STATE_WAIT_SPEED:
291 /* Have transmitted control byte * Wait for 'at least 1us' */
292 udelay(14);
293
294 /* Set DTR, Set RTS: Go into normal data mode */
295 sirdev_set_dtr_rts(dev, TRUE, TRUE);
296
297 /* Wait (TODO: check this is needed) */
298 udelay(50);
299 break;
300
301 default:
302 printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state);
303 ret = -EINVAL;
304 break;
305 }
306
307 dev->fsm.substate = state;
308 return (delay > 0) ? delay : ret;
309}
310
311/*
312 * Function toim3232reset (driver)
313 *
314 * This function resets the toim3232 dongle. Warning, this function
315 * must be called with a process context!!
316 *
317 * What we should do is:
318 * 0. Pull RESET high
319 * 1. Wait for at least 7us
320 * 2. Pull RESET low
321 * 3. Wait for at least 7us
322 * 4. Pull BR/~D high
323 * 5. Wait for at least 7us
324 * 6. Send control byte to set baud rate
325 * 7. Wait at least 1us after stop bit
326 * 8. Pull BR/~D low
327 * 9. Should then be in data mode
328 *
329 * Because the IR320ST-2 doesn't have the RESET line connected for some reason,
330 * we'll have to do something else.
331 *
332 * The default speed after a RESET is 9600, so lets try just bringing it up in
333 * data mode after switching it off, waiting for the supply capacitor to
334 * discharge, and then switch it back on. This isn't actually pulling RESET
335 * high, but it seems to have the same effect.
336 *
337 * This behaviour will probably work on dongles that have the RESET line connected,
 338 * but if not, add a flag for the IR320ST-2, and implement the above-listed proper
339 * behaviour.
340 *
341 * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we
 342 * need to pull RTS low
343 */
344
/* Reset the dongle to its power-up state (9600 baud).
 *
 * The RESET pin is not wired up on the IR320ST-2, so a real reset is
 * impossible; instead we power-cycle the dongle via DTR/RTS and let the
 * supply capacitor discharge, which has the same effect.
 * Must be called with a process context (it sleeps).  Returns 0. */
static int toim3232_reset(struct sir_dev *dev)
{
	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	/* Switch off both DTR and RTS to switch off dongle */
	sirdev_set_dtr_rts(dev, FALSE, FALSE);

	/* Should sleep a while. This might be evil doing it this way.*/
	/* 50 ms gives the supply capacitor time to discharge so the
	 * endec genuinely loses power */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(50));

	/* Set DTR, Set RTS (data mode) */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* Wait at least 10 ms for power to stabilize again */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(10));

	/* Speed should now be 9600 (the TOIMx232 default after power-up) */
	dev->speed = 9600;

	return 0;
}
368
MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>");
MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver");
MODULE_LICENSE("GPL");
/* Lets the sir-dev core autoload this module for dongle type 12 */
MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */

module_init(toim3232_sir_init);
module_exit(toim3232_sir_cleanup);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a9f49f058cfb..97a49e0be76b 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1887,7 +1887,7 @@ static int __init vlsi_mod_init(void)
1887 vlsi_proc_root->owner = THIS_MODULE; 1887 vlsi_proc_root->owner = THIS_MODULE;
1888 } 1888 }
1889 1889
1890 ret = pci_module_init(&vlsi_irda_driver); 1890 ret = pci_register_driver(&vlsi_irda_driver);
1891 1891
1892 if (ret && vlsi_proc_root) 1892 if (ret && vlsi_proc_root)
1893 remove_proc_entry(PROC_DIR, NULL); 1893 remove_proc_entry(PROC_DIR, NULL);
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index d82651a97bae..6f7dce8eba51 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -16,7 +16,7 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h> 19#include <asm/hardware/uengine.h>
20#include <asm/mach-types.h> 20#include <asm/mach-types.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include "ixpdev.h" 22#include "ixpdev.h"
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 09f03f493bea..77f104a005f3 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -16,7 +16,7 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h> 19#include <asm/hardware/uengine.h>
20#include <asm/mach-types.h> 20#include <asm/mach-types.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include "ixp2400_rx.ucode" 22#include "ixp2400_rx.ucode"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 690a1aae0b34..0c13795dca38 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
172 172
173 memset(stats, 0, sizeof(struct net_device_stats)); 173 memset(stats, 0, sizeof(struct net_device_stats));
174 174
175 for (i=0; i < NR_CPUS; i++) { 175 for_each_cpu(i) {
176 struct net_device_stats *lb_stats; 176 struct net_device_stats *lb_stats;
177 177
178 if (!cpu_possible(i))
179 continue;
180 lb_stats = &per_cpu(loopback_stats, i); 178 lb_stats = &per_cpu(loopback_stats, i);
181 stats->rx_bytes += lb_stats->rx_bytes; 179 stats->rx_bytes += lb_stats->rx_bytes;
182 stats->tx_bytes += lb_stats->tx_bytes; 180 stats->tx_bytes += lb_stats->tx_bytes;
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 7754d1974b9e..4262c1da6d4a 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -42,13 +42,23 @@
42#define MAX_DESCS_PER_SKB 1 42#define MAX_DESCS_PER_SKB 1
43#endif 43#endif
44 44
45/*
46 * The MV643XX HW requires 8-byte alignment. However, when I/O
47 * is non-cache-coherent, we need to ensure that the I/O buffers
48 * we use don't share cache lines with other data.
49 */
50#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
51#define ETH_DMA_ALIGN L1_CACHE_BYTES
52#else
53#define ETH_DMA_ALIGN 8
54#endif
55
45#define ETH_VLAN_HLEN 4 56#define ETH_VLAN_HLEN 4
46#define ETH_FCS_LEN 4 57#define ETH_FCS_LEN 4
47#define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */ 58#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
48#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
49#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ 59#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
50 ETH_VLAN_HLEN + ETH_FCS_LEN) 60 ETH_VLAN_HLEN + ETH_FCS_LEN)
51#define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7) 61#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
52 62
53#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ 63#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
54#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ 64#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 7e900572eaf8..9595f74da93f 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,12 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.31c" 25#define DRV_VERSION "1.32"
26#define DRV_RELDATE "01.Nov.2005" 26#define DRV_RELDATE "18.Mar.2006"
27#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
28 28
29static const char * const version = 29static const char *const version =
30DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; 30 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
58 * PCI device identifiers for "new style" Linux PCI Device Drivers 58 * PCI device identifiers for "new style" Linux PCI Device Drivers
59 */ 59 */
60static struct pci_device_id pcnet32_pci_tbl[] = { 60static struct pci_device_id pcnet32_pci_tbl[] = {
61 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 61 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
62 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 62 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
63 /* 63 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
64 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have 64 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
65 * the incorrect vendor id. 65
66 */ 66 /*
67 { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 67 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
68 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 }, 68 * the incorrect vendor id.
69 { 0, } 69 */
70 { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
71 PCI_ANY_ID, PCI_ANY_ID,
72 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
73
74 { } /* terminate list */
70}; 75};
71 76
72MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl); 77MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
73 78
74static int cards_found; 79static int cards_found;
75 80
@@ -77,13 +82,11 @@ static int cards_found;
77 * VLB I/O addresses 82 * VLB I/O addresses
78 */ 83 */
79static unsigned int pcnet32_portlist[] __initdata = 84static unsigned int pcnet32_portlist[] __initdata =
80 { 0x300, 0x320, 0x340, 0x360, 0 }; 85 { 0x300, 0x320, 0x340, 0x360, 0 };
81
82
83 86
84static int pcnet32_debug = 0; 87static int pcnet32_debug = 0;
85static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ 88static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
86static int pcnet32vlb; /* check for VLB cards ? */ 89static int pcnet32vlb; /* check for VLB cards ? */
87 90
88static struct net_device *pcnet32_dev; 91static struct net_device *pcnet32_dev;
89 92
@@ -110,32 +113,34 @@ static int rx_copybreak = 200;
110 * to internal options 113 * to internal options
111 */ 114 */
112static const unsigned char options_mapping[] = { 115static const unsigned char options_mapping[] = {
113 PCNET32_PORT_ASEL, /* 0 Auto-select */ 116 PCNET32_PORT_ASEL, /* 0 Auto-select */
114 PCNET32_PORT_AUI, /* 1 BNC/AUI */ 117 PCNET32_PORT_AUI, /* 1 BNC/AUI */
115 PCNET32_PORT_AUI, /* 2 AUI/BNC */ 118 PCNET32_PORT_AUI, /* 2 AUI/BNC */
116 PCNET32_PORT_ASEL, /* 3 not supported */ 119 PCNET32_PORT_ASEL, /* 3 not supported */
117 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ 120 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
118 PCNET32_PORT_ASEL, /* 5 not supported */ 121 PCNET32_PORT_ASEL, /* 5 not supported */
119 PCNET32_PORT_ASEL, /* 6 not supported */ 122 PCNET32_PORT_ASEL, /* 6 not supported */
120 PCNET32_PORT_ASEL, /* 7 not supported */ 123 PCNET32_PORT_ASEL, /* 7 not supported */
121 PCNET32_PORT_ASEL, /* 8 not supported */ 124 PCNET32_PORT_ASEL, /* 8 not supported */
122 PCNET32_PORT_MII, /* 9 MII 10baseT */ 125 PCNET32_PORT_MII, /* 9 MII 10baseT */
123 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ 126 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
124 PCNET32_PORT_MII, /* 11 MII (autosel) */ 127 PCNET32_PORT_MII, /* 11 MII (autosel) */
125 PCNET32_PORT_10BT, /* 12 10BaseT */ 128 PCNET32_PORT_10BT, /* 12 10BaseT */
126 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ 129 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
127 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */ 130 /* 14 MII 100BaseTx-FD */
128 PCNET32_PORT_ASEL /* 15 not supported */ 131 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
132 PCNET32_PORT_ASEL /* 15 not supported */
129}; 133};
130 134
131static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { 135static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
132 "Loopback test (offline)" 136 "Loopback test (offline)"
133}; 137};
138
134#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) 139#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
135 140
136#define PCNET32_NUM_REGS 168 141#define PCNET32_NUM_REGS 136
137 142
138#define MAX_UNITS 8 /* More are supported, limit only on options */ 143#define MAX_UNITS 8 /* More are supported, limit only on options */
139static int options[MAX_UNITS]; 144static int options[MAX_UNITS];
140static int full_duplex[MAX_UNITS]; 145static int full_duplex[MAX_UNITS];
141static int homepna[MAX_UNITS]; 146static int homepna[MAX_UNITS];
@@ -151,124 +156,6 @@ static int homepna[MAX_UNITS];
151 */ 156 */
152 157
153/* 158/*
154 * History:
155 * v0.01: Initial version
156 * only tested on Alpha Noname Board
157 * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
158 * tested on a ASUS SP3G
159 * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
160 * looks like the 974 doesn't like stopping and restarting in a
161 * short period of time; now we do a reinit of the lance; the
162 * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
163 * and hangs the machine (thanks to Klaus Liedl for debugging)
164 * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
165 * made it standalone (no need for lance.c)
166 * v0.13: added additional PCI detecting for special PCI devices (Compaq)
167 * v0.14: stripped down additional PCI probe (thanks to David C Niemi
168 * and sveneric@xs4all.nl for testing this on their Compaq boxes)
169 * v0.15: added 79C965 (VLB) probe
170 * added interrupt sharing for PCI chips
171 * v0.16: fixed set_multicast_list on Alpha machines
172 * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
173 * v0.19: changed setting of autoselect bit
174 * v0.20: removed additional Compaq PCI probe; there is now a working one
175 * in arch/i386/bios32.c
176 * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
177 * v0.22: added printing of status to ring dump
178 * v0.23: changed enet_statistics to net_devive_stats
179 * v0.90: added multicast filter
180 * added module support
181 * changed irq probe to new style
182 * added PCnetFast chip id
183 * added fix for receive stalls with Intel saturn chipsets
184 * added in-place rx skbs like in the tulip driver
185 * minor cleanups
186 * v0.91: added PCnetFast+ chip id
187 * back port to 2.0.x
188 * v1.00: added some stuff from Donald Becker's 2.0.34 version
189 * added support for byte counters in net_dev_stats
190 * v1.01: do ring dumps, only when debugging the driver
191 * increased the transmit timeout
192 * v1.02: fixed memory leak in pcnet32_init_ring()
193 * v1.10: workaround for stopped transmitter
194 * added port selection for modules
195 * detect special T1/E1 WAN card and setup port selection
196 * v1.11: fixed wrong checking of Tx errors
197 * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
198 * added save original kmalloc addr for freeing (mcr@solidum.com)
199 * added support for PCnetHome chip (joe@MIT.EDU)
200 * rewritten PCI card detection
201 * added dwio mode to get driver working on some PPC machines
202 * v1.21: added mii selection and mii ioctl
203 * v1.22: changed pci scanning code to make PPC people happy
204 * fixed switching to 32bit mode in pcnet32_open() (thanks
205 * to Michael Richard <mcr@solidum.com> for noticing this one)
206 * added sub vendor/device id matching (thanks again to
207 * Michael Richard <mcr@solidum.com>)
208 * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
209 * v1.23 fixed small bug, when manual selecting MII speed/duplex
210 * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
211 * underflows. Added tx_start_pt module parameter. Increased
212 * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
213 * for FAST[+] chipsets. <kaf@fc.hp.com>
214 * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
215 * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
216 * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
217 * <jamey@crl.dec.com>
218 * - Fixed a few bugs, related to running the controller in 32bit mode.
219 * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
220 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
221 * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
222 * v1.27 improved CSR/PROM address detection, lots of cleanups,
223 * new pcnet32vlb module option, HP-PARISC support,
224 * added module parameter descriptions,
225 * initial ethtool support - Helge Deller <deller@gmx.de>
226 * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
227 * use alloc_etherdev and register_netdev
228 * fix pci probe not increment cards_found
229 * FD auto negotiate error workaround for xSeries250
230 * clean up and using new mii module
231 * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
232 * Added timer for cable connection state changes.
233 * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com>
234 * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com>
235 * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl.
236 * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single
237 * length errors, and transmit hangs. Cleans up after errors in open.
238 * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test.
239 * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections.
240 * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical
241 * identification code (blink led's) and register dump.
242 * Don Fry added timer for 971/972 so skbufs don't remain on tx ring
243 * forever.
244 * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt
245 * (ltint) as they added complexity and didn't give good throughput.
246 * v1.30a 22 May 2004 Don Fry limit frames received during interrupt.
247 * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973,
248 * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>.
249 * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart.
250 * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets.
251 * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang.
252 * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI,
253 * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>,
254 * and Brian Murphy <brian@murphy.dk>.
255 * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option
256 * homepna for selecting HomePNA mode for PCNet/Home 79C978.
257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
258 * v1.30i 28 Jun 2004 Don Fry change to use module_param.
259 * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
260 * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
261 * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
262 * to allow loopback test to work unchanged.
263 * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device
264 * if allocation fails
265 * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
266 * Force 100Mbit FD if Auto (ASEL) is selected.
267 * See Bugzilla 2669 and 4551.
268 */
269
270
271/*
272 * Set the number of Tx and Rx buffers, using Log_2(# buffers). 159 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
273 * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. 160 * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
274 * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). 161 * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
@@ -303,42 +190,42 @@ static int homepna[MAX_UNITS];
303 190
304/* The PCNET32 Rx and Tx ring descriptors. */ 191/* The PCNET32 Rx and Tx ring descriptors. */
305struct pcnet32_rx_head { 192struct pcnet32_rx_head {
306 u32 base; 193 u32 base;
307 s16 buf_length; 194 s16 buf_length;
308 s16 status; 195 s16 status;
309 u32 msg_length; 196 u32 msg_length;
310 u32 reserved; 197 u32 reserved;
311}; 198};
312 199
313struct pcnet32_tx_head { 200struct pcnet32_tx_head {
314 u32 base; 201 u32 base;
315 s16 length; 202 s16 length;
316 s16 status; 203 s16 status;
317 u32 misc; 204 u32 misc;
318 u32 reserved; 205 u32 reserved;
319}; 206};
320 207
321/* The PCNET32 32-Bit initialization block, described in databook. */ 208/* The PCNET32 32-Bit initialization block, described in databook. */
322struct pcnet32_init_block { 209struct pcnet32_init_block {
323 u16 mode; 210 u16 mode;
324 u16 tlen_rlen; 211 u16 tlen_rlen;
325 u8 phys_addr[6]; 212 u8 phys_addr[6];
326 u16 reserved; 213 u16 reserved;
327 u32 filter[2]; 214 u32 filter[2];
328 /* Receive and transmit ring base, along with extra bits. */ 215 /* Receive and transmit ring base, along with extra bits. */
329 u32 rx_ring; 216 u32 rx_ring;
330 u32 tx_ring; 217 u32 tx_ring;
331}; 218};
332 219
333/* PCnet32 access functions */ 220/* PCnet32 access functions */
334struct pcnet32_access { 221struct pcnet32_access {
335 u16 (*read_csr)(unsigned long, int); 222 u16 (*read_csr) (unsigned long, int);
336 void (*write_csr)(unsigned long, int, u16); 223 void (*write_csr) (unsigned long, int, u16);
337 u16 (*read_bcr)(unsigned long, int); 224 u16 (*read_bcr) (unsigned long, int);
338 void (*write_bcr)(unsigned long, int, u16); 225 void (*write_bcr) (unsigned long, int, u16);
339 u16 (*read_rap)(unsigned long); 226 u16 (*read_rap) (unsigned long);
340 void (*write_rap)(unsigned long, u16); 227 void (*write_rap) (unsigned long, u16);
341 void (*reset)(unsigned long); 228 void (*reset) (unsigned long);
342}; 229};
343 230
344/* 231/*
@@ -346,760 +233,794 @@ struct pcnet32_access {
346 * so the structure should be allocated using pci_alloc_consistent(). 233 * so the structure should be allocated using pci_alloc_consistent().
347 */ 234 */
348struct pcnet32_private { 235struct pcnet32_private {
349 struct pcnet32_init_block init_block; 236 struct pcnet32_init_block init_block;
350 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ 237 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
351 struct pcnet32_rx_head *rx_ring; 238 struct pcnet32_rx_head *rx_ring;
352 struct pcnet32_tx_head *tx_ring; 239 struct pcnet32_tx_head *tx_ring;
353 dma_addr_t dma_addr; /* DMA address of beginning of this 240 dma_addr_t dma_addr;/* DMA address of beginning of this
354 object, returned by 241 object, returned by pci_alloc_consistent */
355 pci_alloc_consistent */ 242 struct pci_dev *pci_dev;
356 struct pci_dev *pci_dev; /* Pointer to the associated pci device 243 const char *name;
357 structure */ 244 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
358 const char *name; 245 struct sk_buff **tx_skbuff;
359 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 246 struct sk_buff **rx_skbuff;
360 struct sk_buff **tx_skbuff; 247 dma_addr_t *tx_dma_addr;
361 struct sk_buff **rx_skbuff; 248 dma_addr_t *rx_dma_addr;
362 dma_addr_t *tx_dma_addr; 249 struct pcnet32_access a;
363 dma_addr_t *rx_dma_addr; 250 spinlock_t lock; /* Guard lock */
364 struct pcnet32_access a; 251 unsigned int cur_rx, cur_tx; /* The next free ring entry */
365 spinlock_t lock; /* Guard lock */ 252 unsigned int rx_ring_size; /* current rx ring size */
366 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 253 unsigned int tx_ring_size; /* current tx ring size */
367 unsigned int rx_ring_size; /* current rx ring size */ 254 unsigned int rx_mod_mask; /* rx ring modular mask */
368 unsigned int tx_ring_size; /* current tx ring size */ 255 unsigned int tx_mod_mask; /* tx ring modular mask */
369 unsigned int rx_mod_mask; /* rx ring modular mask */ 256 unsigned short rx_len_bits;
370 unsigned int tx_mod_mask; /* tx ring modular mask */ 257 unsigned short tx_len_bits;
371 unsigned short rx_len_bits; 258 dma_addr_t rx_ring_dma_addr;
372 unsigned short tx_len_bits; 259 dma_addr_t tx_ring_dma_addr;
373 dma_addr_t rx_ring_dma_addr; 260 unsigned int dirty_rx, /* ring entries to be freed. */
374 dma_addr_t tx_ring_dma_addr; 261 dirty_tx;
375 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 262
376 struct net_device_stats stats; 263 struct net_device_stats stats;
377 char tx_full; 264 char tx_full;
378 int options; 265 char phycount; /* number of phys found */
379 unsigned int shared_irq:1, /* shared irq possible */ 266 int options;
380 dxsuflo:1, /* disable transmit stop on uflo */ 267 unsigned int shared_irq:1, /* shared irq possible */
381 mii:1; /* mii port available */ 268 dxsuflo:1, /* disable transmit stop on uflo */
382 struct net_device *next; 269 mii:1; /* mii port available */
383 struct mii_if_info mii_if; 270 struct net_device *next;
384 struct timer_list watchdog_timer; 271 struct mii_if_info mii_if;
385 struct timer_list blink_timer; 272 struct timer_list watchdog_timer;
386 u32 msg_enable; /* debug message level */ 273 struct timer_list blink_timer;
274 u32 msg_enable; /* debug message level */
275
276 /* each bit indicates an available PHY */
277 u32 phymask;
387}; 278};
388 279
389static void pcnet32_probe_vlbus(void); 280static void pcnet32_probe_vlbus(void);
390static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 281static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
391static int pcnet32_probe1(unsigned long, int, struct pci_dev *); 282static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
392static int pcnet32_open(struct net_device *); 283static int pcnet32_open(struct net_device *);
393static int pcnet32_init_ring(struct net_device *); 284static int pcnet32_init_ring(struct net_device *);
394static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); 285static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
395static int pcnet32_rx(struct net_device *); 286static int pcnet32_rx(struct net_device *);
396static void pcnet32_tx_timeout (struct net_device *dev); 287static void pcnet32_tx_timeout(struct net_device *dev);
397static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); 288static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
398static int pcnet32_close(struct net_device *); 289static int pcnet32_close(struct net_device *);
399static struct net_device_stats *pcnet32_get_stats(struct net_device *); 290static struct net_device_stats *pcnet32_get_stats(struct net_device *);
400static void pcnet32_load_multicast(struct net_device *dev); 291static void pcnet32_load_multicast(struct net_device *dev);
401static void pcnet32_set_multicast_list(struct net_device *); 292static void pcnet32_set_multicast_list(struct net_device *);
402static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); 293static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
403static void pcnet32_watchdog(struct net_device *); 294static void pcnet32_watchdog(struct net_device *);
404static int mdio_read(struct net_device *dev, int phy_id, int reg_num); 295static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
405static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); 296static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
297 int val);
406static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); 298static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
407static void pcnet32_ethtool_test(struct net_device *dev, 299static void pcnet32_ethtool_test(struct net_device *dev,
408 struct ethtool_test *eth_test, u64 *data); 300 struct ethtool_test *eth_test, u64 * data);
409static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1); 301static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
410static int pcnet32_phys_id(struct net_device *dev, u32 data); 302static int pcnet32_phys_id(struct net_device *dev, u32 data);
411static void pcnet32_led_blink_callback(struct net_device *dev); 303static void pcnet32_led_blink_callback(struct net_device *dev);
412static int pcnet32_get_regs_len(struct net_device *dev); 304static int pcnet32_get_regs_len(struct net_device *dev);
413static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 305static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
414 void *ptr); 306 void *ptr);
415static void pcnet32_purge_tx_ring(struct net_device *dev); 307static void pcnet32_purge_tx_ring(struct net_device *dev);
416static int pcnet32_alloc_ring(struct net_device *dev, char *name); 308static int pcnet32_alloc_ring(struct net_device *dev, char *name);
417static void pcnet32_free_ring(struct net_device *dev); 309static void pcnet32_free_ring(struct net_device *dev);
418 310static void pcnet32_check_media(struct net_device *dev, int verbose);
419 311
420enum pci_flags_bit { 312enum pci_flags_bit {
421 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, 313 PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
422 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, 314 PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
315 0x10 << 2, PCI_ADDR3 = 0x10 << 3,
423}; 316};
424 317
425 318static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
426static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
427{ 319{
428 outw (index, addr+PCNET32_WIO_RAP); 320 outw(index, addr + PCNET32_WIO_RAP);
429 return inw (addr+PCNET32_WIO_RDP); 321 return inw(addr + PCNET32_WIO_RDP);
430} 322}
431 323
432static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val) 324static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
433{ 325{
434 outw (index, addr+PCNET32_WIO_RAP); 326 outw(index, addr + PCNET32_WIO_RAP);
435 outw (val, addr+PCNET32_WIO_RDP); 327 outw(val, addr + PCNET32_WIO_RDP);
436} 328}
437 329
438static u16 pcnet32_wio_read_bcr (unsigned long addr, int index) 330static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
439{ 331{
440 outw (index, addr+PCNET32_WIO_RAP); 332 outw(index, addr + PCNET32_WIO_RAP);
441 return inw (addr+PCNET32_WIO_BDP); 333 return inw(addr + PCNET32_WIO_BDP);
442} 334}
443 335
444static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val) 336static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
445{ 337{
446 outw (index, addr+PCNET32_WIO_RAP); 338 outw(index, addr + PCNET32_WIO_RAP);
447 outw (val, addr+PCNET32_WIO_BDP); 339 outw(val, addr + PCNET32_WIO_BDP);
448} 340}
449 341
450static u16 pcnet32_wio_read_rap (unsigned long addr) 342static u16 pcnet32_wio_read_rap(unsigned long addr)
451{ 343{
452 return inw (addr+PCNET32_WIO_RAP); 344 return inw(addr + PCNET32_WIO_RAP);
453} 345}
454 346
455static void pcnet32_wio_write_rap (unsigned long addr, u16 val) 347static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
456{ 348{
457 outw (val, addr+PCNET32_WIO_RAP); 349 outw(val, addr + PCNET32_WIO_RAP);
458} 350}
459 351
460static void pcnet32_wio_reset (unsigned long addr) 352static void pcnet32_wio_reset(unsigned long addr)
461{ 353{
462 inw (addr+PCNET32_WIO_RESET); 354 inw(addr + PCNET32_WIO_RESET);
463} 355}
464 356
465static int pcnet32_wio_check (unsigned long addr) 357static int pcnet32_wio_check(unsigned long addr)
466{ 358{
467 outw (88, addr+PCNET32_WIO_RAP); 359 outw(88, addr + PCNET32_WIO_RAP);
468 return (inw (addr+PCNET32_WIO_RAP) == 88); 360 return (inw(addr + PCNET32_WIO_RAP) == 88);
469} 361}
470 362
471static struct pcnet32_access pcnet32_wio = { 363static struct pcnet32_access pcnet32_wio = {
472 .read_csr = pcnet32_wio_read_csr, 364 .read_csr = pcnet32_wio_read_csr,
473 .write_csr = pcnet32_wio_write_csr, 365 .write_csr = pcnet32_wio_write_csr,
474 .read_bcr = pcnet32_wio_read_bcr, 366 .read_bcr = pcnet32_wio_read_bcr,
475 .write_bcr = pcnet32_wio_write_bcr, 367 .write_bcr = pcnet32_wio_write_bcr,
476 .read_rap = pcnet32_wio_read_rap, 368 .read_rap = pcnet32_wio_read_rap,
477 .write_rap = pcnet32_wio_write_rap, 369 .write_rap = pcnet32_wio_write_rap,
478 .reset = pcnet32_wio_reset 370 .reset = pcnet32_wio_reset
479}; 371};
480 372
481static u16 pcnet32_dwio_read_csr (unsigned long addr, int index) 373static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
482{ 374{
483 outl (index, addr+PCNET32_DWIO_RAP); 375 outl(index, addr + PCNET32_DWIO_RAP);
484 return (inl (addr+PCNET32_DWIO_RDP) & 0xffff); 376 return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
485} 377}
486 378
487static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val) 379static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
488{ 380{
489 outl (index, addr+PCNET32_DWIO_RAP); 381 outl(index, addr + PCNET32_DWIO_RAP);
490 outl (val, addr+PCNET32_DWIO_RDP); 382 outl(val, addr + PCNET32_DWIO_RDP);
491} 383}
492 384
493static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index) 385static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
494{ 386{
495 outl (index, addr+PCNET32_DWIO_RAP); 387 outl(index, addr + PCNET32_DWIO_RAP);
496 return (inl (addr+PCNET32_DWIO_BDP) & 0xffff); 388 return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
497} 389}
498 390
499static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val) 391static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
500{ 392{
501 outl (index, addr+PCNET32_DWIO_RAP); 393 outl(index, addr + PCNET32_DWIO_RAP);
502 outl (val, addr+PCNET32_DWIO_BDP); 394 outl(val, addr + PCNET32_DWIO_BDP);
503} 395}
504 396
505static u16 pcnet32_dwio_read_rap (unsigned long addr) 397static u16 pcnet32_dwio_read_rap(unsigned long addr)
506{ 398{
507 return (inl (addr+PCNET32_DWIO_RAP) & 0xffff); 399 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
508} 400}
509 401
510static void pcnet32_dwio_write_rap (unsigned long addr, u16 val) 402static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
511{ 403{
512 outl (val, addr+PCNET32_DWIO_RAP); 404 outl(val, addr + PCNET32_DWIO_RAP);
513} 405}
514 406
515static void pcnet32_dwio_reset (unsigned long addr) 407static void pcnet32_dwio_reset(unsigned long addr)
516{ 408{
517 inl (addr+PCNET32_DWIO_RESET); 409 inl(addr + PCNET32_DWIO_RESET);
518} 410}
519 411
520static int pcnet32_dwio_check (unsigned long addr) 412static int pcnet32_dwio_check(unsigned long addr)
521{ 413{
522 outl (88, addr+PCNET32_DWIO_RAP); 414 outl(88, addr + PCNET32_DWIO_RAP);
523 return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88); 415 return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
524} 416}
525 417
526static struct pcnet32_access pcnet32_dwio = { 418static struct pcnet32_access pcnet32_dwio = {
527 .read_csr = pcnet32_dwio_read_csr, 419 .read_csr = pcnet32_dwio_read_csr,
528 .write_csr = pcnet32_dwio_write_csr, 420 .write_csr = pcnet32_dwio_write_csr,
529 .read_bcr = pcnet32_dwio_read_bcr, 421 .read_bcr = pcnet32_dwio_read_bcr,
530 .write_bcr = pcnet32_dwio_write_bcr, 422 .write_bcr = pcnet32_dwio_write_bcr,
531 .read_rap = pcnet32_dwio_read_rap, 423 .read_rap = pcnet32_dwio_read_rap,
532 .write_rap = pcnet32_dwio_write_rap, 424 .write_rap = pcnet32_dwio_write_rap,
533 .reset = pcnet32_dwio_reset 425 .reset = pcnet32_dwio_reset
534}; 426};
535 427
536#ifdef CONFIG_NET_POLL_CONTROLLER 428#ifdef CONFIG_NET_POLL_CONTROLLER
537static void pcnet32_poll_controller(struct net_device *dev) 429static void pcnet32_poll_controller(struct net_device *dev)
538{ 430{
539 disable_irq(dev->irq); 431 disable_irq(dev->irq);
540 pcnet32_interrupt(0, dev, NULL); 432 pcnet32_interrupt(0, dev, NULL);
541 enable_irq(dev->irq); 433 enable_irq(dev->irq);
542} 434}
543#endif 435#endif
544 436
545
546static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 437static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
547{ 438{
548 struct pcnet32_private *lp = dev->priv; 439 struct pcnet32_private *lp = dev->priv;
549 unsigned long flags; 440 unsigned long flags;
550 int r = -EOPNOTSUPP; 441 int r = -EOPNOTSUPP;
551 442
552 if (lp->mii) { 443 if (lp->mii) {
553 spin_lock_irqsave(&lp->lock, flags); 444 spin_lock_irqsave(&lp->lock, flags);
554 mii_ethtool_gset(&lp->mii_if, cmd); 445 mii_ethtool_gset(&lp->mii_if, cmd);
555 spin_unlock_irqrestore(&lp->lock, flags); 446 spin_unlock_irqrestore(&lp->lock, flags);
556 r = 0; 447 r = 0;
557 } 448 }
558 return r; 449 return r;
559} 450}
560 451
561static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 452static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
562{ 453{
563 struct pcnet32_private *lp = dev->priv; 454 struct pcnet32_private *lp = dev->priv;
564 unsigned long flags; 455 unsigned long flags;
565 int r = -EOPNOTSUPP; 456 int r = -EOPNOTSUPP;
566 457
567 if (lp->mii) { 458 if (lp->mii) {
568 spin_lock_irqsave(&lp->lock, flags); 459 spin_lock_irqsave(&lp->lock, flags);
569 r = mii_ethtool_sset(&lp->mii_if, cmd); 460 r = mii_ethtool_sset(&lp->mii_if, cmd);
570 spin_unlock_irqrestore(&lp->lock, flags); 461 spin_unlock_irqrestore(&lp->lock, flags);
571 } 462 }
572 return r; 463 return r;
573} 464}
574 465
575static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 466static void pcnet32_get_drvinfo(struct net_device *dev,
467 struct ethtool_drvinfo *info)
576{ 468{
577 struct pcnet32_private *lp = dev->priv; 469 struct pcnet32_private *lp = dev->priv;
578 470
579 strcpy (info->driver, DRV_NAME); 471 strcpy(info->driver, DRV_NAME);
580 strcpy (info->version, DRV_VERSION); 472 strcpy(info->version, DRV_VERSION);
581 if (lp->pci_dev) 473 if (lp->pci_dev)
582 strcpy (info->bus_info, pci_name(lp->pci_dev)); 474 strcpy(info->bus_info, pci_name(lp->pci_dev));
583 else 475 else
584 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); 476 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
585} 477}
586 478
587static u32 pcnet32_get_link(struct net_device *dev) 479static u32 pcnet32_get_link(struct net_device *dev)
588{ 480{
589 struct pcnet32_private *lp = dev->priv; 481 struct pcnet32_private *lp = dev->priv;
590 unsigned long flags; 482 unsigned long flags;
591 int r; 483 int r;
592
593 spin_lock_irqsave(&lp->lock, flags);
594 if (lp->mii) {
595 r = mii_link_ok(&lp->mii_if);
596 } else {
597 ulong ioaddr = dev->base_addr; /* card base I/O address */
598 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
599 }
600 spin_unlock_irqrestore(&lp->lock, flags);
601 484
602 return r; 485 spin_lock_irqsave(&lp->lock, flags);
486 if (lp->mii) {
487 r = mii_link_ok(&lp->mii_if);
488 } else {
489 ulong ioaddr = dev->base_addr; /* card base I/O address */
490 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
491 }
492 spin_unlock_irqrestore(&lp->lock, flags);
493
494 return r;
603} 495}
604 496
605static u32 pcnet32_get_msglevel(struct net_device *dev) 497static u32 pcnet32_get_msglevel(struct net_device *dev)
606{ 498{
607 struct pcnet32_private *lp = dev->priv; 499 struct pcnet32_private *lp = dev->priv;
608 return lp->msg_enable; 500 return lp->msg_enable;
609} 501}
610 502
611static void pcnet32_set_msglevel(struct net_device *dev, u32 value) 503static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
612{ 504{
613 struct pcnet32_private *lp = dev->priv; 505 struct pcnet32_private *lp = dev->priv;
614 lp->msg_enable = value; 506 lp->msg_enable = value;
615} 507}
616 508
617static int pcnet32_nway_reset(struct net_device *dev) 509static int pcnet32_nway_reset(struct net_device *dev)
618{ 510{
619 struct pcnet32_private *lp = dev->priv; 511 struct pcnet32_private *lp = dev->priv;
620 unsigned long flags; 512 unsigned long flags;
621 int r = -EOPNOTSUPP; 513 int r = -EOPNOTSUPP;
622 514
623 if (lp->mii) { 515 if (lp->mii) {
624 spin_lock_irqsave(&lp->lock, flags); 516 spin_lock_irqsave(&lp->lock, flags);
625 r = mii_nway_restart(&lp->mii_if); 517 r = mii_nway_restart(&lp->mii_if);
626 spin_unlock_irqrestore(&lp->lock, flags); 518 spin_unlock_irqrestore(&lp->lock, flags);
627 } 519 }
628 return r; 520 return r;
629} 521}
630 522
631static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 523static void pcnet32_get_ringparam(struct net_device *dev,
524 struct ethtool_ringparam *ering)
632{ 525{
633 struct pcnet32_private *lp = dev->priv; 526 struct pcnet32_private *lp = dev->priv;
634 527
635 ering->tx_max_pending = TX_MAX_RING_SIZE - 1; 528 ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
636 ering->tx_pending = lp->tx_ring_size - 1; 529 ering->tx_pending = lp->tx_ring_size - 1;
637 ering->rx_max_pending = RX_MAX_RING_SIZE - 1; 530 ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
638 ering->rx_pending = lp->rx_ring_size - 1; 531 ering->rx_pending = lp->rx_ring_size - 1;
639} 532}
640 533
641static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 534static int pcnet32_set_ringparam(struct net_device *dev,
535 struct ethtool_ringparam *ering)
642{ 536{
643 struct pcnet32_private *lp = dev->priv; 537 struct pcnet32_private *lp = dev->priv;
644 unsigned long flags; 538 unsigned long flags;
645 int i; 539 int i;
646 540
647 if (ering->rx_mini_pending || ering->rx_jumbo_pending) 541 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
648 return -EINVAL; 542 return -EINVAL;
649 543
650 if (netif_running(dev)) 544 if (netif_running(dev))
651 pcnet32_close(dev); 545 pcnet32_close(dev);
652 546
653 spin_lock_irqsave(&lp->lock, flags); 547 spin_lock_irqsave(&lp->lock, flags);
654 pcnet32_free_ring(dev);
655 lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
656 lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
657
658 /* set the minimum ring size to 4, to allow the loopback test to work
659 * unchanged.
660 */
661 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
662 if (lp->tx_ring_size <= (1 << i))
663 break;
664 }
665 lp->tx_ring_size = (1 << i);
666 lp->tx_mod_mask = lp->tx_ring_size - 1;
667 lp->tx_len_bits = (i << 12);
668
669 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
670 if (lp->rx_ring_size <= (1 << i))
671 break;
672 }
673 lp->rx_ring_size = (1 << i);
674 lp->rx_mod_mask = lp->rx_ring_size - 1;
675 lp->rx_len_bits = (i << 4);
676
677 if (pcnet32_alloc_ring(dev, dev->name)) {
678 pcnet32_free_ring(dev); 548 pcnet32_free_ring(dev);
679 spin_unlock_irqrestore(&lp->lock, flags); 549 lp->tx_ring_size =
680 return -ENOMEM; 550 min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
681 } 551 lp->rx_ring_size =
552 min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
553
554 /* set the minimum ring size to 4, to allow the loopback test to work
555 * unchanged.
556 */
557 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
558 if (lp->tx_ring_size <= (1 << i))
559 break;
560 }
561 lp->tx_ring_size = (1 << i);
562 lp->tx_mod_mask = lp->tx_ring_size - 1;
563 lp->tx_len_bits = (i << 12);
682 564
683 spin_unlock_irqrestore(&lp->lock, flags); 565 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
566 if (lp->rx_ring_size <= (1 << i))
567 break;
568 }
569 lp->rx_ring_size = (1 << i);
570 lp->rx_mod_mask = lp->rx_ring_size - 1;
571 lp->rx_len_bits = (i << 4);
572
573 if (pcnet32_alloc_ring(dev, dev->name)) {
574 pcnet32_free_ring(dev);
575 spin_unlock_irqrestore(&lp->lock, flags);
576 return -ENOMEM;
577 }
684 578
685 if (pcnet32_debug & NETIF_MSG_DRV) 579 spin_unlock_irqrestore(&lp->lock, flags);
686 printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
687 dev->name, lp->rx_ring_size, lp->tx_ring_size);
688 580
689 if (netif_running(dev)) 581 if (pcnet32_debug & NETIF_MSG_DRV)
690 pcnet32_open(dev); 582 printk(KERN_INFO PFX
583 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
584 lp->rx_ring_size, lp->tx_ring_size);
691 585
692 return 0; 586 if (netif_running(dev))
587 pcnet32_open(dev);
588
589 return 0;
693} 590}
694 591
695static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) 592static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
593 u8 * data)
696{ 594{
697 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); 595 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
698} 596}
699 597
700static int pcnet32_self_test_count(struct net_device *dev) 598static int pcnet32_self_test_count(struct net_device *dev)
701{ 599{
702 return PCNET32_TEST_LEN; 600 return PCNET32_TEST_LEN;
703} 601}
704 602
705static void pcnet32_ethtool_test(struct net_device *dev, 603static void pcnet32_ethtool_test(struct net_device *dev,
706 struct ethtool_test *test, u64 *data) 604 struct ethtool_test *test, u64 * data)
707{ 605{
708 struct pcnet32_private *lp = dev->priv; 606 struct pcnet32_private *lp = dev->priv;
709 int rc; 607 int rc;
710 608
711 if (test->flags == ETH_TEST_FL_OFFLINE) { 609 if (test->flags == ETH_TEST_FL_OFFLINE) {
712 rc = pcnet32_loopback_test(dev, data); 610 rc = pcnet32_loopback_test(dev, data);
713 if (rc) { 611 if (rc) {
714 if (netif_msg_hw(lp)) 612 if (netif_msg_hw(lp))
715 printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name); 613 printk(KERN_DEBUG "%s: Loopback test failed.\n",
716 test->flags |= ETH_TEST_FL_FAILED; 614 dev->name);
615 test->flags |= ETH_TEST_FL_FAILED;
616 } else if (netif_msg_hw(lp))
617 printk(KERN_DEBUG "%s: Loopback test passed.\n",
618 dev->name);
717 } else if (netif_msg_hw(lp)) 619 } else if (netif_msg_hw(lp))
718 printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name); 620 printk(KERN_DEBUG
719 } else if (netif_msg_hw(lp)) 621 "%s: No tests to run (specify 'Offline' on ethtool).",
720 printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name); 622 dev->name);
721} /* end pcnet32_ethtool_test */ 623} /* end pcnet32_ethtool_test */
722 624
723static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) 625static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
724{ 626{
725 struct pcnet32_private *lp = dev->priv; 627 struct pcnet32_private *lp = dev->priv;
726 struct pcnet32_access *a = &lp->a; /* access to registers */ 628 struct pcnet32_access *a = &lp->a; /* access to registers */
727 ulong ioaddr = dev->base_addr; /* card base I/O address */ 629 ulong ioaddr = dev->base_addr; /* card base I/O address */
728 struct sk_buff *skb; /* sk buff */ 630 struct sk_buff *skb; /* sk buff */
729 int x, i; /* counters */ 631 int x, i; /* counters */
730 int numbuffs = 4; /* number of TX/RX buffers and descs */ 632 int numbuffs = 4; /* number of TX/RX buffers and descs */
731 u16 status = 0x8300; /* TX ring status */ 633 u16 status = 0x8300; /* TX ring status */
732 u16 teststatus; /* test of ring status */ 634 u16 teststatus; /* test of ring status */
733 int rc; /* return code */ 635 int rc; /* return code */
734 int size; /* size of packets */ 636 int size; /* size of packets */
735 unsigned char *packet; /* source packet data */ 637 unsigned char *packet; /* source packet data */
736 static const int data_len = 60; /* length of source packets */ 638 static const int data_len = 60; /* length of source packets */
737 unsigned long flags; 639 unsigned long flags;
738 unsigned long ticks; 640 unsigned long ticks;
739 641
740 *data1 = 1; /* status of test, default to fail */ 642 *data1 = 1; /* status of test, default to fail */
741 rc = 1; /* default to fail */ 643 rc = 1; /* default to fail */
742 644
743 if (netif_running(dev)) 645 if (netif_running(dev))
744 pcnet32_close(dev); 646 pcnet32_close(dev);
745 647
746 spin_lock_irqsave(&lp->lock, flags); 648 spin_lock_irqsave(&lp->lock, flags);
747 649
748 /* Reset the PCNET32 */ 650 /* Reset the PCNET32 */
749 lp->a.reset (ioaddr); 651 lp->a.reset(ioaddr);
750 652
751 /* switch pcnet32 to 32bit mode */ 653 /* switch pcnet32 to 32bit mode */
752 lp->a.write_bcr (ioaddr, 20, 2); 654 lp->a.write_bcr(ioaddr, 20, 2);
753 655
754 lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 656 lp->init_block.mode =
755 lp->init_block.filter[0] = 0; 657 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
756 lp->init_block.filter[1] = 0; 658 lp->init_block.filter[0] = 0;
757 659 lp->init_block.filter[1] = 0;
758 /* purge & init rings but don't actually restart */ 660
759 pcnet32_restart(dev, 0x0000); 661 /* purge & init rings but don't actually restart */
760 662 pcnet32_restart(dev, 0x0000);
761 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ 663
762 664 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
763 /* Initialize Transmit buffers. */ 665
764 size = data_len + 15; 666 /* Initialize Transmit buffers. */
765 for (x=0; x<numbuffs; x++) { 667 size = data_len + 15;
766 if (!(skb = dev_alloc_skb(size))) { 668 for (x = 0; x < numbuffs; x++) {
767 if (netif_msg_hw(lp)) 669 if (!(skb = dev_alloc_skb(size))) {
768 printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n", 670 if (netif_msg_hw(lp))
769 dev->name, __LINE__); 671 printk(KERN_DEBUG
770 goto clean_up; 672 "%s: Cannot allocate skb at line: %d!\n",
771 } else { 673 dev->name, __LINE__);
772 packet = skb->data; 674 goto clean_up;
773 skb_put(skb, size); /* create space for data */ 675 } else {
774 lp->tx_skbuff[x] = skb; 676 packet = skb->data;
775 lp->tx_ring[x].length = le16_to_cpu(-skb->len); 677 skb_put(skb, size); /* create space for data */
776 lp->tx_ring[x].misc = 0; 678 lp->tx_skbuff[x] = skb;
777 679 lp->tx_ring[x].length = le16_to_cpu(-skb->len);
778 /* put DA and SA into the skb */ 680 lp->tx_ring[x].misc = 0;
779 for (i=0; i<6; i++) 681
780 *packet++ = dev->dev_addr[i]; 682 /* put DA and SA into the skb */
781 for (i=0; i<6; i++) 683 for (i = 0; i < 6; i++)
782 *packet++ = dev->dev_addr[i]; 684 *packet++ = dev->dev_addr[i];
783 /* type */ 685 for (i = 0; i < 6; i++)
784 *packet++ = 0x08; 686 *packet++ = dev->dev_addr[i];
785 *packet++ = 0x06; 687 /* type */
786 /* packet number */ 688 *packet++ = 0x08;
787 *packet++ = x; 689 *packet++ = 0x06;
788 /* fill packet with data */ 690 /* packet number */
789 for (i=0; i<data_len; i++) 691 *packet++ = x;
790 *packet++ = i; 692 /* fill packet with data */
791 693 for (i = 0; i < data_len; i++)
792 lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, 694 *packet++ = i;
793 skb->len, PCI_DMA_TODEVICE); 695
794 lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]); 696 lp->tx_dma_addr[x] =
795 wmb(); /* Make sure owner changes after all others are visible */ 697 pci_map_single(lp->pci_dev, skb->data, skb->len,
796 lp->tx_ring[x].status = le16_to_cpu(status); 698 PCI_DMA_TODEVICE);
797 } 699 lp->tx_ring[x].base =
798 } 700 (u32) le32_to_cpu(lp->tx_dma_addr[x]);
799 701 wmb(); /* Make sure owner changes after all others are visible */
800 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ 702 lp->tx_ring[x].status = le16_to_cpu(status);
801 x = x | 0x0002; 703 }
802 a->write_bcr(ioaddr, 32, x); 704 }
803 705
804 lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ 706 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
805 707 x = x | 0x0002;
806 teststatus = le16_to_cpu(0x8000); 708 a->write_bcr(ioaddr, 32, x);
807 lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ 709
808 710 lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
809 /* Check status of descriptors */ 711
810 for (x=0; x<numbuffs; x++) { 712 teststatus = le16_to_cpu(0x8000);
811 ticks = 0; 713 lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
812 rmb(); 714
813 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { 715 /* Check status of descriptors */
814 spin_unlock_irqrestore(&lp->lock, flags); 716 for (x = 0; x < numbuffs; x++) {
815 mdelay(1); 717 ticks = 0;
816 spin_lock_irqsave(&lp->lock, flags); 718 rmb();
817 rmb(); 719 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
818 ticks++; 720 spin_unlock_irqrestore(&lp->lock, flags);
819 } 721 mdelay(1);
820 if (ticks == 200) { 722 spin_lock_irqsave(&lp->lock, flags);
821 if (netif_msg_hw(lp)) 723 rmb();
822 printk("%s: Desc %d failed to reset!\n",dev->name,x); 724 ticks++;
823 break; 725 }
824 } 726 if (ticks == 200) {
825 } 727 if (netif_msg_hw(lp))
826 728 printk("%s: Desc %d failed to reset!\n",
827 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ 729 dev->name, x);
828 wmb(); 730 break;
829 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { 731 }
830 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); 732 }
831 733
832 for (x=0; x<numbuffs; x++) { 734 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
833 printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); 735 wmb();
834 skb = lp->rx_skbuff[x]; 736 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
835 for (i=0; i<size; i++) { 737 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
836 printk("%02x ", *(skb->data+i)); 738
837 } 739 for (x = 0; x < numbuffs; x++) {
838 printk("\n"); 740 printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
839 } 741 skb = lp->rx_skbuff[x];
840 } 742 for (i = 0; i < size; i++) {
841 743 printk("%02x ", *(skb->data + i));
842 x = 0; 744 }
843 rc = 0; 745 printk("\n");
844 while (x<numbuffs && !rc) { 746 }
845 skb = lp->rx_skbuff[x]; 747 }
846 packet = lp->tx_skbuff[x]->data; 748
847 for (i=0; i<size; i++) { 749 x = 0;
848 if (*(skb->data+i) != packet[i]) { 750 rc = 0;
849 if (netif_msg_hw(lp)) 751 while (x < numbuffs && !rc) {
850 printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n", 752 skb = lp->rx_skbuff[x];
851 dev->name, i, *(skb->data+i), packet[i]); 753 packet = lp->tx_skbuff[x]->data;
852 rc = 1; 754 for (i = 0; i < size; i++) {
853 break; 755 if (*(skb->data + i) != packet[i]) {
854 } 756 if (netif_msg_hw(lp))
757 printk(KERN_DEBUG
758 "%s: Error in compare! %2x - %02x %02x\n",
759 dev->name, i, *(skb->data + i),
760 packet[i]);
761 rc = 1;
762 break;
763 }
764 }
765 x++;
766 }
767 if (!rc) {
768 *data1 = 0;
855 } 769 }
856 x++;
857 }
858 if (!rc) {
859 *data1 = 0;
860 }
861 770
862clean_up: 771 clean_up:
863 pcnet32_purge_tx_ring(dev); 772 pcnet32_purge_tx_ring(dev);
864 x = a->read_csr(ioaddr, 15) & 0xFFFF; 773 x = a->read_csr(ioaddr, 15) & 0xFFFF;
865 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ 774 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
866 775
867 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ 776 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
868 x = x & ~0x0002; 777 x = x & ~0x0002;
869 a->write_bcr(ioaddr, 32, x); 778 a->write_bcr(ioaddr, 32, x);
870 779
871 spin_unlock_irqrestore(&lp->lock, flags); 780 spin_unlock_irqrestore(&lp->lock, flags);
872 781
873 if (netif_running(dev)) { 782 if (netif_running(dev)) {
874 pcnet32_open(dev); 783 pcnet32_open(dev);
875 } else { 784 } else {
876 lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */ 785 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
877 } 786 }
878 787
879 return(rc); 788 return (rc);
880} /* end pcnet32_loopback_test */ 789} /* end pcnet32_loopback_test */
881 790
882static void pcnet32_led_blink_callback(struct net_device *dev) 791static void pcnet32_led_blink_callback(struct net_device *dev)
883{ 792{
884 struct pcnet32_private *lp = dev->priv; 793 struct pcnet32_private *lp = dev->priv;
885 struct pcnet32_access *a = &lp->a; 794 struct pcnet32_access *a = &lp->a;
886 ulong ioaddr = dev->base_addr; 795 ulong ioaddr = dev->base_addr;
887 unsigned long flags; 796 unsigned long flags;
888 int i; 797 int i;
889 798
890 spin_lock_irqsave(&lp->lock, flags); 799 spin_lock_irqsave(&lp->lock, flags);
891 for (i=4; i<8; i++) { 800 for (i = 4; i < 8; i++) {
892 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); 801 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
893 } 802 }
894 spin_unlock_irqrestore(&lp->lock, flags); 803 spin_unlock_irqrestore(&lp->lock, flags);
895 804
896 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); 805 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
897} 806}
898 807
899static int pcnet32_phys_id(struct net_device *dev, u32 data) 808static int pcnet32_phys_id(struct net_device *dev, u32 data)
900{ 809{
901 struct pcnet32_private *lp = dev->priv; 810 struct pcnet32_private *lp = dev->priv;
902 struct pcnet32_access *a = &lp->a; 811 struct pcnet32_access *a = &lp->a;
903 ulong ioaddr = dev->base_addr; 812 ulong ioaddr = dev->base_addr;
904 unsigned long flags; 813 unsigned long flags;
905 int i, regs[4]; 814 int i, regs[4];
906 815
907 if (!lp->blink_timer.function) { 816 if (!lp->blink_timer.function) {
908 init_timer(&lp->blink_timer); 817 init_timer(&lp->blink_timer);
909 lp->blink_timer.function = (void *) pcnet32_led_blink_callback; 818 lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
910 lp->blink_timer.data = (unsigned long) dev; 819 lp->blink_timer.data = (unsigned long)dev;
911 } 820 }
912 821
913 /* Save the current value of the bcrs */ 822 /* Save the current value of the bcrs */
914 spin_lock_irqsave(&lp->lock, flags); 823 spin_lock_irqsave(&lp->lock, flags);
915 for (i=4; i<8; i++) { 824 for (i = 4; i < 8; i++) {
916 regs[i-4] = a->read_bcr(ioaddr, i); 825 regs[i - 4] = a->read_bcr(ioaddr, i);
917 } 826 }
918 spin_unlock_irqrestore(&lp->lock, flags); 827 spin_unlock_irqrestore(&lp->lock, flags);
919 828
920 mod_timer(&lp->blink_timer, jiffies); 829 mod_timer(&lp->blink_timer, jiffies);
921 set_current_state(TASK_INTERRUPTIBLE); 830 set_current_state(TASK_INTERRUPTIBLE);
922 831
923 if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))) 832 if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
924 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 833 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
925 834
926 msleep_interruptible(data * 1000); 835 msleep_interruptible(data * 1000);
927 del_timer_sync(&lp->blink_timer); 836 del_timer_sync(&lp->blink_timer);
928 837
929 /* Restore the original value of the bcrs */ 838 /* Restore the original value of the bcrs */
930 spin_lock_irqsave(&lp->lock, flags); 839 spin_lock_irqsave(&lp->lock, flags);
931 for (i=4; i<8; i++) { 840 for (i = 4; i < 8; i++) {
932 a->write_bcr(ioaddr, i, regs[i-4]); 841 a->write_bcr(ioaddr, i, regs[i - 4]);
933 } 842 }
934 spin_unlock_irqrestore(&lp->lock, flags); 843 spin_unlock_irqrestore(&lp->lock, flags);
935 844
936 return 0; 845 return 0;
937} 846}
938 847
848#define PCNET32_REGS_PER_PHY 32
849#define PCNET32_MAX_PHYS 32
939static int pcnet32_get_regs_len(struct net_device *dev) 850static int pcnet32_get_regs_len(struct net_device *dev)
940{ 851{
941 return(PCNET32_NUM_REGS * sizeof(u16)); 852 struct pcnet32_private *lp = dev->priv;
853 int j = lp->phycount * PCNET32_REGS_PER_PHY;
854
855 return ((PCNET32_NUM_REGS + j) * sizeof(u16));
942} 856}
943 857
944static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 858static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
945 void *ptr) 859 void *ptr)
946{ 860{
947 int i, csr0; 861 int i, csr0;
948 u16 *buff = ptr; 862 u16 *buff = ptr;
949 struct pcnet32_private *lp = dev->priv; 863 struct pcnet32_private *lp = dev->priv;
950 struct pcnet32_access *a = &lp->a; 864 struct pcnet32_access *a = &lp->a;
951 ulong ioaddr = dev->base_addr; 865 ulong ioaddr = dev->base_addr;
952 int ticks; 866 int ticks;
953 unsigned long flags; 867 unsigned long flags;
954
955 spin_lock_irqsave(&lp->lock, flags);
956
957 csr0 = a->read_csr(ioaddr, 0);
958 if (!(csr0 & 0x0004)) { /* If not stopped */
959 /* set SUSPEND (SPND) - CSR5 bit 0 */
960 a->write_csr(ioaddr, 5, 0x0001);
961
962 /* poll waiting for bit to be set */
963 ticks = 0;
964 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
965 spin_unlock_irqrestore(&lp->lock, flags);
966 mdelay(1);
967 spin_lock_irqsave(&lp->lock, flags);
968 ticks++;
969 if (ticks > 200) {
970 if (netif_msg_hw(lp))
971 printk(KERN_DEBUG "%s: Error getting into suspend!\n",
972 dev->name);
973 break;
974 }
975 }
976 }
977 868
978 /* read address PROM */ 869 spin_lock_irqsave(&lp->lock, flags);
979 for (i=0; i<16; i += 2)
980 *buff++ = inw(ioaddr + i);
981 870
982 /* read control and status registers */ 871 csr0 = a->read_csr(ioaddr, 0);
983 for (i=0; i<90; i++) { 872 if (!(csr0 & 0x0004)) { /* If not stopped */
984 *buff++ = a->read_csr(ioaddr, i); 873 /* set SUSPEND (SPND) - CSR5 bit 0 */
985 } 874 a->write_csr(ioaddr, 5, 0x0001);
875
876 /* poll waiting for bit to be set */
877 ticks = 0;
878 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
879 spin_unlock_irqrestore(&lp->lock, flags);
880 mdelay(1);
881 spin_lock_irqsave(&lp->lock, flags);
882 ticks++;
883 if (ticks > 200) {
884 if (netif_msg_hw(lp))
885 printk(KERN_DEBUG
886 "%s: Error getting into suspend!\n",
887 dev->name);
888 break;
889 }
890 }
891 }
986 892
987 *buff++ = a->read_csr(ioaddr, 112); 893 /* read address PROM */
988 *buff++ = a->read_csr(ioaddr, 114); 894 for (i = 0; i < 16; i += 2)
895 *buff++ = inw(ioaddr + i);
989 896
990 /* read bus configuration registers */ 897 /* read control and status registers */
991 for (i=0; i<30; i++) { 898 for (i = 0; i < 90; i++) {
992 *buff++ = a->read_bcr(ioaddr, i); 899 *buff++ = a->read_csr(ioaddr, i);
993 } 900 }
994 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ 901
995 for (i=31; i<36; i++) { 902 *buff++ = a->read_csr(ioaddr, 112);
996 *buff++ = a->read_bcr(ioaddr, i); 903 *buff++ = a->read_csr(ioaddr, 114);
997 }
998 904
999 /* read mii phy registers */ 905 /* read bus configuration registers */
1000 if (lp->mii) { 906 for (i = 0; i < 30; i++) {
1001 for (i=0; i<32; i++) { 907 *buff++ = a->read_bcr(ioaddr, i);
1002 lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i); 908 }
1003 *buff++ = lp->a.read_bcr(ioaddr, 34); 909 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
910 for (i = 31; i < 36; i++) {
911 *buff++ = a->read_bcr(ioaddr, i);
1004 } 912 }
1005 }
1006 913
1007 if (!(csr0 & 0x0004)) { /* If not stopped */ 914 /* read mii phy registers */
1008 /* clear SUSPEND (SPND) - CSR5 bit 0 */ 915 if (lp->mii) {
1009 a->write_csr(ioaddr, 5, 0x0000); 916 int j;
1010 } 917 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
918 if (lp->phymask & (1 << j)) {
919 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
920 lp->a.write_bcr(ioaddr, 33,
921 (j << 5) | i);
922 *buff++ = lp->a.read_bcr(ioaddr, 34);
923 }
924 }
925 }
926 }
1011 927
1012 i = buff - (u16 *)ptr; 928 if (!(csr0 & 0x0004)) { /* If not stopped */
1013 for (; i < PCNET32_NUM_REGS; i++) 929 /* clear SUSPEND (SPND) - CSR5 bit 0 */
1014 *buff++ = 0; 930 a->write_csr(ioaddr, 5, 0x0000);
931 }
1015 932
1016 spin_unlock_irqrestore(&lp->lock, flags); 933 spin_unlock_irqrestore(&lp->lock, flags);
1017} 934}
1018 935
1019static struct ethtool_ops pcnet32_ethtool_ops = { 936static struct ethtool_ops pcnet32_ethtool_ops = {
1020 .get_settings = pcnet32_get_settings, 937 .get_settings = pcnet32_get_settings,
1021 .set_settings = pcnet32_set_settings, 938 .set_settings = pcnet32_set_settings,
1022 .get_drvinfo = pcnet32_get_drvinfo, 939 .get_drvinfo = pcnet32_get_drvinfo,
1023 .get_msglevel = pcnet32_get_msglevel, 940 .get_msglevel = pcnet32_get_msglevel,
1024 .set_msglevel = pcnet32_set_msglevel, 941 .set_msglevel = pcnet32_set_msglevel,
1025 .nway_reset = pcnet32_nway_reset, 942 .nway_reset = pcnet32_nway_reset,
1026 .get_link = pcnet32_get_link, 943 .get_link = pcnet32_get_link,
1027 .get_ringparam = pcnet32_get_ringparam, 944 .get_ringparam = pcnet32_get_ringparam,
1028 .set_ringparam = pcnet32_set_ringparam, 945 .set_ringparam = pcnet32_set_ringparam,
1029 .get_tx_csum = ethtool_op_get_tx_csum, 946 .get_tx_csum = ethtool_op_get_tx_csum,
1030 .get_sg = ethtool_op_get_sg, 947 .get_sg = ethtool_op_get_sg,
1031 .get_tso = ethtool_op_get_tso, 948 .get_tso = ethtool_op_get_tso,
1032 .get_strings = pcnet32_get_strings, 949 .get_strings = pcnet32_get_strings,
1033 .self_test_count = pcnet32_self_test_count, 950 .self_test_count = pcnet32_self_test_count,
1034 .self_test = pcnet32_ethtool_test, 951 .self_test = pcnet32_ethtool_test,
1035 .phys_id = pcnet32_phys_id, 952 .phys_id = pcnet32_phys_id,
1036 .get_regs_len = pcnet32_get_regs_len, 953 .get_regs_len = pcnet32_get_regs_len,
1037 .get_regs = pcnet32_get_regs, 954 .get_regs = pcnet32_get_regs,
1038 .get_perm_addr = ethtool_op_get_perm_addr, 955 .get_perm_addr = ethtool_op_get_perm_addr,
1039}; 956};
1040 957
1041/* only probes for non-PCI devices, the rest are handled by 958/* only probes for non-PCI devices, the rest are handled by
1042 * pci_register_driver via pcnet32_probe_pci */ 959 * pci_register_driver via pcnet32_probe_pci */
1043 960
1044static void __devinit 961static void __devinit pcnet32_probe_vlbus(void)
1045pcnet32_probe_vlbus(void)
1046{ 962{
1047 unsigned int *port, ioaddr; 963 unsigned int *port, ioaddr;
1048 964
1049 /* search for PCnet32 VLB cards at known addresses */ 965 /* search for PCnet32 VLB cards at known addresses */
1050 for (port = pcnet32_portlist; (ioaddr = *port); port++) { 966 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1051 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { 967 if (request_region
1052 /* check if there is really a pcnet chip on that ioaddr */ 968 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1053 if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) { 969 /* check if there is really a pcnet chip on that ioaddr */
1054 pcnet32_probe1(ioaddr, 0, NULL); 970 if ((inb(ioaddr + 14) == 0x57)
1055 } else { 971 && (inb(ioaddr + 15) == 0x57)) {
1056 release_region(ioaddr, PCNET32_TOTAL_SIZE); 972 pcnet32_probe1(ioaddr, 0, NULL);
1057 } 973 } else {
1058 } 974 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1059 } 975 }
976 }
977 }
1060} 978}
1061 979
1062
1063static int __devinit 980static int __devinit
1064pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) 981pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1065{ 982{
1066 unsigned long ioaddr; 983 unsigned long ioaddr;
1067 int err; 984 int err;
1068 985
1069 err = pci_enable_device(pdev); 986 err = pci_enable_device(pdev);
1070 if (err < 0) { 987 if (err < 0) {
1071 if (pcnet32_debug & NETIF_MSG_PROBE) 988 if (pcnet32_debug & NETIF_MSG_PROBE)
1072 printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); 989 printk(KERN_ERR PFX
1073 return err; 990 "failed to enable device -- err=%d\n", err);
1074 } 991 return err;
1075 pci_set_master(pdev); 992 }
993 pci_set_master(pdev);
994
995 ioaddr = pci_resource_start(pdev, 0);
996 if (!ioaddr) {
997 if (pcnet32_debug & NETIF_MSG_PROBE)
998 printk(KERN_ERR PFX
999 "card has no PCI IO resources, aborting\n");
1000 return -ENODEV;
1001 }
1076 1002
1077 ioaddr = pci_resource_start (pdev, 0); 1003 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1078 if (!ioaddr) { 1004 if (pcnet32_debug & NETIF_MSG_PROBE)
1079 if (pcnet32_debug & NETIF_MSG_PROBE) 1005 printk(KERN_ERR PFX
1080 printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n"); 1006 "architecture does not support 32bit PCI busmaster DMA\n");
1081 return -ENODEV; 1007 return -ENODEV;
1082 } 1008 }
1009 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
1010 NULL) {
1011 if (pcnet32_debug & NETIF_MSG_PROBE)
1012 printk(KERN_ERR PFX
1013 "io address range already allocated\n");
1014 return -EBUSY;
1015 }
1083 1016
1084 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { 1017 err = pcnet32_probe1(ioaddr, 1, pdev);
1085 if (pcnet32_debug & NETIF_MSG_PROBE) 1018 if (err < 0) {
1086 printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n"); 1019 pci_disable_device(pdev);
1087 return -ENODEV; 1020 }
1088 } 1021 return err;
1089 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) {
1090 if (pcnet32_debug & NETIF_MSG_PROBE)
1091 printk(KERN_ERR PFX "io address range already allocated\n");
1092 return -EBUSY;
1093 }
1094
1095 err = pcnet32_probe1(ioaddr, 1, pdev);
1096 if (err < 0) {
1097 pci_disable_device(pdev);
1098 }
1099 return err;
1100} 1022}
1101 1023
1102
1103/* pcnet32_probe1 1024/* pcnet32_probe1
1104 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. 1025 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
1105 * pdev will be NULL when called from pcnet32_probe_vlbus. 1026 * pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1107static int __devinit 1028static int __devinit
1108pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) 1029pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1109{ 1030{
1110 struct pcnet32_private *lp; 1031 struct pcnet32_private *lp;
1111 dma_addr_t lp_dma_addr; 1032 dma_addr_t lp_dma_addr;
1112 int i, media; 1033 int i, media;
1113 int fdx, mii, fset, dxsuflo; 1034 int fdx, mii, fset, dxsuflo;
1114 int chip_version; 1035 int chip_version;
1115 char *chipname; 1036 char *chipname;
1116 struct net_device *dev; 1037 struct net_device *dev;
1117 struct pcnet32_access *a = NULL; 1038 struct pcnet32_access *a = NULL;
1118 u8 promaddr[6]; 1039 u8 promaddr[6];
1119 int ret = -ENODEV; 1040 int ret = -ENODEV;
1120 1041
1121 /* reset the chip */ 1042 /* reset the chip */
1122 pcnet32_wio_reset(ioaddr); 1043 pcnet32_wio_reset(ioaddr);
1123 1044
1124 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ 1045 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1125 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { 1046 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1126 a = &pcnet32_wio; 1047 a = &pcnet32_wio;
1127 } else { 1048 } else {
1128 pcnet32_dwio_reset(ioaddr); 1049 pcnet32_dwio_reset(ioaddr);
1129 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { 1050 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1130 a = &pcnet32_dwio; 1051 && pcnet32_dwio_check(ioaddr)) {
1131 } else 1052 a = &pcnet32_dwio;
1132 goto err_release_region; 1053 } else
1133 } 1054 goto err_release_region;
1134 1055 }
1135 chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16); 1056
1136 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) 1057 chip_version =
1137 printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version); 1058 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1138 if ((chip_version & 0xfff) != 0x003) { 1059 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1139 if (pcnet32_debug & NETIF_MSG_PROBE) 1060 printk(KERN_INFO " PCnet chip version is %#x.\n",
1140 printk(KERN_INFO PFX "Unsupported chip version.\n"); 1061 chip_version);
1141 goto err_release_region; 1062 if ((chip_version & 0xfff) != 0x003) {
1142 } 1063 if (pcnet32_debug & NETIF_MSG_PROBE)
1143 1064 printk(KERN_INFO PFX "Unsupported chip version.\n");
1144 /* initialize variables */ 1065 goto err_release_region;
1145 fdx = mii = fset = dxsuflo = 0; 1066 }
1146 chip_version = (chip_version >> 12) & 0xffff; 1067
1147 1068 /* initialize variables */
1148 switch (chip_version) { 1069 fdx = mii = fset = dxsuflo = 0;
1149 case 0x2420: 1070 chip_version = (chip_version >> 12) & 0xffff;
1150 chipname = "PCnet/PCI 79C970"; /* PCI */ 1071
1151 break; 1072 switch (chip_version) {
1152 case 0x2430: 1073 case 0x2420:
1153 if (shared) 1074 chipname = "PCnet/PCI 79C970"; /* PCI */
1154 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ 1075 break;
1155 else 1076 case 0x2430:
1156 chipname = "PCnet/32 79C965"; /* 486/VL bus */ 1077 if (shared)
1157 break; 1078 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1158 case 0x2621: 1079 else
1159 chipname = "PCnet/PCI II 79C970A"; /* PCI */ 1080 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1160 fdx = 1; 1081 break;
1161 break; 1082 case 0x2621:
1162 case 0x2623: 1083 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1163 chipname = "PCnet/FAST 79C971"; /* PCI */ 1084 fdx = 1;
1164 fdx = 1; mii = 1; fset = 1; 1085 break;
1165 break; 1086 case 0x2623:
1166 case 0x2624: 1087 chipname = "PCnet/FAST 79C971"; /* PCI */
1167 chipname = "PCnet/FAST+ 79C972"; /* PCI */ 1088 fdx = 1;
1168 fdx = 1; mii = 1; fset = 1; 1089 mii = 1;
1169 break; 1090 fset = 1;
1170 case 0x2625: 1091 break;
1171 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1092 case 0x2624:
1172 fdx = 1; mii = 1; 1093 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1173 break; 1094 fdx = 1;
1174 case 0x2626: 1095 mii = 1;
1175 chipname = "PCnet/Home 79C978"; /* PCI */ 1096 fset = 1;
1176 fdx = 1; 1097 break;
1098 case 0x2625:
1099 chipname = "PCnet/FAST III 79C973"; /* PCI */
1100 fdx = 1;
1101 mii = 1;
1102 break;
1103 case 0x2626:
1104 chipname = "PCnet/Home 79C978"; /* PCI */
1105 fdx = 1;
1106 /*
1107 * This is based on specs published at www.amd.com. This section
1108 * assumes that a card with a 79C978 wants to go into standard
1109 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1110 * and the module option homepna=1 can select this instead.
1111 */
1112 media = a->read_bcr(ioaddr, 49);
1113 media &= ~3; /* default to 10Mb ethernet */
1114 if (cards_found < MAX_UNITS && homepna[cards_found])
1115 media |= 1; /* switch to home wiring mode */
1116 if (pcnet32_debug & NETIF_MSG_PROBE)
1117 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
1118 (media & 1) ? "1" : "10");
1119 a->write_bcr(ioaddr, 49, media);
1120 break;
1121 case 0x2627:
1122 chipname = "PCnet/FAST III 79C975"; /* PCI */
1123 fdx = 1;
1124 mii = 1;
1125 break;
1126 case 0x2628:
1127 chipname = "PCnet/PRO 79C976";
1128 fdx = 1;
1129 mii = 1;
1130 break;
1131 default:
1132 if (pcnet32_debug & NETIF_MSG_PROBE)
1133 printk(KERN_INFO PFX
1134 "PCnet version %#x, no PCnet32 chip.\n",
1135 chip_version);
1136 goto err_release_region;
1137 }
1138
1177 /* 1139 /*
1178 * This is based on specs published at www.amd.com. This section 1140 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1179 * assumes that a card with a 79C978 wants to go into standard 1141 * starting until the packet is loaded. Strike one for reliability, lose
1180 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, 1142 * one for latency - although on PCI this isnt a big loss. Older chips
1181 * and the module option homepna=1 can select this instead. 1143 * have FIFO's smaller than a packet, so you can't do this.
1144 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1182 */ 1145 */
1183 media = a->read_bcr(ioaddr, 49); 1146
1184 media &= ~3; /* default to 10Mb ethernet */ 1147 if (fset) {
1185 if (cards_found < MAX_UNITS && homepna[cards_found]) 1148 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1186 media |= 1; /* switch to home wiring mode */ 1149 a->write_csr(ioaddr, 80,
1187 if (pcnet32_debug & NETIF_MSG_PROBE) 1150 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1188 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", 1151 dxsuflo = 1;
1189 (media & 1) ? "1" : "10"); 1152 }
1190 a->write_bcr(ioaddr, 49, media); 1153
1191 break; 1154 dev = alloc_etherdev(0);
1192 case 0x2627: 1155 if (!dev) {
1193 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1156 if (pcnet32_debug & NETIF_MSG_PROBE)
1194 fdx = 1; mii = 1; 1157 printk(KERN_ERR PFX "Memory allocation failed.\n");
1195 break; 1158 ret = -ENOMEM;
1196 case 0x2628: 1159 goto err_release_region;
1197 chipname = "PCnet/PRO 79C976"; 1160 }
1198 fdx = 1; mii = 1; 1161 SET_NETDEV_DEV(dev, &pdev->dev);
1199 break; 1162
1200 default:
1201 if (pcnet32_debug & NETIF_MSG_PROBE)
1202 printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
1203 chip_version);
1204 goto err_release_region;
1205 }
1206
1207 /*
1208 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1209 * starting until the packet is loaded. Strike one for reliability, lose
1210 * one for latency - although on PCI this isnt a big loss. Older chips
1211 * have FIFO's smaller than a packet, so you can't do this.
1212 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1213 */
1214
1215 if (fset) {
1216 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1217 a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1218 dxsuflo = 1;
1219 }
1220
1221 dev = alloc_etherdev(0);
1222 if (!dev) {
1223 if (pcnet32_debug & NETIF_MSG_PROBE) 1163 if (pcnet32_debug & NETIF_MSG_PROBE)
1224 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1164 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
1225 ret = -ENOMEM; 1165
1226 goto err_release_region; 1166 /* In most chips, after a chip reset, the ethernet address is read from the
1227 } 1167 * station address PROM at the base address and programmed into the
1228 SET_NETDEV_DEV(dev, &pdev->dev); 1168 * "Physical Address Registers" CSR12-14.
1229 1169 * As a precautionary measure, we read the PROM values and complain if
1230 if (pcnet32_debug & NETIF_MSG_PROBE) 1170 * they disagree with the CSRs. Either way, we use the CSR values, and
1231 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); 1171 * double check that they are valid.
1232 1172 */
1233 /* In most chips, after a chip reset, the ethernet address is read from the 1173 for (i = 0; i < 3; i++) {
1234 * station address PROM at the base address and programmed into the 1174 unsigned int val;
1235 * "Physical Address Registers" CSR12-14. 1175 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1236 * As a precautionary measure, we read the PROM values and complain if 1176 /* There may be endianness issues here. */
1237 * they disagree with the CSRs. Either way, we use the CSR values, and 1177 dev->dev_addr[2 * i] = val & 0x0ff;
1238 * double check that they are valid. 1178 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1239 */ 1179 }
1240 for (i = 0; i < 3; i++) { 1180
1241 unsigned int val; 1181 /* read PROM address and compare with CSR address */
1242 val = a->read_csr(ioaddr, i+12) & 0x0ffff;
1243 /* There may be endianness issues here. */
1244 dev->dev_addr[2*i] = val & 0x0ff;
1245 dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
1246 }
1247
1248 /* read PROM address and compare with CSR address */
1249 for (i = 0; i < 6; i++)
1250 promaddr[i] = inb(ioaddr + i);
1251
1252 if (memcmp(promaddr, dev->dev_addr, 6)
1253 || !is_valid_ether_addr(dev->dev_addr)) {
1254 if (is_valid_ether_addr(promaddr)) {
1255 if (pcnet32_debug & NETIF_MSG_PROBE) {
1256 printk(" warning: CSR address invalid,\n");
1257 printk(KERN_INFO " using instead PROM address of");
1258 }
1259 memcpy(dev->dev_addr, promaddr, 6);
1260 }
1261 }
1262 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1263
1264 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1265 if (!is_valid_ether_addr(dev->perm_addr))
1266 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1267
1268 if (pcnet32_debug & NETIF_MSG_PROBE) {
1269 for (i = 0; i < 6; i++) 1182 for (i = 0; i < 6; i++)
1270 printk(" %2.2x", dev->dev_addr[i]); 1183 promaddr[i] = inb(ioaddr + i);
1271 1184
1272 /* Version 0x2623 and 0x2624 */ 1185 if (memcmp(promaddr, dev->dev_addr, 6)
1273 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1186 || !is_valid_ether_addr(dev->dev_addr)) {
1274 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ 1187 if (is_valid_ether_addr(promaddr)) {
1275 printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i); 1188 if (pcnet32_debug & NETIF_MSG_PROBE) {
1276 switch(i>>10) { 1189 printk(" warning: CSR address invalid,\n");
1277 case 0: printk(" 20 bytes,"); break; 1190 printk(KERN_INFO
1278 case 1: printk(" 64 bytes,"); break; 1191 " using instead PROM address of");
1279 case 2: printk(" 128 bytes,"); break; 1192 }
1280 case 3: printk("~220 bytes,"); break; 1193 memcpy(dev->dev_addr, promaddr, 6);
1281 } 1194 }
1282 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ 1195 }
1283 printk(" BCR18(%x):",i&0xffff); 1196 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1284 if (i & (1<<5)) printk("BurstWrEn "); 1197
1285 if (i & (1<<6)) printk("BurstRdEn "); 1198 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1286 if (i & (1<<7)) printk("DWordIO "); 1199 if (!is_valid_ether_addr(dev->perm_addr))
1287 if (i & (1<<11)) printk("NoUFlow "); 1200 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1288 i = a->read_bcr(ioaddr, 25); 1201
1289 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8); 1202 if (pcnet32_debug & NETIF_MSG_PROBE) {
1290 i = a->read_bcr(ioaddr, 26); 1203 for (i = 0; i < 6; i++)
1291 printk(" SRAM_BND=0x%04x,",i<<8); 1204 printk(" %2.2x", dev->dev_addr[i]);
1292 i = a->read_bcr(ioaddr, 27); 1205
1293 if (i & (1<<14)) printk("LowLatRx"); 1206 /* Version 0x2623 and 0x2624 */
1294 } 1207 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1295 } 1208 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1296 1209 printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
1297 dev->base_addr = ioaddr; 1210 switch (i >> 10) {
1298 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ 1211 case 0:
1299 if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { 1212 printk(" 20 bytes,");
1300 if (pcnet32_debug & NETIF_MSG_PROBE) 1213 break;
1301 printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); 1214 case 1:
1302 ret = -ENOMEM; 1215 printk(" 64 bytes,");
1303 goto err_free_netdev; 1216 break;
1304 } 1217 case 2:
1305 1218 printk(" 128 bytes,");
1306 memset(lp, 0, sizeof(*lp)); 1219 break;
1307 lp->dma_addr = lp_dma_addr; 1220 case 3:
1308 lp->pci_dev = pdev; 1221 printk("~220 bytes,");
1309 1222 break;
1310 spin_lock_init(&lp->lock); 1223 }
1311 1224 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1312 SET_MODULE_OWNER(dev); 1225 printk(" BCR18(%x):", i & 0xffff);
1313 SET_NETDEV_DEV(dev, &pdev->dev); 1226 if (i & (1 << 5))
1314 dev->priv = lp; 1227 printk("BurstWrEn ");
1315 lp->name = chipname; 1228 if (i & (1 << 6))
1316 lp->shared_irq = shared; 1229 printk("BurstRdEn ");
1317 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ 1230 if (i & (1 << 7))
1318 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ 1231 printk("DWordIO ");
1319 lp->tx_mod_mask = lp->tx_ring_size - 1; 1232 if (i & (1 << 11))
1320 lp->rx_mod_mask = lp->rx_ring_size - 1; 1233 printk("NoUFlow ");
1321 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); 1234 i = a->read_bcr(ioaddr, 25);
1322 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); 1235 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
1323 lp->mii_if.full_duplex = fdx; 1236 i = a->read_bcr(ioaddr, 26);
1324 lp->mii_if.phy_id_mask = 0x1f; 1237 printk(" SRAM_BND=0x%04x,", i << 8);
1325 lp->mii_if.reg_num_mask = 0x1f; 1238 i = a->read_bcr(ioaddr, 27);
1326 lp->dxsuflo = dxsuflo; 1239 if (i & (1 << 14))
1327 lp->mii = mii; 1240 printk("LowLatRx");
1328 lp->msg_enable = pcnet32_debug; 1241 }
1329 if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) 1242 }
1330 lp->options = PCNET32_PORT_ASEL; 1243
1331 else 1244 dev->base_addr = ioaddr;
1332 lp->options = options_mapping[options[cards_found]]; 1245 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1333 lp->mii_if.dev = dev; 1246 if ((lp =
1334 lp->mii_if.mdio_read = mdio_read; 1247 pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
1335 lp->mii_if.mdio_write = mdio_write; 1248 if (pcnet32_debug & NETIF_MSG_PROBE)
1336 1249 printk(KERN_ERR PFX
1337 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && 1250 "Consistent memory allocation failed.\n");
1338 ((cards_found>=MAX_UNITS) || full_duplex[cards_found])) 1251 ret = -ENOMEM;
1339 lp->options |= PCNET32_PORT_FD; 1252 goto err_free_netdev;
1340 1253 }
1341 if (!a) { 1254
1342 if (pcnet32_debug & NETIF_MSG_PROBE) 1255 memset(lp, 0, sizeof(*lp));
1343 printk(KERN_ERR PFX "No access methods\n"); 1256 lp->dma_addr = lp_dma_addr;
1344 ret = -ENODEV; 1257 lp->pci_dev = pdev;
1345 goto err_free_consistent; 1258
1346 } 1259 spin_lock_init(&lp->lock);
1347 lp->a = *a; 1260
1348 1261 SET_MODULE_OWNER(dev);
1349 /* prior to register_netdev, dev->name is not yet correct */ 1262 SET_NETDEV_DEV(dev, &pdev->dev);
1350 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { 1263 dev->priv = lp;
1351 ret = -ENOMEM; 1264 lp->name = chipname;
1352 goto err_free_ring; 1265 lp->shared_irq = shared;
1353 } 1266 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1354 /* detect special T1/E1 WAN card by checking for MAC address */ 1267 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1355 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 1268 lp->tx_mod_mask = lp->tx_ring_size - 1;
1269 lp->rx_mod_mask = lp->rx_ring_size - 1;
1270 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1271 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1272 lp->mii_if.full_duplex = fdx;
1273 lp->mii_if.phy_id_mask = 0x1f;
1274 lp->mii_if.reg_num_mask = 0x1f;
1275 lp->dxsuflo = dxsuflo;
1276 lp->mii = mii;
1277 lp->msg_enable = pcnet32_debug;
1278 if ((cards_found >= MAX_UNITS)
1279 || (options[cards_found] > sizeof(options_mapping)))
1280 lp->options = PCNET32_PORT_ASEL;
1281 else
1282 lp->options = options_mapping[options[cards_found]];
1283 lp->mii_if.dev = dev;
1284 lp->mii_if.mdio_read = mdio_read;
1285 lp->mii_if.mdio_write = mdio_write;
1286
1287 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1288 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1289 lp->options |= PCNET32_PORT_FD;
1290
1291 if (!a) {
1292 if (pcnet32_debug & NETIF_MSG_PROBE)
1293 printk(KERN_ERR PFX "No access methods\n");
1294 ret = -ENODEV;
1295 goto err_free_consistent;
1296 }
1297 lp->a = *a;
1298
1299 /* prior to register_netdev, dev->name is not yet correct */
1300 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1301 ret = -ENOMEM;
1302 goto err_free_ring;
1303 }
1304 /* detect special T1/E1 WAN card by checking for MAC address */
1305 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1356 && dev->dev_addr[2] == 0x75) 1306 && dev->dev_addr[2] == 0x75)
1357 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; 1307 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1358
1359 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1360 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1361 for (i = 0; i < 6; i++)
1362 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1363 lp->init_block.filter[0] = 0x00000000;
1364 lp->init_block.filter[1] = 0x00000000;
1365 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
1366 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
1367
1368 /* switch pcnet32 to 32bit mode */
1369 a->write_bcr(ioaddr, 20, 2);
1370
1371 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1372 init_block)) & 0xffff);
1373 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1374 init_block)) >> 16);
1375
1376 if (pdev) { /* use the IRQ provided by PCI */
1377 dev->irq = pdev->irq;
1378 if (pcnet32_debug & NETIF_MSG_PROBE)
1379 printk(" assigned IRQ %d.\n", dev->irq);
1380 } else {
1381 unsigned long irq_mask = probe_irq_on();
1382 1308
1383 /* 1309 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1384 * To auto-IRQ we enable the initialization-done and DMA error 1310 lp->init_block.tlen_rlen =
1385 * interrupts. For ISA boards we get a DMA error, but VLB and PCI 1311 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1386 * boards will work. 1312 for (i = 0; i < 6; i++)
1387 */ 1313 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1388 /* Trigger an initialization just for the interrupt. */ 1314 lp->init_block.filter[0] = 0x00000000;
1389 a->write_csr (ioaddr, 0, 0x41); 1315 lp->init_block.filter[1] = 0x00000000;
1390 mdelay (1); 1316 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1317 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1318
1319 /* switch pcnet32 to 32bit mode */
1320 a->write_bcr(ioaddr, 20, 2);
1321
1322 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1323 init_block)) & 0xffff);
1324 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1325 init_block)) >> 16);
1326
1327 if (pdev) { /* use the IRQ provided by PCI */
1328 dev->irq = pdev->irq;
1329 if (pcnet32_debug & NETIF_MSG_PROBE)
1330 printk(" assigned IRQ %d.\n", dev->irq);
1331 } else {
1332 unsigned long irq_mask = probe_irq_on();
1333
1334 /*
1335 * To auto-IRQ we enable the initialization-done and DMA error
1336 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1337 * boards will work.
1338 */
1339 /* Trigger an initialization just for the interrupt. */
1340 a->write_csr(ioaddr, 0, 0x41);
1341 mdelay(1);
1342
1343 dev->irq = probe_irq_off(irq_mask);
1344 if (!dev->irq) {
1345 if (pcnet32_debug & NETIF_MSG_PROBE)
1346 printk(", failed to detect IRQ line.\n");
1347 ret = -ENODEV;
1348 goto err_free_ring;
1349 }
1350 if (pcnet32_debug & NETIF_MSG_PROBE)
1351 printk(", probed IRQ %d.\n", dev->irq);
1352 }
1391 1353
1392 dev->irq = probe_irq_off (irq_mask); 1354 /* Set the mii phy_id so that we can query the link state */
1393 if (!dev->irq) { 1355 if (lp->mii) {
1394 if (pcnet32_debug & NETIF_MSG_PROBE) 1356 /* lp->phycount and lp->phymask are set to 0 by memset above */
1395 printk(", failed to detect IRQ line.\n"); 1357
1396 ret = -ENODEV; 1358 lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1397 goto err_free_ring; 1359 /* scan for PHYs */
1360 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1361 unsigned short id1, id2;
1362
1363 id1 = mdio_read(dev, i, MII_PHYSID1);
1364 if (id1 == 0xffff)
1365 continue;
1366 id2 = mdio_read(dev, i, MII_PHYSID2);
1367 if (id2 == 0xffff)
1368 continue;
1369 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1370 continue; /* 79C971 & 79C972 have phantom phy at id 31 */
1371 lp->phycount++;
1372 lp->phymask |= (1 << i);
1373 lp->mii_if.phy_id = i;
1374 if (pcnet32_debug & NETIF_MSG_PROBE)
1375 printk(KERN_INFO PFX
1376 "Found PHY %04x:%04x at address %d.\n",
1377 id1, id2, i);
1378 }
1379 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1380 if (lp->phycount > 1) {
1381 lp->options |= PCNET32_PORT_MII;
1382 }
1398 } 1383 }
1399 if (pcnet32_debug & NETIF_MSG_PROBE) 1384
1400 printk(", probed IRQ %d.\n", dev->irq); 1385 init_timer(&lp->watchdog_timer);
1401 } 1386 lp->watchdog_timer.data = (unsigned long)dev;
1402 1387 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1403 /* Set the mii phy_id so that we can query the link state */ 1388
1404 if (lp->mii) 1389 /* The PCNET32-specific entries in the device structure. */
1405 lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f; 1390 dev->open = &pcnet32_open;
1406 1391 dev->hard_start_xmit = &pcnet32_start_xmit;
1407 init_timer (&lp->watchdog_timer); 1392 dev->stop = &pcnet32_close;
1408 lp->watchdog_timer.data = (unsigned long) dev; 1393 dev->get_stats = &pcnet32_get_stats;
1409 lp->watchdog_timer.function = (void *) &pcnet32_watchdog; 1394 dev->set_multicast_list = &pcnet32_set_multicast_list;
1410 1395 dev->do_ioctl = &pcnet32_ioctl;
1411 /* The PCNET32-specific entries in the device structure. */ 1396 dev->ethtool_ops = &pcnet32_ethtool_ops;
1412 dev->open = &pcnet32_open; 1397 dev->tx_timeout = pcnet32_tx_timeout;
1413 dev->hard_start_xmit = &pcnet32_start_xmit; 1398 dev->watchdog_timeo = (5 * HZ);
1414 dev->stop = &pcnet32_close;
1415 dev->get_stats = &pcnet32_get_stats;
1416 dev->set_multicast_list = &pcnet32_set_multicast_list;
1417 dev->do_ioctl = &pcnet32_ioctl;
1418 dev->ethtool_ops = &pcnet32_ethtool_ops;
1419 dev->tx_timeout = pcnet32_tx_timeout;
1420 dev->watchdog_timeo = (5*HZ);
1421 1399
1422#ifdef CONFIG_NET_POLL_CONTROLLER 1400#ifdef CONFIG_NET_POLL_CONTROLLER
1423 dev->poll_controller = pcnet32_poll_controller; 1401 dev->poll_controller = pcnet32_poll_controller;
1424#endif 1402#endif
1425 1403
1426 /* Fill in the generic fields of the device structure. */ 1404 /* Fill in the generic fields of the device structure. */
1427 if (register_netdev(dev)) 1405 if (register_netdev(dev))
1428 goto err_free_ring; 1406 goto err_free_ring;
1429 1407
1430 if (pdev) { 1408 if (pdev) {
1431 pci_set_drvdata(pdev, dev); 1409 pci_set_drvdata(pdev, dev);
1432 } else { 1410 } else {
1433 lp->next = pcnet32_dev; 1411 lp->next = pcnet32_dev;
1434 pcnet32_dev = dev; 1412 pcnet32_dev = dev;
1435 } 1413 }
1436
1437 if (pcnet32_debug & NETIF_MSG_PROBE)
1438 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1439 cards_found++;
1440
1441 /* enable LED writes */
1442 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1443
1444 return 0;
1445
1446err_free_ring:
1447 pcnet32_free_ring(dev);
1448err_free_consistent:
1449 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1450err_free_netdev:
1451 free_netdev(dev);
1452err_release_region:
1453 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1454 return ret;
1455}
1456 1414
1415 if (pcnet32_debug & NETIF_MSG_PROBE)
1416 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1417 cards_found++;
1418
1419 /* enable LED writes */
1420 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1421
1422 return 0;
1423
1424 err_free_ring:
1425 pcnet32_free_ring(dev);
1426 err_free_consistent:
1427 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1428 err_free_netdev:
1429 free_netdev(dev);
1430 err_release_region:
1431 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1432 return ret;
1433}
1457 1434
1458/* if any allocation fails, caller must also call pcnet32_free_ring */ 1435/* if any allocation fails, caller must also call pcnet32_free_ring */
1459static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1436static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1460{ 1437{
1461 struct pcnet32_private *lp = dev->priv; 1438 struct pcnet32_private *lp = dev->priv;
1462 1439
1463 lp->tx_ring = pci_alloc_consistent(lp->pci_dev, 1440 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1464 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, 1441 sizeof(struct pcnet32_tx_head) *
1465 &lp->tx_ring_dma_addr); 1442 lp->tx_ring_size,
1466 if (lp->tx_ring == NULL) { 1443 &lp->tx_ring_dma_addr);
1467 if (pcnet32_debug & NETIF_MSG_DRV) 1444 if (lp->tx_ring == NULL) {
1468 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", 1445 if (pcnet32_debug & NETIF_MSG_DRV)
1469 name); 1446 printk("\n" KERN_ERR PFX
1470 return -ENOMEM; 1447 "%s: Consistent memory allocation failed.\n",
1471 } 1448 name);
1472 1449 return -ENOMEM;
1473 lp->rx_ring = pci_alloc_consistent(lp->pci_dev, 1450 }
1474 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
1475 &lp->rx_ring_dma_addr);
1476 if (lp->rx_ring == NULL) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
1479 name);
1480 return -ENOMEM;
1481 }
1482
1483 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1484 GFP_ATOMIC);
1485 if (!lp->tx_dma_addr) {
1486 if (pcnet32_debug & NETIF_MSG_DRV)
1487 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1488 return -ENOMEM;
1489 }
1490 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1491
1492 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1493 GFP_ATOMIC);
1494 if (!lp->rx_dma_addr) {
1495 if (pcnet32_debug & NETIF_MSG_DRV)
1496 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1497 return -ENOMEM;
1498 }
1499 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1500
1501 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1502 GFP_ATOMIC);
1503 if (!lp->tx_skbuff) {
1504 if (pcnet32_debug & NETIF_MSG_DRV)
1505 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1506 return -ENOMEM;
1507 }
1508 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1509
1510 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1511 GFP_ATOMIC);
1512 if (!lp->rx_skbuff) {
1513 if (pcnet32_debug & NETIF_MSG_DRV)
1514 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1515 return -ENOMEM;
1516 }
1517 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1518 1451
1519 return 0; 1452 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1520} 1453 sizeof(struct pcnet32_rx_head) *
1454 lp->rx_ring_size,
1455 &lp->rx_ring_dma_addr);
1456 if (lp->rx_ring == NULL) {
1457 if (pcnet32_debug & NETIF_MSG_DRV)
1458 printk("\n" KERN_ERR PFX
1459 "%s: Consistent memory allocation failed.\n",
1460 name);
1461 return -ENOMEM;
1462 }
1521 1463
1464 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1465 GFP_ATOMIC);
1466 if (!lp->tx_dma_addr) {
1467 if (pcnet32_debug & NETIF_MSG_DRV)
1468 printk("\n" KERN_ERR PFX
1469 "%s: Memory allocation failed.\n", name);
1470 return -ENOMEM;
1471 }
1472 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1473
1474 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1475 GFP_ATOMIC);
1476 if (!lp->rx_dma_addr) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk("\n" KERN_ERR PFX
1479 "%s: Memory allocation failed.\n", name);
1480 return -ENOMEM;
1481 }
1482 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1483
1484 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1485 GFP_ATOMIC);
1486 if (!lp->tx_skbuff) {
1487 if (pcnet32_debug & NETIF_MSG_DRV)
1488 printk("\n" KERN_ERR PFX
1489 "%s: Memory allocation failed.\n", name);
1490 return -ENOMEM;
1491 }
1492 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1493
1494 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1495 GFP_ATOMIC);
1496 if (!lp->rx_skbuff) {
1497 if (pcnet32_debug & NETIF_MSG_DRV)
1498 printk("\n" KERN_ERR PFX
1499 "%s: Memory allocation failed.\n", name);
1500 return -ENOMEM;
1501 }
1502 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1503
1504 return 0;
1505}
1522 1506
1523static void pcnet32_free_ring(struct net_device *dev) 1507static void pcnet32_free_ring(struct net_device *dev)
1524{ 1508{
1525 struct pcnet32_private *lp = dev->priv; 1509 struct pcnet32_private *lp = dev->priv;
1526 1510
1527 kfree(lp->tx_skbuff); 1511 kfree(lp->tx_skbuff);
1528 lp->tx_skbuff = NULL; 1512 lp->tx_skbuff = NULL;
1529 1513
1530 kfree(lp->rx_skbuff); 1514 kfree(lp->rx_skbuff);
1531 lp->rx_skbuff = NULL; 1515 lp->rx_skbuff = NULL;
1532 1516
1533 kfree(lp->tx_dma_addr); 1517 kfree(lp->tx_dma_addr);
1534 lp->tx_dma_addr = NULL; 1518 lp->tx_dma_addr = NULL;
1535 1519
1536 kfree(lp->rx_dma_addr); 1520 kfree(lp->rx_dma_addr);
1537 lp->rx_dma_addr = NULL; 1521 lp->rx_dma_addr = NULL;
1538 1522
1539 if (lp->tx_ring) { 1523 if (lp->tx_ring) {
1540 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, 1524 pci_free_consistent(lp->pci_dev,
1541 lp->tx_ring, lp->tx_ring_dma_addr); 1525 sizeof(struct pcnet32_tx_head) *
1542 lp->tx_ring = NULL; 1526 lp->tx_ring_size, lp->tx_ring,
1543 } 1527 lp->tx_ring_dma_addr);
1528 lp->tx_ring = NULL;
1529 }
1544 1530
1545 if (lp->rx_ring) { 1531 if (lp->rx_ring) {
1546 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, 1532 pci_free_consistent(lp->pci_dev,
1547 lp->rx_ring, lp->rx_ring_dma_addr); 1533 sizeof(struct pcnet32_rx_head) *
1548 lp->rx_ring = NULL; 1534 lp->rx_ring_size, lp->rx_ring,
1549 } 1535 lp->rx_ring_dma_addr);
1536 lp->rx_ring = NULL;
1537 }
1550} 1538}
1551 1539
1552 1540static int pcnet32_open(struct net_device *dev)
1553static int
1554pcnet32_open(struct net_device *dev)
1555{ 1541{
1556 struct pcnet32_private *lp = dev->priv; 1542 struct pcnet32_private *lp = dev->priv;
1557 unsigned long ioaddr = dev->base_addr; 1543 unsigned long ioaddr = dev->base_addr;
1558 u16 val; 1544 u16 val;
1559 int i; 1545 int i;
1560 int rc; 1546 int rc;
1561 unsigned long flags; 1547 unsigned long flags;
1562 1548
1563 if (request_irq(dev->irq, &pcnet32_interrupt, 1549 if (request_irq(dev->irq, &pcnet32_interrupt,
1564 lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) { 1550 lp->shared_irq ? SA_SHIRQ : 0, dev->name,
1565 return -EAGAIN; 1551 (void *)dev)) {
1566 } 1552 return -EAGAIN;
1567 1553 }
1568 spin_lock_irqsave(&lp->lock, flags); 1554
1569 /* Check for a valid station address */ 1555 spin_lock_irqsave(&lp->lock, flags);
1570 if (!is_valid_ether_addr(dev->dev_addr)) { 1556 /* Check for a valid station address */
1571 rc = -EINVAL; 1557 if (!is_valid_ether_addr(dev->dev_addr)) {
1572 goto err_free_irq; 1558 rc = -EINVAL;
1573 } 1559 goto err_free_irq;
1574 1560 }
1575 /* Reset the PCNET32 */ 1561
1576 lp->a.reset (ioaddr); 1562 /* Reset the PCNET32 */
1577 1563 lp->a.reset(ioaddr);
1578 /* switch pcnet32 to 32bit mode */ 1564
1579 lp->a.write_bcr (ioaddr, 20, 2); 1565 /* switch pcnet32 to 32bit mode */
1580 1566 lp->a.write_bcr(ioaddr, 20, 2);
1581 if (netif_msg_ifup(lp)) 1567
1582 printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", 1568 if (netif_msg_ifup(lp))
1583 dev->name, dev->irq, 1569 printk(KERN_DEBUG
1584 (u32) (lp->tx_ring_dma_addr), 1570 "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
1585 (u32) (lp->rx_ring_dma_addr), 1571 dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
1586 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); 1572 (u32) (lp->rx_ring_dma_addr),
1587 1573 (u32) (lp->dma_addr +
1588 /* set/reset autoselect bit */ 1574 offsetof(struct pcnet32_private, init_block)));
1589 val = lp->a.read_bcr (ioaddr, 2) & ~2; 1575
1590 if (lp->options & PCNET32_PORT_ASEL) 1576 /* set/reset autoselect bit */
1591 val |= 2; 1577 val = lp->a.read_bcr(ioaddr, 2) & ~2;
1592 lp->a.write_bcr (ioaddr, 2, val); 1578 if (lp->options & PCNET32_PORT_ASEL)
1593
1594 /* handle full duplex setting */
1595 if (lp->mii_if.full_duplex) {
1596 val = lp->a.read_bcr (ioaddr, 9) & ~3;
1597 if (lp->options & PCNET32_PORT_FD) {
1598 val |= 1;
1599 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1600 val |= 2; 1579 val |= 2;
1601 } else if (lp->options & PCNET32_PORT_ASEL) { 1580 lp->a.write_bcr(ioaddr, 2, val);
1602 /* workaround of xSeries250, turn on for 79C975 only */ 1581
1603 i = ((lp->a.read_csr(ioaddr, 88) | 1582 /* handle full duplex setting */
1604 (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff; 1583 if (lp->mii_if.full_duplex) {
1605 if (i == 0x2627) 1584 val = lp->a.read_bcr(ioaddr, 9) & ~3;
1606 val |= 3; 1585 if (lp->options & PCNET32_PORT_FD) {
1607 } 1586 val |= 1;
1608 lp->a.write_bcr (ioaddr, 9, val); 1587 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1609 } 1588 val |= 2;
1610 1589 } else if (lp->options & PCNET32_PORT_ASEL) {
1611 /* set/reset GPSI bit in test register */ 1590 /* workaround of xSeries250, turn on for 79C975 only */
1612 val = lp->a.read_csr (ioaddr, 124) & ~0x10; 1591 i = ((lp->a.read_csr(ioaddr, 88) |
1613 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) 1592 (lp->a.
1614 val |= 0x10; 1593 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1615 lp->a.write_csr (ioaddr, 124, val); 1594 if (i == 0x2627)
1616 1595 val |= 3;
1617 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 1596 }
1618 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 1597 lp->a.write_bcr(ioaddr, 9, val);
1598 }
1599
1600 /* set/reset GPSI bit in test register */
1601 val = lp->a.read_csr(ioaddr, 124) & ~0x10;
1602 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
1603 val |= 0x10;
1604 lp->a.write_csr(ioaddr, 124, val);
1605
1606 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
1607 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
1619 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 1608 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
1620 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 1609 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
1621 if (lp->options & PCNET32_PORT_ASEL) { 1610 if (lp->options & PCNET32_PORT_ASEL) {
1622 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 1611 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
1623 if (netif_msg_link(lp)) 1612 if (netif_msg_link(lp))
1624 printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n", 1613 printk(KERN_DEBUG
1625 dev->name); 1614 "%s: Setting 100Mb-Full Duplex.\n",
1626 } 1615 dev->name);
1627 } 1616 }
1628 { 1617 }
1629 /* 1618 if (lp->phycount < 2) {
1630 * 24 Jun 2004 according AMD, in order to change the PHY, 1619 /*
1631 * DANAS (or DISPM for 79C976) must be set; then select the speed, 1620 * 24 Jun 2004 according AMD, in order to change the PHY,
1632 * duplex, and/or enable auto negotiation, and clear DANAS 1621 * DANAS (or DISPM for 79C976) must be set; then select the speed,
1633 */ 1622 * duplex, and/or enable auto negotiation, and clear DANAS
1634 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { 1623 */
1635 lp->a.write_bcr(ioaddr, 32, 1624 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
1636 lp->a.read_bcr(ioaddr, 32) | 0x0080); 1625 lp->a.write_bcr(ioaddr, 32,
1637 /* disable Auto Negotiation, set 10Mpbs, HD */ 1626 lp->a.read_bcr(ioaddr, 32) | 0x0080);
1638 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; 1627 /* disable Auto Negotiation, set 10Mpbs, HD */
1639 if (lp->options & PCNET32_PORT_FD) 1628 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
1640 val |= 0x10; 1629 if (lp->options & PCNET32_PORT_FD)
1641 if (lp->options & PCNET32_PORT_100) 1630 val |= 0x10;
1642 val |= 0x08; 1631 if (lp->options & PCNET32_PORT_100)
1643 lp->a.write_bcr (ioaddr, 32, val); 1632 val |= 0x08;
1633 lp->a.write_bcr(ioaddr, 32, val);
1634 } else {
1635 if (lp->options & PCNET32_PORT_ASEL) {
1636 lp->a.write_bcr(ioaddr, 32,
1637 lp->a.read_bcr(ioaddr,
1638 32) | 0x0080);
1639 /* enable auto negotiate, setup, disable fd */
1640 val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
1641 val |= 0x20;
1642 lp->a.write_bcr(ioaddr, 32, val);
1643 }
1644 }
1644 } else { 1645 } else {
1645 if (lp->options & PCNET32_PORT_ASEL) { 1646 int first_phy = -1;
1646 lp->a.write_bcr(ioaddr, 32, 1647 u16 bmcr;
1647 lp->a.read_bcr(ioaddr, 32) | 0x0080); 1648 u32 bcr9;
1648 /* enable auto negotiate, setup, disable fd */ 1649 struct ethtool_cmd ecmd;
1649 val = lp->a.read_bcr(ioaddr, 32) & ~0x98; 1650
1650 val |= 0x20; 1651 /*
1651 lp->a.write_bcr(ioaddr, 32, val); 1652 * There is really no good other way to handle multiple PHYs
1652 } 1653 * other than turning off all automatics
1654 */
1655 val = lp->a.read_bcr(ioaddr, 2);
1656 lp->a.write_bcr(ioaddr, 2, val & ~2);
1657 val = lp->a.read_bcr(ioaddr, 32);
1658 lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
1659
1660 if (!(lp->options & PCNET32_PORT_ASEL)) {
1661 /* setup ecmd */
1662 ecmd.port = PORT_MII;
1663 ecmd.transceiver = XCVR_INTERNAL;
1664 ecmd.autoneg = AUTONEG_DISABLE;
1665 ecmd.speed =
1666 lp->
1667 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
1668 bcr9 = lp->a.read_bcr(ioaddr, 9);
1669
1670 if (lp->options & PCNET32_PORT_FD) {
1671 ecmd.duplex = DUPLEX_FULL;
1672 bcr9 |= (1 << 0);
1673 } else {
1674 ecmd.duplex = DUPLEX_HALF;
1675 bcr9 |= ~(1 << 0);
1676 }
1677 lp->a.write_bcr(ioaddr, 9, bcr9);
1678 }
1679
1680 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1681 if (lp->phymask & (1 << i)) {
1682 /* isolate all but the first PHY */
1683 bmcr = mdio_read(dev, i, MII_BMCR);
1684 if (first_phy == -1) {
1685 first_phy = i;
1686 mdio_write(dev, i, MII_BMCR,
1687 bmcr & ~BMCR_ISOLATE);
1688 } else {
1689 mdio_write(dev, i, MII_BMCR,
1690 bmcr | BMCR_ISOLATE);
1691 }
1692 /* use mii_ethtool_sset to setup PHY */
1693 lp->mii_if.phy_id = i;
1694 ecmd.phy_address = i;
1695 if (lp->options & PCNET32_PORT_ASEL) {
1696 mii_ethtool_gset(&lp->mii_if, &ecmd);
1697 ecmd.autoneg = AUTONEG_ENABLE;
1698 }
1699 mii_ethtool_sset(&lp->mii_if, &ecmd);
1700 }
1701 }
1702 lp->mii_if.phy_id = first_phy;
1703 if (netif_msg_link(lp))
1704 printk(KERN_INFO "%s: Using PHY number %d.\n",
1705 dev->name, first_phy);
1653 } 1706 }
1654 }
1655 1707
1656#ifdef DO_DXSUFLO 1708#ifdef DO_DXSUFLO
1657 if (lp->dxsuflo) { /* Disable transmit stop on underflow */ 1709 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
1658 val = lp->a.read_csr (ioaddr, 3); 1710 val = lp->a.read_csr(ioaddr, 3);
1659 val |= 0x40; 1711 val |= 0x40;
1660 lp->a.write_csr (ioaddr, 3, val); 1712 lp->a.write_csr(ioaddr, 3, val);
1661 } 1713 }
1662#endif 1714#endif
1663 1715
1664 lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 1716 lp->init_block.mode =
1665 pcnet32_load_multicast(dev); 1717 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
1666 1718 pcnet32_load_multicast(dev);
1667 if (pcnet32_init_ring(dev)) { 1719
1668 rc = -ENOMEM; 1720 if (pcnet32_init_ring(dev)) {
1669 goto err_free_ring; 1721 rc = -ENOMEM;
1670 } 1722 goto err_free_ring;
1671 1723 }
1672 /* Re-initialize the PCNET32, and start it when done. */ 1724
1673 lp->a.write_csr (ioaddr, 1, (lp->dma_addr + 1725 /* Re-initialize the PCNET32, and start it when done. */
1674 offsetof(struct pcnet32_private, init_block)) & 0xffff); 1726 lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
1675 lp->a.write_csr (ioaddr, 2, (lp->dma_addr + 1727 offsetof(struct pcnet32_private,
1676 offsetof(struct pcnet32_private, init_block)) >> 16); 1728 init_block)) & 0xffff);
1677 1729 lp->a.write_csr(ioaddr, 2,
1678 lp->a.write_csr (ioaddr, 4, 0x0915); 1730 (lp->dma_addr +
1679 lp->a.write_csr (ioaddr, 0, 0x0001); 1731 offsetof(struct pcnet32_private, init_block)) >> 16);
1680 1732
1681 netif_start_queue(dev); 1733 lp->a.write_csr(ioaddr, 4, 0x0915);
1682 1734 lp->a.write_csr(ioaddr, 0, 0x0001);
1683 /* If we have mii, print the link status and start the watchdog */ 1735
1684 if (lp->mii) { 1736 netif_start_queue(dev);
1685 mii_check_media (&lp->mii_if, netif_msg_link(lp), 1); 1737
1686 mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 1738 /* Print the link status and start the watchdog */
1687 } 1739 pcnet32_check_media(dev, 1);
1688 1740 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
1689 i = 0; 1741
1690 while (i++ < 100) 1742 i = 0;
1691 if (lp->a.read_csr (ioaddr, 0) & 0x0100) 1743 while (i++ < 100)
1692 break; 1744 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1693 /* 1745 break;
1694 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton 1746 /*
1695 * reports that doing so triggers a bug in the '974. 1747 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
1696 */ 1748 * reports that doing so triggers a bug in the '974.
1697 lp->a.write_csr (ioaddr, 0, 0x0042); 1749 */
1698 1750 lp->a.write_csr(ioaddr, 0, 0x0042);
1699 if (netif_msg_ifup(lp)) 1751
1700 printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", 1752 if (netif_msg_ifup(lp))
1701 dev->name, i, (u32) (lp->dma_addr + 1753 printk(KERN_DEBUG
1702 offsetof(struct pcnet32_private, init_block)), 1754 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
1703 lp->a.read_csr(ioaddr, 0)); 1755 dev->name, i,
1704 1756 (u32) (lp->dma_addr +
1705 spin_unlock_irqrestore(&lp->lock, flags); 1757 offsetof(struct pcnet32_private, init_block)),
1706 1758 lp->a.read_csr(ioaddr, 0));
1707 return 0; /* Always succeed */ 1759
1708 1760 spin_unlock_irqrestore(&lp->lock, flags);
1709err_free_ring: 1761
1710 /* free any allocated skbuffs */ 1762 return 0; /* Always succeed */
1711 for (i = 0; i < lp->rx_ring_size; i++) { 1763
1712 lp->rx_ring[i].status = 0; 1764 err_free_ring:
1713 if (lp->rx_skbuff[i]) { 1765 /* free any allocated skbuffs */
1714 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, 1766 for (i = 0; i < lp->rx_ring_size; i++) {
1715 PCI_DMA_FROMDEVICE); 1767 lp->rx_ring[i].status = 0;
1716 dev_kfree_skb(lp->rx_skbuff[i]); 1768 if (lp->rx_skbuff[i]) {
1717 } 1769 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
1718 lp->rx_skbuff[i] = NULL; 1770 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1719 lp->rx_dma_addr[i] = 0; 1771 dev_kfree_skb(lp->rx_skbuff[i]);
1720 } 1772 }
1721 1773 lp->rx_skbuff[i] = NULL;
1722 pcnet32_free_ring(dev); 1774 lp->rx_dma_addr[i] = 0;
1723 1775 }
1724 /* 1776
1725 * Switch back to 16bit mode to avoid problems with dumb 1777 pcnet32_free_ring(dev);
1726 * DOS packet driver after a warm reboot 1778
1727 */ 1779 /*
1728 lp->a.write_bcr (ioaddr, 20, 4); 1780 * Switch back to 16bit mode to avoid problems with dumb
1729 1781 * DOS packet driver after a warm reboot
1730err_free_irq: 1782 */
1731 spin_unlock_irqrestore(&lp->lock, flags); 1783 lp->a.write_bcr(ioaddr, 20, 4);
1732 free_irq(dev->irq, dev); 1784
1733 return rc; 1785 err_free_irq:
1786 spin_unlock_irqrestore(&lp->lock, flags);
1787 free_irq(dev->irq, dev);
1788 return rc;
1734} 1789}
1735 1790
1736/* 1791/*
@@ -1746,727 +1801,893 @@ err_free_irq:
1746 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com 1801 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
1747 */ 1802 */
1748 1803
1749static void 1804static void pcnet32_purge_tx_ring(struct net_device *dev)
1750pcnet32_purge_tx_ring(struct net_device *dev)
1751{ 1805{
1752 struct pcnet32_private *lp = dev->priv; 1806 struct pcnet32_private *lp = dev->priv;
1753 int i; 1807 int i;
1754
1755 for (i = 0; i < lp->tx_ring_size; i++) {
1756 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1757 wmb(); /* Make sure adapter sees owner change */
1758 if (lp->tx_skbuff[i]) {
1759 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
1760 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1761 dev_kfree_skb_any(lp->tx_skbuff[i]);
1762 }
1763 lp->tx_skbuff[i] = NULL;
1764 lp->tx_dma_addr[i] = 0;
1765 }
1766}
1767 1808
1809 for (i = 0; i < lp->tx_ring_size; i++) {
1810 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1811 wmb(); /* Make sure adapter sees owner change */
1812 if (lp->tx_skbuff[i]) {
1813 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
1814 lp->tx_skbuff[i]->len,
1815 PCI_DMA_TODEVICE);
1816 dev_kfree_skb_any(lp->tx_skbuff[i]);
1817 }
1818 lp->tx_skbuff[i] = NULL;
1819 lp->tx_dma_addr[i] = 0;
1820 }
1821}
1768 1822
1769/* Initialize the PCNET32 Rx and Tx rings. */ 1823/* Initialize the PCNET32 Rx and Tx rings. */
1770static int 1824static int pcnet32_init_ring(struct net_device *dev)
1771pcnet32_init_ring(struct net_device *dev)
1772{ 1825{
1773 struct pcnet32_private *lp = dev->priv; 1826 struct pcnet32_private *lp = dev->priv;
1774 int i; 1827 int i;
1775 1828
1776 lp->tx_full = 0; 1829 lp->tx_full = 0;
1777 lp->cur_rx = lp->cur_tx = 0; 1830 lp->cur_rx = lp->cur_tx = 0;
1778 lp->dirty_rx = lp->dirty_tx = 0; 1831 lp->dirty_rx = lp->dirty_tx = 0;
1779 1832
1780 for (i = 0; i < lp->rx_ring_size; i++) { 1833 for (i = 0; i < lp->rx_ring_size; i++) {
1781 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; 1834 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
1782 if (rx_skbuff == NULL) { 1835 if (rx_skbuff == NULL) {
1783 if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { 1836 if (!
1784 /* there is not much, we can do at this point */ 1837 (rx_skbuff = lp->rx_skbuff[i] =
1785 if (pcnet32_debug & NETIF_MSG_DRV) 1838 dev_alloc_skb(PKT_BUF_SZ))) {
1786 printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", 1839 /* there is not much, we can do at this point */
1787 dev->name); 1840 if (pcnet32_debug & NETIF_MSG_DRV)
1788 return -1; 1841 printk(KERN_ERR
1789 } 1842 "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
1790 skb_reserve (rx_skbuff, 2); 1843 dev->name);
1791 } 1844 return -1;
1792 1845 }
1793 rmb(); 1846 skb_reserve(rx_skbuff, 2);
1794 if (lp->rx_dma_addr[i] == 0) 1847 }
1795 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, 1848
1796 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); 1849 rmb();
1797 lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); 1850 if (lp->rx_dma_addr[i] == 0)
1798 lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ); 1851 lp->rx_dma_addr[i] =
1799 wmb(); /* Make sure owner changes after all others are visible */ 1852 pci_map_single(lp->pci_dev, rx_skbuff->data,
1800 lp->rx_ring[i].status = le16_to_cpu(0x8000); 1853 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1801 } 1854 lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
1802 /* The Tx buffer address is filled in as needed, but we do need to clear 1855 lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
1803 * the upper ownership bit. */ 1856 wmb(); /* Make sure owner changes after all others are visible */
1804 for (i = 0; i < lp->tx_ring_size; i++) { 1857 lp->rx_ring[i].status = le16_to_cpu(0x8000);
1805 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 1858 }
1806 wmb(); /* Make sure adapter sees owner change */ 1859 /* The Tx buffer address is filled in as needed, but we do need to clear
1807 lp->tx_ring[i].base = 0; 1860 * the upper ownership bit. */
1808 lp->tx_dma_addr[i] = 0; 1861 for (i = 0; i < lp->tx_ring_size; i++) {
1809 } 1862 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1810 1863 wmb(); /* Make sure adapter sees owner change */
1811 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); 1864 lp->tx_ring[i].base = 0;
1812 for (i = 0; i < 6; i++) 1865 lp->tx_dma_addr[i] = 0;
1813 lp->init_block.phys_addr[i] = dev->dev_addr[i]; 1866 }
1814 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); 1867
1815 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); 1868 lp->init_block.tlen_rlen =
1816 wmb(); /* Make sure all changes are visible */ 1869 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1817 return 0; 1870 for (i = 0; i < 6; i++)
1871 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1872 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1873 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1874 wmb(); /* Make sure all changes are visible */
1875 return 0;
1818} 1876}
1819 1877
1820/* the pcnet32 has been issued a stop or reset. Wait for the stop bit 1878/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
1821 * then flush the pending transmit operations, re-initialize the ring, 1879 * then flush the pending transmit operations, re-initialize the ring,
1822 * and tell the chip to initialize. 1880 * and tell the chip to initialize.
1823 */ 1881 */
1824static void 1882static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1825pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1826{ 1883{
1827 struct pcnet32_private *lp = dev->priv; 1884 struct pcnet32_private *lp = dev->priv;
1828 unsigned long ioaddr = dev->base_addr; 1885 unsigned long ioaddr = dev->base_addr;
1829 int i; 1886 int i;
1830 1887
1831 /* wait for stop */ 1888 /* wait for stop */
1832 for (i=0; i<100; i++) 1889 for (i = 0; i < 100; i++)
1833 if (lp->a.read_csr(ioaddr, 0) & 0x0004) 1890 if (lp->a.read_csr(ioaddr, 0) & 0x0004)
1834 break; 1891 break;
1835 1892
1836 if (i >= 100 && netif_msg_drv(lp)) 1893 if (i >= 100 && netif_msg_drv(lp))
1837 printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n", 1894 printk(KERN_ERR
1838 dev->name); 1895 "%s: pcnet32_restart timed out waiting for stop.\n",
1896 dev->name);
1839 1897
1840 pcnet32_purge_tx_ring(dev); 1898 pcnet32_purge_tx_ring(dev);
1841 if (pcnet32_init_ring(dev)) 1899 if (pcnet32_init_ring(dev))
1842 return; 1900 return;
1843 1901
1844 /* ReInit Ring */ 1902 /* ReInit Ring */
1845 lp->a.write_csr (ioaddr, 0, 1); 1903 lp->a.write_csr(ioaddr, 0, 1);
1846 i = 0; 1904 i = 0;
1847 while (i++ < 1000) 1905 while (i++ < 1000)
1848 if (lp->a.read_csr (ioaddr, 0) & 0x0100) 1906 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1849 break; 1907 break;
1850 1908
1851 lp->a.write_csr (ioaddr, 0, csr0_bits); 1909 lp->a.write_csr(ioaddr, 0, csr0_bits);
1852} 1910}
1853 1911
1854 1912static void pcnet32_tx_timeout(struct net_device *dev)
1855static void
1856pcnet32_tx_timeout (struct net_device *dev)
1857{ 1913{
1858 struct pcnet32_private *lp = dev->priv; 1914 struct pcnet32_private *lp = dev->priv;
1859 unsigned long ioaddr = dev->base_addr, flags; 1915 unsigned long ioaddr = dev->base_addr, flags;
1860 1916
1861 spin_lock_irqsave(&lp->lock, flags); 1917 spin_lock_irqsave(&lp->lock, flags);
1862 /* Transmitter timeout, serious problems. */ 1918 /* Transmitter timeout, serious problems. */
1863 if (pcnet32_debug & NETIF_MSG_DRV) 1919 if (pcnet32_debug & NETIF_MSG_DRV)
1864 printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", 1920 printk(KERN_ERR
1865 dev->name, lp->a.read_csr(ioaddr, 0)); 1921 "%s: transmit timed out, status %4.4x, resetting.\n",
1866 lp->a.write_csr (ioaddr, 0, 0x0004); 1922 dev->name, lp->a.read_csr(ioaddr, 0));
1867 lp->stats.tx_errors++; 1923 lp->a.write_csr(ioaddr, 0, 0x0004);
1868 if (netif_msg_tx_err(lp)) { 1924 lp->stats.tx_errors++;
1869 int i; 1925 if (netif_msg_tx_err(lp)) {
1870 printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", 1926 int i;
1871 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", 1927 printk(KERN_DEBUG
1872 lp->cur_rx); 1928 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
1873 for (i = 0 ; i < lp->rx_ring_size; i++) 1929 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
1874 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1930 lp->cur_rx);
1875 le32_to_cpu(lp->rx_ring[i].base), 1931 for (i = 0; i < lp->rx_ring_size; i++)
1876 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, 1932 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1877 le32_to_cpu(lp->rx_ring[i].msg_length), 1933 le32_to_cpu(lp->rx_ring[i].base),
1878 le16_to_cpu(lp->rx_ring[i].status)); 1934 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
1879 for (i = 0 ; i < lp->tx_ring_size; i++) 1935 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
1880 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1936 le16_to_cpu(lp->rx_ring[i].status));
1881 le32_to_cpu(lp->tx_ring[i].base), 1937 for (i = 0; i < lp->tx_ring_size; i++)
1882 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, 1938 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1883 le32_to_cpu(lp->tx_ring[i].misc), 1939 le32_to_cpu(lp->tx_ring[i].base),
1884 le16_to_cpu(lp->tx_ring[i].status)); 1940 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
1885 printk("\n"); 1941 le32_to_cpu(lp->tx_ring[i].misc),
1886 } 1942 le16_to_cpu(lp->tx_ring[i].status));
1887 pcnet32_restart(dev, 0x0042); 1943 printk("\n");
1888 1944 }
1889 dev->trans_start = jiffies; 1945 pcnet32_restart(dev, 0x0042);
1890 netif_wake_queue(dev); 1946
1891 1947 dev->trans_start = jiffies;
1892 spin_unlock_irqrestore(&lp->lock, flags); 1948 netif_wake_queue(dev);
1893}
1894 1949
1950 spin_unlock_irqrestore(&lp->lock, flags);
1951}
1895 1952
1896static int 1953static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1897pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1898{ 1954{
1899 struct pcnet32_private *lp = dev->priv; 1955 struct pcnet32_private *lp = dev->priv;
1900 unsigned long ioaddr = dev->base_addr; 1956 unsigned long ioaddr = dev->base_addr;
1901 u16 status; 1957 u16 status;
1902 int entry; 1958 int entry;
1903 unsigned long flags; 1959 unsigned long flags;
1904 1960
1905 spin_lock_irqsave(&lp->lock, flags); 1961 spin_lock_irqsave(&lp->lock, flags);
1906 1962
1907 if (netif_msg_tx_queued(lp)) { 1963 if (netif_msg_tx_queued(lp)) {
1908 printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", 1964 printk(KERN_DEBUG
1909 dev->name, lp->a.read_csr(ioaddr, 0)); 1965 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
1910 } 1966 dev->name, lp->a.read_csr(ioaddr, 0));
1967 }
1911 1968
1912 /* Default status -- will not enable Successful-TxDone 1969 /* Default status -- will not enable Successful-TxDone
1913 * interrupt when that option is available to us. 1970 * interrupt when that option is available to us.
1914 */ 1971 */
1915 status = 0x8300; 1972 status = 0x8300;
1916 1973
1917 /* Fill in a Tx ring entry */ 1974 /* Fill in a Tx ring entry */
1918 1975
1919 /* Mask to ring buffer boundary. */ 1976 /* Mask to ring buffer boundary. */
1920 entry = lp->cur_tx & lp->tx_mod_mask; 1977 entry = lp->cur_tx & lp->tx_mod_mask;
1921 1978
1922 /* Caution: the write order is important here, set the status 1979 /* Caution: the write order is important here, set the status
1923 * with the "ownership" bits last. */ 1980 * with the "ownership" bits last. */
1924 1981
1925 lp->tx_ring[entry].length = le16_to_cpu(-skb->len); 1982 lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
1926 1983
1927 lp->tx_ring[entry].misc = 0x00000000; 1984 lp->tx_ring[entry].misc = 0x00000000;
1928 1985
1929 lp->tx_skbuff[entry] = skb; 1986 lp->tx_skbuff[entry] = skb;
1930 lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, 1987 lp->tx_dma_addr[entry] =
1931 PCI_DMA_TODEVICE); 1988 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1932 lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); 1989 lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
1933 wmb(); /* Make sure owner changes after all others are visible */ 1990 wmb(); /* Make sure owner changes after all others are visible */
1934 lp->tx_ring[entry].status = le16_to_cpu(status); 1991 lp->tx_ring[entry].status = le16_to_cpu(status);
1935 1992
1936 lp->cur_tx++; 1993 lp->cur_tx++;
1937 lp->stats.tx_bytes += skb->len; 1994 lp->stats.tx_bytes += skb->len;
1938 1995
1939 /* Trigger an immediate send poll. */ 1996 /* Trigger an immediate send poll. */
1940 lp->a.write_csr (ioaddr, 0, 0x0048); 1997 lp->a.write_csr(ioaddr, 0, 0x0048);
1941 1998
1942 dev->trans_start = jiffies; 1999 dev->trans_start = jiffies;
1943 2000
1944 if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { 2001 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
1945 lp->tx_full = 1; 2002 lp->tx_full = 1;
1946 netif_stop_queue(dev); 2003 netif_stop_queue(dev);
1947 } 2004 }
1948 spin_unlock_irqrestore(&lp->lock, flags); 2005 spin_unlock_irqrestore(&lp->lock, flags);
1949 return 0; 2006 return 0;
1950} 2007}
1951 2008
1952/* The PCNET32 interrupt handler. */ 2009/* The PCNET32 interrupt handler. */
1953static irqreturn_t 2010static irqreturn_t
1954pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) 2011pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1955{ 2012{
1956 struct net_device *dev = dev_id; 2013 struct net_device *dev = dev_id;
1957 struct pcnet32_private *lp; 2014 struct pcnet32_private *lp;
1958 unsigned long ioaddr; 2015 unsigned long ioaddr;
1959 u16 csr0,rap; 2016 u16 csr0, rap;
1960 int boguscnt = max_interrupt_work; 2017 int boguscnt = max_interrupt_work;
1961 int must_restart; 2018 int must_restart;
1962 2019
1963 if (!dev) { 2020 if (!dev) {
1964 if (pcnet32_debug & NETIF_MSG_INTR) 2021 if (pcnet32_debug & NETIF_MSG_INTR)
1965 printk (KERN_DEBUG "%s(): irq %d for unknown device\n", 2022 printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
1966 __FUNCTION__, irq); 2023 __FUNCTION__, irq);
1967 return IRQ_NONE; 2024 return IRQ_NONE;
1968 }
1969
1970 ioaddr = dev->base_addr;
1971 lp = dev->priv;
1972
1973 spin_lock(&lp->lock);
1974
1975 rap = lp->a.read_rap(ioaddr);
1976 while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
1977 if (csr0 == 0xffff) {
1978 break; /* PCMCIA remove happened */
1979 } 2025 }
1980 /* Acknowledge all of the current interrupt sources ASAP. */
1981 lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
1982 2026
1983 must_restart = 0; 2027 ioaddr = dev->base_addr;
2028 lp = dev->priv;
1984 2029
1985 if (netif_msg_intr(lp)) 2030 spin_lock(&lp->lock);
1986 printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", 2031
1987 dev->name, csr0, lp->a.read_csr (ioaddr, 0)); 2032 rap = lp->a.read_rap(ioaddr);
1988 2033 while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
1989 if (csr0 & 0x0400) /* Rx interrupt */ 2034 if (csr0 == 0xffff) {
1990 pcnet32_rx(dev); 2035 break; /* PCMCIA remove happened */
1991 2036 }
1992 if (csr0 & 0x0200) { /* Tx-done interrupt */ 2037 /* Acknowledge all of the current interrupt sources ASAP. */
1993 unsigned int dirty_tx = lp->dirty_tx; 2038 lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
1994 int delta; 2039
1995 2040 must_restart = 0;
1996 while (dirty_tx != lp->cur_tx) { 2041
1997 int entry = dirty_tx & lp->tx_mod_mask; 2042 if (netif_msg_intr(lp))
1998 int status = (short)le16_to_cpu(lp->tx_ring[entry].status); 2043 printk(KERN_DEBUG
1999 2044 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
2000 if (status < 0) 2045 dev->name, csr0, lp->a.read_csr(ioaddr, 0));
2001 break; /* It still hasn't been Txed */ 2046
2002 2047 if (csr0 & 0x0400) /* Rx interrupt */
2003 lp->tx_ring[entry].base = 0; 2048 pcnet32_rx(dev);
2004 2049
2005 if (status & 0x4000) { 2050 if (csr0 & 0x0200) { /* Tx-done interrupt */
2006 /* There was an major error, log it. */ 2051 unsigned int dirty_tx = lp->dirty_tx;
2007 int err_status = le32_to_cpu(lp->tx_ring[entry].misc); 2052 int delta;
2008 lp->stats.tx_errors++; 2053
2009 if (netif_msg_tx_err(lp)) 2054 while (dirty_tx != lp->cur_tx) {
2010 printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n", 2055 int entry = dirty_tx & lp->tx_mod_mask;
2011 dev->name, status, err_status); 2056 int status =
2012 if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; 2057 (short)le16_to_cpu(lp->tx_ring[entry].
2013 if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; 2058 status);
2014 if (err_status & 0x10000000) lp->stats.tx_window_errors++; 2059
2060 if (status < 0)
2061 break; /* It still hasn't been Txed */
2062
2063 lp->tx_ring[entry].base = 0;
2064
2065 if (status & 0x4000) {
2066 /* There was an major error, log it. */
2067 int err_status =
2068 le32_to_cpu(lp->tx_ring[entry].
2069 misc);
2070 lp->stats.tx_errors++;
2071 if (netif_msg_tx_err(lp))
2072 printk(KERN_ERR
2073 "%s: Tx error status=%04x err_status=%08x\n",
2074 dev->name, status,
2075 err_status);
2076 if (err_status & 0x04000000)
2077 lp->stats.tx_aborted_errors++;
2078 if (err_status & 0x08000000)
2079 lp->stats.tx_carrier_errors++;
2080 if (err_status & 0x10000000)
2081 lp->stats.tx_window_errors++;
2015#ifndef DO_DXSUFLO 2082#ifndef DO_DXSUFLO
2016 if (err_status & 0x40000000) { 2083 if (err_status & 0x40000000) {
2017 lp->stats.tx_fifo_errors++; 2084 lp->stats.tx_fifo_errors++;
2018 /* Ackk! On FIFO errors the Tx unit is turned off! */ 2085 /* Ackk! On FIFO errors the Tx unit is turned off! */
2019 /* Remove this verbosity later! */ 2086 /* Remove this verbosity later! */
2020 if (netif_msg_tx_err(lp)) 2087 if (netif_msg_tx_err(lp))
2021 printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", 2088 printk(KERN_ERR
2022 dev->name, csr0); 2089 "%s: Tx FIFO error! CSR0=%4.4x\n",
2023 must_restart = 1; 2090 dev->name, csr0);
2024 } 2091 must_restart = 1;
2092 }
2025#else 2093#else
2026 if (err_status & 0x40000000) { 2094 if (err_status & 0x40000000) {
2027 lp->stats.tx_fifo_errors++; 2095 lp->stats.tx_fifo_errors++;
2028 if (! lp->dxsuflo) { /* If controller doesn't recover ... */ 2096 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
2029 /* Ackk! On FIFO errors the Tx unit is turned off! */ 2097 /* Ackk! On FIFO errors the Tx unit is turned off! */
2030 /* Remove this verbosity later! */ 2098 /* Remove this verbosity later! */
2031 if (netif_msg_tx_err(lp)) 2099 if (netif_msg_tx_err
2032 printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", 2100 (lp))
2033 dev->name, csr0); 2101 printk(KERN_ERR
2034 must_restart = 1; 2102 "%s: Tx FIFO error! CSR0=%4.4x\n",
2035 } 2103 dev->
2036 } 2104 name,
2105 csr0);
2106 must_restart = 1;
2107 }
2108 }
2037#endif 2109#endif
2038 } else { 2110 } else {
2039 if (status & 0x1800) 2111 if (status & 0x1800)
2040 lp->stats.collisions++; 2112 lp->stats.collisions++;
2041 lp->stats.tx_packets++; 2113 lp->stats.tx_packets++;
2114 }
2115
2116 /* We must free the original skb */
2117 if (lp->tx_skbuff[entry]) {
2118 pci_unmap_single(lp->pci_dev,
2119 lp->tx_dma_addr[entry],
2120 lp->tx_skbuff[entry]->
2121 len, PCI_DMA_TODEVICE);
2122 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
2123 lp->tx_skbuff[entry] = NULL;
2124 lp->tx_dma_addr[entry] = 0;
2125 }
2126 dirty_tx++;
2127 }
2128
2129 delta =
2130 (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
2131 lp->tx_ring_size);
2132 if (delta > lp->tx_ring_size) {
2133 if (netif_msg_drv(lp))
2134 printk(KERN_ERR
2135 "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
2136 dev->name, dirty_tx, lp->cur_tx,
2137 lp->tx_full);
2138 dirty_tx += lp->tx_ring_size;
2139 delta -= lp->tx_ring_size;
2140 }
2141
2142 if (lp->tx_full &&
2143 netif_queue_stopped(dev) &&
2144 delta < lp->tx_ring_size - 2) {
2145 /* The ring is no longer full, clear tbusy. */
2146 lp->tx_full = 0;
2147 netif_wake_queue(dev);
2148 }
2149 lp->dirty_tx = dirty_tx;
2150 }
2151
2152 /* Log misc errors. */
2153 if (csr0 & 0x4000)
2154 lp->stats.tx_errors++; /* Tx babble. */
2155 if (csr0 & 0x1000) {
2156 /*
2157 * this happens when our receive ring is full. This shouldn't
2158 * be a problem as we will see normal rx interrupts for the frames
2159 * in the receive ring. But there are some PCI chipsets (I can
2160 * reproduce this on SP3G with Intel saturn chipset) which have
2161 * sometimes problems and will fill up the receive ring with
2162 * error descriptors. In this situation we don't get a rx
2163 * interrupt, but a missed frame interrupt sooner or later.
2164 * So we try to clean up our receive ring here.
2165 */
2166 pcnet32_rx(dev);
2167 lp->stats.rx_errors++; /* Missed a Rx frame. */
2168 }
2169 if (csr0 & 0x0800) {
2170 if (netif_msg_drv(lp))
2171 printk(KERN_ERR
2172 "%s: Bus master arbitration failure, status %4.4x.\n",
2173 dev->name, csr0);
2174 /* unlike for the lance, there is no restart needed */
2042 } 2175 }
2043 2176
2044 /* We must free the original skb */ 2177 if (must_restart) {
2045 if (lp->tx_skbuff[entry]) { 2178 /* reset the chip to clear the error condition, then restart */
2046 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], 2179 lp->a.reset(ioaddr);
2047 lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); 2180 lp->a.write_csr(ioaddr, 4, 0x0915);
2048 dev_kfree_skb_irq(lp->tx_skbuff[entry]); 2181 pcnet32_restart(dev, 0x0002);
2049 lp->tx_skbuff[entry] = NULL; 2182 netif_wake_queue(dev);
2050 lp->tx_dma_addr[entry] = 0;
2051 } 2183 }
2052 dirty_tx++; 2184 }
2053 } 2185
2054 2186 /* Set interrupt enable. */
2055 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); 2187 lp->a.write_csr(ioaddr, 0, 0x0040);
2056 if (delta > lp->tx_ring_size) { 2188 lp->a.write_rap(ioaddr, rap);
2057 if (netif_msg_drv(lp)) 2189
2058 printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", 2190 if (netif_msg_intr(lp))
2059 dev->name, dirty_tx, lp->cur_tx, lp->tx_full); 2191 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2060 dirty_tx += lp->tx_ring_size; 2192 dev->name, lp->a.read_csr(ioaddr, 0));
2061 delta -= lp->tx_ring_size; 2193
2062 } 2194 spin_unlock(&lp->lock);
2063 2195
2064 if (lp->tx_full && 2196 return IRQ_HANDLED;
2065 netif_queue_stopped(dev) &&
2066 delta < lp->tx_ring_size - 2) {
2067 /* The ring is no longer full, clear tbusy. */
2068 lp->tx_full = 0;
2069 netif_wake_queue (dev);
2070 }
2071 lp->dirty_tx = dirty_tx;
2072 }
2073
2074 /* Log misc errors. */
2075 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
2076 if (csr0 & 0x1000) {
2077 /*
2078 * this happens when our receive ring is full. This shouldn't
2079 * be a problem as we will see normal rx interrupts for the frames
2080 * in the receive ring. But there are some PCI chipsets (I can
2081 * reproduce this on SP3G with Intel saturn chipset) which have
2082 * sometimes problems and will fill up the receive ring with
2083 * error descriptors. In this situation we don't get a rx
2084 * interrupt, but a missed frame interrupt sooner or later.
2085 * So we try to clean up our receive ring here.
2086 */
2087 pcnet32_rx(dev);
2088 lp->stats.rx_errors++; /* Missed a Rx frame. */
2089 }
2090 if (csr0 & 0x0800) {
2091 if (netif_msg_drv(lp))
2092 printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
2093 dev->name, csr0);
2094 /* unlike for the lance, there is no restart needed */
2095 }
2096
2097 if (must_restart) {
2098 /* reset the chip to clear the error condition, then restart */
2099 lp->a.reset(ioaddr);
2100 lp->a.write_csr(ioaddr, 4, 0x0915);
2101 pcnet32_restart(dev, 0x0002);
2102 netif_wake_queue(dev);
2103 }
2104 }
2105
2106 /* Set interrupt enable. */
2107 lp->a.write_csr (ioaddr, 0, 0x0040);
2108 lp->a.write_rap (ioaddr,rap);
2109
2110 if (netif_msg_intr(lp))
2111 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2112 dev->name, lp->a.read_csr (ioaddr, 0));
2113
2114 spin_unlock(&lp->lock);
2115
2116 return IRQ_HANDLED;
2117} 2197}
2118 2198
2119static int 2199static int pcnet32_rx(struct net_device *dev)
2120pcnet32_rx(struct net_device *dev)
2121{ 2200{
2122 struct pcnet32_private *lp = dev->priv; 2201 struct pcnet32_private *lp = dev->priv;
2123 int entry = lp->cur_rx & lp->rx_mod_mask; 2202 int entry = lp->cur_rx & lp->rx_mod_mask;
2124 int boguscnt = lp->rx_ring_size / 2; 2203 int boguscnt = lp->rx_ring_size / 2;
2125 2204
2126 /* If we own the next entry, it's a new packet. Send it up. */ 2205 /* If we own the next entry, it's a new packet. Send it up. */
2127 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { 2206 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
2128 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; 2207 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
2129 2208
2130 if (status != 0x03) { /* There was an error. */ 2209 if (status != 0x03) { /* There was an error. */
2131 /* 2210 /*
2132 * There is a tricky error noted by John Murphy, 2211 * There is a tricky error noted by John Murphy,
2133 * <murf@perftech.com> to Russ Nelson: Even with full-sized 2212 * <murf@perftech.com> to Russ Nelson: Even with full-sized
2134 * buffers it's possible for a jabber packet to use two 2213 * buffers it's possible for a jabber packet to use two
2135 * buffers, with only the last correctly noting the error. 2214 * buffers, with only the last correctly noting the error.
2136 */ 2215 */
2137 if (status & 0x01) /* Only count a general error at the */ 2216 if (status & 0x01) /* Only count a general error at the */
2138 lp->stats.rx_errors++; /* end of a packet.*/ 2217 lp->stats.rx_errors++; /* end of a packet. */
2139 if (status & 0x20) lp->stats.rx_frame_errors++; 2218 if (status & 0x20)
2140 if (status & 0x10) lp->stats.rx_over_errors++; 2219 lp->stats.rx_frame_errors++;
2141 if (status & 0x08) lp->stats.rx_crc_errors++; 2220 if (status & 0x10)
2142 if (status & 0x04) lp->stats.rx_fifo_errors++; 2221 lp->stats.rx_over_errors++;
2143 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); 2222 if (status & 0x08)
2144 } else { 2223 lp->stats.rx_crc_errors++;
2145 /* Malloc up new buffer, compatible with net-2e. */ 2224 if (status & 0x04)
2146 short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4; 2225 lp->stats.rx_fifo_errors++;
2147 struct sk_buff *skb; 2226 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
2148
2149 /* Discard oversize frames. */
2150 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2151 if (netif_msg_drv(lp))
2152 printk(KERN_ERR "%s: Impossible packet size %d!\n",
2153 dev->name, pkt_len);
2154 lp->stats.rx_errors++;
2155 } else if (pkt_len < 60) {
2156 if (netif_msg_rx_err(lp))
2157 printk(KERN_ERR "%s: Runt packet!\n", dev->name);
2158 lp->stats.rx_errors++;
2159 } else {
2160 int rx_in_place = 0;
2161
2162 if (pkt_len > rx_copybreak) {
2163 struct sk_buff *newskb;
2164
2165 if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
2166 skb_reserve (newskb, 2);
2167 skb = lp->rx_skbuff[entry];
2168 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
2169 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
2170 skb_put (skb, pkt_len);
2171 lp->rx_skbuff[entry] = newskb;
2172 newskb->dev = dev;
2173 lp->rx_dma_addr[entry] =
2174 pci_map_single(lp->pci_dev, newskb->data,
2175 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
2176 lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
2177 rx_in_place = 1;
2178 } else
2179 skb = NULL;
2180 } else { 2227 } else {
2181 skb = dev_alloc_skb(pkt_len+2); 2228 /* Malloc up new buffer, compatible with net-2e. */
2182 } 2229 short pkt_len =
2183 2230 (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
2184 if (skb == NULL) { 2231 - 4;
2185 int i; 2232 struct sk_buff *skb;
2186 if (netif_msg_drv(lp)) 2233
2187 printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", 2234 /* Discard oversize frames. */
2188 dev->name); 2235 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2189 for (i = 0; i < lp->rx_ring_size; i++) 2236 if (netif_msg_drv(lp))
2190 if ((short)le16_to_cpu(lp->rx_ring[(entry+i) 2237 printk(KERN_ERR
2191 & lp->rx_mod_mask].status) < 0) 2238 "%s: Impossible packet size %d!\n",
2192 break; 2239 dev->name, pkt_len);
2193 2240 lp->stats.rx_errors++;
2194 if (i > lp->rx_ring_size -2) { 2241 } else if (pkt_len < 60) {
2195 lp->stats.rx_dropped++; 2242 if (netif_msg_rx_err(lp))
2196 lp->rx_ring[entry].status |= le16_to_cpu(0x8000); 2243 printk(KERN_ERR "%s: Runt packet!\n",
2197 wmb(); /* Make sure adapter sees owner change */ 2244 dev->name);
2198 lp->cur_rx++; 2245 lp->stats.rx_errors++;
2199 } 2246 } else {
2200 break; 2247 int rx_in_place = 0;
2201 } 2248
2202 skb->dev = dev; 2249 if (pkt_len > rx_copybreak) {
2203 if (!rx_in_place) { 2250 struct sk_buff *newskb;
2204 skb_reserve(skb,2); /* 16 byte align */ 2251
2205 skb_put(skb,pkt_len); /* Make room */ 2252 if ((newskb =
2206 pci_dma_sync_single_for_cpu(lp->pci_dev, 2253 dev_alloc_skb(PKT_BUF_SZ))) {
2207 lp->rx_dma_addr[entry], 2254 skb_reserve(newskb, 2);
2208 PKT_BUF_SZ-2, 2255 skb = lp->rx_skbuff[entry];
2209 PCI_DMA_FROMDEVICE); 2256 pci_unmap_single(lp->pci_dev,
2210 eth_copy_and_sum(skb, 2257 lp->
2211 (unsigned char *)(lp->rx_skbuff[entry]->data), 2258 rx_dma_addr
2212 pkt_len,0); 2259 [entry],
2213 pci_dma_sync_single_for_device(lp->pci_dev, 2260 PKT_BUF_SZ - 2,
2214 lp->rx_dma_addr[entry], 2261 PCI_DMA_FROMDEVICE);
2215 PKT_BUF_SZ-2, 2262 skb_put(skb, pkt_len);
2216 PCI_DMA_FROMDEVICE); 2263 lp->rx_skbuff[entry] = newskb;
2264 newskb->dev = dev;
2265 lp->rx_dma_addr[entry] =
2266 pci_map_single(lp->pci_dev,
2267 newskb->data,
2268 PKT_BUF_SZ -
2269 2,
2270 PCI_DMA_FROMDEVICE);
2271 lp->rx_ring[entry].base =
2272 le32_to_cpu(lp->
2273 rx_dma_addr
2274 [entry]);
2275 rx_in_place = 1;
2276 } else
2277 skb = NULL;
2278 } else {
2279 skb = dev_alloc_skb(pkt_len + 2);
2280 }
2281
2282 if (skb == NULL) {
2283 int i;
2284 if (netif_msg_drv(lp))
2285 printk(KERN_ERR
2286 "%s: Memory squeeze, deferring packet.\n",
2287 dev->name);
2288 for (i = 0; i < lp->rx_ring_size; i++)
2289 if ((short)
2290 le16_to_cpu(lp->
2291 rx_ring[(entry +
2292 i)
2293 & lp->
2294 rx_mod_mask].
2295 status) < 0)
2296 break;
2297
2298 if (i > lp->rx_ring_size - 2) {
2299 lp->stats.rx_dropped++;
2300 lp->rx_ring[entry].status |=
2301 le16_to_cpu(0x8000);
2302 wmb(); /* Make sure adapter sees owner change */
2303 lp->cur_rx++;
2304 }
2305 break;
2306 }
2307 skb->dev = dev;
2308 if (!rx_in_place) {
2309 skb_reserve(skb, 2); /* 16 byte align */
2310 skb_put(skb, pkt_len); /* Make room */
2311 pci_dma_sync_single_for_cpu(lp->pci_dev,
2312 lp->
2313 rx_dma_addr
2314 [entry],
2315 PKT_BUF_SZ -
2316 2,
2317 PCI_DMA_FROMDEVICE);
2318 eth_copy_and_sum(skb,
2319 (unsigned char *)(lp->
2320 rx_skbuff
2321 [entry]->
2322 data),
2323 pkt_len, 0);
2324 pci_dma_sync_single_for_device(lp->
2325 pci_dev,
2326 lp->
2327 rx_dma_addr
2328 [entry],
2329 PKT_BUF_SZ
2330 - 2,
2331 PCI_DMA_FROMDEVICE);
2332 }
2333 lp->stats.rx_bytes += skb->len;
2334 skb->protocol = eth_type_trans(skb, dev);
2335 netif_rx(skb);
2336 dev->last_rx = jiffies;
2337 lp->stats.rx_packets++;
2338 }
2217 } 2339 }
2218 lp->stats.rx_bytes += skb->len; 2340 /*
2219 skb->protocol=eth_type_trans(skb,dev); 2341 * The docs say that the buffer length isn't touched, but Andrew Boyd
2220 netif_rx(skb); 2342 * of QNX reports that some revs of the 79C965 clear it.
2221 dev->last_rx = jiffies; 2343 */
2222 lp->stats.rx_packets++; 2344 lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
2223 } 2345 wmb(); /* Make sure owner changes after all others are visible */
2346 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2347 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2348 if (--boguscnt <= 0)
2349 break; /* don't stay in loop forever */
2224 } 2350 }
2225 /* 2351
2226 * The docs say that the buffer length isn't touched, but Andrew Boyd 2352 return 0;
2227 * of QNX reports that some revs of the 79C965 clear it.
2228 */
2229 lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
2230 wmb(); /* Make sure owner changes after all others are visible */
2231 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2232 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2233 if (--boguscnt <= 0) break; /* don't stay in loop forever */
2234 }
2235
2236 return 0;
2237} 2353}
2238 2354
2239static int 2355static int pcnet32_close(struct net_device *dev)
2240pcnet32_close(struct net_device *dev)
2241{ 2356{
2242 unsigned long ioaddr = dev->base_addr; 2357 unsigned long ioaddr = dev->base_addr;
2243 struct pcnet32_private *lp = dev->priv; 2358 struct pcnet32_private *lp = dev->priv;
2244 int i; 2359 int i;
2245 unsigned long flags; 2360 unsigned long flags;
2246 2361
2247 del_timer_sync(&lp->watchdog_timer); 2362 del_timer_sync(&lp->watchdog_timer);
2248 2363
2249 netif_stop_queue(dev); 2364 netif_stop_queue(dev);
2250 2365
2251 spin_lock_irqsave(&lp->lock, flags); 2366 spin_lock_irqsave(&lp->lock, flags);
2252 2367
2253 lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); 2368 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2254 2369
2255 if (netif_msg_ifdown(lp)) 2370 if (netif_msg_ifdown(lp))
2256 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 2371 printk(KERN_DEBUG
2257 dev->name, lp->a.read_csr (ioaddr, 0)); 2372 "%s: Shutting down ethercard, status was %2.2x.\n",
2373 dev->name, lp->a.read_csr(ioaddr, 0));
2258 2374
2259 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ 2375 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2260 lp->a.write_csr (ioaddr, 0, 0x0004); 2376 lp->a.write_csr(ioaddr, 0, 0x0004);
2261 2377
2262 /* 2378 /*
2263 * Switch back to 16bit mode to avoid problems with dumb 2379 * Switch back to 16bit mode to avoid problems with dumb
2264 * DOS packet driver after a warm reboot 2380 * DOS packet driver after a warm reboot
2265 */ 2381 */
2266 lp->a.write_bcr (ioaddr, 20, 4); 2382 lp->a.write_bcr(ioaddr, 20, 4);
2267 2383
2268 spin_unlock_irqrestore(&lp->lock, flags); 2384 spin_unlock_irqrestore(&lp->lock, flags);
2269 2385
2270 free_irq(dev->irq, dev); 2386 free_irq(dev->irq, dev);
2271 2387
2272 spin_lock_irqsave(&lp->lock, flags); 2388 spin_lock_irqsave(&lp->lock, flags);
2273 2389
2274 /* free all allocated skbuffs */ 2390 /* free all allocated skbuffs */
2275 for (i = 0; i < lp->rx_ring_size; i++) { 2391 for (i = 0; i < lp->rx_ring_size; i++) {
2276 lp->rx_ring[i].status = 0; 2392 lp->rx_ring[i].status = 0;
2277 wmb(); /* Make sure adapter sees owner change */ 2393 wmb(); /* Make sure adapter sees owner change */
2278 if (lp->rx_skbuff[i]) { 2394 if (lp->rx_skbuff[i]) {
2279 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, 2395 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
2280 PCI_DMA_FROMDEVICE); 2396 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2281 dev_kfree_skb(lp->rx_skbuff[i]); 2397 dev_kfree_skb(lp->rx_skbuff[i]);
2398 }
2399 lp->rx_skbuff[i] = NULL;
2400 lp->rx_dma_addr[i] = 0;
2282 } 2401 }
2283 lp->rx_skbuff[i] = NULL;
2284 lp->rx_dma_addr[i] = 0;
2285 }
2286 2402
2287 for (i = 0; i < lp->tx_ring_size; i++) { 2403 for (i = 0; i < lp->tx_ring_size; i++) {
2288 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 2404 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2289 wmb(); /* Make sure adapter sees owner change */ 2405 wmb(); /* Make sure adapter sees owner change */
2290 if (lp->tx_skbuff[i]) { 2406 if (lp->tx_skbuff[i]) {
2291 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], 2407 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2292 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); 2408 lp->tx_skbuff[i]->len,
2293 dev_kfree_skb(lp->tx_skbuff[i]); 2409 PCI_DMA_TODEVICE);
2410 dev_kfree_skb(lp->tx_skbuff[i]);
2411 }
2412 lp->tx_skbuff[i] = NULL;
2413 lp->tx_dma_addr[i] = 0;
2294 } 2414 }
2295 lp->tx_skbuff[i] = NULL;
2296 lp->tx_dma_addr[i] = 0;
2297 }
2298 2415
2299 spin_unlock_irqrestore(&lp->lock, flags); 2416 spin_unlock_irqrestore(&lp->lock, flags);
2300 2417
2301 return 0; 2418 return 0;
2302} 2419}
2303 2420
2304static struct net_device_stats * 2421static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
2305pcnet32_get_stats(struct net_device *dev)
2306{ 2422{
2307 struct pcnet32_private *lp = dev->priv; 2423 struct pcnet32_private *lp = dev->priv;
2308 unsigned long ioaddr = dev->base_addr; 2424 unsigned long ioaddr = dev->base_addr;
2309 u16 saved_addr; 2425 u16 saved_addr;
2310 unsigned long flags; 2426 unsigned long flags;
2311 2427
2312 spin_lock_irqsave(&lp->lock, flags); 2428 spin_lock_irqsave(&lp->lock, flags);
2313 saved_addr = lp->a.read_rap(ioaddr); 2429 saved_addr = lp->a.read_rap(ioaddr);
2314 lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); 2430 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2315 lp->a.write_rap(ioaddr, saved_addr); 2431 lp->a.write_rap(ioaddr, saved_addr);
2316 spin_unlock_irqrestore(&lp->lock, flags); 2432 spin_unlock_irqrestore(&lp->lock, flags);
2317 2433
2318 return &lp->stats; 2434 return &lp->stats;
2319} 2435}
2320 2436
2321/* taken from the sunlance driver, which it took from the depca driver */ 2437/* taken from the sunlance driver, which it took from the depca driver */
2322static void pcnet32_load_multicast (struct net_device *dev) 2438static void pcnet32_load_multicast(struct net_device *dev)
2323{ 2439{
2324 struct pcnet32_private *lp = dev->priv; 2440 struct pcnet32_private *lp = dev->priv;
2325 volatile struct pcnet32_init_block *ib = &lp->init_block; 2441 volatile struct pcnet32_init_block *ib = &lp->init_block;
2326 volatile u16 *mcast_table = (u16 *)&ib->filter; 2442 volatile u16 *mcast_table = (u16 *) & ib->filter;
2327 struct dev_mc_list *dmi=dev->mc_list; 2443 struct dev_mc_list *dmi = dev->mc_list;
2328 char *addrs; 2444 char *addrs;
2329 int i; 2445 int i;
2330 u32 crc; 2446 u32 crc;
2331 2447
2332 /* set all multicast bits */ 2448 /* set all multicast bits */
2333 if (dev->flags & IFF_ALLMULTI) { 2449 if (dev->flags & IFF_ALLMULTI) {
2334 ib->filter[0] = 0xffffffff; 2450 ib->filter[0] = 0xffffffff;
2335 ib->filter[1] = 0xffffffff; 2451 ib->filter[1] = 0xffffffff;
2452 return;
2453 }
2454 /* clear the multicast filter */
2455 ib->filter[0] = 0;
2456 ib->filter[1] = 0;
2457
2458 /* Add addresses */
2459 for (i = 0; i < dev->mc_count; i++) {
2460 addrs = dmi->dmi_addr;
2461 dmi = dmi->next;
2462
2463 /* multicast address? */
2464 if (!(*addrs & 1))
2465 continue;
2466
2467 crc = ether_crc_le(6, addrs);
2468 crc = crc >> 26;
2469 mcast_table[crc >> 4] =
2470 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
2471 (1 << (crc & 0xf)));
2472 }
2336 return; 2473 return;
2337 }
2338 /* clear the multicast filter */
2339 ib->filter[0] = 0;
2340 ib->filter[1] = 0;
2341
2342 /* Add addresses */
2343 for (i = 0; i < dev->mc_count; i++) {
2344 addrs = dmi->dmi_addr;
2345 dmi = dmi->next;
2346
2347 /* multicast address? */
2348 if (!(*addrs & 1))
2349 continue;
2350
2351 crc = ether_crc_le(6, addrs);
2352 crc = crc >> 26;
2353 mcast_table [crc >> 4] = le16_to_cpu(
2354 le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf)));
2355 }
2356 return;
2357} 2474}
2358 2475
2359
2360/* 2476/*
2361 * Set or clear the multicast filter for this adaptor. 2477 * Set or clear the multicast filter for this adaptor.
2362 */ 2478 */
2363static void pcnet32_set_multicast_list(struct net_device *dev) 2479static void pcnet32_set_multicast_list(struct net_device *dev)
2364{ 2480{
2365 unsigned long ioaddr = dev->base_addr, flags; 2481 unsigned long ioaddr = dev->base_addr, flags;
2366 struct pcnet32_private *lp = dev->priv; 2482 struct pcnet32_private *lp = dev->priv;
2367 2483
2368 spin_lock_irqsave(&lp->lock, flags); 2484 spin_lock_irqsave(&lp->lock, flags);
2369 if (dev->flags&IFF_PROMISC) { 2485 if (dev->flags & IFF_PROMISC) {
2370 /* Log any net taps. */ 2486 /* Log any net taps. */
2371 if (netif_msg_hw(lp)) 2487 if (netif_msg_hw(lp))
2372 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); 2488 printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2373 lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); 2489 dev->name);
2374 } else { 2490 lp->init_block.mode =
2375 lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 2491 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2376 pcnet32_load_multicast (dev); 2492 7);
2377 } 2493 } else {
2378 2494 lp->init_block.mode =
2379 lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ 2495 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
2380 pcnet32_restart(dev, 0x0042); /* Resume normal operation */ 2496 pcnet32_load_multicast(dev);
2381 netif_wake_queue(dev); 2497 }
2382 2498
2383 spin_unlock_irqrestore(&lp->lock, flags); 2499 lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
2500 pcnet32_restart(dev, 0x0042); /* Resume normal operation */
2501 netif_wake_queue(dev);
2502
2503 spin_unlock_irqrestore(&lp->lock, flags);
2384} 2504}
2385 2505
2386/* This routine assumes that the lp->lock is held */ 2506/* This routine assumes that the lp->lock is held */
2387static int mdio_read(struct net_device *dev, int phy_id, int reg_num) 2507static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2388{ 2508{
2389 struct pcnet32_private *lp = dev->priv; 2509 struct pcnet32_private *lp = dev->priv;
2390 unsigned long ioaddr = dev->base_addr; 2510 unsigned long ioaddr = dev->base_addr;
2391 u16 val_out; 2511 u16 val_out;
2392 2512
2393 if (!lp->mii) 2513 if (!lp->mii)
2394 return 0; 2514 return 0;
2395 2515
2396 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); 2516 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2397 val_out = lp->a.read_bcr(ioaddr, 34); 2517 val_out = lp->a.read_bcr(ioaddr, 34);
2398 2518
2399 return val_out; 2519 return val_out;
2400} 2520}
2401 2521
2402/* This routine assumes that the lp->lock is held */ 2522/* This routine assumes that the lp->lock is held */
2403static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) 2523static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2404{ 2524{
2405 struct pcnet32_private *lp = dev->priv; 2525 struct pcnet32_private *lp = dev->priv;
2406 unsigned long ioaddr = dev->base_addr; 2526 unsigned long ioaddr = dev->base_addr;
2407 2527
2408 if (!lp->mii) 2528 if (!lp->mii)
2409 return; 2529 return;
2410 2530
2411 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); 2531 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2412 lp->a.write_bcr(ioaddr, 34, val); 2532 lp->a.write_bcr(ioaddr, 34, val);
2413} 2533}
2414 2534
2415static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2535static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2416{ 2536{
2417 struct pcnet32_private *lp = dev->priv; 2537 struct pcnet32_private *lp = dev->priv;
2418 int rc; 2538 int rc;
2419 unsigned long flags; 2539 unsigned long flags;
2540
2541 /* SIOC[GS]MIIxxx ioctls */
2542 if (lp->mii) {
2543 spin_lock_irqsave(&lp->lock, flags);
2544 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2545 spin_unlock_irqrestore(&lp->lock, flags);
2546 } else {
2547 rc = -EOPNOTSUPP;
2548 }
2549
2550 return rc;
2551}
2552
2553static int pcnet32_check_otherphy(struct net_device *dev)
2554{
2555 struct pcnet32_private *lp = dev->priv;
2556 struct mii_if_info mii = lp->mii_if;
2557 u16 bmcr;
2558 int i;
2420 2559
2421 /* SIOC[GS]MIIxxx ioctls */ 2560 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2422 if (lp->mii) { 2561 if (i == lp->mii_if.phy_id)
2423 spin_lock_irqsave(&lp->lock, flags); 2562 continue; /* skip active phy */
2424 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); 2563 if (lp->phymask & (1 << i)) {
2425 spin_unlock_irqrestore(&lp->lock, flags); 2564 mii.phy_id = i;
2426 } else { 2565 if (mii_link_ok(&mii)) {
2427 rc = -EOPNOTSUPP; 2566 /* found PHY with active link */
2428 } 2567 if (netif_msg_link(lp))
2568 printk(KERN_INFO
2569 "%s: Using PHY number %d.\n",
2570 dev->name, i);
2571
2572 /* isolate inactive phy */
2573 bmcr =
2574 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2575 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2576 bmcr | BMCR_ISOLATE);
2577
2578 /* de-isolate new phy */
2579 bmcr = mdio_read(dev, i, MII_BMCR);
2580 mdio_write(dev, i, MII_BMCR,
2581 bmcr & ~BMCR_ISOLATE);
2582
2583 /* set new phy address */
2584 lp->mii_if.phy_id = i;
2585 return 1;
2586 }
2587 }
2588 }
2589 return 0;
2590}
2591
2592/*
2593 * Show the status of the media. Similar to mii_check_media however it
2594 * correctly shows the link speed for all (tested) pcnet32 variants.
2595 * Devices with no mii just report link state without speed.
2596 *
2597 * Caller is assumed to hold and release the lp->lock.
2598 */
2429 2599
2430 return rc; 2600static void pcnet32_check_media(struct net_device *dev, int verbose)
2601{
2602 struct pcnet32_private *lp = dev->priv;
2603 int curr_link;
2604 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2605 u32 bcr9;
2606
2607 if (lp->mii) {
2608 curr_link = mii_link_ok(&lp->mii_if);
2609 } else {
2610 ulong ioaddr = dev->base_addr; /* card base I/O address */
2611 curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
2612 }
2613 if (!curr_link) {
2614 if (prev_link || verbose) {
2615 netif_carrier_off(dev);
2616 if (netif_msg_link(lp))
2617 printk(KERN_INFO "%s: link down\n", dev->name);
2618 }
2619 if (lp->phycount > 1) {
2620 curr_link = pcnet32_check_otherphy(dev);
2621 prev_link = 0;
2622 }
2623 } else if (verbose || !prev_link) {
2624 netif_carrier_on(dev);
2625 if (lp->mii) {
2626 if (netif_msg_link(lp)) {
2627 struct ethtool_cmd ecmd;
2628 mii_ethtool_gset(&lp->mii_if, &ecmd);
2629 printk(KERN_INFO
2630 "%s: link up, %sMbps, %s-duplex\n",
2631 dev->name,
2632 (ecmd.speed == SPEED_100) ? "100" : "10",
2633 (ecmd.duplex ==
2634 DUPLEX_FULL) ? "full" : "half");
2635 }
2636 bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2637 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2638 if (lp->mii_if.full_duplex)
2639 bcr9 |= (1 << 0);
2640 else
2641 bcr9 &= ~(1 << 0);
2642 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2643 }
2644 } else {
2645 if (netif_msg_link(lp))
2646 printk(KERN_INFO "%s: link up\n", dev->name);
2647 }
2648 }
2431} 2649}
2432 2650
2651/*
2652 * Check for loss of link and link establishment.
2653 * Can not use mii_check_media because it does nothing if mode is forced.
2654 */
2655
2433static void pcnet32_watchdog(struct net_device *dev) 2656static void pcnet32_watchdog(struct net_device *dev)
2434{ 2657{
2435 struct pcnet32_private *lp = dev->priv; 2658 struct pcnet32_private *lp = dev->priv;
2436 unsigned long flags; 2659 unsigned long flags;
2437 2660
2438 /* Print the link status if it has changed */ 2661 /* Print the link status if it has changed */
2439 if (lp->mii) {
2440 spin_lock_irqsave(&lp->lock, flags); 2662 spin_lock_irqsave(&lp->lock, flags);
2441 mii_check_media (&lp->mii_if, netif_msg_link(lp), 0); 2663 pcnet32_check_media(dev, 0);
2442 spin_unlock_irqrestore(&lp->lock, flags); 2664 spin_unlock_irqrestore(&lp->lock, flags);
2443 }
2444 2665
2445 mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 2666 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2446} 2667}
2447 2668
2448static void __devexit pcnet32_remove_one(struct pci_dev *pdev) 2669static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2449{ 2670{
2450 struct net_device *dev = pci_get_drvdata(pdev); 2671 struct net_device *dev = pci_get_drvdata(pdev);
2451 2672
2452 if (dev) { 2673 if (dev) {
2453 struct pcnet32_private *lp = dev->priv; 2674 struct pcnet32_private *lp = dev->priv;
2454 2675
2455 unregister_netdev(dev); 2676 unregister_netdev(dev);
2456 pcnet32_free_ring(dev); 2677 pcnet32_free_ring(dev);
2457 release_region(dev->base_addr, PCNET32_TOTAL_SIZE); 2678 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2458 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2679 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2459 free_netdev(dev); 2680 free_netdev(dev);
2460 pci_disable_device(pdev); 2681 pci_disable_device(pdev);
2461 pci_set_drvdata(pdev, NULL); 2682 pci_set_drvdata(pdev, NULL);
2462 } 2683 }
2463} 2684}
2464 2685
2465static struct pci_driver pcnet32_driver = { 2686static struct pci_driver pcnet32_driver = {
2466 .name = DRV_NAME, 2687 .name = DRV_NAME,
2467 .probe = pcnet32_probe_pci, 2688 .probe = pcnet32_probe_pci,
2468 .remove = __devexit_p(pcnet32_remove_one), 2689 .remove = __devexit_p(pcnet32_remove_one),
2469 .id_table = pcnet32_pci_tbl, 2690 .id_table = pcnet32_pci_tbl,
2470}; 2691};
2471 2692
2472/* An additional parameter that may be passed in... */ 2693/* An additional parameter that may be passed in... */
@@ -2477,9 +2698,11 @@ static int pcnet32_have_pci;
2477module_param(debug, int, 0); 2698module_param(debug, int, 0);
2478MODULE_PARM_DESC(debug, DRV_NAME " debug level"); 2699MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2479module_param(max_interrupt_work, int, 0); 2700module_param(max_interrupt_work, int, 0);
2480MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); 2701MODULE_PARM_DESC(max_interrupt_work,
2702 DRV_NAME " maximum events handled per interrupt");
2481module_param(rx_copybreak, int, 0); 2703module_param(rx_copybreak, int, 0);
2482MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); 2704MODULE_PARM_DESC(rx_copybreak,
2705 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
2483module_param(tx_start_pt, int, 0); 2706module_param(tx_start_pt, int, 0);
2484MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); 2707MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2485module_param(pcnet32vlb, int, 0); 2708module_param(pcnet32vlb, int, 0);
@@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0);
2490MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); 2713MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2491/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ 2714/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2492module_param_array(homepna, int, NULL, 0); 2715module_param_array(homepna, int, NULL, 0);
2493MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); 2716MODULE_PARM_DESC(homepna,
2717 DRV_NAME
2718 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
2494 2719
2495MODULE_AUTHOR("Thomas Bogendoerfer"); 2720MODULE_AUTHOR("Thomas Bogendoerfer");
2496MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); 2721MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
@@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL");
2500 2725
2501static int __init pcnet32_init_module(void) 2726static int __init pcnet32_init_module(void)
2502{ 2727{
2503 printk(KERN_INFO "%s", version); 2728 printk(KERN_INFO "%s", version);
2504 2729
2505 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); 2730 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
2506 2731
2507 if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) 2732 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
2508 tx_start = tx_start_pt; 2733 tx_start = tx_start_pt;
2509 2734
2510 /* find the PCI devices */ 2735 /* find the PCI devices */
2511 if (!pci_module_init(&pcnet32_driver)) 2736 if (!pci_module_init(&pcnet32_driver))
2512 pcnet32_have_pci = 1; 2737 pcnet32_have_pci = 1;
2513 2738
2514 /* should we find any remaining VLbus devices ? */ 2739 /* should we find any remaining VLbus devices ? */
2515 if (pcnet32vlb) 2740 if (pcnet32vlb)
2516 pcnet32_probe_vlbus(); 2741 pcnet32_probe_vlbus();
2517 2742
2518 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) 2743 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2519 printk(KERN_INFO PFX "%d cards_found.\n", cards_found); 2744 printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
2520 2745
2521 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; 2746 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
2522} 2747}
2523 2748
2524static void __exit pcnet32_cleanup_module(void) 2749static void __exit pcnet32_cleanup_module(void)
2525{ 2750{
2526 struct net_device *next_dev; 2751 struct net_device *next_dev;
2527 2752
2528 while (pcnet32_dev) { 2753 while (pcnet32_dev) {
2529 struct pcnet32_private *lp = pcnet32_dev->priv; 2754 struct pcnet32_private *lp = pcnet32_dev->priv;
2530 next_dev = lp->next; 2755 next_dev = lp->next;
2531 unregister_netdev(pcnet32_dev); 2756 unregister_netdev(pcnet32_dev);
2532 pcnet32_free_ring(pcnet32_dev); 2757 pcnet32_free_ring(pcnet32_dev);
2533 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); 2758 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2534 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2759 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2535 free_netdev(pcnet32_dev); 2760 free_netdev(pcnet32_dev);
2536 pcnet32_dev = next_dev; 2761 pcnet32_dev = next_dev;
2537 } 2762 }
2538 2763
2539 if (pcnet32_have_pci) 2764 if (pcnet32_have_pci)
2540 pci_unregister_driver(&pcnet32_driver); 2765 pci_unregister_driver(&pcnet32_driver);
2541} 2766}
2542 2767
2543module_init(pcnet32_init_module); 2768module_init(pcnet32_init_module);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0245e40b51a1..b2073fce8216 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
46#include <linux/rwsem.h> 46#include <linux/rwsem.h>
47#include <linux/stddef.h> 47#include <linux/stddef.h>
48#include <linux/device.h> 48#include <linux/device.h>
49#include <linux/mutex.h>
49#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
50#include <asm/atomic.h> 51#include <asm/atomic.h>
51 52
@@ -198,11 +199,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map);
198static void cardmap_destroy(struct cardmap **map); 199static void cardmap_destroy(struct cardmap **map);
199 200
200/* 201/*
201 * all_ppp_sem protects the all_ppp_units mapping. 202 * all_ppp_mutex protects the all_ppp_units mapping.
202 * It also ensures that finding a ppp unit in the all_ppp_units map 203 * It also ensures that finding a ppp unit in the all_ppp_units map
203 * and updating its file.refcnt field is atomic. 204 * and updating its file.refcnt field is atomic.
204 */ 205 */
205static DECLARE_MUTEX(all_ppp_sem); 206static DEFINE_MUTEX(all_ppp_mutex);
206static struct cardmap *all_ppp_units; 207static struct cardmap *all_ppp_units;
207static atomic_t ppp_unit_count = ATOMIC_INIT(0); 208static atomic_t ppp_unit_count = ATOMIC_INIT(0);
208 209
@@ -804,7 +805,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
804 /* Attach to an existing ppp unit */ 805 /* Attach to an existing ppp unit */
805 if (get_user(unit, p)) 806 if (get_user(unit, p))
806 break; 807 break;
807 down(&all_ppp_sem); 808 mutex_lock(&all_ppp_mutex);
808 err = -ENXIO; 809 err = -ENXIO;
809 ppp = ppp_find_unit(unit); 810 ppp = ppp_find_unit(unit);
810 if (ppp != 0) { 811 if (ppp != 0) {
@@ -812,7 +813,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
812 file->private_data = &ppp->file; 813 file->private_data = &ppp->file;
813 err = 0; 814 err = 0;
814 } 815 }
815 up(&all_ppp_sem); 816 mutex_unlock(&all_ppp_mutex);
816 break; 817 break;
817 818
818 case PPPIOCATTCHAN: 819 case PPPIOCATTCHAN:
@@ -1691,8 +1692,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1691 || ppp->npmode[npi] != NPMODE_PASS) { 1692 || ppp->npmode[npi] != NPMODE_PASS) {
1692 kfree_skb(skb); 1693 kfree_skb(skb);
1693 } else { 1694 } else {
1694 skb_pull(skb, 2); /* chop off protocol */ 1695 /* chop off protocol */
1695 skb_postpull_rcsum(skb, skb->data - 2, 2); 1696 skb_pull_rcsum(skb, 2);
1696 skb->dev = ppp->dev; 1697 skb->dev = ppp->dev;
1697 skb->protocol = htons(npindex_to_ethertype[npi]); 1698 skb->protocol = htons(npindex_to_ethertype[npi]);
1698 skb->mac.raw = skb->data; 1699 skb->mac.raw = skb->data;
@@ -2446,7 +2447,7 @@ ppp_create_interface(int unit, int *retp)
2446 dev->do_ioctl = ppp_net_ioctl; 2447 dev->do_ioctl = ppp_net_ioctl;
2447 2448
2448 ret = -EEXIST; 2449 ret = -EEXIST;
2449 down(&all_ppp_sem); 2450 mutex_lock(&all_ppp_mutex);
2450 if (unit < 0) 2451 if (unit < 0)
2451 unit = cardmap_find_first_free(all_ppp_units); 2452 unit = cardmap_find_first_free(all_ppp_units);
2452 else if (cardmap_get(all_ppp_units, unit) != NULL) 2453 else if (cardmap_get(all_ppp_units, unit) != NULL)
@@ -2465,12 +2466,12 @@ ppp_create_interface(int unit, int *retp)
2465 2466
2466 atomic_inc(&ppp_unit_count); 2467 atomic_inc(&ppp_unit_count);
2467 cardmap_set(&all_ppp_units, unit, ppp); 2468 cardmap_set(&all_ppp_units, unit, ppp);
2468 up(&all_ppp_sem); 2469 mutex_unlock(&all_ppp_mutex);
2469 *retp = 0; 2470 *retp = 0;
2470 return ppp; 2471 return ppp;
2471 2472
2472out2: 2473out2:
2473 up(&all_ppp_sem); 2474 mutex_unlock(&all_ppp_mutex);
2474 free_netdev(dev); 2475 free_netdev(dev);
2475out1: 2476out1:
2476 kfree(ppp); 2477 kfree(ppp);
@@ -2500,7 +2501,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2500{ 2501{
2501 struct net_device *dev; 2502 struct net_device *dev;
2502 2503
2503 down(&all_ppp_sem); 2504 mutex_lock(&all_ppp_mutex);
2504 ppp_lock(ppp); 2505 ppp_lock(ppp);
2505 dev = ppp->dev; 2506 dev = ppp->dev;
2506 ppp->dev = NULL; 2507 ppp->dev = NULL;
@@ -2514,7 +2515,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2514 ppp->file.dead = 1; 2515 ppp->file.dead = 1;
2515 ppp->owner = NULL; 2516 ppp->owner = NULL;
2516 wake_up_interruptible(&ppp->file.rwait); 2517 wake_up_interruptible(&ppp->file.rwait);
2517 up(&all_ppp_sem); 2518 mutex_unlock(&all_ppp_mutex);
2518} 2519}
2519 2520
2520/* 2521/*
@@ -2556,7 +2557,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
2556 2557
2557/* 2558/*
2558 * Locate an existing ppp unit. 2559 * Locate an existing ppp unit.
2559 * The caller should have locked the all_ppp_sem. 2560 * The caller should have locked the all_ppp_mutex.
2560 */ 2561 */
2561static struct ppp * 2562static struct ppp *
2562ppp_find_unit(int unit) 2563ppp_find_unit(int unit)
@@ -2601,7 +2602,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2601 int ret = -ENXIO; 2602 int ret = -ENXIO;
2602 int hdrlen; 2603 int hdrlen;
2603 2604
2604 down(&all_ppp_sem); 2605 mutex_lock(&all_ppp_mutex);
2605 ppp = ppp_find_unit(unit); 2606 ppp = ppp_find_unit(unit);
2606 if (ppp == 0) 2607 if (ppp == 0)
2607 goto out; 2608 goto out;
@@ -2626,7 +2627,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2626 outl: 2627 outl:
2627 write_unlock_bh(&pch->upl); 2628 write_unlock_bh(&pch->upl);
2628 out: 2629 out:
2629 up(&all_ppp_sem); 2630 mutex_unlock(&all_ppp_mutex);
2630 return ret; 2631 return ret;
2631} 2632}
2632 2633
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 9369f811075d..475dc930380f 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -337,8 +337,7 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
337 if (sk->sk_state & PPPOX_BOUND) { 337 if (sk->sk_state & PPPOX_BOUND) {
338 struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; 338 struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw;
339 int len = ntohs(ph->length); 339 int len = ntohs(ph->length);
340 skb_pull(skb, sizeof(struct pppoe_hdr)); 340 skb_pull_rcsum(skb, sizeof(struct pppoe_hdr));
341 skb_postpull_rcsum(skb, ph, sizeof(*ph));
342 if (pskb_trim_rcsum(skb, len)) 341 if (pskb_trim_rcsum(skb, len))
343 goto abort_kfree; 342 goto abort_kfree;
344 343
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index a1cb07cdb60f..253440a98022 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -128,6 +128,7 @@ static const struct mii_chip_info {
128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, 128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, 129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
130 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, 130 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
131 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
131 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, 132 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
132 { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME}, 133 { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
133 { "ICS LAN PHY", 0x0015, 0xF440, LAN }, 134 { "ICS LAN PHY", 0x0015, 0xF440, LAN },
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a4b2b6975d6c..0784f558ca9a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc)
549static void enable_formac(struct s_smc *smc) 549static void enable_formac(struct s_smc *smc)
550{ 550{
551 /* set formac IMSK : 0 enables irq */ 551 /* set formac IMSK : 0 enables irq */
552 outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ; 552 outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
553 outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ; 553 outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
554 outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ; 554 outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
555 outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ; 555 outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
556 outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ; 556 outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
557 outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ; 557 outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
558} 558}
559 559
560#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */ 560#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce48..35dbf05c7f06 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,7 +44,7 @@
44#include "skge.h" 44#include "skge.h"
45 45
46#define DRV_NAME "skge" 46#define DRV_NAME "skge"
47#define DRV_VERSION "1.3" 47#define DRV_VERSION "1.5"
48#define PFX DRV_NAME " " 48#define PFX DRV_NAME " "
49 49
50#define DEFAULT_TX_RING_SIZE 128 50#define DEFAULT_TX_RING_SIZE 128
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
104static const int rxqaddr[] = { Q_R1, Q_R2 }; 104static const int rxqaddr[] = { Q_R1, Q_R2 };
105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
107static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
108 107
109static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
110{ 109{
@@ -358,7 +357,7 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev)
358 skge->net_stats.rx_bytes = data[1]; 357 skge->net_stats.rx_bytes = data[1];
359 skge->net_stats.tx_packets = data[2] + data[4] + data[6]; 358 skge->net_stats.tx_packets = data[2] + data[4] + data[6];
360 skge->net_stats.rx_packets = data[3] + data[5] + data[7]; 359 skge->net_stats.rx_packets = data[3] + data[5] + data[7];
361 skge->net_stats.multicast = data[5] + data[7]; 360 skge->net_stats.multicast = data[3] + data[5];
362 skge->net_stats.collisions = data[10]; 361 skge->net_stats.collisions = data[10];
363 skge->net_stats.tx_aborted_errors = data[12]; 362 skge->net_stats.tx_aborted_errors = data[12];
364 363
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = {
728 * Allocate ring elements and chain them together 727 * Allocate ring elements and chain them together
729 * One-to-one association of board descriptors with ring elements 728 * One-to-one association of board descriptors with ring elements
730 */ 729 */
731static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) 730static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
732{ 731{
733 struct skge_tx_desc *d; 732 struct skge_tx_desc *d;
734 struct skge_element *e; 733 struct skge_element *e;
735 int i; 734 int i;
736 735
737 ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL); 736 ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
738 if (!ring->start) 737 if (!ring->start)
739 return -ENOMEM; 738 return -ENOMEM;
740 739
741 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 740 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
742 e->desc = d; 741 e->desc = d;
743 e->skb = NULL;
744 if (i == ring->count - 1) { 742 if (i == ring->count - 1) {
745 e->next = ring->start; 743 e->next = ring->start;
746 d->next_offset = base; 744 d->next_offset = base;
@@ -783,7 +781,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
783 * Note: DMA address is not changed by chip. 781 * Note: DMA address is not changed by chip.
784 * MTU not changed while receiver active. 782 * MTU not changed while receiver active.
785 */ 783 */
786static void skge_rx_reuse(struct skge_element *e, unsigned int size) 784static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
787{ 785{
788 struct skge_rx_desc *rd = e->desc; 786 struct skge_rx_desc *rd = e->desc;
789 787
@@ -831,7 +829,7 @@ static int skge_rx_fill(struct skge_port *skge)
831 do { 829 do {
832 struct sk_buff *skb; 830 struct sk_buff *skb;
833 831
834 skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); 832 skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
835 if (!skb) 833 if (!skb)
836 return -ENOMEM; 834 return -ENOMEM;
837 835
@@ -849,8 +847,7 @@ static void skge_link_up(struct skge_port *skge)
849 LED_BLK_OFF|LED_SYNC_OFF|LED_ON); 847 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
850 848
851 netif_carrier_on(skge->netdev); 849 netif_carrier_on(skge->netdev);
852 if (skge->tx_avail > MAX_SKB_FRAGS + 1) 850 netif_wake_queue(skge->netdev);
853 netif_wake_queue(skge->netdev);
854 851
855 if (netif_msg_link(skge)) 852 if (netif_msg_link(skge))
856 printk(KERN_INFO PFX 853 printk(KERN_INFO PFX
@@ -2157,7 +2154,7 @@ static int skge_up(struct net_device *dev)
2157 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2154 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2158 2155
2159 if (dev->mtu > RX_BUF_SIZE) 2156 if (dev->mtu > RX_BUF_SIZE)
2160 skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN; 2157 skge->rx_buf_size = dev->mtu + ETH_HLEN;
2161 else 2158 else
2162 skge->rx_buf_size = RX_BUF_SIZE; 2159 skge->rx_buf_size = RX_BUF_SIZE;
2163 2160
@@ -2169,27 +2166,29 @@ static int skge_up(struct net_device *dev)
2169 if (!skge->mem) 2166 if (!skge->mem)
2170 return -ENOMEM; 2167 return -ENOMEM;
2171 2168
2169 BUG_ON(skge->dma & 7);
2170
2171 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2172 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
2173 err = -EINVAL;
2174 goto free_pci_mem;
2175 }
2176
2172 memset(skge->mem, 0, skge->mem_size); 2177 memset(skge->mem, 0, skge->mem_size);
2173 2178
2174 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) 2179 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2180 if (err)
2175 goto free_pci_mem; 2181 goto free_pci_mem;
2176 2182
2177 err = skge_rx_fill(skge); 2183 err = skge_rx_fill(skge);
2178 if (err) 2184 if (err)
2179 goto free_rx_ring; 2185 goto free_rx_ring;
2180 2186
2181 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2187 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2182 skge->dma + rx_size))) 2188 skge->dma + rx_size);
2189 if (err)
2183 goto free_rx_ring; 2190 goto free_rx_ring;
2184 2191
2185 skge->tx_avail = skge->tx_ring.count - 1;
2186
2187 /* Enable IRQ from port */
2188 spin_lock_irq(&hw->hw_lock);
2189 hw->intr_mask |= portirqmask[port];
2190 skge_write32(hw, B0_IMSK, hw->intr_mask);
2191 spin_unlock_irq(&hw->hw_lock);
2192
2193 /* Initialize MAC */ 2192 /* Initialize MAC */
2194 spin_lock_bh(&hw->phy_lock); 2193 spin_lock_bh(&hw->phy_lock);
2195 if (hw->chip_id == CHIP_ID_GENESIS) 2194 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2246,11 +2245,6 @@ static int skge_down(struct net_device *dev)
2246 else 2245 else
2247 yukon_stop(skge); 2246 yukon_stop(skge);
2248 2247
2249 spin_lock_irq(&hw->hw_lock);
2250 hw->intr_mask &= ~portirqmask[skge->port];
2251 skge_write32(hw, B0_IMSK, hw->intr_mask);
2252 spin_unlock_irq(&hw->hw_lock);
2253
2254 /* Stop transmitter */ 2248 /* Stop transmitter */
2255 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2249 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2256 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2250 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2297,6 +2291,12 @@ static int skge_down(struct net_device *dev)
2297 return 0; 2291 return 0;
2298} 2292}
2299 2293
2294static inline int skge_avail(const struct skge_ring *ring)
2295{
2296 return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2297 + (ring->to_clean - ring->to_use) - 1;
2298}
2299
2300static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) 2300static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2301{ 2301{
2302 struct skge_port *skge = netdev_priv(dev); 2302 struct skge_port *skge = netdev_priv(dev);
@@ -2307,27 +2307,24 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2307 int i; 2307 int i;
2308 u32 control, len; 2308 u32 control, len;
2309 u64 map; 2309 u64 map;
2310 unsigned long flags;
2311 2310
2312 skb = skb_padto(skb, ETH_ZLEN); 2311 skb = skb_padto(skb, ETH_ZLEN);
2313 if (!skb) 2312 if (!skb)
2314 return NETDEV_TX_OK; 2313 return NETDEV_TX_OK;
2315 2314
2316 local_irq_save(flags);
2317 if (!spin_trylock(&skge->tx_lock)) { 2315 if (!spin_trylock(&skge->tx_lock)) {
2318 /* Collision - tell upper layer to requeue */ 2316 /* Collision - tell upper layer to requeue */
2319 local_irq_restore(flags); 2317 return NETDEV_TX_LOCKED;
2320 return NETDEV_TX_LOCKED; 2318 }
2321 }
2322 2319
2323 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2320 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
2324 if (!netif_queue_stopped(dev)) { 2321 if (!netif_queue_stopped(dev)) {
2325 netif_stop_queue(dev); 2322 netif_stop_queue(dev);
2326 2323
2327 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 2324 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2328 dev->name); 2325 dev->name);
2329 } 2326 }
2330 spin_unlock_irqrestore(&skge->tx_lock, flags); 2327 spin_unlock(&skge->tx_lock);
2331 return NETDEV_TX_BUSY; 2328 return NETDEV_TX_BUSY;
2332 } 2329 }
2333 2330
@@ -2396,49 +2393,51 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2396 dev->name, e - ring->start, skb->len); 2393 dev->name, e - ring->start, skb->len);
2397 2394
2398 ring->to_use = e->next; 2395 ring->to_use = e->next;
2399 skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1; 2396 if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
2400 if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
2401 pr_debug("%s: transmit queue full\n", dev->name); 2397 pr_debug("%s: transmit queue full\n", dev->name);
2402 netif_stop_queue(dev); 2398 netif_stop_queue(dev);
2403 } 2399 }
2404 2400
2401 mmiowb();
2402 spin_unlock(&skge->tx_lock);
2403
2405 dev->trans_start = jiffies; 2404 dev->trans_start = jiffies;
2406 spin_unlock_irqrestore(&skge->tx_lock, flags);
2407 2405
2408 return NETDEV_TX_OK; 2406 return NETDEV_TX_OK;
2409} 2407}
2410 2408
2411static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) 2409static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
2412{ 2410{
2413 /* This ring element can be skb or fragment */ 2411 struct pci_dev *pdev = skge->hw->pdev;
2414 if (e->skb) { 2412 struct skge_element *e;
2415 pci_unmap_single(hw->pdev, 2413
2416 pci_unmap_addr(e, mapaddr), 2414 for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
2417 pci_unmap_len(e, maplen), 2415 struct sk_buff *skb = e->skb;
2418 PCI_DMA_TODEVICE); 2416 int i;
2419 dev_kfree_skb_any(e->skb); 2417
2420 e->skb = NULL; 2418 e->skb = NULL;
2421 } else { 2419 pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
2422 pci_unmap_page(hw->pdev, 2420 skb_headlen(skb), PCI_DMA_TODEVICE);
2423 pci_unmap_addr(e, mapaddr), 2421
2424 pci_unmap_len(e, maplen), 2422 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2425 PCI_DMA_TODEVICE); 2423 e = e->next;
2424 pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
2425 skb_shinfo(skb)->frags[i].size,
2426 PCI_DMA_TODEVICE);
2427 }
2428
2429 dev_kfree_skb(skb);
2426 } 2430 }
2431 skge->tx_ring.to_clean = e;
2427} 2432}
2428 2433
2429static void skge_tx_clean(struct skge_port *skge) 2434static void skge_tx_clean(struct skge_port *skge)
2430{ 2435{
2431 struct skge_ring *ring = &skge->tx_ring;
2432 struct skge_element *e;
2433 unsigned long flags;
2434 2436
2435 spin_lock_irqsave(&skge->tx_lock, flags); 2437 spin_lock_bh(&skge->tx_lock);
2436 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 2438 skge_tx_complete(skge, skge->tx_ring.to_use);
2437 ++skge->tx_avail; 2439 netif_wake_queue(skge->netdev);
2438 skge_tx_free(skge->hw, e); 2440 spin_unlock_bh(&skge->tx_lock);
2439 }
2440 ring->to_clean = e;
2441 spin_unlock_irqrestore(&skge->tx_lock, flags);
2442} 2441}
2443 2442
2444static void skge_tx_timeout(struct net_device *dev) 2443static void skge_tx_timeout(struct net_device *dev)
@@ -2597,7 +2596,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2597 goto error; 2596 goto error;
2598 2597
2599 if (len < RX_COPY_THRESHOLD) { 2598 if (len < RX_COPY_THRESHOLD) {
2600 skb = dev_alloc_skb(len + 2); 2599 skb = alloc_skb(len + 2, GFP_ATOMIC);
2601 if (!skb) 2600 if (!skb)
2602 goto resubmit; 2601 goto resubmit;
2603 2602
@@ -2612,10 +2611,11 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2612 skge_rx_reuse(e, skge->rx_buf_size); 2611 skge_rx_reuse(e, skge->rx_buf_size);
2613 } else { 2612 } else {
2614 struct sk_buff *nskb; 2613 struct sk_buff *nskb;
2615 nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); 2614 nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
2616 if (!nskb) 2615 if (!nskb)
2617 goto resubmit; 2616 goto resubmit;
2618 2617
2618 skb_reserve(nskb, NET_IP_ALIGN);
2619 pci_unmap_single(skge->hw->pdev, 2619 pci_unmap_single(skge->hw->pdev,
2620 pci_unmap_addr(e, mapaddr), 2620 pci_unmap_addr(e, mapaddr),
2621 pci_unmap_len(e, maplen), 2621 pci_unmap_len(e, maplen),
@@ -2663,6 +2663,36 @@ resubmit:
2663 return NULL; 2663 return NULL;
2664} 2664}
2665 2665
2666static void skge_tx_done(struct skge_port *skge)
2667{
2668 struct skge_ring *ring = &skge->tx_ring;
2669 struct skge_element *e, *last;
2670
2671 spin_lock(&skge->tx_lock);
2672 last = ring->to_clean;
2673 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2674 struct skge_tx_desc *td = e->desc;
2675
2676 if (td->control & BMU_OWN)
2677 break;
2678
2679 if (td->control & BMU_EOF) {
2680 last = e->next;
2681 if (unlikely(netif_msg_tx_done(skge)))
2682 printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
2683 skge->netdev->name, e - ring->start);
2684 }
2685 }
2686
2687 skge_tx_complete(skge, last);
2688
2689 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2690
2691 if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
2692 netif_wake_queue(skge->netdev);
2693
2694 spin_unlock(&skge->tx_lock);
2695}
2666 2696
2667static int skge_poll(struct net_device *dev, int *budget) 2697static int skge_poll(struct net_device *dev, int *budget)
2668{ 2698{
@@ -2670,8 +2700,10 @@ static int skge_poll(struct net_device *dev, int *budget)
2670 struct skge_hw *hw = skge->hw; 2700 struct skge_hw *hw = skge->hw;
2671 struct skge_ring *ring = &skge->rx_ring; 2701 struct skge_ring *ring = &skge->rx_ring;
2672 struct skge_element *e; 2702 struct skge_element *e;
2673 unsigned int to_do = min(dev->quota, *budget); 2703 int to_do = min(dev->quota, *budget);
2674 unsigned int work_done = 0; 2704 int work_done = 0;
2705
2706 skge_tx_done(skge);
2675 2707
2676 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { 2708 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
2677 struct skge_rx_desc *rd = e->desc; 2709 struct skge_rx_desc *rd = e->desc;
@@ -2683,15 +2715,14 @@ static int skge_poll(struct net_device *dev, int *budget)
2683 if (control & BMU_OWN) 2715 if (control & BMU_OWN)
2684 break; 2716 break;
2685 2717
2686 skb = skge_rx_get(skge, e, control, rd->status, 2718 skb = skge_rx_get(skge, e, control, rd->status,
2687 le16_to_cpu(rd->csum2)); 2719 le16_to_cpu(rd->csum2));
2688 if (likely(skb)) { 2720 if (likely(skb)) {
2689 dev->last_rx = jiffies; 2721 dev->last_rx = jiffies;
2690 netif_receive_skb(skb); 2722 netif_receive_skb(skb);
2691 2723
2692 ++work_done; 2724 ++work_done;
2693 } else 2725 }
2694 skge_rx_reuse(e, skge->rx_buf_size);
2695 } 2726 }
2696 ring->to_clean = e; 2727 ring->to_clean = e;
2697 2728
@@ -2705,49 +2736,15 @@ static int skge_poll(struct net_device *dev, int *budget)
2705 if (work_done >= to_do) 2736 if (work_done >= to_do)
2706 return 1; /* not done */ 2737 return 1; /* not done */
2707 2738
2708 spin_lock_irq(&hw->hw_lock); 2739 netif_rx_complete(dev);
2709 __netif_rx_complete(dev); 2740 mmiowb();
2710 hw->intr_mask |= portirqmask[skge->port]; 2741
2742 hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
2711 skge_write32(hw, B0_IMSK, hw->intr_mask); 2743 skge_write32(hw, B0_IMSK, hw->intr_mask);
2712 spin_unlock_irq(&hw->hw_lock);
2713 2744
2714 return 0; 2745 return 0;
2715} 2746}
2716 2747
2717static inline void skge_tx_intr(struct net_device *dev)
2718{
2719 struct skge_port *skge = netdev_priv(dev);
2720 struct skge_hw *hw = skge->hw;
2721 struct skge_ring *ring = &skge->tx_ring;
2722 struct skge_element *e;
2723
2724 spin_lock(&skge->tx_lock);
2725 for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
2726 struct skge_tx_desc *td = e->desc;
2727 u32 control;
2728
2729 rmb();
2730 control = td->control;
2731 if (control & BMU_OWN)
2732 break;
2733
2734 if (unlikely(netif_msg_tx_done(skge)))
2735 printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
2736 dev->name, e - ring->start, td->status);
2737
2738 skge_tx_free(hw, e);
2739 e->skb = NULL;
2740 ++skge->tx_avail;
2741 }
2742 ring->to_clean = e;
2743 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2744
2745 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
2746 netif_wake_queue(dev);
2747
2748 spin_unlock(&skge->tx_lock);
2749}
2750
2751/* Parity errors seem to happen when Genesis is connected to a switch 2748/* Parity errors seem to happen when Genesis is connected to a switch
2752 * with no other ports present. Heartbeat error?? 2749 * with no other ports present. Heartbeat error??
2753 */ 2750 */
@@ -2770,17 +2767,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
2770 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 2767 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2771} 2768}
2772 2769
2773static void skge_pci_clear(struct skge_hw *hw)
2774{
2775 u16 status;
2776
2777 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2778 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2779 pci_write_config_word(hw->pdev, PCI_STATUS,
2780 status | PCI_STATUS_ERROR_BITS);
2781 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2782}
2783
2784static void skge_mac_intr(struct skge_hw *hw, int port) 2770static void skge_mac_intr(struct skge_hw *hw, int port)
2785{ 2771{
2786 if (hw->chip_id == CHIP_ID_GENESIS) 2772 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2822,23 +2808,39 @@ static void skge_error_irq(struct skge_hw *hw)
2822 if (hwstatus & IS_M2_PAR_ERR) 2808 if (hwstatus & IS_M2_PAR_ERR)
2823 skge_mac_parity(hw, 1); 2809 skge_mac_parity(hw, 1);
2824 2810
2825 if (hwstatus & IS_R1_PAR_ERR) 2811 if (hwstatus & IS_R1_PAR_ERR) {
2812 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2813 hw->dev[0]->name);
2826 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 2814 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
2815 }
2827 2816
2828 if (hwstatus & IS_R2_PAR_ERR) 2817 if (hwstatus & IS_R2_PAR_ERR) {
2818 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2819 hw->dev[1]->name);
2829 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 2820 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
2821 }
2830 2822
2831 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 2823 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
2832 printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n", 2824 u16 pci_status, pci_cmd;
2833 hwstatus);
2834 2825
2835 skge_pci_clear(hw); 2826 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
2827 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2828
2829 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
2830 pci_name(hw->pdev), pci_cmd, pci_status);
2831
2832 /* Write the error bits back to clear them. */
2833 pci_status &= PCI_STATUS_ERROR_BITS;
2834 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2835 pci_write_config_word(hw->pdev, PCI_COMMAND,
2836 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
2837 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
2838 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2836 2839
2837 /* if error still set then just ignore it */ 2840 /* if error still set then just ignore it */
2838 hwstatus = skge_read32(hw, B0_HWE_ISRC); 2841 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2839 if (hwstatus & IS_IRQ_STAT) { 2842 if (hwstatus & IS_IRQ_STAT) {
2840 pr_debug("IRQ status %x: still set ignoring hardware errors\n", 2843 printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
2841 hwstatus);
2842 hw->intr_mask &= ~IS_HW_ERR; 2844 hw->intr_mask &= ~IS_HW_ERR;
2843 } 2845 }
2844 } 2846 }
@@ -2855,12 +2857,11 @@ static void skge_extirq(unsigned long data)
2855 int port; 2857 int port;
2856 2858
2857 spin_lock(&hw->phy_lock); 2859 spin_lock(&hw->phy_lock);
2858 for (port = 0; port < 2; port++) { 2860 for (port = 0; port < hw->ports; port++) {
2859 struct net_device *dev = hw->dev[port]; 2861 struct net_device *dev = hw->dev[port];
2862 struct skge_port *skge = netdev_priv(dev);
2860 2863
2861 if (dev && netif_running(dev)) { 2864 if (netif_running(dev)) {
2862 struct skge_port *skge = netdev_priv(dev);
2863
2864 if (hw->chip_id != CHIP_ID_GENESIS) 2865 if (hw->chip_id != CHIP_ID_GENESIS)
2865 yukon_phy_intr(skge); 2866 yukon_phy_intr(skge);
2866 else 2867 else
@@ -2869,38 +2870,39 @@ static void skge_extirq(unsigned long data)
2869 } 2870 }
2870 spin_unlock(&hw->phy_lock); 2871 spin_unlock(&hw->phy_lock);
2871 2872
2872 spin_lock_irq(&hw->hw_lock);
2873 hw->intr_mask |= IS_EXT_REG; 2873 hw->intr_mask |= IS_EXT_REG;
2874 skge_write32(hw, B0_IMSK, hw->intr_mask); 2874 skge_write32(hw, B0_IMSK, hw->intr_mask);
2875 spin_unlock_irq(&hw->hw_lock);
2876} 2875}
2877 2876
2878static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) 2877static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2879{ 2878{
2880 struct skge_hw *hw = dev_id; 2879 struct skge_hw *hw = dev_id;
2881 u32 status = skge_read32(hw, B0_SP_ISRC); 2880 u32 status;
2882 2881
2883 if (status == 0 || status == ~0) /* hotplug or shared irq */ 2882 /* Reading this register masks IRQ */
2883 status = skge_read32(hw, B0_SP_ISRC);
2884 if (status == 0)
2884 return IRQ_NONE; 2885 return IRQ_NONE;
2885 2886
2886 spin_lock(&hw->hw_lock); 2887 if (status & IS_EXT_REG) {
2887 if (status & IS_R1_F) { 2888 hw->intr_mask &= ~IS_EXT_REG;
2889 tasklet_schedule(&hw->ext_tasklet);
2890 }
2891
2892 if (status & (IS_R1_F|IS_XA1_F)) {
2888 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); 2893 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2889 hw->intr_mask &= ~IS_R1_F; 2894 hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
2890 netif_rx_schedule(hw->dev[0]); 2895 netif_rx_schedule(hw->dev[0]);
2891 } 2896 }
2892 2897
2893 if (status & IS_R2_F) { 2898 if (status & (IS_R2_F|IS_XA2_F)) {
2894 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); 2899 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
2895 hw->intr_mask &= ~IS_R2_F; 2900 hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
2896 netif_rx_schedule(hw->dev[1]); 2901 netif_rx_schedule(hw->dev[1]);
2897 } 2902 }
2898 2903
2899 if (status & IS_XA1_F) 2904 if (likely((status & hw->intr_mask) == 0))
2900 skge_tx_intr(hw->dev[0]); 2905 return IRQ_HANDLED;
2901
2902 if (status & IS_XA2_F)
2903 skge_tx_intr(hw->dev[1]);
2904 2906
2905 if (status & IS_PA_TO_RX1) { 2907 if (status & IS_PA_TO_RX1) {
2906 struct skge_port *skge = netdev_priv(hw->dev[0]); 2908 struct skge_port *skge = netdev_priv(hw->dev[0]);
@@ -2929,13 +2931,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2929 if (status & IS_HW_ERR) 2931 if (status & IS_HW_ERR)
2930 skge_error_irq(hw); 2932 skge_error_irq(hw);
2931 2933
2932 if (status & IS_EXT_REG) {
2933 hw->intr_mask &= ~IS_EXT_REG;
2934 tasklet_schedule(&hw->ext_tasklet);
2935 }
2936
2937 skge_write32(hw, B0_IMSK, hw->intr_mask); 2934 skge_write32(hw, B0_IMSK, hw->intr_mask);
2938 spin_unlock(&hw->hw_lock);
2939 2935
2940 return IRQ_HANDLED; 2936 return IRQ_HANDLED;
2941} 2937}
@@ -3010,7 +3006,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
3010static int skge_reset(struct skge_hw *hw) 3006static int skge_reset(struct skge_hw *hw)
3011{ 3007{
3012 u32 reg; 3008 u32 reg;
3013 u16 ctst; 3009 u16 ctst, pci_status;
3014 u8 t8, mac_cfg, pmd_type, phy_type; 3010 u8 t8, mac_cfg, pmd_type, phy_type;
3015 int i; 3011 int i;
3016 3012
@@ -3021,8 +3017,13 @@ static int skge_reset(struct skge_hw *hw)
3021 skge_write8(hw, B0_CTST, CS_RST_CLR); 3017 skge_write8(hw, B0_CTST, CS_RST_CLR);
3022 3018
3023 /* clear PCI errors, if any */ 3019 /* clear PCI errors, if any */
3024 skge_pci_clear(hw); 3020 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3021 skge_write8(hw, B2_TST_CTRL2, 0);
3025 3022
3023 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3024 pci_write_config_word(hw->pdev, PCI_STATUS,
3025 pci_status | PCI_STATUS_ERROR_BITS);
3026 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3026 skge_write8(hw, B0_CTST, CS_MRST_CLR); 3027 skge_write8(hw, B0_CTST, CS_MRST_CLR);
3027 3028
3028 /* restore CLK_RUN bits (for Yukon-Lite) */ 3029 /* restore CLK_RUN bits (for Yukon-Lite) */
@@ -3081,7 +3082,10 @@ static int skge_reset(struct skge_hw *hw)
3081 else 3082 else
3082 hw->ram_size = t8 * 4096; 3083 hw->ram_size = t8 * 4096;
3083 3084
3084 hw->intr_mask = IS_HW_ERR | IS_EXT_REG; 3085 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3086 if (hw->ports > 1)
3087 hw->intr_mask |= IS_PORT_2;
3088
3085 if (hw->chip_id == CHIP_ID_GENESIS) 3089 if (hw->chip_id == CHIP_ID_GENESIS)
3086 genesis_init(hw); 3090 genesis_init(hw);
3087 else { 3091 else {
@@ -3251,13 +3255,15 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3251 struct skge_hw *hw; 3255 struct skge_hw *hw;
3252 int err, using_dac = 0; 3256 int err, using_dac = 0;
3253 3257
3254 if ((err = pci_enable_device(pdev))) { 3258 err = pci_enable_device(pdev);
3259 if (err) {
3255 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3260 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3256 pci_name(pdev)); 3261 pci_name(pdev));
3257 goto err_out; 3262 goto err_out;
3258 } 3263 }
3259 3264
3260 if ((err = pci_request_regions(pdev, DRV_NAME))) { 3265 err = pci_request_regions(pdev, DRV_NAME);
3266 if (err) {
3261 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3267 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3262 pci_name(pdev)); 3268 pci_name(pdev));
3263 goto err_out_disable_pdev; 3269 goto err_out_disable_pdev;
@@ -3265,22 +3271,18 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3265 3271
3266 pci_set_master(pdev); 3272 pci_set_master(pdev);
3267 3273
3268 if (sizeof(dma_addr_t) > sizeof(u32) && 3274 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3269 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3270 using_dac = 1; 3275 using_dac = 1;
3271 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3276 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3272 if (err < 0) { 3277 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3273 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " 3278 using_dac = 0;
3274 "for consistent allocations\n", pci_name(pdev)); 3279 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3275 goto err_out_free_regions; 3280 }
3276 } 3281
3277 } else { 3282 if (err) {
3278 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3283 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3279 if (err) { 3284 pci_name(pdev));
3280 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3285 goto err_out_free_regions;
3281 pci_name(pdev));
3282 goto err_out_free_regions;
3283 }
3284 } 3286 }
3285 3287
3286#ifdef __BIG_ENDIAN 3288#ifdef __BIG_ENDIAN
@@ -3304,7 +3306,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3304 3306
3305 hw->pdev = pdev; 3307 hw->pdev = pdev;
3306 spin_lock_init(&hw->phy_lock); 3308 spin_lock_init(&hw->phy_lock);
3307 spin_lock_init(&hw->hw_lock);
3308 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); 3309 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3309 3310
3310 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3311 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
@@ -3314,7 +3315,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3314 goto err_out_free_hw; 3315 goto err_out_free_hw;
3315 } 3316 }
3316 3317
3317 if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) { 3318 err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
3319 if (err) {
3318 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3320 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3319 pci_name(pdev), pdev->irq); 3321 pci_name(pdev), pdev->irq);
3320 goto err_out_iounmap; 3322 goto err_out_iounmap;
@@ -3332,7 +3334,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3332 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) 3334 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3333 goto err_out_led_off; 3335 goto err_out_led_off;
3334 3336
3335 if ((err = register_netdev(dev))) { 3337 err = register_netdev(dev);
3338 if (err) {
3336 printk(KERN_ERR PFX "%s: cannot register net device\n", 3339 printk(KERN_ERR PFX "%s: cannot register net device\n",
3337 pci_name(pdev)); 3340 pci_name(pdev));
3338 goto err_out_free_netdev; 3341 goto err_out_free_netdev;
@@ -3387,7 +3390,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3387 3390
3388 skge_write32(hw, B0_IMSK, 0); 3391 skge_write32(hw, B0_IMSK, 0);
3389 skge_write16(hw, B0_LED, LED_STAT_OFF); 3392 skge_write16(hw, B0_LED, LED_STAT_OFF);
3390 skge_pci_clear(hw);
3391 skge_write8(hw, B0_CTST, CS_RST_SET); 3393 skge_write8(hw, B0_CTST, CS_RST_SET);
3392 3394
3393 tasklet_kill(&hw->ext_tasklet); 3395 tasklet_kill(&hw->ext_tasklet);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 941f12a333b6..1f1ce88c8186 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,7 +2402,6 @@ struct skge_hw {
2402 2402
2403 struct tasklet_struct ext_tasklet; 2403 struct tasklet_struct ext_tasklet;
2404 spinlock_t phy_lock; 2404 spinlock_t phy_lock;
2405 spinlock_t hw_lock;
2406}; 2405};
2407 2406
2408enum { 2407enum {
@@ -2419,7 +2418,6 @@ struct skge_port {
2419 int port; 2418 int port;
2420 2419
2421 spinlock_t tx_lock; 2420 spinlock_t tx_lock;
2422 u32 tx_avail;
2423 struct skge_ring tx_ring; 2421 struct skge_ring tx_ring;
2424 struct skge_ring rx_ring; 2422 struct skge_ring rx_ring;
2425 2423
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 73260364cba3..68f9c206a620 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "0.15" 54#define DRV_VERSION "1.1"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -61,10 +61,6 @@
61 * a receive requires one (or two if using 64 bit dma). 61 * a receive requires one (or two if using 64 bit dma).
62 */ 62 */
63 63
64#define is_ec_a1(hw) \
65 unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
66 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
67
68#define RX_LE_SIZE 512 64#define RX_LE_SIZE 512
69#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 65#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
70#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) 66#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
@@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256;
96module_param(copybreak, int, 0); 92module_param(copybreak, int, 0);
97MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 93MODULE_PARM_DESC(copybreak, "Receive copy threshold");
98 94
95static int disable_msi = 0;
96module_param(disable_msi, int, 0);
97MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
98
99static const struct pci_device_id sky2_id_table[] = { 99static const struct pci_device_id sky2_id_table[] = {
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
504/* Force a renegotiation */ 504/* Force a renegotiation */
505static void sky2_phy_reinit(struct sky2_port *sky2) 505static void sky2_phy_reinit(struct sky2_port *sky2)
506{ 506{
507 down(&sky2->phy_sema); 507 spin_lock_bh(&sky2->phy_lock);
508 sky2_phy_init(sky2->hw, sky2->port); 508 sky2_phy_init(sky2->hw, sky2->port);
509 up(&sky2->phy_sema); 509 spin_unlock_bh(&sky2->phy_lock);
510} 510}
511 511
512static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 512static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
571 571
572 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); 572 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
573 573
574 down(&sky2->phy_sema); 574 spin_lock_bh(&sky2->phy_lock);
575 sky2_phy_init(hw, port); 575 sky2_phy_init(hw, port);
576 up(&sky2->phy_sema); 576 spin_unlock_bh(&sky2->phy_lock);
577 577
578 /* MIB clear */ 578 /* MIB clear */
579 reg = gma_read16(hw, port, GM_PHY_ADDR); 579 reg = gma_read16(hw, port, GM_PHY_ADDR);
@@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
725 return le; 725 return le;
726} 726}
727 727
728/* 728/* Update chip's next pointer */
729 * This is a workaround code taken from SysKonnect sk98lin driver 729static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
730 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
731 */
732static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
733 u16 idx, u16 *last, u16 size)
734{ 730{
735 wmb(); 731 wmb();
736 if (is_ec_a1(hw) && idx < *last) { 732 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
737 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
738
739 if (hwget == 0) {
740 /* Start prefetching again */
741 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
742 goto setnew;
743 }
744
745 if (hwget == size - 1) {
746 /* set watermark to one list element */
747 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
748
749 /* set put index to first list element */
750 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
751 } else /* have hardware go to end of list */
752 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
753 size - 1);
754 } else {
755setnew:
756 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
757 }
758 *last = idx;
759 mmiowb(); 733 mmiowb();
760} 734}
761 735
@@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
878 if (!netif_running(dev)) 852 if (!netif_running(dev))
879 return -ENODEV; /* Phy still in reset */ 853 return -ENODEV; /* Phy still in reset */
880 854
881 switch(cmd) { 855 switch (cmd) {
882 case SIOCGMIIPHY: 856 case SIOCGMIIPHY:
883 data->phy_id = PHY_ADDR_MARV; 857 data->phy_id = PHY_ADDR_MARV;
884 858
@@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
886 case SIOCGMIIREG: { 860 case SIOCGMIIREG: {
887 u16 val = 0; 861 u16 val = 0;
888 862
889 down(&sky2->phy_sema); 863 spin_lock_bh(&sky2->phy_lock);
890 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); 864 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
891 up(&sky2->phy_sema); 865 spin_unlock_bh(&sky2->phy_lock);
892 866
893 data->val_out = val; 867 data->val_out = val;
894 break; 868 break;
@@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
898 if (!capable(CAP_NET_ADMIN)) 872 if (!capable(CAP_NET_ADMIN))
899 return -EPERM; 873 return -EPERM;
900 874
901 down(&sky2->phy_sema); 875 spin_lock_bh(&sky2->phy_lock);
902 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, 876 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
903 data->val_in); 877 data->val_in);
904 up(&sky2->phy_sema); 878 spin_unlock_bh(&sky2->phy_lock);
905 break; 879 break;
906 } 880 }
907 return err; 881 return err;
@@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
1001 975
1002 /* Tell chip about available buffers */ 976 /* Tell chip about available buffers */
1003 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); 977 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
1004 sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
1005 return 0; 978 return 0;
1006nomem: 979nomem:
1007 sky2_rx_clean(sky2); 980 sky2_rx_clean(sky2);
@@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev)
1014 struct sky2_port *sky2 = netdev_priv(dev); 987 struct sky2_port *sky2 = netdev_priv(dev);
1015 struct sky2_hw *hw = sky2->hw; 988 struct sky2_hw *hw = sky2->hw;
1016 unsigned port = sky2->port; 989 unsigned port = sky2->port;
1017 u32 ramsize, rxspace; 990 u32 ramsize, rxspace, imask;
1018 int err = -ENOMEM; 991 int err = -ENOMEM;
1019 992
1020 if (netif_msg_ifup(sky2)) 993 if (netif_msg_ifup(sky2))
@@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev)
1079 goto err_out; 1052 goto err_out;
1080 1053
1081 /* Enable interrupts from phy/mac for port */ 1054 /* Enable interrupts from phy/mac for port */
1082 spin_lock_irq(&hw->hw_lock); 1055 imask = sky2_read32(hw, B0_IMSK);
1083 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1056 imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1084 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1057 sky2_write32(hw, B0_IMSK, imask);
1085 spin_unlock_irq(&hw->hw_lock); 1058
1086 return 0; 1059 return 0;
1087 1060
1088err_out: 1061err_out:
@@ -1202,7 +1175,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1202 /* just drop the packet if non-linear expansion fails */ 1175 /* just drop the packet if non-linear expansion fails */
1203 if (skb_header_cloned(skb) && 1176 if (skb_header_cloned(skb) &&
1204 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 1177 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1205 dev_kfree_skb_any(skb); 1178 dev_kfree_skb(skb);
1206 goto out_unlock; 1179 goto out_unlock;
1207 } 1180 }
1208 1181
@@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1299 netif_stop_queue(dev); 1272 netif_stop_queue(dev);
1300 } 1273 }
1301 1274
1302 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, 1275 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1303 &sky2->tx_last_put, TX_RING_SIZE);
1304 1276
1305out_unlock: 1277out_unlock:
1306 spin_unlock(&sky2->tx_lock); 1278 spin_unlock(&sky2->tx_lock);
@@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1332 struct tx_ring_info *re = sky2->tx_ring + put; 1304 struct tx_ring_info *re = sky2->tx_ring + put;
1333 struct sk_buff *skb = re->skb; 1305 struct sk_buff *skb = re->skb;
1334 1306
1335 nxt = re->idx; 1307 nxt = re->idx;
1336 BUG_ON(nxt >= TX_RING_SIZE); 1308 BUG_ON(nxt >= TX_RING_SIZE);
1337 prefetch(sky2->tx_ring + nxt); 1309 prefetch(sky2->tx_ring + nxt);
1338 1310
@@ -1348,15 +1320,15 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1348 struct tx_ring_info *fre; 1320 struct tx_ring_info *fre;
1349 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; 1321 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
1350 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), 1322 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1351 skb_shinfo(skb)->frags[i].size, 1323 skb_shinfo(skb)->frags[i].size,
1352 PCI_DMA_TODEVICE); 1324 PCI_DMA_TODEVICE);
1353 } 1325 }
1354 1326
1355 dev_kfree_skb_any(skb); 1327 dev_kfree_skb(skb);
1356 } 1328 }
1357 1329
1358 sky2->tx_cons = put; 1330 sky2->tx_cons = put;
1359 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) 1331 if (tx_avail(sky2) > MAX_SKB_TX_LE)
1360 netif_wake_queue(dev); 1332 netif_wake_queue(dev);
1361} 1333}
1362 1334
@@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev)
1375 struct sky2_hw *hw = sky2->hw; 1347 struct sky2_hw *hw = sky2->hw;
1376 unsigned port = sky2->port; 1348 unsigned port = sky2->port;
1377 u16 ctrl; 1349 u16 ctrl;
1350 u32 imask;
1378 1351
1379 /* Never really got started! */ 1352 /* Never really got started! */
1380 if (!sky2->tx_le) 1353 if (!sky2->tx_le)
@@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev)
1386 /* Stop more packets from being queued */ 1359 /* Stop more packets from being queued */
1387 netif_stop_queue(dev); 1360 netif_stop_queue(dev);
1388 1361
1389 /* Disable port IRQ */
1390 spin_lock_irq(&hw->hw_lock);
1391 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1392 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1393 spin_unlock_irq(&hw->hw_lock);
1394
1395 flush_scheduled_work();
1396
1397 sky2_phy_reset(hw, port); 1362 sky2_phy_reset(hw, port);
1398 1363
1399 /* Stop transmitter */ 1364 /* Stop transmitter */
@@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev)
1437 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1402 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1438 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1403 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1439 1404
1405 /* Disable port IRQ */
1406 imask = sky2_read32(hw, B0_IMSK);
1407 imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1408 sky2_write32(hw, B0_IMSK, imask);
1409
1440 /* turn off LED's */ 1410 /* turn off LED's */
1441 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 1411 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1442 1412
@@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1631 return 0; 1601 return 0;
1632} 1602}
1633 1603
1634/* 1604/* Interrupt from PHY */
1635 * Interrupt from PHY are handled outside of interrupt context 1605static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1636 * because accessing phy registers requires spin wait which might
1637 * cause excess interrupt latency.
1638 */
1639static void sky2_phy_task(void *arg)
1640{ 1606{
1641 struct sky2_port *sky2 = arg; 1607 struct net_device *dev = hw->dev[port];
1642 struct sky2_hw *hw = sky2->hw; 1608 struct sky2_port *sky2 = netdev_priv(dev);
1643 u16 istatus, phystat; 1609 u16 istatus, phystat;
1644 1610
1645 down(&sky2->phy_sema); 1611 spin_lock(&sky2->phy_lock);
1646 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT); 1612 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1647 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT); 1613 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1614
1615 if (!netif_running(dev))
1616 goto out;
1648 1617
1649 if (netif_msg_intr(sky2)) 1618 if (netif_msg_intr(sky2))
1650 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", 1619 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
@@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg)
1670 sky2_link_down(sky2); 1639 sky2_link_down(sky2);
1671 } 1640 }
1672out: 1641out:
1673 up(&sky2->phy_sema); 1642 spin_unlock(&sky2->phy_lock);
1674
1675 spin_lock_irq(&hw->hw_lock);
1676 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1677 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1678 spin_unlock_irq(&hw->hw_lock);
1679} 1643}
1680 1644
1681 1645
@@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev)
1687 struct sky2_port *sky2 = netdev_priv(dev); 1651 struct sky2_port *sky2 = netdev_priv(dev);
1688 struct sky2_hw *hw = sky2->hw; 1652 struct sky2_hw *hw = sky2->hw;
1689 unsigned txq = txqaddr[sky2->port]; 1653 unsigned txq = txqaddr[sky2->port];
1690 u16 ridx; 1654 u16 report, done;
1691
1692 /* Maybe we just missed an status interrupt */
1693 spin_lock(&sky2->tx_lock);
1694 ridx = sky2_read16(hw,
1695 sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1696 sky2_tx_complete(sky2, ridx);
1697 spin_unlock(&sky2->tx_lock);
1698
1699 if (!netif_queue_stopped(dev)) {
1700 if (net_ratelimit())
1701 pr_info(PFX "transmit interrupt missed? recovered\n");
1702 return;
1703 }
1704 1655
1705 if (netif_msg_timer(sky2)) 1656 if (netif_msg_timer(sky2))
1706 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1657 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1707 1658
1708 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1659 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1709 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1660 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1710 1661
1711 sky2_tx_clean(sky2); 1662 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1663 dev->name,
1664 sky2->tx_cons, sky2->tx_prod, report, done);
1665
1666 if (report != done) {
1667 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
1668
1669 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1670 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1671 } else if (report != sky2->tx_cons) {
1672 printk(KERN_INFO PFX "status report lost?\n");
1673
1674 spin_lock_bh(&sky2->tx_lock);
1675 sky2_tx_complete(sky2, report);
1676 spin_unlock_bh(&sky2->tx_lock);
1677 } else {
1678 printk(KERN_INFO PFX "hardware hung? flushing\n");
1712 1679
1713 sky2_qset(hw, txq); 1680 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1714 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); 1681 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1682
1683 sky2_tx_clean(sky2);
1684
1685 sky2_qset(hw, txq);
1686 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1687 }
1715} 1688}
1716 1689
1717 1690
@@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1730 struct sky2_hw *hw = sky2->hw; 1703 struct sky2_hw *hw = sky2->hw;
1731 int err; 1704 int err;
1732 u16 ctl, mode; 1705 u16 ctl, mode;
1706 u32 imask;
1733 1707
1734 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1708 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1735 return -EINVAL; 1709 return -EINVAL;
@@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1742 return 0; 1716 return 0;
1743 } 1717 }
1744 1718
1719 imask = sky2_read32(hw, B0_IMSK);
1745 sky2_write32(hw, B0_IMSK, 0); 1720 sky2_write32(hw, B0_IMSK, 0);
1746 1721
1747 dev->trans_start = jiffies; /* prevent tx timeout */ 1722 dev->trans_start = jiffies; /* prevent tx timeout */
1748 netif_stop_queue(dev); 1723 netif_stop_queue(dev);
1749 netif_poll_disable(hw->dev[0]); 1724 netif_poll_disable(hw->dev[0]);
1750 1725
1726 synchronize_irq(hw->pdev->irq);
1727
1751 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); 1728 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1752 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); 1729 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1753 sky2_rx_stop(sky2); 1730 sky2_rx_stop(sky2);
@@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1766 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); 1743 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1767 1744
1768 err = sky2_rx_start(sky2); 1745 err = sky2_rx_start(sky2);
1769 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1746 sky2_write32(hw, B0_IMSK, imask);
1770 1747
1771 if (err) 1748 if (err)
1772 dev_close(dev); 1749 dev_close(dev);
@@ -1843,8 +1820,7 @@ resubmit:
1843 sky2_rx_add(sky2, re->mapaddr); 1820 sky2_rx_add(sky2, re->mapaddr);
1844 1821
1845 /* Tell receiver about new buffers. */ 1822 /* Tell receiver about new buffers. */
1846 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put, 1823 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
1847 &sky2->rx_last_put, RX_LE_SIZE);
1848 1824
1849 return skb; 1825 return skb;
1850 1826
@@ -1871,76 +1847,51 @@ error:
1871 goto resubmit; 1847 goto resubmit;
1872} 1848}
1873 1849
1874/* 1850/* Transmit complete */
1875 * Check for transmit complete 1851static inline void sky2_tx_done(struct net_device *dev, u16 last)
1876 */
1877#define TX_NO_STATUS 0xffff
1878
1879static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1880{ 1852{
1881 if (last != TX_NO_STATUS) { 1853 struct sky2_port *sky2 = netdev_priv(dev);
1882 struct net_device *dev = hw->dev[port];
1883 if (dev && netif_running(dev)) {
1884 struct sky2_port *sky2 = netdev_priv(dev);
1885 1854
1886 spin_lock(&sky2->tx_lock); 1855 if (netif_running(dev)) {
1887 sky2_tx_complete(sky2, last); 1856 spin_lock(&sky2->tx_lock);
1888 spin_unlock(&sky2->tx_lock); 1857 sky2_tx_complete(sky2, last);
1889 } 1858 spin_unlock(&sky2->tx_lock);
1890 } 1859 }
1891} 1860}
1892 1861
1893/* 1862/* Process status response ring */
1894 * Both ports share the same status interrupt, therefore there is only 1863static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1895 * one poll routine.
1896 */
1897static int sky2_poll(struct net_device *dev0, int *budget)
1898{ 1864{
1899 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; 1865 int work_done = 0;
1900 unsigned int to_do = min(dev0->quota, *budget);
1901 unsigned int work_done = 0;
1902 u16 hwidx;
1903 u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
1904
1905 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1906
1907 /*
1908 * Kick the STAT_LEV_TIMER_CTRL timer.
1909 * This fixes my hangs on Yukon-EC (0xb6) rev 1.
1910 * The if clause is there to start the timer only if it has been
1911 * configured correctly and not been disabled via ethtool.
1912 */
1913 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
1914 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
1915 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
1916 }
1917 1866
1918 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1919 BUG_ON(hwidx >= STATUS_RING_SIZE);
1920 rmb(); 1867 rmb();
1921 1868
1922 while (hwidx != hw->st_idx) { 1869 for(;;) {
1923 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1870 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1924 struct net_device *dev; 1871 struct net_device *dev;
1925 struct sky2_port *sky2; 1872 struct sky2_port *sky2;
1926 struct sk_buff *skb; 1873 struct sk_buff *skb;
1927 u32 status; 1874 u32 status;
1928 u16 length; 1875 u16 length;
1876 u8 link, opcode;
1877
1878 opcode = le->opcode;
1879 if (!opcode)
1880 break;
1881 opcode &= ~HW_OWNER;
1929 1882
1930 le = hw->st_le + hw->st_idx;
1931 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1883 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1932 prefetch(hw->st_le + hw->st_idx); 1884 le->opcode = 0;
1933 1885
1934 BUG_ON(le->link >= 2); 1886 link = le->link;
1935 dev = hw->dev[le->link]; 1887 BUG_ON(link >= 2);
1936 if (dev == NULL || !netif_running(dev)) 1888 dev = hw->dev[link];
1937 continue;
1938 1889
1939 sky2 = netdev_priv(dev); 1890 sky2 = netdev_priv(dev);
1940 status = le32_to_cpu(le->status); 1891 length = le->length;
1941 length = le16_to_cpu(le->length); 1892 status = le->status;
1942 1893
1943 switch (le->opcode & ~HW_OWNER) { 1894 switch (opcode) {
1944 case OP_RXSTAT: 1895 case OP_RXSTAT:
1945 skb = sky2_receive(sky2, length, status); 1896 skb = sky2_receive(sky2, length, status);
1946 if (!skb) 1897 if (!skb)
@@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1980 1931
1981 case OP_TXINDEXLE: 1932 case OP_TXINDEXLE:
1982 /* TX index reports status for both ports */ 1933 /* TX index reports status for both ports */
1983 tx_done[0] = status & 0xffff; 1934 sky2_tx_done(hw->dev[0], status & 0xffff);
1984 tx_done[1] = ((status >> 24) & 0xff) 1935 if (hw->dev[1])
1985 | (u16)(length & 0xf) << 8; 1936 sky2_tx_done(hw->dev[1],
1937 ((status >> 24) & 0xff)
1938 | (u16)(length & 0xf) << 8);
1986 break; 1939 break;
1987 1940
1988 default: 1941 default:
1989 if (net_ratelimit()) 1942 if (net_ratelimit())
1990 printk(KERN_WARNING PFX 1943 printk(KERN_WARNING PFX
1991 "unknown status opcode 0x%x\n", le->opcode); 1944 "unknown status opcode 0x%x\n", opcode);
1992 break; 1945 break;
1993 } 1946 }
1994 } 1947 }
1995 1948
1996exit_loop: 1949exit_loop:
1997 sky2_tx_check(hw, 0, tx_done[0]); 1950 return work_done;
1998 sky2_tx_check(hw, 1, tx_done[1]);
1999
2000 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
2001 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2002 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2003 }
2004
2005 if (likely(work_done < to_do)) {
2006 spin_lock_irq(&hw->hw_lock);
2007 __netif_rx_complete(dev0);
2008
2009 hw->intr_mask |= Y2_IS_STAT_BMU;
2010 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2011 spin_unlock_irq(&hw->hw_lock);
2012
2013 return 0;
2014 } else {
2015 *budget -= work_done;
2016 dev0->quota -= work_done;
2017 return 1;
2018 }
2019} 1951}
2020 1952
2021static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) 1953static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
@@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2134 } 2066 }
2135} 2067}
2136 2068
2137static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) 2069/* This should never happen it is a fatal situation */
2070static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2071 const char *rxtx, u32 mask)
2138{ 2072{
2139 struct net_device *dev = hw->dev[port]; 2073 struct net_device *dev = hw->dev[port];
2140 struct sky2_port *sky2 = netdev_priv(dev); 2074 struct sky2_port *sky2 = netdev_priv(dev);
2075 u32 imask;
2141 2076
2142 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 2077 printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
2143 sky2_write32(hw, B0_IMSK, hw->intr_mask); 2078 dev ? dev->name : "<not registered>", rxtx);
2144 2079
2145 schedule_work(&sky2->phy_task); 2080 imask = sky2_read32(hw, B0_IMSK);
2081 imask &= ~mask;
2082 sky2_write32(hw, B0_IMSK, imask);
2083
2084 if (dev) {
2085 spin_lock(&sky2->phy_lock);
2086 sky2_link_down(sky2);
2087 spin_unlock(&sky2->phy_lock);
2088 }
2146} 2089}
2147 2090
2148static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) 2091static int sky2_poll(struct net_device *dev0, int *budget)
2149{ 2092{
2150 struct sky2_hw *hw = dev_id; 2093 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
2151 struct net_device *dev0 = hw->dev[0]; 2094 int work_limit = min(dev0->quota, *budget);
2152 u32 status; 2095 int work_done = 0;
2096 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2153 2097
2154 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 2098 if (unlikely(status & ~Y2_IS_STAT_BMU)) {
2155 if (status == 0 || status == ~0) 2099 if (status & Y2_IS_HW_ERR)
2156 return IRQ_NONE; 2100 sky2_hw_intr(hw);
2157 2101
2158 spin_lock(&hw->hw_lock); 2102 if (status & Y2_IS_IRQ_PHY1)
2159 if (status & Y2_IS_HW_ERR) 2103 sky2_phy_intr(hw, 0);
2160 sky2_hw_intr(hw);
2161 2104
2162 /* Do NAPI for Rx and Tx status */ 2105 if (status & Y2_IS_IRQ_PHY2)
2163 if (status & Y2_IS_STAT_BMU) { 2106 sky2_phy_intr(hw, 1);
2164 hw->intr_mask &= ~Y2_IS_STAT_BMU;
2165 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2166 2107
2167 if (likely(__netif_rx_schedule_prep(dev0))) { 2108 if (status & Y2_IS_IRQ_MAC1)
2168 prefetch(&hw->st_le[hw->st_idx]); 2109 sky2_mac_intr(hw, 0);
2169 __netif_rx_schedule(dev0); 2110
2170 } 2111 if (status & Y2_IS_IRQ_MAC2)
2112 sky2_mac_intr(hw, 1);
2113
2114 if (status & Y2_IS_CHK_RX1)
2115 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2116
2117 if (status & Y2_IS_CHK_RX2)
2118 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2119
2120 if (status & Y2_IS_CHK_TXA1)
2121 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2122
2123 if (status & Y2_IS_CHK_TXA2)
2124 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2171 } 2125 }
2172 2126
2173 if (status & Y2_IS_IRQ_PHY1) 2127 if (status & Y2_IS_STAT_BMU) {
2174 sky2_phy_intr(hw, 0); 2128 work_done = sky2_status_intr(hw, work_limit);
2129 *budget -= work_done;
2130 dev0->quota -= work_done;
2131
2132 if (work_done >= work_limit)
2133 return 1;
2175 2134
2176 if (status & Y2_IS_IRQ_PHY2) 2135 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2177 sky2_phy_intr(hw, 1); 2136 }
2178 2137
2179 if (status & Y2_IS_IRQ_MAC1) 2138 netif_rx_complete(dev0);
2180 sky2_mac_intr(hw, 0);
2181 2139
2182 if (status & Y2_IS_IRQ_MAC2) 2140 status = sky2_read32(hw, B0_Y2_SP_LISR);
2183 sky2_mac_intr(hw, 1); 2141 return 0;
2142}
2184 2143
2185 sky2_write32(hw, B0_Y2_SP_ICR, 2); 2144static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2145{
2146 struct sky2_hw *hw = dev_id;
2147 struct net_device *dev0 = hw->dev[0];
2148 u32 status;
2149
2150 /* Reading this mask interrupts as side effect */
2151 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2152 if (status == 0 || status == ~0)
2153 return IRQ_NONE;
2186 2154
2187 spin_unlock(&hw->hw_lock); 2155 prefetch(&hw->st_le[hw->st_idx]);
2156 if (likely(__netif_rx_schedule_prep(dev0)))
2157 __netif_rx_schedule(dev0);
2158 else
2159 printk(KERN_DEBUG PFX "irq race detected\n");
2188 2160
2189 return IRQ_HANDLED; 2161 return IRQ_HANDLED;
2190} 2162}
@@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw)
2238 return -EOPNOTSUPP; 2210 return -EOPNOTSUPP;
2239 } 2211 }
2240 2212
2213 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2214
2215 /* This rev is really old, and requires untested workarounds */
2216 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2217 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
2218 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2219 hw->chip_id, hw->chip_rev);
2220 return -EOPNOTSUPP;
2221 }
2222
2223 /* This chip is new and not tested yet */
2224 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
2225 pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
2226 pci_name(hw->pdev));
2227 pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
2228 }
2229
2241 /* disable ASF */ 2230 /* disable ASF */
2242 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2231 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2243 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2232 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw)
2258 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2247 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2259 2248
2260 /* clear any PEX errors */ 2249 /* clear any PEX errors */
2261 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) 2250 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2262 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2251 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2263 2252
2264 2253
@@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw)
2271 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 2260 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2272 ++hw->ports; 2261 ++hw->ports;
2273 } 2262 }
2274 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2275 2263
2276 sky2_set_power_state(hw, PCI_D0); 2264 sky2_set_power_state(hw, PCI_D0);
2277 2265
@@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw)
2337 /* Set the list last index */ 2325 /* Set the list last index */
2338 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); 2326 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2339 2327
2340 /* These status setup values are copied from SysKonnect's driver */ 2328 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2341 if (is_ec_a1(hw)) { 2329 sky2_write8(hw, STAT_FIFO_WM, 16);
2342 /* WA for dev. #4.3 */
2343 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
2344 2330
2345 /* set Status-FIFO watermark */ 2331 /* set Status-FIFO ISR watermark */
2346 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */ 2332 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2347 2333 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2348 /* set Status-FIFO ISR watermark */ 2334 else
2349 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */ 2335 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2350 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
2351 } else {
2352 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2353 sky2_write8(hw, STAT_FIFO_WM, 16);
2354
2355 /* set Status-FIFO ISR watermark */
2356 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2357 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2358 else
2359 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2360 2336
2361 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); 2337 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2362 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); 2338 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2363 } 2339 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2364 2340
2365 /* enable status unit */ 2341 /* enable status unit */
2366 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); 2342 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
@@ -2502,17 +2478,34 @@ static const struct sky2_stat {
2502 { "rx_unicast", GM_RXF_UC_OK }, 2478 { "rx_unicast", GM_RXF_UC_OK },
2503 { "tx_mac_pause", GM_TXF_MPAUSE }, 2479 { "tx_mac_pause", GM_TXF_MPAUSE },
2504 { "rx_mac_pause", GM_RXF_MPAUSE }, 2480 { "rx_mac_pause", GM_RXF_MPAUSE },
2505 { "collisions", GM_TXF_SNG_COL }, 2481 { "collisions", GM_TXF_COL },
2506 { "late_collision",GM_TXF_LAT_COL }, 2482 { "late_collision",GM_TXF_LAT_COL },
2507 { "aborted", GM_TXF_ABO_COL }, 2483 { "aborted", GM_TXF_ABO_COL },
2484 { "single_collisions", GM_TXF_SNG_COL },
2508 { "multi_collisions", GM_TXF_MUL_COL }, 2485 { "multi_collisions", GM_TXF_MUL_COL },
2509 { "fifo_underrun", GM_TXE_FIFO_UR }, 2486
2510 { "fifo_overflow", GM_RXE_FIFO_OV }, 2487 { "rx_short", GM_RXF_SHT },
2511 { "rx_toolong", GM_RXF_LNG_ERR },
2512 { "rx_jabber", GM_RXF_JAB_PKT },
2513 { "rx_runt", GM_RXE_FRAG }, 2488 { "rx_runt", GM_RXE_FRAG },
2489 { "rx_64_byte_packets", GM_RXF_64B },
2490 { "rx_65_to_127_byte_packets", GM_RXF_127B },
2491 { "rx_128_to_255_byte_packets", GM_RXF_255B },
2492 { "rx_256_to_511_byte_packets", GM_RXF_511B },
2493 { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
2494 { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
2495 { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
2514 { "rx_too_long", GM_RXF_LNG_ERR }, 2496 { "rx_too_long", GM_RXF_LNG_ERR },
2497 { "rx_fifo_overflow", GM_RXE_FIFO_OV },
2498 { "rx_jabber", GM_RXF_JAB_PKT },
2515 { "rx_fcs_error", GM_RXF_FCS_ERR }, 2499 { "rx_fcs_error", GM_RXF_FCS_ERR },
2500
2501 { "tx_64_byte_packets", GM_TXF_64B },
2502 { "tx_65_to_127_byte_packets", GM_TXF_127B },
2503 { "tx_128_to_255_byte_packets", GM_TXF_255B },
2504 { "tx_256_to_511_byte_packets", GM_TXF_511B },
2505 { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
2506 { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
2507 { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
2508 { "tx_fifo_underrun", GM_TXE_FIFO_UR },
2516}; 2509};
2517 2510
2518static u32 sky2_get_rx_csum(struct net_device *dev) 2511static u32 sky2_get_rx_csum(struct net_device *dev)
@@ -2614,7 +2607,7 @@ static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2614 sky2->net_stats.rx_bytes = data[1]; 2607 sky2->net_stats.rx_bytes = data[1];
2615 sky2->net_stats.tx_packets = data[2] + data[4] + data[6]; 2608 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2616 sky2->net_stats.rx_packets = data[3] + data[5] + data[7]; 2609 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2617 sky2->net_stats.multicast = data[5] + data[7]; 2610 sky2->net_stats.multicast = data[3] + data[5];
2618 sky2->net_stats.collisions = data[10]; 2611 sky2->net_stats.collisions = data[10];
2619 sky2->net_stats.tx_aborted_errors = data[12]; 2612 sky2->net_stats.tx_aborted_errors = data[12];
2620 2613
@@ -2743,7 +2736,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2743 ms = data * 1000; 2736 ms = data * 1000;
2744 2737
2745 /* save initial values */ 2738 /* save initial values */
2746 down(&sky2->phy_sema); 2739 spin_lock_bh(&sky2->phy_lock);
2747 if (hw->chip_id == CHIP_ID_YUKON_XL) { 2740 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2748 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 2741 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2749 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 2742 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
@@ -2759,9 +2752,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2759 sky2_led(hw, port, onoff); 2752 sky2_led(hw, port, onoff);
2760 onoff = !onoff; 2753 onoff = !onoff;
2761 2754
2762 up(&sky2->phy_sema); 2755 spin_unlock_bh(&sky2->phy_lock);
2763 interrupted = msleep_interruptible(250); 2756 interrupted = msleep_interruptible(250);
2764 down(&sky2->phy_sema); 2757 spin_lock_bh(&sky2->phy_lock);
2765 2758
2766 ms -= 250; 2759 ms -= 250;
2767 } 2760 }
@@ -2776,7 +2769,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2776 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 2769 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2777 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 2770 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2778 } 2771 }
2779 up(&sky2->phy_sema); 2772 spin_unlock_bh(&sky2->phy_lock);
2780 2773
2781 return 0; 2774 return 0;
2782} 2775}
@@ -2806,38 +2799,6 @@ static int sky2_set_pauseparam(struct net_device *dev,
2806 return err; 2799 return err;
2807} 2800}
2808 2801
2809#ifdef CONFIG_PM
2810static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2811{
2812 struct sky2_port *sky2 = netdev_priv(dev);
2813
2814 wol->supported = WAKE_MAGIC;
2815 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2816}
2817
2818static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2819{
2820 struct sky2_port *sky2 = netdev_priv(dev);
2821 struct sky2_hw *hw = sky2->hw;
2822
2823 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2824 return -EOPNOTSUPP;
2825
2826 sky2->wol = wol->wolopts == WAKE_MAGIC;
2827
2828 if (sky2->wol) {
2829 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2830
2831 sky2_write16(hw, WOL_CTRL_STAT,
2832 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2833 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2834 } else
2835 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2836
2837 return 0;
2838}
2839#endif
2840
2841static int sky2_get_coalesce(struct net_device *dev, 2802static int sky2_get_coalesce(struct net_device *dev,
2842 struct ethtool_coalesce *ecmd) 2803 struct ethtool_coalesce *ecmd)
2843{ 2804{
@@ -2878,19 +2839,11 @@ static int sky2_set_coalesce(struct net_device *dev,
2878{ 2839{
2879 struct sky2_port *sky2 = netdev_priv(dev); 2840 struct sky2_port *sky2 = netdev_priv(dev);
2880 struct sky2_hw *hw = sky2->hw; 2841 struct sky2_hw *hw = sky2->hw;
2881 const u32 tmin = sky2_clk2us(hw, 1); 2842 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
2882 const u32 tmax = 5000;
2883 2843
2884 if (ecmd->tx_coalesce_usecs != 0 && 2844 if (ecmd->tx_coalesce_usecs > tmax ||
2885 (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax)) 2845 ecmd->rx_coalesce_usecs > tmax ||
2886 return -EINVAL; 2846 ecmd->rx_coalesce_usecs_irq > tmax)
2887
2888 if (ecmd->rx_coalesce_usecs != 0 &&
2889 (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax))
2890 return -EINVAL;
2891
2892 if (ecmd->rx_coalesce_usecs_irq != 0 &&
2893 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
2894 return -EINVAL; 2847 return -EINVAL;
2895 2848
2896 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) 2849 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
@@ -3025,10 +2978,6 @@ static struct ethtool_ops sky2_ethtool_ops = {
3025 .set_ringparam = sky2_set_ringparam, 2978 .set_ringparam = sky2_set_ringparam,
3026 .get_pauseparam = sky2_get_pauseparam, 2979 .get_pauseparam = sky2_get_pauseparam,
3027 .set_pauseparam = sky2_set_pauseparam, 2980 .set_pauseparam = sky2_set_pauseparam,
3028#ifdef CONFIG_PM
3029 .get_wol = sky2_get_wol,
3030 .set_wol = sky2_set_wol,
3031#endif
3032 .phys_id = sky2_phys_id, 2981 .phys_id = sky2_phys_id,
3033 .get_stats_count = sky2_get_stats_count, 2982 .get_stats_count = sky2_get_stats_count,
3034 .get_ethtool_stats = sky2_get_ethtool_stats, 2983 .get_ethtool_stats = sky2_get_ethtool_stats,
@@ -3082,16 +3031,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3082 sky2->speed = -1; 3031 sky2->speed = -1;
3083 sky2->advertising = sky2_supported_modes(hw); 3032 sky2->advertising = sky2_supported_modes(hw);
3084 3033
3085 /* Receive checksum disabled for Yukon XL 3034 /* Receive checksum disabled for Yukon XL
3086 * because of observed problems with incorrect 3035 * because of observed problems with incorrect
3087 * values when multiple packets are received in one interrupt 3036 * values when multiple packets are received in one interrupt
3088 */ 3037 */
3089 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); 3038 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
3090 3039
3091 INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2); 3040 spin_lock_init(&sky2->phy_lock);
3092 init_MUTEX(&sky2->phy_sema);
3093 sky2->tx_pending = TX_DEF_PENDING; 3041 sky2->tx_pending = TX_DEF_PENDING;
3094 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING; 3042 sky2->rx_pending = RX_DEF_PENDING;
3095 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN); 3043 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
3096 3044
3097 hw->dev[port] = dev; 3045 hw->dev[port] = dev;
@@ -3133,6 +3081,66 @@ static void __devinit sky2_show_addr(struct net_device *dev)
3133 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 3081 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3134} 3082}
3135 3083
3084/* Handle software interrupt used during MSI test */
3085static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
3086 struct pt_regs *regs)
3087{
3088 struct sky2_hw *hw = dev_id;
3089 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3090
3091 if (status == 0)
3092 return IRQ_NONE;
3093
3094 if (status & Y2_IS_IRQ_SW) {
3095 hw->msi_detected = 1;
3096 wake_up(&hw->msi_wait);
3097 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3098 }
3099 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3100
3101 return IRQ_HANDLED;
3102}
3103
3104/* Test interrupt path by forcing a a software IRQ */
3105static int __devinit sky2_test_msi(struct sky2_hw *hw)
3106{
3107 struct pci_dev *pdev = hw->pdev;
3108 int err;
3109
3110 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
3111
3112 err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
3113 if (err) {
3114 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3115 pci_name(pdev), pdev->irq);
3116 return err;
3117 }
3118
3119 init_waitqueue_head (&hw->msi_wait);
3120
3121 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3122 wmb();
3123
3124 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
3125
3126 if (!hw->msi_detected) {
3127 /* MSI test failed, go back to INTx mode */
3128 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
3129 "switching to INTx mode. Please report this failure to "
3130 "the PCI maintainer and include system chipset information.\n",
3131 pci_name(pdev));
3132
3133 err = -EOPNOTSUPP;
3134 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3135 }
3136
3137 sky2_write32(hw, B0_IMSK, 0);
3138
3139 free_irq(pdev->irq, hw);
3140
3141 return err;
3142}
3143
3136static int __devinit sky2_probe(struct pci_dev *pdev, 3144static int __devinit sky2_probe(struct pci_dev *pdev,
3137 const struct pci_device_id *ent) 3145 const struct pci_device_id *ent)
3138{ 3146{
@@ -3201,7 +3209,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3201 goto err_out_free_hw; 3209 goto err_out_free_hw;
3202 } 3210 }
3203 hw->pm_cap = pm_cap; 3211 hw->pm_cap = pm_cap;
3204 spin_lock_init(&hw->hw_lock);
3205 3212
3206#ifdef __BIG_ENDIAN 3213#ifdef __BIG_ENDIAN
3207 /* byte swap descriptors in hardware */ 3214 /* byte swap descriptors in hardware */
@@ -3254,21 +3261,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3254 } 3261 }
3255 } 3262 }
3256 3263
3257 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); 3264 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3265 err = sky2_test_msi(hw);
3266 if (err == -EOPNOTSUPP)
3267 pci_disable_msi(pdev);
3268 else if (err)
3269 goto err_out_unregister;
3270 }
3271
3272 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
3258 if (err) { 3273 if (err) {
3259 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3274 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3260 pci_name(pdev), pdev->irq); 3275 pci_name(pdev), pdev->irq);
3261 goto err_out_unregister; 3276 goto err_out_unregister;
3262 } 3277 }
3263 3278
3264 hw->intr_mask = Y2_IS_BASE; 3279 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3265 sky2_write32(hw, B0_IMSK, hw->intr_mask);
3266 3280
3267 pci_set_drvdata(pdev, hw); 3281 pci_set_drvdata(pdev, hw);
3268 3282
3269 return 0; 3283 return 0;
3270 3284
3271err_out_unregister: 3285err_out_unregister:
3286 pci_disable_msi(pdev);
3272 if (dev1) { 3287 if (dev1) {
3273 unregister_netdev(dev1); 3288 unregister_netdev(dev1);
3274 free_netdev(dev1); 3289 free_netdev(dev1);
@@ -3311,6 +3326,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3311 sky2_read8(hw, B0_CTST); 3326 sky2_read8(hw, B0_CTST);
3312 3327
3313 free_irq(pdev->irq, hw); 3328 free_irq(pdev->irq, hw);
3329 pci_disable_msi(pdev);
3314 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 3330 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3315 pci_release_regions(pdev); 3331 pci_release_regions(pdev);
3316 pci_disable_device(pdev); 3332 pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index dce955c76f3c..2838f661b393 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -278,13 +278,11 @@ enum {
278 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ 278 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
279 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ 279 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
280 280
281 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU | 281 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
282 Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY | 282 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
283 Y2_IS_IRQ_SW | Y2_IS_TIMINT, 283 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
284 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | 284 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
285 Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1, 285 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
286 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
287 Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
288}; 286};
289 287
290/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 288/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -1375,23 +1373,23 @@ enum {
1375 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ 1373 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1376 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ 1374 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1377 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ 1375 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1376/* MIB Counters */
1377 GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */
1378 GM_MIB_CNT_SIZE = 256,
1378}; 1379};
1379 1380
1380/* MIB Counters */
1381#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1382#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1383 1381
1384/* 1382/*
1385 * MIB Counters base address definitions (low word) - 1383 * MIB Counters base address definitions (low word) -
1386 * use offset 4 for access to high word (32 bit r/o) 1384 * use offset 4 for access to high word (32 bit r/o)
1387 */ 1385 */
1388enum { 1386enum {
1389 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ 1387 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1390 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ 1388 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1391 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ 1389 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1392 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ 1390 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1393 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ 1391 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1394 /* GM_MIB_CNT_BASE + 40: reserved */ 1392
1395 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ 1393 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1396 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ 1394 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1397 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ 1395 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
@@ -1399,37 +1397,36 @@ enum {
1399 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ 1397 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1400 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ 1398 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1401 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ 1399 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1402 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ 1400 GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */
1403 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ 1401 GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */
1404 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ 1402 GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */
1405 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ 1403 GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */
1406 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ 1404 GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */
1407 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ 1405 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */
1408 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ 1406 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */
1409 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ 1407 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */
1410 /* GM_MIB_CNT_BASE + 168: reserved */ 1408
1411 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ 1409 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */
1412 /* GM_MIB_CNT_BASE + 184: reserved */ 1410 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */
1413 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ 1411 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */
1414 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ 1412 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */
1415 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ 1413 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */
1416 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ 1414 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */
1417 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ 1415 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */
1418 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ 1416 GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */
1419 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ 1417 GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */
1420 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ 1418 GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */
1421 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ 1419 GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */
1422 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ 1420 GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */
1423 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ 1421 GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */
1424 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ 1422 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */
1425 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ 1423
1426 1424 GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */
1427 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ 1425 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */
1428 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ 1426 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */
1429 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */ 1427 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */
1430 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ 1428 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */
1431 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ 1429 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */
1432 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1433}; 1430};
1434 1431
1435/* GMAC Bit Definitions */ 1432/* GMAC Bit Definitions */
@@ -1832,6 +1829,7 @@ struct sky2_port {
1832 struct net_device *netdev; 1829 struct net_device *netdev;
1833 unsigned port; 1830 unsigned port;
1834 u32 msg_enable; 1831 u32 msg_enable;
1832 spinlock_t phy_lock;
1835 1833
1836 spinlock_t tx_lock ____cacheline_aligned_in_smp; 1834 spinlock_t tx_lock ____cacheline_aligned_in_smp;
1837 struct tx_ring_info *tx_ring; 1835 struct tx_ring_info *tx_ring;
@@ -1840,7 +1838,6 @@ struct sky2_port {
1840 u16 tx_prod; /* next le to use */ 1838 u16 tx_prod; /* next le to use */
1841 u32 tx_addr64; 1839 u32 tx_addr64;
1842 u16 tx_pending; 1840 u16 tx_pending;
1843 u16 tx_last_put;
1844 u16 tx_last_mss; 1841 u16 tx_last_mss;
1845 1842
1846 struct ring_info *rx_ring ____cacheline_aligned_in_smp; 1843 struct ring_info *rx_ring ____cacheline_aligned_in_smp;
@@ -1849,7 +1846,6 @@ struct sky2_port {
1849 u16 rx_next; /* next re to check */ 1846 u16 rx_next; /* next re to check */
1850 u16 rx_put; /* next le index to use */ 1847 u16 rx_put; /* next le index to use */
1851 u16 rx_pending; 1848 u16 rx_pending;
1852 u16 rx_last_put;
1853 u16 rx_bufsize; 1849 u16 rx_bufsize;
1854#ifdef SKY2_VLAN_TAG_USED 1850#ifdef SKY2_VLAN_TAG_USED
1855 u16 rx_tag; 1851 u16 rx_tag;
@@ -1865,20 +1861,15 @@ struct sky2_port {
1865 u8 rx_pause; 1861 u8 rx_pause;
1866 u8 tx_pause; 1862 u8 tx_pause;
1867 u8 rx_csum; 1863 u8 rx_csum;
1868 u8 wol;
1869 1864
1870 struct net_device_stats net_stats; 1865 struct net_device_stats net_stats;
1871 1866
1872 struct work_struct phy_task;
1873 struct semaphore phy_sema;
1874}; 1867};
1875 1868
1876struct sky2_hw { 1869struct sky2_hw {
1877 void __iomem *regs; 1870 void __iomem *regs;
1878 struct pci_dev *pdev; 1871 struct pci_dev *pdev;
1879 struct net_device *dev[2]; 1872 struct net_device *dev[2];
1880 spinlock_t hw_lock;
1881 u32 intr_mask;
1882 1873
1883 int pm_cap; 1874 int pm_cap;
1884 u8 chip_id; 1875 u8 chip_id;
@@ -1889,6 +1880,8 @@ struct sky2_hw {
1889 struct sky2_status_le *st_le; 1880 struct sky2_status_le *st_le;
1890 u32 st_idx; 1881 u32 st_idx;
1891 dma_addr_t st_dma; 1882 dma_addr_t st_dma;
1883 int msi_detected;
1884 wait_queue_head_t msi_wait;
1892}; 1885};
1893 1886
1894/* Register accessor for memory mapped device */ 1887/* Register accessor for memory mapped device */
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 7ec08127c9d6..0e9833adf9fe 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -215,15 +215,12 @@ struct smc_local {
215 215
216 spinlock_t lock; 216 spinlock_t lock;
217 217
218#ifdef SMC_CAN_USE_DATACS
219 u32 __iomem *datacs;
220#endif
221
222#ifdef SMC_USE_PXA_DMA 218#ifdef SMC_USE_PXA_DMA
223 /* DMA needs the physical address of the chip */ 219 /* DMA needs the physical address of the chip */
224 u_long physaddr; 220 u_long physaddr;
225#endif 221#endif
226 void __iomem *base; 222 void __iomem *base;
223 void __iomem *datacs;
227}; 224};
228 225
229#if SMC_DEBUG > 0 226#if SMC_DEBUG > 0
@@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev)
2104 * Set the appropriate byte/word mode. 2101 * Set the appropriate byte/word mode.
2105 */ 2102 */
2106 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; 2103 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
2107#ifndef SMC_CAN_USE_16BIT 2104 if (!SMC_CAN_USE_16BIT)
2108 ecsr |= ECSR_IOIS8; 2105 ecsr |= ECSR_IOIS8;
2109#endif
2110 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); 2106 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
2111 local_irq_restore(flags); 2107 local_irq_restore(flags);
2112 2108
@@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev)
2143 release_mem_region(res->start, ATTRIB_SIZE); 2139 release_mem_region(res->start, ATTRIB_SIZE);
2144} 2140}
2145 2141
2146#ifdef SMC_CAN_USE_DATACS 2142static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2147static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2148{ 2143{
2149 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); 2144 if (SMC_CAN_USE_DATACS) {
2150 struct smc_local *lp = netdev_priv(ndev); 2145 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2146 struct smc_local *lp = netdev_priv(ndev);
2151 2147
2152 if (!res) 2148 if (!res)
2153 return; 2149 return;
2154 2150
2155 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { 2151 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
2156 printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); 2152 printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
2157 return; 2153 return;
2158 } 2154 }
2159 2155
2160 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); 2156 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
2157 }
2161} 2158}
2162 2159
2163static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) 2160static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
2164{ 2161{
2165 struct smc_local *lp = netdev_priv(ndev); 2162 if (SMC_CAN_USE_DATACS) {
2166 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); 2163 struct smc_local *lp = netdev_priv(ndev);
2164 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2167 2165
2168 if (lp->datacs) 2166 if (lp->datacs)
2169 iounmap(lp->datacs); 2167 iounmap(lp->datacs);
2170 2168
2171 lp->datacs = NULL; 2169 lp->datacs = NULL;
2172 2170
2173 if (res) 2171 if (res)
2174 release_mem_region(res->start, SMC_DATA_EXTENT); 2172 release_mem_region(res->start, SMC_DATA_EXTENT);
2173 }
2175} 2174}
2176#else
2177static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {}
2178static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {}
2179#endif
2180 2175
2181/* 2176/*
2182 * smc_init(void) 2177 * smc_init(void)
@@ -2221,6 +2216,10 @@ static int smc_drv_probe(struct platform_device *pdev)
2221 2216
2222 ndev->dma = (unsigned char)-1; 2217 ndev->dma = (unsigned char)-1;
2223 ndev->irq = platform_get_irq(pdev, 0); 2218 ndev->irq = platform_get_irq(pdev, 0);
2219 if (ndev->irq < 0) {
2220 ret = -ENODEV;
2221 goto out_free_netdev;
2222 }
2224 2223
2225 ret = smc_request_attrib(pdev); 2224 ret = smc_request_attrib(pdev);
2226 if (ret) 2225 if (ret)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e0efd1964e72..e1be1af51201 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
275#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l) 275#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l)
276#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; }) 276#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; })
277 277
278static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) 278#define SMC_outsw LPD7A40X_SMC_outsw
279
280static inline void LPD7A40X_SMC_outsw(unsigned long a, int r,
281 unsigned char* p, int l)
279{ 282{
280 unsigned short* ps = (unsigned short*) p; 283 unsigned short* ps = (unsigned short*) p;
281 while (l-- > 0) { 284 while (l-- > 0) {
@@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
342 345
343#endif 346#endif
344 347
345#ifndef SMC_IRQ_FLAGS
346#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
347#endif
348
349#ifdef SMC_USE_PXA_DMA 348#ifdef SMC_USE_PXA_DMA
350/* 349/*
351 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is 350 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
@@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs)
441#endif /* SMC_USE_PXA_DMA */ 440#endif /* SMC_USE_PXA_DMA */
442 441
443 442
444/* Because of bank switching, the LAN91x uses only 16 I/O ports */ 443/*
444 * Everything a particular hardware setup needs should have been defined
445 * at this point. Add stubs for the undefined cases, mainly to avoid
446 * compilation warnings since they'll be optimized away, or to prevent buggy
447 * use of them.
448 */
449
450#if ! SMC_CAN_USE_32BIT
451#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
452#define SMC_outl(x, ioaddr, reg) BUG()
453#define SMC_insl(a, r, p, l) BUG()
454#define SMC_outsl(a, r, p, l) BUG()
455#endif
456
457#if !defined(SMC_insl) || !defined(SMC_outsl)
458#define SMC_insl(a, r, p, l) BUG()
459#define SMC_outsl(a, r, p, l) BUG()
460#endif
461
462#if ! SMC_CAN_USE_16BIT
463
464/*
465 * Any 16-bit access is performed with two 8-bit accesses if the hardware
466 * can't do it directly. Most registers are 16-bit so those are mandatory.
467 */
468#define SMC_outw(x, ioaddr, reg) \
469 do { \
470 unsigned int __val16 = (x); \
471 SMC_outb( __val16, ioaddr, reg ); \
472 SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
473 } while (0)
474#define SMC_inw(ioaddr, reg) \
475 ({ \
476 unsigned int __val16; \
477 __val16 = SMC_inb( ioaddr, reg ); \
478 __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
479 __val16; \
480 })
481
482#define SMC_insw(a, r, p, l) BUG()
483#define SMC_outsw(a, r, p, l) BUG()
484
485#endif
486
487#if !defined(SMC_insw) || !defined(SMC_outsw)
488#define SMC_insw(a, r, p, l) BUG()
489#define SMC_outsw(a, r, p, l) BUG()
490#endif
491
492#if ! SMC_CAN_USE_8BIT
493#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
494#define SMC_outb(x, ioaddr, reg) BUG()
495#define SMC_insb(a, r, p, l) BUG()
496#define SMC_outsb(a, r, p, l) BUG()
497#endif
498
499#if !defined(SMC_insb) || !defined(SMC_outsb)
500#define SMC_insb(a, r, p, l) BUG()
501#define SMC_outsb(a, r, p, l) BUG()
502#endif
503
504#ifndef SMC_CAN_USE_DATACS
505#define SMC_CAN_USE_DATACS 0
506#endif
507
445#ifndef SMC_IO_SHIFT 508#ifndef SMC_IO_SHIFT
446#define SMC_IO_SHIFT 0 509#define SMC_IO_SHIFT 0
447#endif 510#endif
511
512#ifndef SMC_IRQ_FLAGS
513#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
514#endif
515
516#ifndef SMC_INTERRUPT_PREAMBLE
517#define SMC_INTERRUPT_PREAMBLE
518#endif
519
520
521/* Because of bank switching, the LAN91x uses only 16 I/O ports */
448#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) 522#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
449#define SMC_DATA_EXTENT (4) 523#define SMC_DATA_EXTENT (4)
450 524
@@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = {
817 * Note: the following macros do *not* select the bank -- this must 891 * Note: the following macros do *not* select the bank -- this must
818 * be done separately as needed in the main code. The SMC_REG() macro 892 * be done separately as needed in the main code. The SMC_REG() macro
819 * only uses the bank argument for debugging purposes (when enabled). 893 * only uses the bank argument for debugging purposes (when enabled).
894 *
895 * Note: despite inline functions being safer, everything leading to this
896 * should preferably be macros to let BUG() display the line number in
897 * the core source code since we're interested in the top call site
898 * not in any inline function location.
820 */ 899 */
821 900
822#if SMC_DEBUG > 0 901#if SMC_DEBUG > 0
@@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = {
834#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) 913#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT)
835#endif 914#endif
836 915
837#if SMC_CAN_USE_8BIT 916/*
838#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG ) 917 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
839#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG ) 918 * aligned to a 32 bit boundary. I tell you that does exist!
840#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG ) 919 * Fortunately the affected register accesses can be easily worked around
841#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG ) 920 * since we can write zeroes to the preceeding 16 bits without adverse
842#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG ) 921 * effects and use a 32-bit access.
843#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG ) 922 *
844#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG ) 923 * Enforce it on any 32-bit capable setup for now.
845#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG ) 924 */
846#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG ) 925#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT
847#else 926
848#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF) 927#define SMC_GET_PN() \
849#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG ) 928 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \
850#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8) 929 : (SMC_inw(ioaddr, PN_REG) & 0xFF) )
851#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF) 930
852#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8) 931#define SMC_SET_PN(x) \
853#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF) 932 do { \
933 if (SMC_MUST_ALIGN_WRITE) \
934 SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \
935 else if (SMC_CAN_USE_8BIT) \
936 SMC_outb(x, ioaddr, PN_REG); \
937 else \
938 SMC_outw(x, ioaddr, PN_REG); \
939 } while (0)
940
941#define SMC_GET_AR() \
942 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \
943 : (SMC_inw(ioaddr, PN_REG) >> 8) )
944
945#define SMC_GET_TXFIFO() \
946 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \
947 : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) )
948
949#define SMC_GET_RXFIFO() \
950 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \
951 : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) )
952
953#define SMC_GET_INT() \
954 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \
955 : (SMC_inw(ioaddr, INT_REG) & 0xFF) )
956
854#define SMC_ACK_INT(x) \ 957#define SMC_ACK_INT(x) \
855 do { \ 958 do { \
856 unsigned long __flags; \ 959 if (SMC_CAN_USE_8BIT) \
857 int __mask; \ 960 SMC_outb(x, ioaddr, INT_REG); \
858 local_irq_save(__flags); \ 961 else { \
859 __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ 962 unsigned long __flags; \
860 SMC_outw( __mask | (x), ioaddr, INT_REG ); \ 963 int __mask; \
861 local_irq_restore(__flags); \ 964 local_irq_save(__flags); \
965 __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
966 SMC_outw( __mask | (x), ioaddr, INT_REG ); \
967 local_irq_restore(__flags); \
968 } \
969 } while (0)
970
971#define SMC_GET_INT_MASK() \
972 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \
973 : (SMC_inw( ioaddr, INT_REG ) >> 8) )
974
975#define SMC_SET_INT_MASK(x) \
976 do { \
977 if (SMC_CAN_USE_8BIT) \
978 SMC_outb(x, ioaddr, IM_REG); \
979 else \
980 SMC_outw((x) << 8, ioaddr, INT_REG); \
981 } while (0)
982
983#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT)
984
985#define SMC_SELECT_BANK(x) \
986 do { \
987 if (SMC_MUST_ALIGN_WRITE) \
988 SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
989 else \
990 SMC_outw(x, ioaddr, BANK_SELECT); \
991 } while (0)
992
993#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG)
994
995#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG)
996
997#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG)
998
999#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG)
1000
1001#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG)
1002
1003#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG)
1004
1005#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG)
1006
1007#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG)
1008
1009#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG)
1010
1011#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG)
1012
1013#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG)
1014
1015#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG)
1016
1017#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG)
1018
1019#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG)
1020
1021#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG)
1022
1023#define SMC_SET_PTR(x) \
1024 do { \
1025 if (SMC_MUST_ALIGN_WRITE) \
1026 SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \
1027 else \
1028 SMC_outw(x, ioaddr, PTR_REG); \
862 } while (0) 1029 } while (0)
863#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8)
864#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG )
865#endif
866 1030
867#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT ) 1031#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG)
868#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT ) 1032
869#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG ) 1033#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG)
870#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG ) 1034
871#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG ) 1035#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG)
872#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG ) 1036
873#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG ) 1037#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG)
874#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG ) 1038
875#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG ) 1039#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG)
876#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG ) 1040
877#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG ) 1041#define SMC_SET_RPC(x) \
878#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG ) 1042 do { \
879#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG ) 1043 if (SMC_MUST_ALIGN_WRITE) \
880#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG ) 1044 SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \
881#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG ) 1045 else \
882#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) 1046 SMC_outw(x, ioaddr, RPC_REG); \
883#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) 1047 } while (0)
884#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) 1048
885#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG ) 1049#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG)
886#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) 1050
887#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) 1051#define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG)
888#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
889#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG )
890#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG )
891#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG )
892#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG )
893 1052
894#ifndef SMC_GET_MAC_ADDR 1053#ifndef SMC_GET_MAC_ADDR
895#define SMC_GET_MAC_ADDR(addr) \ 1054#define SMC_GET_MAC_ADDR(addr) \
@@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = {
920 SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ 1079 SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \
921 } while (0) 1080 } while (0)
922 1081
923#if SMC_CAN_USE_32BIT
924/*
925 * Some setups just can't write 8 or 16 bits reliably when not aligned
926 * to a 32 bit boundary. I tell you that exists!
927 * We re-do the ones here that can be easily worked around if they can have
928 * their low parts written to 0 without adverse effects.
929 */
930#undef SMC_SELECT_BANK
931#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT )
932#undef SMC_SET_RPC
933#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) )
934#undef SMC_SET_PN
935#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) )
936#undef SMC_SET_PTR
937#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) )
938#endif
939
940#if SMC_CAN_USE_32BIT
941#define SMC_PUT_PKT_HDR(status, length) \
942 SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG )
943#define SMC_GET_PKT_HDR(status, length) \
944 do { \
945 unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \
946 (status) = __val & 0xffff; \
947 (length) = __val >> 16; \
948 } while (0)
949#else
950#define SMC_PUT_PKT_HDR(status, length) \ 1082#define SMC_PUT_PKT_HDR(status, length) \
951 do { \ 1083 do { \
952 SMC_outw( status, ioaddr, DATA_REG ); \ 1084 if (SMC_CAN_USE_32BIT) \
953 SMC_outw( length, ioaddr, DATA_REG ); \ 1085 SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \
954 } while (0) 1086 else { \
955#define SMC_GET_PKT_HDR(status, length) \ 1087 SMC_outw(status, ioaddr, DATA_REG); \
956 do { \ 1088 SMC_outw(length, ioaddr, DATA_REG); \
957 (status) = SMC_inw( ioaddr, DATA_REG ); \ 1089 } \
958 (length) = SMC_inw( ioaddr, DATA_REG ); \
959 } while (0) 1090 } while (0)
960#endif
961 1091
962#if SMC_CAN_USE_32BIT 1092#define SMC_GET_PKT_HDR(status, length) \
963#define _SMC_PUSH_DATA(p, l) \
964 do { \ 1093 do { \
965 char *__ptr = (p); \ 1094 if (SMC_CAN_USE_32BIT) { \
966 int __len = (l); \ 1095 unsigned int __val = SMC_inl(ioaddr, DATA_REG); \
967 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1096 (status) = __val & 0xffff; \
968 __len -= 2; \ 1097 (length) = __val >> 16; \
969 SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \ 1098 } else { \
970 __ptr += 2; \ 1099 (status) = SMC_inw(ioaddr, DATA_REG); \
971 } \ 1100 (length) = SMC_inw(ioaddr, DATA_REG); \
972 SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \
973 if (__len & 2) { \
974 __ptr += (__len & ~3); \
975 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
976 } \ 1101 } \
977 } while (0) 1102 } while (0)
978#define _SMC_PULL_DATA(p, l) \
979 do { \
980 char *__ptr = (p); \
981 int __len = (l); \
982 if ((unsigned long)__ptr & 2) { \
983 /* \
984 * We want 32bit alignment here. \
985 * Since some buses perform a full 32bit \
986 * fetch even for 16bit data we can't use \
987 * SMC_inw() here. Back both source (on chip \
988 * and destination) pointers of 2 bytes. \
989 */ \
990 __ptr -= 2; \
991 __len += 2; \
992 SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
993 } \
994 __len += 2; \
995 SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \
996 } while (0)
997#elif SMC_CAN_USE_16BIT
998#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 )
999#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 )
1000#elif SMC_CAN_USE_8BIT
1001#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l )
1002#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l )
1003#endif
1004 1103
1005#if ! SMC_CAN_USE_16BIT 1104#define SMC_PUSH_DATA(p, l) \
1006#define SMC_outw(x, ioaddr, reg) \
1007 do { \ 1105 do { \
1008 unsigned int __val16 = (x); \ 1106 if (SMC_CAN_USE_32BIT) { \
1009 SMC_outb( __val16, ioaddr, reg ); \ 1107 void *__ptr = (p); \
1010 SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ 1108 int __len = (l); \
1109 void *__ioaddr = ioaddr; \
1110 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1111 __len -= 2; \
1112 SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
1113 __ptr += 2; \
1114 } \
1115 if (SMC_CAN_USE_DATACS && lp->datacs) \
1116 __ioaddr = lp->datacs; \
1117 SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \
1118 if (__len & 2) { \
1119 __ptr += (__len & ~3); \
1120 SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \
1121 } \
1122 } else if (SMC_CAN_USE_16BIT) \
1123 SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \
1124 else if (SMC_CAN_USE_8BIT) \
1125 SMC_outsb(ioaddr, DATA_REG, p, l); \
1011 } while (0) 1126 } while (0)
1012#define SMC_inw(ioaddr, reg) \
1013 ({ \
1014 unsigned int __val16; \
1015 __val16 = SMC_inb( ioaddr, reg ); \
1016 __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
1017 __val16; \
1018 })
1019#endif
1020
1021#ifdef SMC_CAN_USE_DATACS
1022#define SMC_PUSH_DATA(p, l) \
1023 if ( lp->datacs ) { \
1024 unsigned char *__ptr = (p); \
1025 int __len = (l); \
1026 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1027 __len -= 2; \
1028 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
1029 __ptr += 2; \
1030 } \
1031 outsl(lp->datacs, __ptr, __len >> 2); \
1032 if (__len & 2) { \
1033 __ptr += (__len & ~3); \
1034 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
1035 } \
1036 } else { \
1037 _SMC_PUSH_DATA(p, l); \
1038 }
1039 1127
1040#define SMC_PULL_DATA(p, l) \ 1128#define SMC_PULL_DATA(p, l) \
1041 if ( lp->datacs ) { \ 1129 do { \
1042 unsigned char *__ptr = (p); \ 1130 if (SMC_CAN_USE_32BIT) { \
1043 int __len = (l); \ 1131 void *__ptr = (p); \
1044 if ((unsigned long)__ptr & 2) { \ 1132 int __len = (l); \
1045 /* \ 1133 void *__ioaddr = ioaddr; \
1046 * We want 32bit alignment here. \ 1134 if ((unsigned long)__ptr & 2) { \
1047 * Since some buses perform a full 32bit \ 1135 /* \
1048 * fetch even for 16bit data we can't use \ 1136 * We want 32bit alignment here. \
1049 * SMC_inw() here. Back both source (on chip \ 1137 * Since some buses perform a full \
1050 * and destination) pointers of 2 bytes. \ 1138 * 32bit fetch even for 16bit data \
1051 */ \ 1139 * we can't use SMC_inw() here. \
1052 __ptr -= 2; \ 1140 * Back both source (on-chip) and \
1141 * destination pointers of 2 bytes. \
1142 * This is possible since the call to \
1143 * SMC_GET_PKT_HDR() already advanced \
1144 * the source pointer of 4 bytes, and \
1145 * the skb_reserve(skb, 2) advanced \
1146 * the destination pointer of 2 bytes. \
1147 */ \
1148 __ptr -= 2; \
1149 __len += 2; \
1150 SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
1151 } \
1152 if (SMC_CAN_USE_DATACS && lp->datacs) \
1153 __ioaddr = lp->datacs; \
1053 __len += 2; \ 1154 __len += 2; \
1054 SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ 1155 SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \
1055 } \ 1156 } else if (SMC_CAN_USE_16BIT) \
1056 __len += 2; \ 1157 SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \
1057 insl( lp->datacs, __ptr, __len >> 2); \ 1158 else if (SMC_CAN_USE_8BIT) \
1058 } else { \ 1159 SMC_insb(ioaddr, DATA_REG, p, l); \
1059 _SMC_PULL_DATA(p, l); \ 1160 } while (0)
1060 }
1061#else
1062#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l)
1063#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l)
1064#endif
1065
1066#if !defined (SMC_INTERRUPT_PREAMBLE)
1067# define SMC_INTERRUPT_PREAMBLE
1068#endif
1069 1161
1070#endif /* _SMC91X_H_ */ 1162#endif /* _SMC91X_H_ */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 28ce47a02408..38cd30cb7c75 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -55,6 +55,7 @@
55#include <linux/workqueue.h> 55#include <linux/workqueue.h>
56#include <linux/if_vlan.h> 56#include <linux/if_vlan.h>
57#include <linux/bitops.h> 57#include <linux/bitops.h>
58#include <linux/mutex.h>
58 59
59#include <asm/system.h> 60#include <asm/system.h>
60#include <asm/io.h> 61#include <asm/io.h>
@@ -2284,7 +2285,7 @@ static void gem_reset_task(void *data)
2284{ 2285{
2285 struct gem *gp = (struct gem *) data; 2286 struct gem *gp = (struct gem *) data;
2286 2287
2287 down(&gp->pm_sem); 2288 mutex_lock(&gp->pm_mutex);
2288 2289
2289 netif_poll_disable(gp->dev); 2290 netif_poll_disable(gp->dev);
2290 2291
@@ -2311,7 +2312,7 @@ static void gem_reset_task(void *data)
2311 2312
2312 netif_poll_enable(gp->dev); 2313 netif_poll_enable(gp->dev);
2313 2314
2314 up(&gp->pm_sem); 2315 mutex_unlock(&gp->pm_mutex);
2315} 2316}
2316 2317
2317 2318
@@ -2320,14 +2321,14 @@ static int gem_open(struct net_device *dev)
2320 struct gem *gp = dev->priv; 2321 struct gem *gp = dev->priv;
2321 int rc = 0; 2322 int rc = 0;
2322 2323
2323 down(&gp->pm_sem); 2324 mutex_lock(&gp->pm_mutex);
2324 2325
2325 /* We need the cell enabled */ 2326 /* We need the cell enabled */
2326 if (!gp->asleep) 2327 if (!gp->asleep)
2327 rc = gem_do_start(dev); 2328 rc = gem_do_start(dev);
2328 gp->opened = (rc == 0); 2329 gp->opened = (rc == 0);
2329 2330
2330 up(&gp->pm_sem); 2331 mutex_unlock(&gp->pm_mutex);
2331 2332
2332 return rc; 2333 return rc;
2333} 2334}
@@ -2340,13 +2341,13 @@ static int gem_close(struct net_device *dev)
2340 * our caller (dev_close) already did it for us 2341 * our caller (dev_close) already did it for us
2341 */ 2342 */
2342 2343
2343 down(&gp->pm_sem); 2344 mutex_lock(&gp->pm_mutex);
2344 2345
2345 gp->opened = 0; 2346 gp->opened = 0;
2346 if (!gp->asleep) 2347 if (!gp->asleep)
2347 gem_do_stop(dev, 0); 2348 gem_do_stop(dev, 0);
2348 2349
2349 up(&gp->pm_sem); 2350 mutex_unlock(&gp->pm_mutex);
2350 2351
2351 return 0; 2352 return 0;
2352} 2353}
@@ -2358,7 +2359,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2358 struct gem *gp = dev->priv; 2359 struct gem *gp = dev->priv;
2359 unsigned long flags; 2360 unsigned long flags;
2360 2361
2361 down(&gp->pm_sem); 2362 mutex_lock(&gp->pm_mutex);
2362 2363
2363 netif_poll_disable(dev); 2364 netif_poll_disable(dev);
2364 2365
@@ -2391,11 +2392,11 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2391 /* Stop the link timer */ 2392 /* Stop the link timer */
2392 del_timer_sync(&gp->link_timer); 2393 del_timer_sync(&gp->link_timer);
2393 2394
2394 /* Now we release the semaphore to not block the reset task who 2395 /* Now we release the mutex to not block the reset task who
2395 * can take it too. We are marked asleep, so there will be no 2396 * can take it too. We are marked asleep, so there will be no
2396 * conflict here 2397 * conflict here
2397 */ 2398 */
2398 up(&gp->pm_sem); 2399 mutex_unlock(&gp->pm_mutex);
2399 2400
2400 /* Wait for a pending reset task to complete */ 2401 /* Wait for a pending reset task to complete */
2401 while (gp->reset_task_pending) 2402 while (gp->reset_task_pending)
@@ -2424,7 +2425,7 @@ static int gem_resume(struct pci_dev *pdev)
2424 2425
2425 printk(KERN_INFO "%s: resuming\n", dev->name); 2426 printk(KERN_INFO "%s: resuming\n", dev->name);
2426 2427
2427 down(&gp->pm_sem); 2428 mutex_lock(&gp->pm_mutex);
2428 2429
2429 /* Keep the cell enabled during the entire operation, no need to 2430 /* Keep the cell enabled during the entire operation, no need to
2430 * take a lock here tho since nothing else can happen while we are 2431 * take a lock here tho since nothing else can happen while we are
@@ -2440,7 +2441,7 @@ static int gem_resume(struct pci_dev *pdev)
2440 * still asleep, a new sleep cycle may bring it back 2441 * still asleep, a new sleep cycle may bring it back
2441 */ 2442 */
2442 gem_put_cell(gp); 2443 gem_put_cell(gp);
2443 up(&gp->pm_sem); 2444 mutex_unlock(&gp->pm_mutex);
2444 return 0; 2445 return 0;
2445 } 2446 }
2446 pci_set_master(gp->pdev); 2447 pci_set_master(gp->pdev);
@@ -2486,7 +2487,7 @@ static int gem_resume(struct pci_dev *pdev)
2486 2487
2487 netif_poll_enable(dev); 2488 netif_poll_enable(dev);
2488 2489
2489 up(&gp->pm_sem); 2490 mutex_unlock(&gp->pm_mutex);
2490 2491
2491 return 0; 2492 return 0;
2492} 2493}
@@ -2591,7 +2592,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
2591 return 0; 2592 return 0;
2592 } 2593 }
2593 2594
2594 down(&gp->pm_sem); 2595 mutex_lock(&gp->pm_mutex);
2595 spin_lock_irq(&gp->lock); 2596 spin_lock_irq(&gp->lock);
2596 spin_lock(&gp->tx_lock); 2597 spin_lock(&gp->tx_lock);
2597 dev->mtu = new_mtu; 2598 dev->mtu = new_mtu;
@@ -2602,7 +2603,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
2602 } 2603 }
2603 spin_unlock(&gp->tx_lock); 2604 spin_unlock(&gp->tx_lock);
2604 spin_unlock_irq(&gp->lock); 2605 spin_unlock_irq(&gp->lock);
2605 up(&gp->pm_sem); 2606 mutex_unlock(&gp->pm_mutex);
2606 2607
2607 return 0; 2608 return 0;
2608} 2609}
@@ -2771,10 +2772,10 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2771 int rc = -EOPNOTSUPP; 2772 int rc = -EOPNOTSUPP;
2772 unsigned long flags; 2773 unsigned long flags;
2773 2774
2774 /* Hold the PM semaphore while doing ioctl's or we may collide 2775 /* Hold the PM mutex while doing ioctl's or we may collide
2775 * with power management. 2776 * with power management.
2776 */ 2777 */
2777 down(&gp->pm_sem); 2778 mutex_lock(&gp->pm_mutex);
2778 2779
2779 spin_lock_irqsave(&gp->lock, flags); 2780 spin_lock_irqsave(&gp->lock, flags);
2780 gem_get_cell(gp); 2781 gem_get_cell(gp);
@@ -2812,7 +2813,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2812 gem_put_cell(gp); 2813 gem_put_cell(gp);
2813 spin_unlock_irqrestore(&gp->lock, flags); 2814 spin_unlock_irqrestore(&gp->lock, flags);
2814 2815
2815 up(&gp->pm_sem); 2816 mutex_unlock(&gp->pm_mutex);
2816 2817
2817 return rc; 2818 return rc;
2818} 2819}
@@ -3033,7 +3034,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3033 3034
3034 spin_lock_init(&gp->lock); 3035 spin_lock_init(&gp->lock);
3035 spin_lock_init(&gp->tx_lock); 3036 spin_lock_init(&gp->tx_lock);
3036 init_MUTEX(&gp->pm_sem); 3037 mutex_init(&gp->pm_mutex);
3037 3038
3038 init_timer(&gp->link_timer); 3039 init_timer(&gp->link_timer);
3039 gp->link_timer.function = gem_link_timer; 3040 gp->link_timer.function = gem_link_timer;
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 13006d759ad8..89847215d006 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -980,15 +980,15 @@ struct gem {
980 int tx_new, tx_old; 980 int tx_new, tx_old;
981 981
982 unsigned int has_wol : 1; /* chip supports wake-on-lan */ 982 unsigned int has_wol : 1; /* chip supports wake-on-lan */
983 unsigned int asleep : 1; /* chip asleep, protected by pm_sem */ 983 unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */
984 unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ 984 unsigned int asleep_wol : 1; /* was asleep with WOL enabled */
985 unsigned int opened : 1; /* driver opened, protected by pm_sem */ 985 unsigned int opened : 1; /* driver opened, protected by pm_mutex */
986 unsigned int running : 1; /* chip running, protected by lock */ 986 unsigned int running : 1; /* chip running, protected by lock */
987 987
988 /* cell enable count, protected by lock */ 988 /* cell enable count, protected by lock */
989 int cell_enabled; 989 int cell_enabled;
990 990
991 struct semaphore pm_sem; 991 struct mutex pm_mutex;
992 992
993 u32 msg_enable; 993 u32 msg_enable;
994 u32 status; 994 u32 status;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6c6c5498899f..b5473325bff4 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.49" 72#define DRV_MODULE_VERSION "3.54"
73#define DRV_MODULE_RELDATE "Feb 2, 2006" 73#define DRV_MODULE_RELDATE "Mar 23, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -221,10 +221,26 @@ static struct pci_device_id tg3_pci_tbl[] = {
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, 222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, 236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, 240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, 244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, 246 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
@@ -534,6 +550,9 @@ static void tg3_enable_ints(struct tg3 *tp)
534 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 550 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
536 (tp->last_tag << 24)); 552 (tp->last_tag << 24));
553 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
554 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
555 (tp->last_tag << 24));
537 tg3_cond_int(tp); 556 tg3_cond_int(tp);
538} 557}
539 558
@@ -1038,9 +1057,11 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1038 struct net_device *dev_peer; 1057 struct net_device *dev_peer;
1039 1058
1040 dev_peer = pci_get_drvdata(tp->pdev_peer); 1059 dev_peer = pci_get_drvdata(tp->pdev_peer);
1060 /* remove_one() may have been run on the peer. */
1041 if (!dev_peer) 1061 if (!dev_peer)
1042 BUG(); 1062 tp_peer = tp;
1043 tp_peer = netdev_priv(dev_peer); 1063 else
1064 tp_peer = netdev_priv(dev_peer);
1044 } 1065 }
1045 1066
1046 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 1067 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
@@ -1131,7 +1152,20 @@ static int tg3_halt_cpu(struct tg3 *, u32);
1131static int tg3_nvram_lock(struct tg3 *); 1152static int tg3_nvram_lock(struct tg3 *);
1132static void tg3_nvram_unlock(struct tg3 *); 1153static void tg3_nvram_unlock(struct tg3 *);
1133 1154
1134static int tg3_set_power_state(struct tg3 *tp, int state) 1155static void tg3_power_down_phy(struct tg3 *tp)
1156{
1157 /* The PHY should not be powered down on some chips because
1158 * of bugs.
1159 */
1160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1162 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1163 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1164 return;
1165 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1166}
1167
1168static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1135{ 1169{
1136 u32 misc_host_ctrl; 1170 u32 misc_host_ctrl;
1137 u16 power_control, power_caps; 1171 u16 power_control, power_caps;
@@ -1150,7 +1184,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1150 power_control |= PCI_PM_CTRL_PME_STATUS; 1184 power_control |= PCI_PM_CTRL_PME_STATUS;
1151 power_control &= ~(PCI_PM_CTRL_STATE_MASK); 1185 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1152 switch (state) { 1186 switch (state) {
1153 case 0: 1187 case PCI_D0:
1154 power_control |= 0; 1188 power_control |= 0;
1155 pci_write_config_word(tp->pdev, 1189 pci_write_config_word(tp->pdev,
1156 pm + PCI_PM_CTRL, 1190 pm + PCI_PM_CTRL,
@@ -1163,15 +1197,15 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1163 1197
1164 return 0; 1198 return 0;
1165 1199
1166 case 1: 1200 case PCI_D1:
1167 power_control |= 1; 1201 power_control |= 1;
1168 break; 1202 break;
1169 1203
1170 case 2: 1204 case PCI_D2:
1171 power_control |= 2; 1205 power_control |= 2;
1172 break; 1206 break;
1173 1207
1174 case 3: 1208 case PCI_D3hot:
1175 power_control |= 3; 1209 power_control |= 3;
1176 break; 1210 break;
1177 1211
@@ -1310,8 +1344,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1310 tg3_writephy(tp, MII_TG3_EXT_CTRL, 1344 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1311 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 1345 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1312 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); 1346 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1313 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) 1347 tg3_power_down_phy(tp);
1314 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1315 } 1348 }
1316 } 1349 }
1317 1350
@@ -2680,6 +2713,12 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2680 2713
2681 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 2714 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2682 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 2715 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2718 bmsr |= BMSR_LSTATUS;
2719 else
2720 bmsr &= ~BMSR_LSTATUS;
2721 }
2683 2722
2684 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 2723 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2685 2724
@@ -2748,6 +2787,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2748 bmcr = new_bmcr; 2787 bmcr = new_bmcr;
2749 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 2788 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2750 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 2789 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2790 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2791 ASIC_REV_5714) {
2792 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2793 bmsr |= BMSR_LSTATUS;
2794 else
2795 bmsr &= ~BMSR_LSTATUS;
2796 }
2751 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 2797 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2752 } 2798 }
2753 } 2799 }
@@ -3338,6 +3384,23 @@ static inline void tg3_full_unlock(struct tg3 *tp)
3338 spin_unlock_bh(&tp->lock); 3384 spin_unlock_bh(&tp->lock);
3339} 3385}
3340 3386
3387/* One-shot MSI handler - Chip automatically disables interrupt
3388 * after sending MSI so driver doesn't have to do it.
3389 */
3390static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3391{
3392 struct net_device *dev = dev_id;
3393 struct tg3 *tp = netdev_priv(dev);
3394
3395 prefetch(tp->hw_status);
3396 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3397
3398 if (likely(!tg3_irq_sync(tp)))
3399 netif_rx_schedule(dev); /* schedule NAPI poll */
3400
3401 return IRQ_HANDLED;
3402}
3403
3341/* MSI ISR - No need to check for interrupt sharing and no need to 3404/* MSI ISR - No need to check for interrupt sharing and no need to
3342 * flush status block and interrupt mailbox. PCI ordering rules 3405 * flush status block and interrupt mailbox. PCI ordering rules
3343 * guarantee that MSI will arrive after the status block. 3406 * guarantee that MSI will arrive after the status block.
@@ -3628,11 +3691,139 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
3628 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 3691 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3629} 3692}
3630 3693
3694/* hard_start_xmit for devices that don't have any bugs and
3695 * support TG3_FLG2_HW_TSO_2 only.
3696 */
3631static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 3697static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3632{ 3698{
3633 struct tg3 *tp = netdev_priv(dev); 3699 struct tg3 *tp = netdev_priv(dev);
3634 dma_addr_t mapping; 3700 dma_addr_t mapping;
3635 u32 len, entry, base_flags, mss; 3701 u32 len, entry, base_flags, mss;
3702
3703 len = skb_headlen(skb);
3704
3705 /* No BH disabling for tx_lock here. We are running in BH disabled
3706 * context and TX reclaim runs via tp->poll inside of a software
3707 * interrupt. Furthermore, IRQ processing runs lockless so we have
3708 * no IRQ context deadlocks to worry about either. Rejoice!
3709 */
3710 if (!spin_trylock(&tp->tx_lock))
3711 return NETDEV_TX_LOCKED;
3712
3713 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3714 if (!netif_queue_stopped(dev)) {
3715 netif_stop_queue(dev);
3716
3717 /* This is a hard error, log it. */
3718 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3719 "queue awake!\n", dev->name);
3720 }
3721 spin_unlock(&tp->tx_lock);
3722 return NETDEV_TX_BUSY;
3723 }
3724
3725 entry = tp->tx_prod;
3726 base_flags = 0;
3727#if TG3_TSO_SUPPORT != 0
3728 mss = 0;
3729 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3730 (mss = skb_shinfo(skb)->tso_size) != 0) {
3731 int tcp_opt_len, ip_tcp_len;
3732
3733 if (skb_header_cloned(skb) &&
3734 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3735 dev_kfree_skb(skb);
3736 goto out_unlock;
3737 }
3738
3739 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3740 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3741
3742 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3743 TXD_FLAG_CPU_POST_DMA);
3744
3745 skb->nh.iph->check = 0;
3746 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3747
3748 skb->h.th->check = 0;
3749
3750 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3751 }
3752 else if (skb->ip_summed == CHECKSUM_HW)
3753 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3754#else
3755 mss = 0;
3756 if (skb->ip_summed == CHECKSUM_HW)
3757 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3758#endif
3759#if TG3_VLAN_TAG_USED
3760 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3761 base_flags |= (TXD_FLAG_VLAN |
3762 (vlan_tx_tag_get(skb) << 16));
3763#endif
3764
3765 /* Queue skb data, a.k.a. the main skb fragment. */
3766 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3767
3768 tp->tx_buffers[entry].skb = skb;
3769 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3770
3771 tg3_set_txd(tp, entry, mapping, len, base_flags,
3772 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3773
3774 entry = NEXT_TX(entry);
3775
3776 /* Now loop through additional data fragments, and queue them. */
3777 if (skb_shinfo(skb)->nr_frags > 0) {
3778 unsigned int i, last;
3779
3780 last = skb_shinfo(skb)->nr_frags - 1;
3781 for (i = 0; i <= last; i++) {
3782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3783
3784 len = frag->size;
3785 mapping = pci_map_page(tp->pdev,
3786 frag->page,
3787 frag->page_offset,
3788 len, PCI_DMA_TODEVICE);
3789
3790 tp->tx_buffers[entry].skb = NULL;
3791 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3792
3793 tg3_set_txd(tp, entry, mapping, len,
3794 base_flags, (i == last) | (mss << 1));
3795
3796 entry = NEXT_TX(entry);
3797 }
3798 }
3799
3800 /* Packets are ready, update Tx producer idx local and on card. */
3801 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3802
3803 tp->tx_prod = entry;
3804 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3805 netif_stop_queue(dev);
3806 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3807 netif_wake_queue(tp->dev);
3808 }
3809
3810out_unlock:
3811 mmiowb();
3812 spin_unlock(&tp->tx_lock);
3813
3814 dev->trans_start = jiffies;
3815
3816 return NETDEV_TX_OK;
3817}
3818
3819/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3820 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3821 */
3822static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3823{
3824 struct tg3 *tp = netdev_priv(dev);
3825 dma_addr_t mapping;
3826 u32 len, entry, base_flags, mss;
3636 int would_hit_hwbug; 3827 int would_hit_hwbug;
3637 3828
3638 len = skb_headlen(skb); 3829 len = skb_headlen(skb);
@@ -4369,6 +4560,11 @@ static int tg3_chip_reset(struct tg3 *tp)
4369 tp->nvram_lock_cnt = 0; 4560 tp->nvram_lock_cnt = 0;
4370 } 4561 }
4371 4562
4563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4566 tw32(GRC_FASTBOOT_PC, 0);
4567
4372 /* 4568 /*
4373 * We must avoid the readl() that normally takes place. 4569 * We must avoid the readl() that normally takes place.
4374 * It locks machines, causes machine checks, and other 4570 * It locks machines, causes machine checks, and other
@@ -5518,6 +5714,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5518 5714
5519 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5715 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5520 5716
5717 if (!netif_running(dev))
5718 return 0;
5719
5521 spin_lock_bh(&tp->lock); 5720 spin_lock_bh(&tp->lock);
5522 __tg3_set_mac_addr(tp); 5721 __tg3_set_mac_addr(tp);
5523 spin_unlock_bh(&tp->lock); 5722 spin_unlock_bh(&tp->lock);
@@ -5585,6 +5784,9 @@ static int tg3_reset_hw(struct tg3 *tp)
5585 tg3_abort_hw(tp, 1); 5784 tg3_abort_hw(tp, 1);
5586 } 5785 }
5587 5786
5787 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5788 tg3_phy_reset(tp);
5789
5588 err = tg3_chip_reset(tp); 5790 err = tg3_chip_reset(tp);
5589 if (err) 5791 if (err)
5590 return err; 5792 return err;
@@ -5955,6 +6157,9 @@ static int tg3_reset_hw(struct tg3 *tp)
5955 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 6157 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5956 GRC_LCLCTRL_GPIO_OUTPUT3; 6158 GRC_LCLCTRL_GPIO_OUTPUT3;
5957 6159
6160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6161 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6162
5958 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 6163 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5959 6164
5960 /* GPIO1 must be driven high for eeprom write protect */ 6165 /* GPIO1 must be driven high for eeprom write protect */
@@ -5993,6 +6198,11 @@ static int tg3_reset_hw(struct tg3 *tp)
5993 } 6198 }
5994 } 6199 }
5995 6200
6201 /* Enable host coalescing bug fix */
6202 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6203 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6204 val |= (1 << 29);
6205
5996 tw32_f(WDMAC_MODE, val); 6206 tw32_f(WDMAC_MODE, val);
5997 udelay(40); 6207 udelay(40);
5998 6208
@@ -6048,6 +6258,9 @@ static int tg3_reset_hw(struct tg3 *tp)
6048 udelay(100); 6258 udelay(100);
6049 6259
6050 tp->rx_mode = RX_MODE_ENABLE; 6260 tp->rx_mode = RX_MODE_ENABLE;
6261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6262 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6263
6051 tw32_f(MAC_RX_MODE, tp->rx_mode); 6264 tw32_f(MAC_RX_MODE, tp->rx_mode);
6052 udelay(10); 6265 udelay(10);
6053 6266
@@ -6097,6 +6310,17 @@ static int tg3_reset_hw(struct tg3 *tp)
6097 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; 6310 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6098 } 6311 }
6099 6312
6313 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6315 u32 tmp;
6316
6317 tmp = tr32(SERDES_RX_CTRL);
6318 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6319 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6320 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6321 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6322 }
6323
6100 err = tg3_setup_phy(tp, 1); 6324 err = tg3_setup_phy(tp, 1);
6101 if (err) 6325 if (err)
6102 return err; 6326 return err;
@@ -6175,7 +6399,7 @@ static int tg3_init_hw(struct tg3 *tp)
6175 int err; 6399 int err;
6176 6400
6177 /* Force the chip into D0. */ 6401 /* Force the chip into D0. */
6178 err = tg3_set_power_state(tp, 0); 6402 err = tg3_set_power_state(tp, PCI_D0);
6179 if (err) 6403 if (err)
6180 goto out; 6404 goto out;
6181 6405
@@ -6331,6 +6555,26 @@ static void tg3_timer(unsigned long __opaque)
6331 add_timer(&tp->timer); 6555 add_timer(&tp->timer);
6332} 6556}
6333 6557
6558static int tg3_request_irq(struct tg3 *tp)
6559{
6560 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6561 unsigned long flags;
6562 struct net_device *dev = tp->dev;
6563
6564 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6565 fn = tg3_msi;
6566 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6567 fn = tg3_msi_1shot;
6568 flags = SA_SAMPLE_RANDOM;
6569 } else {
6570 fn = tg3_interrupt;
6571 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6572 fn = tg3_interrupt_tagged;
6573 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6574 }
6575 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6576}
6577
6334static int tg3_test_interrupt(struct tg3 *tp) 6578static int tg3_test_interrupt(struct tg3 *tp)
6335{ 6579{
6336 struct net_device *dev = tp->dev; 6580 struct net_device *dev = tp->dev;
@@ -6367,16 +6611,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
6367 6611
6368 free_irq(tp->pdev->irq, dev); 6612 free_irq(tp->pdev->irq, dev);
6369 6613
6370 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 6614 err = tg3_request_irq(tp);
6371 err = request_irq(tp->pdev->irq, tg3_msi,
6372 SA_SAMPLE_RANDOM, dev->name, dev);
6373 else {
6374 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6375 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6376 fn = tg3_interrupt_tagged;
6377 err = request_irq(tp->pdev->irq, fn,
6378 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6379 }
6380 6615
6381 if (err) 6616 if (err)
6382 return err; 6617 return err;
@@ -6428,14 +6663,7 @@ static int tg3_test_msi(struct tg3 *tp)
6428 6663
6429 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6664 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6430 6665
6431 { 6666 err = tg3_request_irq(tp);
6432 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6433 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6434 fn = tg3_interrupt_tagged;
6435
6436 err = request_irq(tp->pdev->irq, fn,
6437 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6438 }
6439 if (err) 6667 if (err)
6440 return err; 6668 return err;
6441 6669
@@ -6462,6 +6690,10 @@ static int tg3_open(struct net_device *dev)
6462 6690
6463 tg3_full_lock(tp, 0); 6691 tg3_full_lock(tp, 0);
6464 6692
6693 err = tg3_set_power_state(tp, PCI_D0);
6694 if (err)
6695 return err;
6696
6465 tg3_disable_ints(tp); 6697 tg3_disable_ints(tp);
6466 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 6698 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6467 6699
@@ -6476,7 +6708,9 @@ static int tg3_open(struct net_device *dev)
6476 6708
6477 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 6709 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6478 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && 6710 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6479 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { 6711 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6712 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6713 (tp->pdev_peer == tp->pdev))) {
6480 /* All MSI supporting chips should support tagged 6714 /* All MSI supporting chips should support tagged
6481 * status. Assert that this is the case. 6715 * status. Assert that this is the case.
6482 */ 6716 */
@@ -6491,17 +6725,7 @@ static int tg3_open(struct net_device *dev)
6491 tp->tg3_flags2 |= TG3_FLG2_USING_MSI; 6725 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6492 } 6726 }
6493 } 6727 }
6494 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 6728 err = tg3_request_irq(tp);
6495 err = request_irq(tp->pdev->irq, tg3_msi,
6496 SA_SAMPLE_RANDOM, dev->name, dev);
6497 else {
6498 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6499 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6500 fn = tg3_interrupt_tagged;
6501
6502 err = request_irq(tp->pdev->irq, fn,
6503 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6504 }
6505 6729
6506 if (err) { 6730 if (err) {
6507 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6731 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -6566,6 +6790,14 @@ static int tg3_open(struct net_device *dev)
6566 6790
6567 return err; 6791 return err;
6568 } 6792 }
6793
6794 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6795 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6796 u32 val = tr32(0x7c04);
6797
6798 tw32(0x7c04, val | (1 << 29));
6799 }
6800 }
6569 } 6801 }
6570 6802
6571 tg3_full_lock(tp, 0); 6803 tg3_full_lock(tp, 0);
@@ -6839,7 +7071,6 @@ static int tg3_close(struct net_device *dev)
6839 tp->tg3_flags &= 7071 tp->tg3_flags &=
6840 ~(TG3_FLAG_INIT_COMPLETE | 7072 ~(TG3_FLAG_INIT_COMPLETE |
6841 TG3_FLAG_GOT_SERDES_FLOWCTL); 7073 TG3_FLAG_GOT_SERDES_FLOWCTL);
6842 netif_carrier_off(tp->dev);
6843 7074
6844 tg3_full_unlock(tp); 7075 tg3_full_unlock(tp);
6845 7076
@@ -6856,6 +7087,10 @@ static int tg3_close(struct net_device *dev)
6856 7087
6857 tg3_free_consistent(tp); 7088 tg3_free_consistent(tp);
6858 7089
7090 tg3_set_power_state(tp, PCI_D3hot);
7091
7092 netif_carrier_off(tp->dev);
7093
6859 return 0; 7094 return 0;
6860} 7095}
6861 7096
@@ -7150,6 +7385,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
7150{ 7385{
7151 struct tg3 *tp = netdev_priv(dev); 7386 struct tg3 *tp = netdev_priv(dev);
7152 7387
7388 if (!netif_running(dev))
7389 return;
7390
7153 tg3_full_lock(tp, 0); 7391 tg3_full_lock(tp, 0);
7154 __tg3_set_rx_mode(dev); 7392 __tg3_set_rx_mode(dev);
7155 tg3_full_unlock(tp); 7393 tg3_full_unlock(tp);
@@ -7174,6 +7412,9 @@ static void tg3_get_regs(struct net_device *dev,
7174 7412
7175 memset(p, 0, TG3_REGDUMP_LEN); 7413 memset(p, 0, TG3_REGDUMP_LEN);
7176 7414
7415 if (tp->link_config.phy_is_low_power)
7416 return;
7417
7177 tg3_full_lock(tp, 0); 7418 tg3_full_lock(tp, 0);
7178 7419
7179#define __GET_REG32(reg) (*(p)++ = tr32(reg)) 7420#define __GET_REG32(reg) (*(p)++ = tr32(reg))
@@ -7240,6 +7481,7 @@ static int tg3_get_eeprom_len(struct net_device *dev)
7240} 7481}
7241 7482
7242static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); 7483static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7484static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7243 7485
7244static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 7486static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7245{ 7487{
@@ -7248,6 +7490,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7248 u8 *pd; 7490 u8 *pd;
7249 u32 i, offset, len, val, b_offset, b_count; 7491 u32 i, offset, len, val, b_offset, b_count;
7250 7492
7493 if (tp->link_config.phy_is_low_power)
7494 return -EAGAIN;
7495
7251 offset = eeprom->offset; 7496 offset = eeprom->offset;
7252 len = eeprom->len; 7497 len = eeprom->len;
7253 eeprom->len = 0; 7498 eeprom->len = 0;
@@ -7309,6 +7554,9 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7309 u32 offset, len, b_offset, odd_len, start, end; 7554 u32 offset, len, b_offset, odd_len, start, end;
7310 u8 *buf; 7555 u8 *buf;
7311 7556
7557 if (tp->link_config.phy_is_low_power)
7558 return -EAGAIN;
7559
7312 if (eeprom->magic != TG3_EEPROM_MAGIC) 7560 if (eeprom->magic != TG3_EEPROM_MAGIC)
7313 return -EINVAL; 7561 return -EINVAL;
7314 7562
@@ -7442,6 +7690,7 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
7442 7690
7443 strcpy(info->driver, DRV_MODULE_NAME); 7691 strcpy(info->driver, DRV_MODULE_NAME);
7444 strcpy(info->version, DRV_MODULE_VERSION); 7692 strcpy(info->version, DRV_MODULE_VERSION);
7693 strcpy(info->fw_version, tp->fw_ver);
7445 strcpy(info->bus_info, pci_name(tp->pdev)); 7694 strcpy(info->bus_info, pci_name(tp->pdev));
7446} 7695}
7447 7696
@@ -7536,11 +7785,20 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
7536 7785
7537 ering->rx_max_pending = TG3_RX_RING_SIZE - 1; 7786 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7538 ering->rx_mini_max_pending = 0; 7787 ering->rx_mini_max_pending = 0;
7539 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; 7788 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7789 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7790 else
7791 ering->rx_jumbo_max_pending = 0;
7792
7793 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7540 7794
7541 ering->rx_pending = tp->rx_pending; 7795 ering->rx_pending = tp->rx_pending;
7542 ering->rx_mini_pending = 0; 7796 ering->rx_mini_pending = 0;
7543 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 7797 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7798 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7799 else
7800 ering->rx_jumbo_pending = 0;
7801
7544 ering->tx_pending = tp->tx_pending; 7802 ering->tx_pending = tp->tx_pending;
7545} 7803}
7546 7804
@@ -7661,10 +7919,11 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7661 return 0; 7919 return 0;
7662 } 7920 }
7663 7921
7664 if (data) 7922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7665 dev->features |= NETIF_F_IP_CSUM; 7923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7924 ethtool_op_set_tx_hw_csum(dev, data);
7666 else 7925 else
7667 dev->features &= ~NETIF_F_IP_CSUM; 7926 ethtool_op_set_tx_csum(dev, data);
7668 7927
7669 return 0; 7928 return 0;
7670} 7929}
@@ -7734,29 +7993,52 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
7734} 7993}
7735 7994
7736#define NVRAM_TEST_SIZE 0x100 7995#define NVRAM_TEST_SIZE 0x100
7996#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7737 7997
7738static int tg3_test_nvram(struct tg3 *tp) 7998static int tg3_test_nvram(struct tg3 *tp)
7739{ 7999{
7740 u32 *buf, csum; 8000 u32 *buf, csum, magic;
7741 int i, j, err = 0; 8001 int i, j, err = 0, size;
8002
8003 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8004 return -EIO;
8005
8006 if (magic == TG3_EEPROM_MAGIC)
8007 size = NVRAM_TEST_SIZE;
8008 else if ((magic & 0xff000000) == 0xa5000000) {
8009 if ((magic & 0xe00000) == 0x200000)
8010 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8011 else
8012 return 0;
8013 } else
8014 return -EIO;
7742 8015
7743 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL); 8016 buf = kmalloc(size, GFP_KERNEL);
7744 if (buf == NULL) 8017 if (buf == NULL)
7745 return -ENOMEM; 8018 return -ENOMEM;
7746 8019
7747 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) { 8020 err = -EIO;
8021 for (i = 0, j = 0; i < size; i += 4, j++) {
7748 u32 val; 8022 u32 val;
7749 8023
7750 if ((err = tg3_nvram_read(tp, i, &val)) != 0) 8024 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7751 break; 8025 break;
7752 buf[j] = cpu_to_le32(val); 8026 buf[j] = cpu_to_le32(val);
7753 } 8027 }
7754 if (i < NVRAM_TEST_SIZE) 8028 if (i < size)
7755 goto out; 8029 goto out;
7756 8030
7757 err = -EIO; 8031 /* Selfboot format */
7758 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) 8032 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
7759 goto out; 8033 u8 *buf8 = (u8 *) buf, csum8 = 0;
8034
8035 for (i = 0; i < size; i++)
8036 csum8 += buf8[i];
8037
8038 if (csum8 == 0)
8039 return 0;
8040 return -EIO;
8041 }
7760 8042
7761 /* Bootstrap checksum at offset 0x10 */ 8043 /* Bootstrap checksum at offset 0x10 */
7762 csum = calc_crc((unsigned char *) buf, 0x10); 8044 csum = calc_crc((unsigned char *) buf, 0x10);
@@ -7802,7 +8084,7 @@ static int tg3_test_link(struct tg3 *tp)
7802} 8084}
7803 8085
7804/* Only test the commonly used registers */ 8086/* Only test the commonly used registers */
7805static const int tg3_test_registers(struct tg3 *tp) 8087static int tg3_test_registers(struct tg3 *tp)
7806{ 8088{
7807 int i, is_5705; 8089 int i, is_5705;
7808 u32 offset, read_mask, write_mask, val, save_val, read_val; 8090 u32 offset, read_mask, write_mask, val, save_val, read_val;
@@ -8050,14 +8332,25 @@ static int tg3_test_memory(struct tg3 *tp)
8050 { 0x00008000, 0x02000}, 8332 { 0x00008000, 0x02000},
8051 { 0x00010000, 0x0e000}, 8333 { 0x00010000, 0x0e000},
8052 { 0xffffffff, 0x00000} 8334 { 0xffffffff, 0x00000}
8335 }, mem_tbl_5755[] = {
8336 { 0x00000200, 0x00008},
8337 { 0x00004000, 0x00800},
8338 { 0x00006000, 0x00800},
8339 { 0x00008000, 0x02000},
8340 { 0x00010000, 0x0c000},
8341 { 0xffffffff, 0x00000}
8053 }; 8342 };
8054 struct mem_entry *mem_tbl; 8343 struct mem_entry *mem_tbl;
8055 int err = 0; 8344 int err = 0;
8056 int i; 8345 int i;
8057 8346
8058 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 8347 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8059 mem_tbl = mem_tbl_5705; 8348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8060 else 8349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8350 mem_tbl = mem_tbl_5755;
8351 else
8352 mem_tbl = mem_tbl_5705;
8353 } else
8061 mem_tbl = mem_tbl_570x; 8354 mem_tbl = mem_tbl_570x;
8062 8355
8063 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 8356 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
@@ -8229,6 +8522,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8229{ 8522{
8230 struct tg3 *tp = netdev_priv(dev); 8523 struct tg3 *tp = netdev_priv(dev);
8231 8524
8525 if (tp->link_config.phy_is_low_power)
8526 tg3_set_power_state(tp, PCI_D0);
8527
8232 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 8528 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8233 8529
8234 if (tg3_test_nvram(tp) != 0) { 8530 if (tg3_test_nvram(tp) != 0) {
@@ -8257,6 +8553,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8257 if (!err) 8553 if (!err)
8258 tg3_nvram_unlock(tp); 8554 tg3_nvram_unlock(tp);
8259 8555
8556 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8557 tg3_phy_reset(tp);
8558
8260 if (tg3_test_registers(tp) != 0) { 8559 if (tg3_test_registers(tp) != 0) {
8261 etest->flags |= ETH_TEST_FL_FAILED; 8560 etest->flags |= ETH_TEST_FL_FAILED;
8262 data[2] = 1; 8561 data[2] = 1;
@@ -8286,6 +8585,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8286 8585
8287 tg3_full_unlock(tp); 8586 tg3_full_unlock(tp);
8288 } 8587 }
8588 if (tp->link_config.phy_is_low_power)
8589 tg3_set_power_state(tp, PCI_D3hot);
8590
8289} 8591}
8290 8592
8291static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 8593static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -8305,6 +8607,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8305 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 8607 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8306 break; /* We have no PHY */ 8608 break; /* We have no PHY */
8307 8609
8610 if (tp->link_config.phy_is_low_power)
8611 return -EAGAIN;
8612
8308 spin_lock_bh(&tp->lock); 8613 spin_lock_bh(&tp->lock);
8309 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); 8614 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8310 spin_unlock_bh(&tp->lock); 8615 spin_unlock_bh(&tp->lock);
@@ -8321,6 +8626,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8321 if (!capable(CAP_NET_ADMIN)) 8626 if (!capable(CAP_NET_ADMIN))
8322 return -EPERM; 8627 return -EPERM;
8323 8628
8629 if (tp->link_config.phy_is_low_power)
8630 return -EAGAIN;
8631
8324 spin_lock_bh(&tp->lock); 8632 spin_lock_bh(&tp->lock);
8325 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); 8633 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8326 spin_unlock_bh(&tp->lock); 8634 spin_unlock_bh(&tp->lock);
@@ -8464,14 +8772,14 @@ static struct ethtool_ops tg3_ethtool_ops = {
8464 8772
8465static void __devinit tg3_get_eeprom_size(struct tg3 *tp) 8773static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8466{ 8774{
8467 u32 cursize, val; 8775 u32 cursize, val, magic;
8468 8776
8469 tp->nvram_size = EEPROM_CHIP_SIZE; 8777 tp->nvram_size = EEPROM_CHIP_SIZE;
8470 8778
8471 if (tg3_nvram_read(tp, 0, &val) != 0) 8779 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8472 return; 8780 return;
8473 8781
8474 if (swab32(val) != TG3_EEPROM_MAGIC) 8782 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8475 return; 8783 return;
8476 8784
8477 /* 8785 /*
@@ -8479,13 +8787,13 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8479 * When we encounter our validation signature, we know the addressing 8787 * When we encounter our validation signature, we know the addressing
8480 * has wrapped around, and thus have our chip size. 8788 * has wrapped around, and thus have our chip size.
8481 */ 8789 */
8482 cursize = 0x800; 8790 cursize = 0x10;
8483 8791
8484 while (cursize < tp->nvram_size) { 8792 while (cursize < tp->nvram_size) {
8485 if (tg3_nvram_read(tp, cursize, &val) != 0) 8793 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8486 return; 8794 return;
8487 8795
8488 if (swab32(val) == TG3_EEPROM_MAGIC) 8796 if (val == magic)
8489 break; 8797 break;
8490 8798
8491 cursize <<= 1; 8799 cursize <<= 1;
@@ -8498,6 +8806,15 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8498{ 8806{
8499 u32 val; 8807 u32 val;
8500 8808
8809 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8810 return;
8811
8812 /* Selfboot format */
8813 if (val != TG3_EEPROM_MAGIC) {
8814 tg3_get_eeprom_size(tp);
8815 return;
8816 }
8817
8501 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 8818 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8502 if (val != 0) { 8819 if (val != 0) {
8503 tp->nvram_size = (val >> 16) * 1024; 8820 tp->nvram_size = (val >> 16) * 1024;
@@ -8621,6 +8938,85 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8621 } 8938 }
8622} 8939}
8623 8940
8941static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8942{
8943 u32 nvcfg1;
8944
8945 nvcfg1 = tr32(NVRAM_CFG1);
8946
8947 /* NVRAM protection for TPM */
8948 if (nvcfg1 & (1 << 27))
8949 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8950
8951 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8952 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8953 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8954 tp->nvram_jedecnum = JEDEC_ATMEL;
8955 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8956 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8957
8958 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8959 tw32(NVRAM_CFG1, nvcfg1);
8960 break;
8961 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8962 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8963 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8964 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8965 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8966 tp->nvram_jedecnum = JEDEC_ATMEL;
8967 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8968 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8969 tp->nvram_pagesize = 264;
8970 break;
8971 case FLASH_5752VENDOR_ST_M45PE10:
8972 case FLASH_5752VENDOR_ST_M45PE20:
8973 case FLASH_5752VENDOR_ST_M45PE40:
8974 tp->nvram_jedecnum = JEDEC_ST;
8975 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8976 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8977 tp->nvram_pagesize = 256;
8978 break;
8979 }
8980}
8981
8982static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8983{
8984 u32 nvcfg1;
8985
8986 nvcfg1 = tr32(NVRAM_CFG1);
8987
8988 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8989 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8990 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8991 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8992 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8993 tp->nvram_jedecnum = JEDEC_ATMEL;
8994 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8995 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8996
8997 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8998 tw32(NVRAM_CFG1, nvcfg1);
8999 break;
9000 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9001 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9002 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9003 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9004 tp->nvram_jedecnum = JEDEC_ATMEL;
9005 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9006 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9007 tp->nvram_pagesize = 264;
9008 break;
9009 case FLASH_5752VENDOR_ST_M45PE10:
9010 case FLASH_5752VENDOR_ST_M45PE20:
9011 case FLASH_5752VENDOR_ST_M45PE40:
9012 tp->nvram_jedecnum = JEDEC_ST;
9013 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9014 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9015 tp->nvram_pagesize = 256;
9016 break;
9017 }
9018}
9019
8624/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 9020/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8625static void __devinit tg3_nvram_init(struct tg3 *tp) 9021static void __devinit tg3_nvram_init(struct tg3 *tp)
8626{ 9022{
@@ -8656,6 +9052,10 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
8656 9052
8657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 9053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8658 tg3_get_5752_nvram_info(tp); 9054 tg3_get_5752_nvram_info(tp);
9055 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9056 tg3_get_5755_nvram_info(tp);
9057 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9058 tg3_get_5787_nvram_info(tp);
8659 else 9059 else
8660 tg3_get_nvram_info(tp); 9060 tg3_get_nvram_info(tp);
8661 9061
@@ -8725,6 +9125,34 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8725 return 0; 9125 return 0;
8726} 9126}
8727 9127
9128static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9129{
9130 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9131 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9132 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9133 (tp->nvram_jedecnum == JEDEC_ATMEL))
9134
9135 addr = ((addr / tp->nvram_pagesize) <<
9136 ATMEL_AT45DB0X1B_PAGE_POS) +
9137 (addr % tp->nvram_pagesize);
9138
9139 return addr;
9140}
9141
9142static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9143{
9144 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9145 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9146 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9147 (tp->nvram_jedecnum == JEDEC_ATMEL))
9148
9149 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9150 tp->nvram_pagesize) +
9151 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9152
9153 return addr;
9154}
9155
8728static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 9156static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8729{ 9157{
8730 int ret; 9158 int ret;
@@ -8737,14 +9165,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8737 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) 9165 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8738 return tg3_nvram_read_using_eeprom(tp, offset, val); 9166 return tg3_nvram_read_using_eeprom(tp, offset, val);
8739 9167
8740 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 9168 offset = tg3_nvram_phys_addr(tp, offset);
8741 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8742 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8743
8744 offset = ((offset / tp->nvram_pagesize) <<
8745 ATMEL_AT45DB0X1B_PAGE_POS) +
8746 (offset % tp->nvram_pagesize);
8747 }
8748 9169
8749 if (offset > NVRAM_ADDR_MSK) 9170 if (offset > NVRAM_ADDR_MSK)
8750 return -EINVAL; 9171 return -EINVAL;
@@ -8769,6 +9190,16 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8769 return ret; 9190 return ret;
8770} 9191}
8771 9192
9193static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9194{
9195 int err;
9196 u32 tmp;
9197
9198 err = tg3_nvram_read(tp, offset, &tmp);
9199 *val = swab32(tmp);
9200 return err;
9201}
9202
8772static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 9203static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8773 u32 offset, u32 len, u8 *buf) 9204 u32 offset, u32 len, u8 *buf)
8774{ 9205{
@@ -8921,15 +9352,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8921 9352
8922 page_off = offset % tp->nvram_pagesize; 9353 page_off = offset % tp->nvram_pagesize;
8923 9354
8924 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) && 9355 phy_addr = tg3_nvram_phys_addr(tp, offset);
8925 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8926
8927 phy_addr = ((offset / tp->nvram_pagesize) <<
8928 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8929 }
8930 else {
8931 phy_addr = offset;
8932 }
8933 9356
8934 tw32(NVRAM_ADDR, phy_addr); 9357 tw32(NVRAM_ADDR, phy_addr);
8935 9358
@@ -8944,6 +9367,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8944 nvram_cmd |= NVRAM_CMD_LAST; 9367 nvram_cmd |= NVRAM_CMD_LAST;
8945 9368
8946 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && 9369 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9370 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9371 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
8947 (tp->nvram_jedecnum == JEDEC_ST) && 9372 (tp->nvram_jedecnum == JEDEC_ST) &&
8948 (nvram_cmd & NVRAM_CMD_FIRST)) { 9373 (nvram_cmd & NVRAM_CMD_FIRST)) {
8949 9374
@@ -9081,12 +9506,18 @@ static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9081 return NULL; 9506 return NULL;
9082} 9507}
9083 9508
9084/* Since this function may be called in D3-hot power state during
9085 * tg3_init_one(), only config cycles are allowed.
9086 */
9087static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) 9509static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9088{ 9510{
9089 u32 val; 9511 u32 val;
9512 u16 pmcsr;
9513
9514 /* On some early chips the SRAM cannot be accessed in D3hot state,
9515 * so need make sure we're in D0.
9516 */
9517 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9518 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9519 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9520 msleep(1);
9090 9521
9091 /* Make sure register accesses (indirect or otherwise) 9522 /* Make sure register accesses (indirect or otherwise)
9092 * will function correctly. 9523 * will function correctly.
@@ -9347,6 +9778,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9347{ 9778{
9348 unsigned char vpd_data[256]; 9779 unsigned char vpd_data[256];
9349 int i; 9780 int i;
9781 u32 magic;
9350 9782
9351 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { 9783 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9352 /* Sun decided not to put the necessary bits in the 9784 /* Sun decided not to put the necessary bits in the
@@ -9356,16 +9788,43 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9356 return; 9788 return;
9357 } 9789 }
9358 9790
9359 for (i = 0; i < 256; i += 4) { 9791 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9360 u32 tmp; 9792 return;
9361 9793
9362 if (tg3_nvram_read(tp, 0x100 + i, &tmp)) 9794 if (magic == TG3_EEPROM_MAGIC) {
9363 goto out_not_found; 9795 for (i = 0; i < 256; i += 4) {
9796 u32 tmp;
9797
9798 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9799 goto out_not_found;
9364 9800
9365 vpd_data[i + 0] = ((tmp >> 0) & 0xff); 9801 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9366 vpd_data[i + 1] = ((tmp >> 8) & 0xff); 9802 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9367 vpd_data[i + 2] = ((tmp >> 16) & 0xff); 9803 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9368 vpd_data[i + 3] = ((tmp >> 24) & 0xff); 9804 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9805 }
9806 } else {
9807 int vpd_cap;
9808
9809 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9810 for (i = 0; i < 256; i += 4) {
9811 u32 tmp, j = 0;
9812 u16 tmp16;
9813
9814 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9815 i);
9816 while (j++ < 100) {
9817 pci_read_config_word(tp->pdev, vpd_cap +
9818 PCI_VPD_ADDR, &tmp16);
9819 if (tmp16 & 0x8000)
9820 break;
9821 msleep(1);
9822 }
9823 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9824 &tmp);
9825 tmp = cpu_to_le32(tmp);
9826 memcpy(&vpd_data[i], &tmp, 4);
9827 }
9369 } 9828 }
9370 9829
9371 /* Now parse and find the part number. */ 9830 /* Now parse and find the part number. */
@@ -9412,6 +9871,46 @@ out_not_found:
9412 strcpy(tp->board_part_number, "none"); 9871 strcpy(tp->board_part_number, "none");
9413} 9872}
9414 9873
9874static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9875{
9876 u32 val, offset, start;
9877
9878 if (tg3_nvram_read_swab(tp, 0, &val))
9879 return;
9880
9881 if (val != TG3_EEPROM_MAGIC)
9882 return;
9883
9884 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9885 tg3_nvram_read_swab(tp, 0x4, &start))
9886 return;
9887
9888 offset = tg3_nvram_logical_addr(tp, offset);
9889 if (tg3_nvram_read_swab(tp, offset, &val))
9890 return;
9891
9892 if ((val & 0xfc000000) == 0x0c000000) {
9893 u32 ver_offset, addr;
9894 int i;
9895
9896 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9897 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9898 return;
9899
9900 if (val != 0)
9901 return;
9902
9903 addr = offset + ver_offset - start;
9904 for (i = 0; i < 16; i += 4) {
9905 if (tg3_nvram_read(tp, addr + i, &val))
9906 return;
9907
9908 val = cpu_to_le32(val);
9909 memcpy(tp->fw_ver + i, &val, 4);
9910 }
9911 }
9912}
9913
9415#ifdef CONFIG_SPARC64 9914#ifdef CONFIG_SPARC64
9416static int __devinit tg3_is_sun_570X(struct tg3 *tp) 9915static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9417{ 9916{
@@ -9603,6 +10102,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9603 10102
9604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 10103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9605 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 10104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9606 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 10107 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9607 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 10108 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9608 10109
@@ -9610,12 +10111,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9610 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 10111 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9611 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 10112 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9612 10113
9613 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 10114 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
9614 tp->tg3_flags2 |= TG3_FLG2_HW_TSO; 10115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10117 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10118 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10119 } else
10120 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10121 }
9615 10122
9616 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 10123 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9617 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && 10124 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9618 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) 10125 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10126 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10127 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
9619 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 10128 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9620 10129
9621 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) 10130 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
@@ -9771,8 +10280,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9771 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 10280 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9772 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 10281 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9773 10282
10283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10284 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10285
9774 /* Force the chip into D0. */ 10286 /* Force the chip into D0. */
9775 err = tg3_set_power_state(tp, 0); 10287 err = tg3_set_power_state(tp, PCI_D0);
9776 if (err) { 10288 if (err) {
9777 printk(KERN_ERR PFX "(%s) transition to D0 failed\n", 10289 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9778 pci_name(tp->pdev)); 10290 pci_name(tp->pdev));
@@ -9825,7 +10337,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9825 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 10337 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9826 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 10338 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9827 10339
9828 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 10340 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10341 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10342 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
9829 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10343 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9830 10344
9831 tp->coalesce_mode = 0; 10345 tp->coalesce_mode = 0;
@@ -9925,6 +10439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9925 } 10439 }
9926 10440
9927 tg3_read_partno(tp); 10441 tg3_read_partno(tp);
10442 tg3_read_fw_ver(tp);
9928 10443
9929 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 10444 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9930 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 10445 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
@@ -9960,10 +10475,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9960 else 10475 else
9961 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 10476 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9962 10477
9963 /* It seems all chips can get confused if TX buffers 10478 /* All chips before 5787 can get confused if TX buffers
9964 * straddle the 4GB address boundary in some cases. 10479 * straddle the 4GB address boundary in some cases.
9965 */ 10480 */
9966 tp->dev->hard_start_xmit = tg3_start_xmit; 10481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10483 tp->dev->hard_start_xmit = tg3_start_xmit;
10484 else
10485 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
9967 10486
9968 tp->rx_offset = 2; 10487 tp->rx_offset = 2;
9969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 10488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
@@ -10491,7 +11010,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
10491 tp->link_config.speed = SPEED_INVALID; 11010 tp->link_config.speed = SPEED_INVALID;
10492 tp->link_config.duplex = DUPLEX_INVALID; 11011 tp->link_config.duplex = DUPLEX_INVALID;
10493 tp->link_config.autoneg = AUTONEG_ENABLE; 11012 tp->link_config.autoneg = AUTONEG_ENABLE;
10494 netif_carrier_off(tp->dev);
10495 tp->link_config.active_speed = SPEED_INVALID; 11013 tp->link_config.active_speed = SPEED_INVALID;
10496 tp->link_config.active_duplex = DUPLEX_INVALID; 11014 tp->link_config.active_duplex = DUPLEX_INVALID;
10497 tp->link_config.phy_is_low_power = 0; 11015 tp->link_config.phy_is_low_power = 0;
@@ -10550,6 +11068,8 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10550 case PHY_ID_BCM5752: return "5752"; 11068 case PHY_ID_BCM5752: return "5752";
10551 case PHY_ID_BCM5714: return "5714"; 11069 case PHY_ID_BCM5714: return "5714";
10552 case PHY_ID_BCM5780: return "5780"; 11070 case PHY_ID_BCM5780: return "5780";
11071 case PHY_ID_BCM5755: return "5755";
11072 case PHY_ID_BCM5787: return "5787";
10553 case PHY_ID_BCM8002: return "8002/serdes"; 11073 case PHY_ID_BCM8002: return "8002/serdes";
10554 case 0: return "serdes"; 11074 case 0: return "serdes";
10555 default: return "unknown"; 11075 default: return "unknown";
@@ -10848,11 +11368,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10848 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 11368 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10849 } 11369 }
10850 11370
10851 /* TSO is off by default, user can enable using ethtool. */ 11371 /* TSO is on by default on chips that support hardware TSO.
10852#if 0 11372 * Firmware TSO on older chips gives lower performance, so it
10853 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) 11373 * is off by default, but can be enabled using ethtool.
11374 */
11375 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
10854 dev->features |= NETIF_F_TSO; 11376 dev->features |= NETIF_F_TSO;
10855#endif
10856 11377
10857#endif 11378#endif
10858 11379
@@ -10896,7 +11417,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10896 * checksumming. 11417 * checksumming.
10897 */ 11418 */
10898 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { 11419 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10899 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 11420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11422 dev->features |= NETIF_F_HW_CSUM;
11423 else
11424 dev->features |= NETIF_F_IP_CSUM;
11425 dev->features |= NETIF_F_SG;
10900 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 11426 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10901 } else 11427 } else
10902 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; 11428 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
@@ -10949,6 +11475,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10949 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : 11475 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
10950 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64)); 11476 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
10951 11477
11478 netif_carrier_off(tp->dev);
11479
10952 return 0; 11480 return 0;
10953 11481
10954err_out_iounmap: 11482err_out_iounmap:
@@ -11044,7 +11572,7 @@ static int tg3_resume(struct pci_dev *pdev)
11044 11572
11045 pci_restore_state(tp->pdev); 11573 pci_restore_state(tp->pdev);
11046 11574
11047 err = tg3_set_power_state(tp, 0); 11575 err = tg3_set_power_state(tp, PCI_D0);
11048 if (err) 11576 if (err)
11049 return err; 11577 return err;
11050 11578
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 7e3b613afb29..c43cc3264202 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -138,6 +138,8 @@
138#define ASIC_REV_5752 0x06 138#define ASIC_REV_5752 0x06
139#define ASIC_REV_5780 0x08 139#define ASIC_REV_5780 0x08
140#define ASIC_REV_5714 0x09 140#define ASIC_REV_5714 0x09
141#define ASIC_REV_5755 0x0a
142#define ASIC_REV_5787 0x0b
141#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 143#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
142#define CHIPREV_5700_AX 0x70 144#define CHIPREV_5700_AX 0x70
143#define CHIPREV_5700_BX 0x71 145#define CHIPREV_5700_BX 0x71
@@ -455,6 +457,7 @@
455#define RX_MODE_PROMISC 0x00000100 457#define RX_MODE_PROMISC 0x00000100
456#define RX_MODE_NO_CRC_CHECK 0x00000200 458#define RX_MODE_NO_CRC_CHECK 0x00000200
457#define RX_MODE_KEEP_VLAN_TAG 0x00000400 459#define RX_MODE_KEEP_VLAN_TAG 0x00000400
460#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
458#define MAC_RX_STATUS 0x0000046c 461#define MAC_RX_STATUS 0x0000046c
459#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 462#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
460#define RX_STATUS_XOFF_RCVD 0x00000002 463#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1339,6 +1342,7 @@
1339#define GRC_LCLCTRL_CLEARINT 0x00000002 1342#define GRC_LCLCTRL_CLEARINT 0x00000002
1340#define GRC_LCLCTRL_SETINT 0x00000004 1343#define GRC_LCLCTRL_SETINT 0x00000004
1341#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 1344#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
1345#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */
1342#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ 1346#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */
1343#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ 1347#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */
1344#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 1348#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020
@@ -1393,6 +1397,7 @@
1393#define GRC_MDI_CTRL 0x00006844 1397#define GRC_MDI_CTRL 0x00006844
1394#define GRC_SEEPROM_DELAY 0x00006848 1398#define GRC_SEEPROM_DELAY 0x00006848
1395/* 0x684c --> 0x6c00 unused */ 1399/* 0x684c --> 0x6c00 unused */
1400#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
1396 1401
1397/* 0x6c00 --> 0x7000 unused */ 1402/* 0x6c00 --> 0x7000 unused */
1398 1403
@@ -1436,6 +1441,16 @@
1436#define FLASH_5752VENDOR_ST_M45PE10 0x02400000 1441#define FLASH_5752VENDOR_ST_M45PE10 0x02400000
1437#define FLASH_5752VENDOR_ST_M45PE20 0x02400002 1442#define FLASH_5752VENDOR_ST_M45PE20 0x02400002
1438#define FLASH_5752VENDOR_ST_M45PE40 0x02400001 1443#define FLASH_5752VENDOR_ST_M45PE40 0x02400001
1444#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001
1445#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
1446#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
1447#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
1448#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
1449#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
1450#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
1451#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002
1452#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000
1453#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000
1439#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 1454#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
1440#define FLASH_5752PAGE_SIZE_256 0x00000000 1455#define FLASH_5752PAGE_SIZE_256 0x00000000
1441#define FLASH_5752PAGE_SIZE_512 0x10000000 1456#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -2185,7 +2200,7 @@ struct tg3 {
2185#define TG3_FLG2_PHY_SERDES 0x00002000 2200#define TG3_FLG2_PHY_SERDES 0x00002000
2186#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 2201#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000
2187#define TG3_FLG2_FLASH 0x00008000 2202#define TG3_FLG2_FLASH 0x00008000
2188#define TG3_FLG2_HW_TSO 0x00010000 2203#define TG3_FLG2_HW_TSO_1 0x00010000
2189#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 2204#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2190#define TG3_FLG2_5705_PLUS 0x00040000 2205#define TG3_FLG2_5705_PLUS 0x00040000
2191#define TG3_FLG2_5750_PLUS 0x00080000 2206#define TG3_FLG2_5750_PLUS 0x00080000
@@ -2198,6 +2213,9 @@ struct tg3 {
2198#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2213#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2199#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2214#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2200#define TG3_FLG2_5780_CLASS 0x04000000 2215#define TG3_FLG2_5780_CLASS 0x04000000
2216#define TG3_FLG2_HW_TSO_2 0x08000000
2217#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2218#define TG3_FLG2_1SHOT_MSI 0x10000000
2201 2219
2202 u32 split_mode_max_reqs; 2220 u32 split_mode_max_reqs;
2203#define SPLIT_MODE_5704_MAX_REQ 3 2221#define SPLIT_MODE_5704_MAX_REQ 3
@@ -2247,6 +2265,8 @@ struct tg3 {
2247#define PHY_ID_BCM5752 0x60008100 2265#define PHY_ID_BCM5752 0x60008100
2248#define PHY_ID_BCM5714 0x60008340 2266#define PHY_ID_BCM5714 0x60008340
2249#define PHY_ID_BCM5780 0x60008350 2267#define PHY_ID_BCM5780 0x60008350
2268#define PHY_ID_BCM5755 0xbc050cc0
2269#define PHY_ID_BCM5787 0xbc050ce0
2250#define PHY_ID_BCM8002 0x60010140 2270#define PHY_ID_BCM8002 0x60010140
2251#define PHY_ID_INVALID 0xffffffff 2271#define PHY_ID_INVALID 0xffffffff
2252#define PHY_ID_REV_MASK 0x0000000f 2272#define PHY_ID_REV_MASK 0x0000000f
@@ -2258,6 +2278,7 @@ struct tg3 {
2258 u32 led_ctrl; 2278 u32 led_ctrl;
2259 2279
2260 char board_part_number[24]; 2280 char board_part_number[24];
2281 char fw_ver[16];
2261 u32 nic_sram_data_cfg; 2282 u32 nic_sram_data_cfg;
2262 u32 pci_clock_ctrl; 2283 u32 pci_clock_ctrl;
2263 struct pci_dev *pdev_peer; 2284 struct pci_dev *pdev_peer;
@@ -2271,7 +2292,8 @@ struct tg3 {
2271 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ 2292 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2272 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ 2293 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2273 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ 2294 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
2274 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002) 2295 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2296 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM8002)
2275 2297
2276 struct tg3_hw_stats *hw_stats; 2298 struct tg3_hw_stats *hw_stats;
2277 dma_addr_t stats_mapping; 2299 dma_addr_t stats_mapping;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 6299e186c73f..e3dd144d326b 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1327,11 +1327,11 @@ static void de_clean_rings (struct de_private *de)
1327 struct sk_buff *skb = de->tx_skb[i].skb; 1327 struct sk_buff *skb = de->tx_skb[i].skb;
1328 if ((skb) && (skb != DE_DUMMY_SKB)) { 1328 if ((skb) && (skb != DE_DUMMY_SKB)) {
1329 if (skb != DE_SETUP_SKB) { 1329 if (skb != DE_SETUP_SKB) {
1330 dev_kfree_skb(skb);
1331 de->net_stats.tx_dropped++; 1330 de->net_stats.tx_dropped++;
1332 pci_unmap_single(de->pdev, 1331 pci_unmap_single(de->pdev,
1333 de->tx_skb[i].mapping, 1332 de->tx_skb[i].mapping,
1334 skb->len, PCI_DMA_TODEVICE); 1333 skb->len, PCI_DMA_TODEVICE);
1334 dev_kfree_skb(skb);
1335 } else { 1335 } else {
1336 pci_unmap_single(de->pdev, 1336 pci_unmap_single(de->pdev,
1337 de->tx_skb[i].mapping, 1337 de->tx_skb[i].mapping,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index db2c798ba89e..175ba13bce41 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1495,8 +1495,7 @@ module_param(skip_pci_probe, bool, 0);
1495MODULE_LICENSE("GPL"); 1495MODULE_LICENSE("GPL");
1496 1496
1497 1497
1498int 1498int __init init_module( void )
1499init_module( void )
1500{ 1499{
1501 struct net_device *dev; 1500 struct net_device *dev;
1502 int err; 1501 int err;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5b0a19a5058d..6a1033ec06cf 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -25,6 +25,15 @@ config NET_RADIO
25 the tools from 25 the tools from
26 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 26 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
27 27
28config NET_WIRELESS_RTNETLINK
29 bool "Wireless Extension API over RtNetlink"
30 ---help---
31 Support the Wireless Extension API over the RtNetlink socket
32 in addition to the traditional ioctl interface (selected above).
33
34 For now, few tools use this facility, but it might grow in the
35 future. The only downside is that it adds 4.5 kB to your kernel.
36
28# Note : the cards are obsolete (can't buy them anymore), but the drivers 37# Note : the cards are obsolete (can't buy them anymore), but the drivers
29# are not, as people are still using them... 38# are not, as people are still using them...
30comment "Obsolete Wireless cards support (pre-802.11)" 39comment "Obsolete Wireless cards support (pre-802.11)"
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 864937a409e5..108d9fed8f07 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -770,6 +770,11 @@ typedef struct {
770} BSSListRid; 770} BSSListRid;
771 771
772typedef struct { 772typedef struct {
773 BSSListRid bss;
774 struct list_head list;
775} BSSListElement;
776
777typedef struct {
773 u8 rssipct; 778 u8 rssipct;
774 u8 rssidBm; 779 u8 rssidBm;
775} tdsRssiEntry; 780} tdsRssiEntry;
@@ -902,6 +907,7 @@ static char swversion[] = "2.1";
902#define NUM_MODULES 2 907#define NUM_MODULES 2
903#define MIC_MSGLEN_MAX 2400 908#define MIC_MSGLEN_MAX 2400
904#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX 909#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX
910#define AIRO_DEF_MTU 2312
905 911
906typedef struct { 912typedef struct {
907 u32 size; // size 913 u32 size; // size
@@ -1119,6 +1125,8 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket,
1119static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); 1125static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
1120static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); 1126static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
1121 1127
1128static void airo_networks_free(struct airo_info *ai);
1129
1122struct airo_info { 1130struct airo_info {
1123 struct net_device_stats stats; 1131 struct net_device_stats stats;
1124 struct net_device *dev; 1132 struct net_device *dev;
@@ -1150,7 +1158,7 @@ struct airo_info {
1150#define FLAG_COMMIT 13 1158#define FLAG_COMMIT 13
1151#define FLAG_RESET 14 1159#define FLAG_RESET 14
1152#define FLAG_FLASHING 15 1160#define FLAG_FLASHING 15
1153#define JOB_MASK 0x1ff0000 1161#define JOB_MASK 0x2ff0000
1154#define JOB_DIE 16 1162#define JOB_DIE 16
1155#define JOB_XMIT 17 1163#define JOB_XMIT 17
1156#define JOB_XMIT11 18 1164#define JOB_XMIT11 18
@@ -1160,6 +1168,7 @@ struct airo_info {
1160#define JOB_EVENT 22 1168#define JOB_EVENT 22
1161#define JOB_AUTOWEP 23 1169#define JOB_AUTOWEP 23
1162#define JOB_WSTATS 24 1170#define JOB_WSTATS 24
1171#define JOB_SCAN_RESULTS 25
1163 int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen, 1172 int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
1164 int whichbap); 1173 int whichbap);
1165 unsigned short *flash; 1174 unsigned short *flash;
@@ -1176,7 +1185,7 @@ struct airo_info {
1176 } xmit, xmit11; 1185 } xmit, xmit11;
1177 struct net_device *wifidev; 1186 struct net_device *wifidev;
1178 struct iw_statistics wstats; // wireless stats 1187 struct iw_statistics wstats; // wireless stats
1179 unsigned long scan_timestamp; /* Time started to scan */ 1188 unsigned long scan_timeout; /* Time scan should be read */
1180 struct iw_spy_data spy_data; 1189 struct iw_spy_data spy_data;
1181 struct iw_public_data wireless_data; 1190 struct iw_public_data wireless_data;
1182 /* MIC stuff */ 1191 /* MIC stuff */
@@ -1198,6 +1207,10 @@ struct airo_info {
1198 APListRid *APList; 1207 APListRid *APList;
1199#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE 1208#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
1200 char proc_name[IFNAMSIZ]; 1209 char proc_name[IFNAMSIZ];
1210
1211 struct list_head network_list;
1212 struct list_head network_free_list;
1213 BSSListElement *networks;
1201}; 1214};
1202 1215
1203static inline int bap_read(struct airo_info *ai, u16 *pu16Dst, int bytelen, 1216static inline int bap_read(struct airo_info *ai, u16 *pu16Dst, int bytelen,
@@ -1216,6 +1229,22 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
1216static int flashputbuf(struct airo_info *ai); 1229static int flashputbuf(struct airo_info *ai);
1217static int flashrestart(struct airo_info *ai,struct net_device *dev); 1230static int flashrestart(struct airo_info *ai,struct net_device *dev);
1218 1231
1232#define airo_print(type, name, fmt, args...) \
1233 { printk(type "airo(%s): " fmt "\n", name, ##args); }
1234
1235#define airo_print_info(name, fmt, args...) \
1236 airo_print(KERN_INFO, name, fmt, ##args)
1237
1238#define airo_print_dbg(name, fmt, args...) \
1239 airo_print(KERN_DEBUG, name, fmt, ##args)
1240
1241#define airo_print_warn(name, fmt, args...) \
1242 airo_print(KERN_WARNING, name, fmt, ##args)
1243
1244#define airo_print_err(name, fmt, args...) \
1245 airo_print(KERN_ERR, name, fmt, ##args)
1246
1247
1219/*********************************************************************** 1248/***********************************************************************
1220 * MIC ROUTINES * 1249 * MIC ROUTINES *
1221 *********************************************************************** 1250 ***********************************************************************
@@ -1294,7 +1323,7 @@ static int micsetup(struct airo_info *ai) {
1294 ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); 1323 ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);
1295 1324
1296 if (ai->tfm == NULL) { 1325 if (ai->tfm == NULL) {
1297 printk(KERN_ERR "airo: failed to load transform for AES\n"); 1326 airo_print_err(ai->dev->name, "failed to load transform for AES");
1298 return ERROR; 1327 return ERROR;
1299 } 1328 }
1300 1329
@@ -1726,11 +1755,11 @@ static int writeWepKeyRid(struct airo_info*ai, WepKeyRid *pwkr, int perm, int lo
1726 wkr.kindex = cpu_to_le16(wkr.kindex); 1755 wkr.kindex = cpu_to_le16(wkr.kindex);
1727 wkr.klen = cpu_to_le16(wkr.klen); 1756 wkr.klen = cpu_to_le16(wkr.klen);
1728 rc = PC4500_writerid(ai, RID_WEP_TEMP, &wkr, sizeof(wkr), lock); 1757 rc = PC4500_writerid(ai, RID_WEP_TEMP, &wkr, sizeof(wkr), lock);
1729 if (rc!=SUCCESS) printk(KERN_ERR "airo: WEP_TEMP set %x\n", rc); 1758 if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc);
1730 if (perm) { 1759 if (perm) {
1731 rc = PC4500_writerid(ai, RID_WEP_PERM, &wkr, sizeof(wkr), lock); 1760 rc = PC4500_writerid(ai, RID_WEP_PERM, &wkr, sizeof(wkr), lock);
1732 if (rc!=SUCCESS) { 1761 if (rc!=SUCCESS) {
1733 printk(KERN_ERR "airo: WEP_PERM set %x\n", rc); 1762 airo_print_err(ai->dev->name, "WEP_PERM set %x", rc);
1734 } 1763 }
1735 } 1764 }
1736 return rc; 1765 return rc;
@@ -1909,7 +1938,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
1909 struct airo_info *ai = dev->priv; 1938 struct airo_info *ai = dev->priv;
1910 1939
1911 if (!skb) { 1940 if (!skb) {
1912 printk(KERN_ERR "airo: %s: skb==NULL\n",__FUNCTION__); 1941 airo_print_err(dev->name, "%s: skb == NULL!",__FUNCTION__);
1913 return 0; 1942 return 0;
1914 } 1943 }
1915 npacks = skb_queue_len (&ai->txq); 1944 npacks = skb_queue_len (&ai->txq);
@@ -1955,8 +1984,8 @@ static int mpi_send_packet (struct net_device *dev)
1955 /* get a packet to send */ 1984 /* get a packet to send */
1956 1985
1957 if ((skb = skb_dequeue(&ai->txq)) == 0) { 1986 if ((skb = skb_dequeue(&ai->txq)) == 0) {
1958 printk (KERN_ERR 1987 airo_print_err(dev->name,
1959 "airo: %s: Dequeue'd zero in send_packet()\n", 1988 "%s: Dequeue'd zero in send_packet()",
1960 __FUNCTION__); 1989 __FUNCTION__);
1961 return 0; 1990 return 0;
1962 } 1991 }
@@ -2108,7 +2137,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2108 u32 *fids = priv->fids; 2137 u32 *fids = priv->fids;
2109 2138
2110 if ( skb == NULL ) { 2139 if ( skb == NULL ) {
2111 printk( KERN_ERR "airo: skb == NULL!!!\n" ); 2140 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__);
2112 return 0; 2141 return 0;
2113 } 2142 }
2114 2143
@@ -2179,7 +2208,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2179 } 2208 }
2180 2209
2181 if ( skb == NULL ) { 2210 if ( skb == NULL ) {
2182 printk( KERN_ERR "airo: skb == NULL!!!\n" ); 2211 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__);
2183 return 0; 2212 return 0;
2184 } 2213 }
2185 2214
@@ -2364,6 +2393,8 @@ void stop_airo_card( struct net_device *dev, int freeres )
2364 dev_kfree_skb(skb); 2393 dev_kfree_skb(skb);
2365 } 2394 }
2366 2395
2396 airo_networks_free (ai);
2397
2367 kfree(ai->flash); 2398 kfree(ai->flash);
2368 kfree(ai->rssi); 2399 kfree(ai->rssi);
2369 kfree(ai->APList); 2400 kfree(ai->APList);
@@ -2434,7 +2465,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
2434 cmd.parm2 = MPI_MAX_FIDS; 2465 cmd.parm2 = MPI_MAX_FIDS;
2435 rc=issuecommand(ai, &cmd, &rsp); 2466 rc=issuecommand(ai, &cmd, &rsp);
2436 if (rc != SUCCESS) { 2467 if (rc != SUCCESS) {
2437 printk(KERN_ERR "airo: Couldn't allocate RX FID\n"); 2468 airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
2438 return rc; 2469 return rc;
2439 } 2470 }
2440 2471
@@ -2462,7 +2493,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
2462 2493
2463 rc=issuecommand(ai, &cmd, &rsp); 2494 rc=issuecommand(ai, &cmd, &rsp);
2464 if (rc != SUCCESS) { 2495 if (rc != SUCCESS) {
2465 printk(KERN_ERR "airo: Couldn't allocate TX FID\n"); 2496 airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
2466 return rc; 2497 return rc;
2467 } 2498 }
2468 2499
@@ -2476,7 +2507,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
2476 cmd.parm2 = 1; /* Magic number... */ 2507 cmd.parm2 = 1; /* Magic number... */
2477 rc=issuecommand(ai, &cmd, &rsp); 2508 rc=issuecommand(ai, &cmd, &rsp);
2478 if (rc != SUCCESS) { 2509 if (rc != SUCCESS) {
2479 printk(KERN_ERR "airo: Couldn't allocate RID\n"); 2510 airo_print_err(ai->dev->name, "Couldn't allocate RID");
2480 return rc; 2511 return rc;
2481 } 2512 }
2482 2513
@@ -2508,25 +2539,25 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2508 aux_len = AUXMEMSIZE; 2539 aux_len = AUXMEMSIZE;
2509 2540
2510 if (!request_mem_region(mem_start, mem_len, name)) { 2541 if (!request_mem_region(mem_start, mem_len, name)) {
2511 printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n", 2542 airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
2512 (int)mem_start, (int)mem_len, name); 2543 (int)mem_start, (int)mem_len, name);
2513 goto out; 2544 goto out;
2514 } 2545 }
2515 if (!request_mem_region(aux_start, aux_len, name)) { 2546 if (!request_mem_region(aux_start, aux_len, name)) {
2516 printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n", 2547 airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
2517 (int)aux_start, (int)aux_len, name); 2548 (int)aux_start, (int)aux_len, name);
2518 goto free_region1; 2549 goto free_region1;
2519 } 2550 }
2520 2551
2521 ai->pcimem = ioremap(mem_start, mem_len); 2552 ai->pcimem = ioremap(mem_start, mem_len);
2522 if (!ai->pcimem) { 2553 if (!ai->pcimem) {
2523 printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n", 2554 airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
2524 (int)mem_start, (int)mem_len, name); 2555 (int)mem_start, (int)mem_len, name);
2525 goto free_region2; 2556 goto free_region2;
2526 } 2557 }
2527 ai->pciaux = ioremap(aux_start, aux_len); 2558 ai->pciaux = ioremap(aux_start, aux_len);
2528 if (!ai->pciaux) { 2559 if (!ai->pciaux) {
2529 printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n", 2560 airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
2530 (int)aux_start, (int)aux_len, name); 2561 (int)aux_start, (int)aux_len, name);
2531 goto free_memmap; 2562 goto free_memmap;
2532 } 2563 }
@@ -2534,7 +2565,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2534 /* Reserve PKTSIZE for each fid and 2K for the Rids */ 2565 /* Reserve PKTSIZE for each fid and 2K for the Rids */
2535 ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); 2566 ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
2536 if (!ai->shared) { 2567 if (!ai->shared) {
2537 printk(KERN_ERR "airo: Couldn't alloc_consistent %d\n", 2568 airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d",
2538 PCI_SHARED_LEN); 2569 PCI_SHARED_LEN);
2539 goto free_auxmap; 2570 goto free_auxmap;
2540 } 2571 }
@@ -2626,7 +2657,7 @@ static void wifi_setup(struct net_device *dev)
2626 2657
2627 dev->type = ARPHRD_IEEE80211; 2658 dev->type = ARPHRD_IEEE80211;
2628 dev->hard_header_len = ETH_HLEN; 2659 dev->hard_header_len = ETH_HLEN;
2629 dev->mtu = 2312; 2660 dev->mtu = AIRO_DEF_MTU;
2630 dev->addr_len = ETH_ALEN; 2661 dev->addr_len = ETH_ALEN;
2631 dev->tx_queue_len = 100; 2662 dev->tx_queue_len = 100;
2632 2663
@@ -2670,6 +2701,42 @@ static int reset_card( struct net_device *dev , int lock) {
2670 return 0; 2701 return 0;
2671} 2702}
2672 2703
2704#define MAX_NETWORK_COUNT 64
2705static int airo_networks_allocate(struct airo_info *ai)
2706{
2707 if (ai->networks)
2708 return 0;
2709
2710 ai->networks =
2711 kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement),
2712 GFP_KERNEL);
2713 if (!ai->networks) {
2714 airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
2715 return -ENOMEM;
2716 }
2717
2718 return 0;
2719}
2720
2721static void airo_networks_free(struct airo_info *ai)
2722{
2723 if (!ai->networks)
2724 return;
2725 kfree(ai->networks);
2726 ai->networks = NULL;
2727}
2728
2729static void airo_networks_initialize(struct airo_info *ai)
2730{
2731 int i;
2732
2733 INIT_LIST_HEAD(&ai->network_free_list);
2734 INIT_LIST_HEAD(&ai->network_list);
2735 for (i = 0; i < MAX_NETWORK_COUNT; i++)
2736 list_add_tail(&ai->networks[i].list,
2737 &ai->network_free_list);
2738}
2739
2673static struct net_device *_init_airo_card( unsigned short irq, int port, 2740static struct net_device *_init_airo_card( unsigned short irq, int port,
2674 int is_pcmcia, struct pci_dev *pci, 2741 int is_pcmcia, struct pci_dev *pci,
2675 struct device *dmdev ) 2742 struct device *dmdev )
@@ -2681,22 +2748,22 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2681 /* Create the network device object. */ 2748 /* Create the network device object. */
2682 dev = alloc_etherdev(sizeof(*ai)); 2749 dev = alloc_etherdev(sizeof(*ai));
2683 if (!dev) { 2750 if (!dev) {
2684 printk(KERN_ERR "airo: Couldn't alloc_etherdev\n"); 2751 airo_print_err("", "Couldn't alloc_etherdev");
2685 return NULL; 2752 return NULL;
2686 } 2753 }
2687 if (dev_alloc_name(dev, dev->name) < 0) { 2754 if (dev_alloc_name(dev, dev->name) < 0) {
2688 printk(KERN_ERR "airo: Couldn't get name!\n"); 2755 airo_print_err("", "Couldn't get name!");
2689 goto err_out_free; 2756 goto err_out_free;
2690 } 2757 }
2691 2758
2692 ai = dev->priv; 2759 ai = dev->priv;
2693 ai->wifidev = NULL; 2760 ai->wifidev = NULL;
2694 ai->flags = 0; 2761 ai->flags = 0;
2762 ai->dev = dev;
2695 if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { 2763 if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
2696 printk(KERN_DEBUG "airo: Found an MPI350 card\n"); 2764 airo_print_dbg(dev->name, "Found an MPI350 card");
2697 set_bit(FLAG_MPI, &ai->flags); 2765 set_bit(FLAG_MPI, &ai->flags);
2698 } 2766 }
2699 ai->dev = dev;
2700 spin_lock_init(&ai->aux_lock); 2767 spin_lock_init(&ai->aux_lock);
2701 sema_init(&ai->sem, 1); 2768 sema_init(&ai->sem, 1);
2702 ai->config.len = 0; 2769 ai->config.len = 0;
@@ -2711,6 +2778,10 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2711 if (rc) 2778 if (rc)
2712 goto err_out_thr; 2779 goto err_out_thr;
2713 2780
2781 if (airo_networks_allocate (ai))
2782 goto err_out_unlink;
2783 airo_networks_initialize (ai);
2784
2714 /* The Airo-specific entries in the device structure. */ 2785 /* The Airo-specific entries in the device structure. */
2715 if (test_bit(FLAG_MPI,&ai->flags)) { 2786 if (test_bit(FLAG_MPI,&ai->flags)) {
2716 skb_queue_head_init (&ai->txq); 2787 skb_queue_head_init (&ai->txq);
@@ -2732,33 +2803,33 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2732 2803
2733 SET_NETDEV_DEV(dev, dmdev); 2804 SET_NETDEV_DEV(dev, dmdev);
2734 2805
2735
2736 reset_card (dev, 1); 2806 reset_card (dev, 1);
2737 msleep(400); 2807 msleep(400);
2738 2808
2739 rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev ); 2809 rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev );
2740 if (rc) { 2810 if (rc) {
2741 printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc ); 2811 airo_print_err(dev->name, "register interrupt %d failed, rc %d",
2812 irq, rc);
2742 goto err_out_unlink; 2813 goto err_out_unlink;
2743 } 2814 }
2744 if (!is_pcmcia) { 2815 if (!is_pcmcia) {
2745 if (!request_region( dev->base_addr, 64, dev->name )) { 2816 if (!request_region( dev->base_addr, 64, dev->name )) {
2746 rc = -EBUSY; 2817 rc = -EBUSY;
2747 printk(KERN_ERR "airo: Couldn't request region\n"); 2818 airo_print_err(dev->name, "Couldn't request region");
2748 goto err_out_irq; 2819 goto err_out_irq;
2749 } 2820 }
2750 } 2821 }
2751 2822
2752 if (test_bit(FLAG_MPI,&ai->flags)) { 2823 if (test_bit(FLAG_MPI,&ai->flags)) {
2753 if (mpi_map_card(ai, pci, dev->name)) { 2824 if (mpi_map_card(ai, pci, dev->name)) {
2754 printk(KERN_ERR "airo: Could not map memory\n"); 2825 airo_print_err(dev->name, "Could not map memory");
2755 goto err_out_res; 2826 goto err_out_res;
2756 } 2827 }
2757 } 2828 }
2758 2829
2759 if (probe) { 2830 if (probe) {
2760 if ( setup_card( ai, dev->dev_addr, 1 ) != SUCCESS ) { 2831 if ( setup_card( ai, dev->dev_addr, 1 ) != SUCCESS ) {
2761 printk( KERN_ERR "airo: MAC could not be enabled\n" ); 2832 airo_print_err(dev->name, "MAC could not be enabled" );
2762 rc = -EIO; 2833 rc = -EIO;
2763 goto err_out_map; 2834 goto err_out_map;
2764 } 2835 }
@@ -2769,21 +2840,20 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2769 2840
2770 rc = register_netdev(dev); 2841 rc = register_netdev(dev);
2771 if (rc) { 2842 if (rc) {
2772 printk(KERN_ERR "airo: Couldn't register_netdev\n"); 2843 airo_print_err(dev->name, "Couldn't register_netdev");
2773 goto err_out_map; 2844 goto err_out_map;
2774 } 2845 }
2775 ai->wifidev = init_wifidev(ai, dev); 2846 ai->wifidev = init_wifidev(ai, dev);
2776 2847
2777 set_bit(FLAG_REGISTERED,&ai->flags); 2848 set_bit(FLAG_REGISTERED,&ai->flags);
2778 printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n", 2849 airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x",
2779 dev->name,
2780 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 2850 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2781 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); 2851 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] );
2782 2852
2783 /* Allocate the transmit buffers */ 2853 /* Allocate the transmit buffers */
2784 if (probe && !test_bit(FLAG_MPI,&ai->flags)) 2854 if (probe && !test_bit(FLAG_MPI,&ai->flags))
2785 for( i = 0; i < MAX_FIDS; i++ ) 2855 for( i = 0; i < MAX_FIDS; i++ )
2786 ai->fids[i] = transmit_allocate(ai,2312,i>=MAX_FIDS/2); 2856 ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
2787 2857
2788 setup_proc_entry( dev, dev->priv ); /* XXX check for failure */ 2858 setup_proc_entry( dev, dev->priv ); /* XXX check for failure */
2789 netif_start_queue(dev); 2859 netif_start_queue(dev);
@@ -2840,16 +2910,16 @@ int reset_airo_card( struct net_device *dev )
2840 return -1; 2910 return -1;
2841 2911
2842 if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) { 2912 if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
2843 printk( KERN_ERR "airo: MAC could not be enabled\n" ); 2913 airo_print_err(dev->name, "MAC could not be enabled");
2844 return -1; 2914 return -1;
2845 } 2915 }
2846 printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n", dev->name, 2916 airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x",
2847 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 2917 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2848 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 2918 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2849 /* Allocate the transmit buffers if needed */ 2919 /* Allocate the transmit buffers if needed */
2850 if (!test_bit(FLAG_MPI,&ai->flags)) 2920 if (!test_bit(FLAG_MPI,&ai->flags))
2851 for( i = 0; i < MAX_FIDS; i++ ) 2921 for( i = 0; i < MAX_FIDS; i++ )
2852 ai->fids[i] = transmit_allocate (ai,2312,i>=MAX_FIDS/2); 2922 ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
2853 2923
2854 enable_interrupts( ai ); 2924 enable_interrupts( ai );
2855 netif_wake_queue(dev); 2925 netif_wake_queue(dev);
@@ -2875,6 +2945,65 @@ static void airo_send_event(struct net_device *dev) {
2875 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 2945 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
2876} 2946}
2877 2947
2948static void airo_process_scan_results (struct airo_info *ai) {
2949 union iwreq_data wrqu;
2950 BSSListRid BSSList;
2951 int rc;
2952 BSSListElement * loop_net;
2953 BSSListElement * tmp_net;
2954
2955 /* Blow away current list of scan results */
2956 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
2957 list_move_tail (&loop_net->list, &ai->network_free_list);
2958 /* Don't blow away ->list, just BSS data */
2959 memset (loop_net, 0, sizeof (loop_net->bss));
2960 }
2961
2962 /* Try to read the first entry of the scan result */
2963 rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0);
2964 if((rc) || (BSSList.index == 0xffff)) {
2965 /* No scan results */
2966 goto out;
2967 }
2968
2969 /* Read and parse all entries */
2970 tmp_net = NULL;
2971 while((!rc) && (BSSList.index != 0xffff)) {
2972 /* Grab a network off the free list */
2973 if (!list_empty(&ai->network_free_list)) {
2974 tmp_net = list_entry(ai->network_free_list.next,
2975 BSSListElement, list);
2976 list_del(ai->network_free_list.next);
2977 }
2978
2979 if (tmp_net != NULL) {
2980 memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss));
2981 list_add_tail(&tmp_net->list, &ai->network_list);
2982 tmp_net = NULL;
2983 }
2984
2985 /* Read next entry */
2986 rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
2987 &BSSList, sizeof(BSSList), 0);
2988 }
2989
2990out:
2991 ai->scan_timeout = 0;
2992 clear_bit(JOB_SCAN_RESULTS, &ai->flags);
2993 up(&ai->sem);
2994
2995 /* Send an empty event to user space.
2996 * We don't send the received data on
2997 * the event because it would require
2998 * us to do complex transcoding, and
2999 * we want to minimise the work done in
3000 * the irq handler. Use a request to
3001 * extract the data - Jean II */
3002 wrqu.data.length = 0;
3003 wrqu.data.flags = 0;
3004 wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
3005}
3006
2878static int airo_thread(void *data) { 3007static int airo_thread(void *data) {
2879 struct net_device *dev = data; 3008 struct net_device *dev = data;
2880 struct airo_info *ai = dev->priv; 3009 struct airo_info *ai = dev->priv;
@@ -2904,13 +3033,26 @@ static int airo_thread(void *data) {
2904 set_current_state(TASK_INTERRUPTIBLE); 3033 set_current_state(TASK_INTERRUPTIBLE);
2905 if (ai->flags & JOB_MASK) 3034 if (ai->flags & JOB_MASK)
2906 break; 3035 break;
2907 if (ai->expires) { 3036 if (ai->expires || ai->scan_timeout) {
2908 if (time_after_eq(jiffies,ai->expires)){ 3037 if (ai->scan_timeout &&
3038 time_after_eq(jiffies,ai->scan_timeout)){
3039 set_bit(JOB_SCAN_RESULTS,&ai->flags);
3040 break;
3041 } else if (ai->expires &&
3042 time_after_eq(jiffies,ai->expires)){
2909 set_bit(JOB_AUTOWEP,&ai->flags); 3043 set_bit(JOB_AUTOWEP,&ai->flags);
2910 break; 3044 break;
2911 } 3045 }
2912 if (!signal_pending(current)) { 3046 if (!signal_pending(current)) {
2913 schedule_timeout(ai->expires - jiffies); 3047 unsigned long wake_at;
3048 if (!ai->expires || !ai->scan_timeout) {
3049 wake_at = max(ai->expires,
3050 ai->scan_timeout);
3051 } else {
3052 wake_at = min(ai->expires,
3053 ai->scan_timeout);
3054 }
3055 schedule_timeout(wake_at - jiffies);
2914 continue; 3056 continue;
2915 } 3057 }
2916 } else if (!signal_pending(current)) { 3058 } else if (!signal_pending(current)) {
@@ -2953,6 +3095,10 @@ static int airo_thread(void *data) {
2953 airo_send_event(dev); 3095 airo_send_event(dev);
2954 else if (test_bit(JOB_AUTOWEP, &ai->flags)) 3096 else if (test_bit(JOB_AUTOWEP, &ai->flags))
2955 timer_func(dev); 3097 timer_func(dev);
3098 else if (test_bit(JOB_SCAN_RESULTS, &ai->flags))
3099 airo_process_scan_results(ai);
3100 else /* Shouldn't get here, but we make sure to unlock */
3101 up(&ai->sem);
2956 } 3102 }
2957 complete_and_exit (&ai->thr_exited, 0); 3103 complete_and_exit (&ai->thr_exited, 0);
2958} 3104}
@@ -3047,19 +3193,15 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3047 * and reassociations as valid status 3193 * and reassociations as valid status
3048 * Jean II */ 3194 * Jean II */
3049 if(newStatus == ASSOCIATED) { 3195 if(newStatus == ASSOCIATED) {
3050 if (apriv->scan_timestamp) { 3196#if 0
3051 /* Send an empty event to user space. 3197 /* FIXME: Grabbing scan results here
3052 * We don't send the received data on 3198 * seems to be too early??? Just wait for
3053 * the event because it would require 3199 * timeout instead. */
3054 * us to do complex transcoding, and 3200 if (apriv->scan_timeout > 0) {
3055 * we want to minimise the work done in 3201 set_bit(JOB_SCAN_RESULTS, &apriv->flags);
3056 * the irq handler. Use a request to 3202 wake_up_interruptible(&apriv->thr_wait);
3057 * extract the data - Jean II */
3058 wrqu.data.length = 0;
3059 wrqu.data.flags = 0;
3060 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
3061 apriv->scan_timestamp = 0;
3062 } 3203 }
3204#endif
3063 if (down_trylock(&apriv->sem) != 0) { 3205 if (down_trylock(&apriv->sem) != 0) {
3064 set_bit(JOB_EVENT, &apriv->flags); 3206 set_bit(JOB_EVENT, &apriv->flags);
3065 wake_up_interruptible(&apriv->thr_wait); 3207 wake_up_interruptible(&apriv->thr_wait);
@@ -3117,8 +3259,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3117 } 3259 }
3118 len = le16_to_cpu(hdr.len); 3260 len = le16_to_cpu(hdr.len);
3119 3261
3120 if (len > 2312) { 3262 if (len > AIRO_DEF_MTU) {
3121 printk( KERN_ERR "airo: Bad size %d\n", len ); 3263 airo_print_err(apriv->dev->name, "Bad size %d", len);
3122 goto badrx; 3264 goto badrx;
3123 } 3265 }
3124 if (len == 0) 3266 if (len == 0)
@@ -3161,10 +3303,12 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3161 bap_read (apriv, &gap, sizeof(gap), BAP0); 3303 bap_read (apriv, &gap, sizeof(gap), BAP0);
3162 gap = le16_to_cpu(gap); 3304 gap = le16_to_cpu(gap);
3163 if (gap) { 3305 if (gap) {
3164 if (gap <= 8) 3306 if (gap <= 8) {
3165 bap_read (apriv, tmpbuf, gap, BAP0); 3307 bap_read (apriv, tmpbuf, gap, BAP0);
3166 else 3308 } else {
3167 printk(KERN_ERR "airo: gaplen too big. Problems will follow...\n"); 3309 airo_print_err(apriv->dev->name, "gaplen too "
3310 "big. Problems will follow...");
3311 }
3168 } 3312 }
3169 bap_read (apriv, buffer + hdrlen/2, len, BAP0); 3313 bap_read (apriv, buffer + hdrlen/2, len, BAP0);
3170 } else { 3314 } else {
@@ -3281,12 +3425,13 @@ exitrx:
3281 } 3425 }
3282 } else { 3426 } else {
3283 OUT4500( apriv, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); 3427 OUT4500( apriv, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
3284 printk( KERN_ERR "airo: Unallocated FID was used to xmit\n" ); 3428 airo_print_err(apriv->dev->name, "Unallocated FID was "
3429 "used to xmit" );
3285 } 3430 }
3286 } 3431 }
3287exittx: 3432exittx:
3288 if ( status & ~STATUS_INTS & ~IGNORE_INTS ) 3433 if ( status & ~STATUS_INTS & ~IGNORE_INTS )
3289 printk( KERN_WARNING "airo: Got weird status %x\n", 3434 airo_print_warn(apriv->dev->name, "Got weird status %x",
3290 status & ~STATUS_INTS & ~IGNORE_INTS ); 3435 status & ~STATUS_INTS & ~IGNORE_INTS );
3291 } 3436 }
3292 3437
@@ -3359,8 +3504,8 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
3359 up(&ai->sem); 3504 up(&ai->sem);
3360 3505
3361 if (rc) 3506 if (rc)
3362 printk(KERN_ERR "%s: Cannot enable MAC, err=%d\n", 3507 airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d",
3363 __FUNCTION__,rc); 3508 __FUNCTION__, rc);
3364 return rc; 3509 return rc;
3365} 3510}
3366 3511
@@ -3489,8 +3634,8 @@ void mpi_receive_802_11 (struct airo_info *ai)
3489 if (ai->wifidev == NULL) 3634 if (ai->wifidev == NULL)
3490 hdr.len = 0; 3635 hdr.len = 0;
3491 len = le16_to_cpu(hdr.len); 3636 len = le16_to_cpu(hdr.len);
3492 if (len > 2312) { 3637 if (len > AIRO_DEF_MTU) {
3493 printk( KERN_ERR "airo: Bad size %d\n", len ); 3638 airo_print_err(ai->dev->name, "Bad size %d", len);
3494 goto badrx; 3639 goto badrx;
3495 } 3640 }
3496 if (len == 0) 3641 if (len == 0)
@@ -3531,8 +3676,8 @@ void mpi_receive_802_11 (struct airo_info *ai)
3531 if (gap <= 8) 3676 if (gap <= 8)
3532 ptr += gap; 3677 ptr += gap;
3533 else 3678 else
3534 printk(KERN_ERR 3679 airo_print_err(ai->dev->name,
3535 "airo: gaplen too big. Problems will follow...\n"); 3680 "gaplen too big. Problems will follow...");
3536 } 3681 }
3537 memcpy ((char *)buffer + hdrlen, ptr, len); 3682 memcpy ((char *)buffer + hdrlen, ptr, len);
3538 ptr += len; 3683 ptr += len;
@@ -3604,15 +3749,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3604 if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { 3749 if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
3605 if (lock) 3750 if (lock)
3606 up(&ai->sem); 3751 up(&ai->sem);
3607 printk(KERN_ERR "airo: Error checking for AUX port\n"); 3752 airo_print_err(ai->dev->name, "Error checking for AUX port");
3608 return ERROR; 3753 return ERROR;
3609 } 3754 }
3610 if (!aux_bap || rsp.status & 0xff00) { 3755 if (!aux_bap || rsp.status & 0xff00) {
3611 ai->bap_read = fast_bap_read; 3756 ai->bap_read = fast_bap_read;
3612 printk(KERN_DEBUG "airo: Doing fast bap_reads\n"); 3757 airo_print_dbg(ai->dev->name, "Doing fast bap_reads");
3613 } else { 3758 } else {
3614 ai->bap_read = aux_bap_read; 3759 ai->bap_read = aux_bap_read;
3615 printk(KERN_DEBUG "airo: Doing AUX bap_reads\n"); 3760 airo_print_dbg(ai->dev->name, "Doing AUX bap_reads");
3616 } 3761 }
3617 } 3762 }
3618 if (lock) 3763 if (lock)
@@ -3643,7 +3788,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3643 if (cap_rid.softCap & 8) 3788 if (cap_rid.softCap & 8)
3644 ai->config.rmode |= RXMODE_NORMALIZED_RSSI; 3789 ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
3645 else 3790 else
3646 printk(KERN_WARNING "airo: unknown received signal level scale\n"); 3791 airo_print_warn(ai->dev->name, "unknown received signal "
3792 "level scale");
3647 } 3793 }
3648 ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; 3794 ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
3649 ai->config.authType = AUTH_OPEN; 3795 ai->config.authType = AUTH_OPEN;
@@ -3706,7 +3852,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3706 3852
3707 status = enable_MAC(ai, &rsp, lock); 3853 status = enable_MAC(ai, &rsp, lock);
3708 if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) { 3854 if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) {
3709 printk( KERN_ERR "airo: Bad MAC enable reason = %x, rid = %x, offset = %d\n", rsp.rsp0, rsp.rsp1, rsp.rsp2 ); 3855 airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x,"
3856 " offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 );
3710 return ERROR; 3857 return ERROR;
3711 } 3858 }
3712 3859
@@ -3749,8 +3896,8 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
3749 } 3896 }
3750 3897
3751 if ( max_tries == -1 ) { 3898 if ( max_tries == -1 ) {
3752 printk( KERN_ERR 3899 airo_print_err(ai->dev->name,
3753 "airo: Max tries exceeded when issueing command\n" ); 3900 "Max tries exceeded when issueing command");
3754 if (IN4500(ai, COMMAND) & COMMAND_BUSY) 3901 if (IN4500(ai, COMMAND) & COMMAND_BUSY)
3755 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); 3902 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
3756 return ERROR; 3903 return ERROR;
@@ -3762,11 +3909,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
3762 pRsp->rsp1 = IN4500(ai, RESP1); 3909 pRsp->rsp1 = IN4500(ai, RESP1);
3763 pRsp->rsp2 = IN4500(ai, RESP2); 3910 pRsp->rsp2 = IN4500(ai, RESP2);
3764 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) { 3911 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) {
3765 printk (KERN_ERR "airo: cmd= %x\n", pCmd->cmd); 3912 airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd);
3766 printk (KERN_ERR "airo: status= %x\n", pRsp->status); 3913 airo_print_err(ai->dev->name, "status= %x\n", pRsp->status);
3767 printk (KERN_ERR "airo: Rsp0= %x\n", pRsp->rsp0); 3914 airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0);
3768 printk (KERN_ERR "airo: Rsp1= %x\n", pRsp->rsp1); 3915 airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1);
3769 printk (KERN_ERR "airo: Rsp2= %x\n", pRsp->rsp2); 3916 airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2);
3770 } 3917 }
3771 3918
3772 // clear stuck command busy if necessary 3919 // clear stuck command busy if necessary
@@ -3799,15 +3946,15 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
3799 } 3946 }
3800 } else if ( status & BAP_ERR ) { 3947 } else if ( status & BAP_ERR ) {
3801 /* invalid rid or offset */ 3948 /* invalid rid or offset */
3802 printk( KERN_ERR "airo: BAP error %x %d\n", 3949 airo_print_err(ai->dev->name, "BAP error %x %d",
3803 status, whichbap ); 3950 status, whichbap );
3804 return ERROR; 3951 return ERROR;
3805 } else if (status & BAP_DONE) { // success 3952 } else if (status & BAP_DONE) { // success
3806 return SUCCESS; 3953 return SUCCESS;
3807 } 3954 }
3808 if ( !(max_tries--) ) { 3955 if ( !(max_tries--) ) {
3809 printk( KERN_ERR 3956 airo_print_err(ai->dev->name,
3810 "airo: BAP setup error too many retries\n" ); 3957 "airo: BAP setup error too many retries\n");
3811 return ERROR; 3958 return ERROR;
3812 } 3959 }
3813 // -- PC4500 missed it, try again 3960 // -- PC4500 missed it, try again
@@ -3962,8 +4109,8 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in
3962 len = min(len, (int)le16_to_cpu(*(u16*)pBuf)) - 2; 4109 len = min(len, (int)le16_to_cpu(*(u16*)pBuf)) - 2;
3963 4110
3964 if ( len <= 2 ) { 4111 if ( len <= 2 ) {
3965 printk( KERN_ERR 4112 airo_print_err(ai->dev->name,
3966 "airo: Rid %x has a length of %d which is too short\n", 4113 "Rid %x has a length of %d which is too short",
3967 (int)rid, (int)len ); 4114 (int)rid, (int)len );
3968 rc = ERROR; 4115 rc = ERROR;
3969 goto done; 4116 goto done;
@@ -3996,8 +4143,8 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
3996 Resp rsp; 4143 Resp rsp;
3997 4144
3998 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid)) 4145 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
3999 printk(KERN_ERR 4146 airo_print_err(ai->dev->name,
4000 "%s: MAC should be disabled (rid=%04x)\n", 4147 "%s: MAC should be disabled (rid=%04x)",
4001 __FUNCTION__, rid); 4148 __FUNCTION__, rid);
4002 memset(&cmd, 0, sizeof(cmd)); 4149 memset(&cmd, 0, sizeof(cmd));
4003 memset(&rsp, 0, sizeof(rsp)); 4150 memset(&rsp, 0, sizeof(rsp));
@@ -4013,7 +4160,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4013 &ai->config_desc.rid_desc, sizeof(Rid)); 4160 &ai->config_desc.rid_desc, sizeof(Rid));
4014 4161
4015 if (len < 4 || len > 2047) { 4162 if (len < 4 || len > 2047) {
4016 printk(KERN_ERR "%s: len=%d\n",__FUNCTION__,len); 4163 airo_print_err(ai->dev->name, "%s: len=%d", __FUNCTION__, len);
4017 rc = -1; 4164 rc = -1;
4018 } else { 4165 } else {
4019 memcpy((char *)ai->config_desc.virtual_host_addr, 4166 memcpy((char *)ai->config_desc.virtual_host_addr,
@@ -4021,10 +4168,10 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4021 4168
4022 rc = issuecommand(ai, &cmd, &rsp); 4169 rc = issuecommand(ai, &cmd, &rsp);
4023 if ((rc & 0xff00) != 0) { 4170 if ((rc & 0xff00) != 0) {
4024 printk(KERN_ERR "%s: Write rid Error %d\n", 4171 airo_print_err(ai->dev->name, "%s: Write rid Error %d",
4025 __FUNCTION__,rc); 4172 __FUNCTION__, rc);
4026 printk(KERN_ERR "%s: Cmd=%04x\n", 4173 airo_print_err(ai->dev->name, "%s: Cmd=%04x",
4027 __FUNCTION__,cmd.cmd); 4174 __FUNCTION__, cmd.cmd);
4028 } 4175 }
4029 4176
4030 if ((rsp.status & 0x7f00)) 4177 if ((rsp.status & 0x7f00))
@@ -4123,7 +4270,7 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
4123 len >>= 16; 4270 len >>= 16;
4124 4271
4125 if (len <= ETH_ALEN * 2) { 4272 if (len <= ETH_ALEN * 2) {
4126 printk( KERN_WARNING "Short packet %d\n", len ); 4273 airo_print_warn(ai->dev->name, "Short packet %d", len);
4127 return ERROR; 4274 return ERROR;
4128 } 4275 }
4129 len -= ETH_ALEN * 2; 4276 len -= ETH_ALEN * 2;
@@ -4187,7 +4334,7 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
4187 } 4334 }
4188 4335
4189 if (len < hdrlen) { 4336 if (len < hdrlen) {
4190 printk( KERN_WARNING "Short packet %d\n", len ); 4337 airo_print_warn(ai->dev->name, "Short packet %d", len);
4191 return ERROR; 4338 return ERROR;
4192 } 4339 }
4193 4340
@@ -4584,15 +4731,14 @@ static int proc_stats_rid_open( struct inode *inode,
4584 i*4<stats.len; i++){ 4731 i*4<stats.len; i++){
4585 if (!statsLabels[i]) continue; 4732 if (!statsLabels[i]) continue;
4586 if (j+strlen(statsLabels[i])+16>4096) { 4733 if (j+strlen(statsLabels[i])+16>4096) {
4587 printk(KERN_WARNING 4734 airo_print_warn(apriv->dev->name,
4588 "airo: Potentially disasterous buffer overflow averted!\n"); 4735 "Potentially disasterous buffer overflow averted!");
4589 break; 4736 break;
4590 } 4737 }
4591 j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i], vals[i]); 4738 j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i], vals[i]);
4592 } 4739 }
4593 if (i*4>=stats.len){ 4740 if (i*4>=stats.len){
4594 printk(KERN_WARNING 4741 airo_print_warn(apriv->dev->name, "Got a short rid");
4595 "airo: Got a short rid\n");
4596 } 4742 }
4597 data->readlen = j; 4743 data->readlen = j;
4598 return 0; 4744 return 0;
@@ -4754,7 +4900,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) {
4754 4900
4755 line += 14; 4901 line += 14;
4756 v = get_dec_u16(line, &i, 4); 4902 v = get_dec_u16(line, &i, 4);
4757 v = (v<0) ? 0 : ((v>2312) ? 2312 : v); 4903 v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
4758 ai->config.rtsThres = (u16)v; 4904 ai->config.rtsThres = (u16)v;
4759 set_bit (FLAG_COMMIT, &ai->flags); 4905 set_bit (FLAG_COMMIT, &ai->flags);
4760 } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) { 4906 } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
@@ -4788,7 +4934,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) {
4788 4934
4789 line += 15; 4935 line += 15;
4790 v = get_dec_u16(line, &i, 4); 4936 v = get_dec_u16(line, &i, 4);
4791 v = (v<256) ? 256 : ((v>2312) ? 2312 : v); 4937 v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
4792 v = v & 0xfffe; /* Make sure its even */ 4938 v = v & 0xfffe; /* Make sure its even */
4793 ai->config.fragThresh = (u16)v; 4939 ai->config.fragThresh = (u16)v;
4794 set_bit (FLAG_COMMIT, &ai->flags); 4940 set_bit (FLAG_COMMIT, &ai->flags);
@@ -4798,8 +4944,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) {
4798 case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break; 4944 case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
4799 case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break; 4945 case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
4800 case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break; 4946 case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
4801 default: 4947 default: airo_print_warn(ai->dev->name, "Unknown modulation");
4802 printk( KERN_WARNING "airo: Unknown modulation\n" );
4803 } 4948 }
4804 } else if (!strncmp(line, "Preamble: ", 10)) { 4949 } else if (!strncmp(line, "Preamble: ", 10)) {
4805 line += 10; 4950 line += 10;
@@ -4807,10 +4952,10 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) {
4807 case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break; 4952 case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
4808 case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break; 4953 case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
4809 case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break; 4954 case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
4810 default: printk(KERN_WARNING "airo: Unknown preamble\n"); 4955 default: airo_print_warn(ai->dev->name, "Unknown preamble");
4811 } 4956 }
4812 } else { 4957 } else {
4813 printk( KERN_WARNING "Couldn't figure out %s\n", line ); 4958 airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
4814 } 4959 }
4815 while( line[0] && line[0] != '\n' ) line++; 4960 while( line[0] && line[0] != '\n' ) line++;
4816 if ( line[0] ) line++; 4961 if ( line[0] ) line++;
@@ -5076,7 +5221,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5076 } 5221 }
5077 j = 2; 5222 j = 2;
5078 } else { 5223 } else {
5079 printk(KERN_ERR "airo: WepKey passed invalid key index\n"); 5224 airo_print_err(ai->dev->name, "WepKey passed invalid key index");
5080 return; 5225 return;
5081 } 5226 }
5082 5227
@@ -5489,17 +5634,16 @@ static int __init airo_init_module( void )
5489 airo_entry->gid = proc_gid; 5634 airo_entry->gid = proc_gid;
5490 5635
5491 for( i = 0; i < 4 && io[i] && irq[i]; i++ ) { 5636 for( i = 0; i < 4 && io[i] && irq[i]; i++ ) {
5492 printk( KERN_INFO 5637 airo_print_info("", "Trying to configure ISA adapter at irq=%d "
5493 "airo: Trying to configure ISA adapter at irq=%d io=0x%x\n", 5638 "io=0x%x", irq[i], io[i] );
5494 irq[i], io[i] );
5495 if (init_airo_card( irq[i], io[i], 0, NULL )) 5639 if (init_airo_card( irq[i], io[i], 0, NULL ))
5496 have_isa_dev = 1; 5640 have_isa_dev = 1;
5497 } 5641 }
5498 5642
5499#ifdef CONFIG_PCI 5643#ifdef CONFIG_PCI
5500 printk( KERN_INFO "airo: Probing for PCI adapters\n" ); 5644 airo_print_info("", "Probing for PCI adapters");
5501 pci_register_driver(&airo_driver); 5645 pci_register_driver(&airo_driver);
5502 printk( KERN_INFO "airo: Finished probing for PCI adapters\n" ); 5646 airo_print_info("", "Finished probing for PCI adapters");
5503#endif 5647#endif
5504 5648
5505 /* Always exit with success, as we are a library module 5649 /* Always exit with success, as we are a library module
@@ -5511,7 +5655,7 @@ static int __init airo_init_module( void )
5511static void __exit airo_cleanup_module( void ) 5655static void __exit airo_cleanup_module( void )
5512{ 5656{
5513 while( airo_devices ) { 5657 while( airo_devices ) {
5514 printk( KERN_INFO "airo: Unregistering %s\n", airo_devices->dev->name ); 5658 airo_print_info(airo_devices->dev->name, "Unregistering...\n");
5515 stop_airo_card( airo_devices->dev, 1 ); 5659 stop_airo_card( airo_devices->dev, 1 );
5516 } 5660 }
5517#ifdef CONFIG_PCI 5661#ifdef CONFIG_PCI
@@ -5622,7 +5766,8 @@ static int airo_set_freq(struct net_device *dev,
5622 /* We should do a better check than that, 5766 /* We should do a better check than that,
5623 * based on the card capability !!! */ 5767 * based on the card capability !!! */
5624 if((channel < 1) || (channel > 14)) { 5768 if((channel < 1) || (channel > 14)) {
5625 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); 5769 airo_print_dbg(dev->name, "New channel value of %d is invalid!",
5770 fwrq->m);
5626 rc = -EINVAL; 5771 rc = -EINVAL;
5627 } else { 5772 } else {
5628 readConfigRid(local, 1); 5773 readConfigRid(local, 1);
@@ -5946,8 +6091,8 @@ static int airo_set_rts(struct net_device *dev,
5946 int rthr = vwrq->value; 6091 int rthr = vwrq->value;
5947 6092
5948 if(vwrq->disabled) 6093 if(vwrq->disabled)
5949 rthr = 2312; 6094 rthr = AIRO_DEF_MTU;
5950 if((rthr < 0) || (rthr > 2312)) { 6095 if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
5951 return -EINVAL; 6096 return -EINVAL;
5952 } 6097 }
5953 readConfigRid(local, 1); 6098 readConfigRid(local, 1);
@@ -5970,7 +6115,7 @@ static int airo_get_rts(struct net_device *dev,
5970 6115
5971 readConfigRid(local, 1); 6116 readConfigRid(local, 1);
5972 vwrq->value = local->config.rtsThres; 6117 vwrq->value = local->config.rtsThres;
5973 vwrq->disabled = (vwrq->value >= 2312); 6118 vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
5974 vwrq->fixed = 1; 6119 vwrq->fixed = 1;
5975 6120
5976 return 0; 6121 return 0;
@@ -5989,8 +6134,8 @@ static int airo_set_frag(struct net_device *dev,
5989 int fthr = vwrq->value; 6134 int fthr = vwrq->value;
5990 6135
5991 if(vwrq->disabled) 6136 if(vwrq->disabled)
5992 fthr = 2312; 6137 fthr = AIRO_DEF_MTU;
5993 if((fthr < 256) || (fthr > 2312)) { 6138 if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
5994 return -EINVAL; 6139 return -EINVAL;
5995 } 6140 }
5996 fthr &= ~0x1; /* Get an even value - is it really needed ??? */ 6141 fthr &= ~0x1; /* Get an even value - is it really needed ??? */
@@ -6014,7 +6159,7 @@ static int airo_get_frag(struct net_device *dev,
6014 6159
6015 readConfigRid(local, 1); 6160 readConfigRid(local, 1);
6016 vwrq->value = local->config.fragThresh; 6161 vwrq->value = local->config.fragThresh;
6017 vwrq->disabled = (vwrq->value >= 2312); 6162 vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
6018 vwrq->fixed = 1; 6163 vwrq->fixed = 1;
6019 6164
6020 return 0; 6165 return 0;
@@ -6709,9 +6854,9 @@ static int airo_get_range(struct net_device *dev,
6709 range->throughput = 1500 * 1000; 6854 range->throughput = 1500 * 1000;
6710 6855
6711 range->min_rts = 0; 6856 range->min_rts = 0;
6712 range->max_rts = 2312; 6857 range->max_rts = AIRO_DEF_MTU;
6713 range->min_frag = 256; 6858 range->min_frag = 256;
6714 range->max_frag = 2312; 6859 range->max_frag = AIRO_DEF_MTU;
6715 6860
6716 if(cap_rid.softCap & 2) { 6861 if(cap_rid.softCap & 2) {
6717 // WEP: RC4 40 bits 6862 // WEP: RC4 40 bits
@@ -6972,6 +7117,7 @@ static int airo_set_scan(struct net_device *dev,
6972 struct airo_info *ai = dev->priv; 7117 struct airo_info *ai = dev->priv;
6973 Cmd cmd; 7118 Cmd cmd;
6974 Resp rsp; 7119 Resp rsp;
7120 int wake = 0;
6975 7121
6976 /* Note : you may have realised that, as this is a SET operation, 7122 /* Note : you may have realised that, as this is a SET operation,
6977 * this is privileged and therefore a normal user can't 7123 * this is privileged and therefore a normal user can't
@@ -6981,17 +7127,25 @@ static int airo_set_scan(struct net_device *dev,
6981 * Jean II */ 7127 * Jean II */
6982 if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; 7128 if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
6983 7129
7130 if (down_interruptible(&ai->sem))
7131 return -ERESTARTSYS;
7132
7133 /* If there's already a scan in progress, don't
7134 * trigger another one. */
7135 if (ai->scan_timeout > 0)
7136 goto out;
7137
6984 /* Initiate a scan command */ 7138 /* Initiate a scan command */
6985 memset(&cmd, 0, sizeof(cmd)); 7139 memset(&cmd, 0, sizeof(cmd));
6986 cmd.cmd=CMD_LISTBSS; 7140 cmd.cmd=CMD_LISTBSS;
6987 if (down_interruptible(&ai->sem))
6988 return -ERESTARTSYS;
6989 issuecommand(ai, &cmd, &rsp); 7141 issuecommand(ai, &cmd, &rsp);
6990 ai->scan_timestamp = jiffies; 7142 ai->scan_timeout = RUN_AT(3*HZ);
6991 up(&ai->sem); 7143 wake = 1;
6992
6993 /* At this point, just return to the user. */
6994 7144
7145out:
7146 up(&ai->sem);
7147 if (wake)
7148 wake_up_interruptible(&ai->thr_wait);
6995 return 0; 7149 return 0;
6996} 7150}
6997 7151
@@ -7111,59 +7265,38 @@ static int airo_get_scan(struct net_device *dev,
7111 char *extra) 7265 char *extra)
7112{ 7266{
7113 struct airo_info *ai = dev->priv; 7267 struct airo_info *ai = dev->priv;
7114 BSSListRid BSSList; 7268 BSSListElement *net;
7115 int rc; 7269 int err = 0;
7116 char *current_ev = extra; 7270 char *current_ev = extra;
7117 7271
7118 /* When we are associated again, the scan has surely finished. 7272 /* If a scan is in-progress, return -EAGAIN */
7119 * Just in case, let's make sure enough time has elapsed since 7273 if (ai->scan_timeout > 0)
7120 * we started the scan. - Javier */
7121 if(ai->scan_timestamp && time_before(jiffies,ai->scan_timestamp+3*HZ)) {
7122 /* Important note : we don't want to block the caller
7123 * until results are ready for various reasons.
7124 * First, managing wait queues is complex and racy
7125 * (there may be multiple simultaneous callers).
7126 * Second, we grab some rtnetlink lock before comming
7127 * here (in dev_ioctl()).
7128 * Third, the caller can wait on the Wireless Event
7129 * - Jean II */
7130 return -EAGAIN; 7274 return -EAGAIN;
7131 }
7132 ai->scan_timestamp = 0;
7133 7275
7134 /* There's only a race with proc_BSSList_open(), but its 7276 if (down_interruptible(&ai->sem))
7135 * consequences are begnign. So I don't bother fixing it - Javier */ 7277 return -EAGAIN;
7136
7137 /* Try to read the first entry of the scan result */
7138 rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 1);
7139 if((rc) || (BSSList.index == 0xffff)) {
7140 /* Client error, no scan results...
7141 * The caller need to restart the scan. */
7142 return -ENODATA;
7143 }
7144 7278
7145 /* Read and parse all entries */ 7279 list_for_each_entry (net, &ai->network_list, list) {
7146 while((!rc) && (BSSList.index != 0xffff)) {
7147 /* Translate to WE format this entry */ 7280 /* Translate to WE format this entry */
7148 current_ev = airo_translate_scan(dev, current_ev, 7281 current_ev = airo_translate_scan(dev, current_ev,
7149 extra + dwrq->length, 7282 extra + dwrq->length,
7150 &BSSList); 7283 &net->bss);
7151 7284
7152 /* Check if there is space for one more entry */ 7285 /* Check if there is space for one more entry */
7153 if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { 7286 if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
7154 /* Ask user space to try again with a bigger buffer */ 7287 /* Ask user space to try again with a bigger buffer */
7155 return -E2BIG; 7288 err = -E2BIG;
7289 goto out;
7156 } 7290 }
7157
7158 /* Read next entry */
7159 rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
7160 &BSSList, sizeof(BSSList), 1);
7161 } 7291 }
7292
7162 /* Length of data */ 7293 /* Length of data */
7163 dwrq->length = (current_ev - extra); 7294 dwrq->length = (current_ev - extra);
7164 dwrq->flags = 0; /* todo */ 7295 dwrq->flags = 0; /* todo */
7165 7296
7166 return 0; 7297out:
7298 up(&ai->sem);
7299 return err;
7167} 7300}
7168 7301
7169/*------------------------------------------------------------------*/ 7302/*------------------------------------------------------------------*/
@@ -7711,7 +7844,7 @@ static int cmdreset(struct airo_info *ai) {
7711 disable_MAC(ai, 1); 7844 disable_MAC(ai, 1);
7712 7845
7713 if(!waitbusy (ai)){ 7846 if(!waitbusy (ai)){
7714 printk(KERN_INFO "Waitbusy hang before RESET\n"); 7847 airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
7715 return -EBUSY; 7848 return -EBUSY;
7716 } 7849 }
7717 7850
@@ -7720,7 +7853,7 @@ static int cmdreset(struct airo_info *ai) {
7720 ssleep(1); /* WAS 600 12/7/00 */ 7853 ssleep(1); /* WAS 600 12/7/00 */
7721 7854
7722 if(!waitbusy (ai)){ 7855 if(!waitbusy (ai)){
7723 printk(KERN_INFO "Waitbusy hang AFTER RESET\n"); 7856 airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
7724 return -EBUSY; 7857 return -EBUSY;
7725 } 7858 }
7726 return 0; 7859 return 0;
@@ -7748,7 +7881,7 @@ static int setflashmode (struct airo_info *ai) {
7748 7881
7749 if(!waitbusy(ai)) { 7882 if(!waitbusy(ai)) {
7750 clear_bit (FLAG_FLASHING, &ai->flags); 7883 clear_bit (FLAG_FLASHING, &ai->flags);
7751 printk(KERN_INFO "Waitbusy hang after setflash mode\n"); 7884 airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
7752 return -EIO; 7885 return -EIO;
7753 } 7886 }
7754 return 0; 7887 return 0;
@@ -7777,7 +7910,7 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
7777 7910
7778 /* timeout for busy clear wait */ 7911 /* timeout for busy clear wait */
7779 if(waittime <= 0 ){ 7912 if(waittime <= 0 ){
7780 printk(KERN_INFO "flash putchar busywait timeout! \n"); 7913 airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
7781 return -EBUSY; 7914 return -EBUSY;
7782 } 7915 }
7783 7916
@@ -7866,7 +7999,7 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev){
7866 if (!test_bit(FLAG_MPI,&ai->flags)) 7999 if (!test_bit(FLAG_MPI,&ai->flags))
7867 for( i = 0; i < MAX_FIDS; i++ ) { 8000 for( i = 0; i < MAX_FIDS; i++ ) {
7868 ai->fids[i] = transmit_allocate 8001 ai->fids[i] = transmit_allocate
7869 ( ai, 2312, i >= MAX_FIDS / 2 ); 8002 ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 );
7870 } 8003 }
7871 8004
7872 ssleep(1); /* Added 12/7/00 */ 8005 ssleep(1); /* Added 12/7/00 */
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 753a1de6664b..06c3fa32b310 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -3141,7 +3141,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
3141 if (ret == 1) { 3141 if (ret == 1) {
3142 sta = ap_add_sta(ap, sta_addr); 3142 sta = ap_add_sta(ap, sta_addr);
3143 if (!sta) 3143 if (!sta)
3144 ret = -1; 3144 return -1;
3145 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; 3145 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
3146 sta->ap = 1; 3146 sta->ap = 1;
3147 memset(sta->supported_rates, 0, sizeof(sta->supported_rates)); 3147 memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index f8f4503475f9..d335b250923a 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -585,8 +585,6 @@ static int prism2_config(dev_link_t *link)
585 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL); 585 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
586 hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL); 586 hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
587 if (parse == NULL || hw_priv == NULL) { 587 if (parse == NULL || hw_priv == NULL) {
588 kfree(parse);
589 kfree(hw_priv);
590 ret = -ENOMEM; 588 ret = -ENOMEM;
591 goto failed; 589 goto failed;
592 } 590 }
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b1f142d9e232..328e9a1d13b5 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -928,15 +928,15 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len)
928 928
929 res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL); 929 res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL);
930 up(&local->rid_bap_sem); 930 up(&local->rid_bap_sem);
931
931 if (res) { 932 if (res) {
932 printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE " 933 printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE "
933 "failed (res=%d, rid=%04x, len=%d)\n", 934 "failed (res=%d, rid=%04x, len=%d)\n",
934 dev->name, res, rid, len); 935 dev->name, res, rid, len);
935 return res;
936 }
937 936
938 if (res == -ETIMEDOUT) 937 if (res == -ETIMEDOUT)
939 prism2_hw_reset(dev); 938 prism2_hw_reset(dev);
939 }
940 940
941 return res; 941 return res;
942} 942}
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index f3e0ce1ee037..8b37e824dfcb 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3358,10 +3358,6 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3358 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { 3358 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
3359 if (!sta_ptr) 3359 if (!sta_ptr)
3360 local->tx_keyidx = i; 3360 local->tx_keyidx = i;
3361 else if (i) {
3362 ret = -EINVAL;
3363 goto done;
3364 }
3365 } 3361 }
3366 3362
3367 3363
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 2e85bdced2dd..194f07097581 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -307,7 +307,7 @@ static int prism2_pci_probe(struct pci_dev *pdev,
307 memset(hw_priv, 0, sizeof(*hw_priv)); 307 memset(hw_priv, 0, sizeof(*hw_priv));
308 308
309 if (pci_enable_device(pdev)) 309 if (pci_enable_device(pdev))
310 return -EIO; 310 goto err_out_free;
311 311
312 phymem = pci_resource_start(pdev, 0); 312 phymem = pci_resource_start(pdev, 0);
313 313
@@ -368,6 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev,
368 err_out_disable: 368 err_out_disable:
369 pci_disable_device(pdev); 369 pci_disable_device(pdev);
370 prism2_free_local_data(dev); 370 prism2_free_local_data(dev);
371
372 err_out_free:
371 kfree(hw_priv); 373 kfree(hw_priv);
372 374
373 return -ENODEV; 375 return -ENODEV;
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 94fe2449f099..edaaa943eb8f 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -368,7 +368,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
368 368
369 switch (cis[pos]) { 369 switch (cis[pos]) {
370 case CISTPL_CONFIG: 370 case CISTPL_CONFIG:
371 if (cis[pos + 1] < 1) 371 if (cis[pos + 1] < 2)
372 goto cis_error; 372 goto cis_error;
373 rmsz = (cis[pos + 2] & 0x3c) >> 2; 373 rmsz = (cis[pos + 2] & 0x3c) >> 2;
374 rasz = cis[pos + 2] & 0x03; 374 rasz = cis[pos + 2] & 0x03;
@@ -390,7 +390,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
390 break; 390 break;
391 391
392 case CISTPL_MANFID: 392 case CISTPL_MANFID:
393 if (cis[pos + 1] < 4) 393 if (cis[pos + 1] < 5)
394 goto cis_error; 394 goto cis_error;
395 manfid1 = cis[pos + 2] + (cis[pos + 3] << 8); 395 manfid1 = cis[pos + 2] + (cis[pos + 3] << 8);
396 manfid2 = cis[pos + 4] + (cis[pos + 5] << 8); 396 manfid2 = cis[pos + 4] + (cis[pos + 5] << 8);
@@ -452,7 +452,7 @@ static int prism2_plx_probe(struct pci_dev *pdev,
452 memset(hw_priv, 0, sizeof(*hw_priv)); 452 memset(hw_priv, 0, sizeof(*hw_priv));
453 453
454 if (pci_enable_device(pdev)) 454 if (pci_enable_device(pdev))
455 return -EIO; 455 goto err_out_free;
456 456
457 /* National Datacomm NCP130 based on TMD7160, not PLX9052. */ 457 /* National Datacomm NCP130 based on TMD7160, not PLX9052. */
458 tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131); 458 tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131);
@@ -567,9 +567,6 @@ static int prism2_plx_probe(struct pci_dev *pdev,
567 return hostap_hw_ready(dev); 567 return hostap_hw_ready(dev);
568 568
569 fail: 569 fail:
570 prism2_free_local_data(dev);
571 kfree(hw_priv);
572
573 if (irq_registered && dev) 570 if (irq_registered && dev)
574 free_irq(dev->irq, dev); 571 free_irq(dev->irq, dev);
575 572
@@ -577,6 +574,10 @@ static int prism2_plx_probe(struct pci_dev *pdev,
577 iounmap(attr_mem); 574 iounmap(attr_mem);
578 575
579 pci_disable_device(pdev); 576 pci_disable_device(pdev);
577 prism2_free_local_data(dev);
578
579 err_out_free:
580 kfree(hw_priv);
580 581
581 return -ENODEV; 582 return -ENODEV;
582} 583}