Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139too.c | 86
-rw-r--r--  drivers/net/Kconfig | 17
-rw-r--r--  drivers/net/Makefile | 7
-rw-r--r--  drivers/net/b44.c | 13
-rw-r--r--  drivers/net/bonding/Makefile | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 106
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 13
-rw-r--r--  drivers/net/bonding/bond_alb.c | 75
-rw-r--r--  drivers/net/bonding/bond_alb.h | 9
-rw-r--r--  drivers/net/bonding/bond_main.c | 781
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 1358
-rw-r--r--  drivers/net/bonding/bonding.h | 52
-rw-r--r--  drivers/net/chelsio/sge.c | 19
-rw-r--r--  drivers/net/chelsio/sge.h | 2
-rw-r--r--  drivers/net/dgrs.c | 2
-rw-r--r--  drivers/net/e1000/e1000.h | 4
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 111
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 67
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 4
-rw-r--r--  drivers/net/e1000/e1000_main.c | 78
-rw-r--r--  drivers/net/forcedeth.c | 15
-rw-r--r--  drivers/net/gianfar.c | 231
-rw-r--r--  drivers/net/gianfar.h | 69
-rw-r--r--  drivers/net/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/gianfar_mii.h | 1
-rw-r--r--  drivers/net/gianfar_sysfs.c | 311
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c | 38
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.h | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 1
-rw-r--r--  drivers/net/irda/stir4200.c | 1
-rw-r--r--  drivers/net/ixp2000/Kconfig | 6
-rw-r--r--  drivers/net/ixp2000/Makefile | 3
-rw-r--r--  drivers/net/ixp2000/caleb.c | 137
-rw-r--r--  drivers/net/ixp2000/caleb.h | 22
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 245
-rw-r--r--  drivers/net/ixp2000/ixp2400-msf.c | 213
-rw-r--r--  drivers/net/ixp2000/ixp2400-msf.h | 115
-rw-r--r--  drivers/net/ixp2000/ixp2400_rx.uc | 408
-rw-r--r--  drivers/net/ixp2000/ixp2400_rx.ucode | 130
-rw-r--r--  drivers/net/ixp2000/ixp2400_tx.uc | 272
-rw-r--r--  drivers/net/ixp2000/ixp2400_tx.ucode | 98
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 421
-rw-r--r--  drivers/net/ixp2000/ixpdev.h | 27
-rw-r--r--  drivers/net/ixp2000/ixpdev_priv.h | 57
-rw-r--r--  drivers/net/ixp2000/pm3386.c | 334
-rw-r--r--  drivers/net/ixp2000/pm3386.h | 28
-rw-r--r--  drivers/net/jazzsonic.c | 4
-rw-r--r--  drivers/net/mipsnet.h | 30
-rw-r--r--  drivers/net/ns83820.c | 1
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 32
-rw-r--r--  drivers/net/pcnet32.c | 5
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/ppp_generic.c | 3
-rw-r--r--  drivers/net/pppoe.c | 31
-rw-r--r--  drivers/net/pppox.c | 10
-rw-r--r--  drivers/net/s2io.c | 210
-rw-r--r--  drivers/net/s2io.h | 3
-rw-r--r--  drivers/net/sis900.c | 73
-rw-r--r--  drivers/net/sis900.h | 45
-rw-r--r--  drivers/net/sk98lin/Makefile | 6
-rw-r--r--  drivers/net/sk98lin/h/skdrv2nd.h | 13
-rw-r--r--  drivers/net/sk98lin/h/skvpd.h | 8
-rw-r--r--  drivers/net/sk98lin/skcsum.c | 871
-rw-r--r--  drivers/net/sk98lin/skethtool.c | 50
-rw-r--r--  drivers/net/sk98lin/skge.c | 385
-rw-r--r--  drivers/net/sk98lin/skproc.c | 265
-rw-r--r--  drivers/net/skge.c | 95
-rw-r--r--  drivers/net/skge.h | 73
-rw-r--r--  drivers/net/sky2.c | 3262
-rw-r--r--  drivers/net/sky2.h | 1922
-rw-r--r--  drivers/net/sungem.c | 4
-rw-r--r--  drivers/net/tg3.c | 269
-rw-r--r--  drivers/net/tg3.h | 14
-rw-r--r--  drivers/net/tlan.c | 4
-rw-r--r--  drivers/net/wan/lmc/lmc_prot.h | 15
-rw-r--r--  drivers/net/wireless/Kconfig | 6
-rw-r--r--  drivers/net/wireless/airo.c | 19
-rw-r--r--  drivers/net/wireless/atmel.c | 1490
-rw-r--r--  drivers/net/wireless/hostap/Makefile | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c (renamed from drivers/net/wireless/hostap/hostap.c) | 0
-rw-r--r--  drivers/net/wireless/ipw2100.c | 40
-rw-r--r--  drivers/net/wireless/ipw2100.h | 2
-rw-r--r--  drivers/net/wireless/ipw2200.c | 36
-rw-r--r--  drivers/net/wireless/ipw2200.h | 6
-rw-r--r--  drivers/net/wireless/orinoco.c | 3
-rw-r--r--  drivers/net/wireless/orinoco_nortel.c | 6
86 files changed, 11663 insertions, 3646 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 30bee11c48bd..d2102a27d307 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -586,16 +586,16 @@ struct rtl8139_private {
 	dma_addr_t tx_bufs_dma;
 	signed char phys[4]; /* MII device addresses. */
 	char twistie, twist_row, twist_col; /* Twister tune state. */
-	unsigned int default_port:4; /* Last dev->if_port value. */
+	unsigned int default_port : 4; /* Last dev->if_port value. */
+	unsigned int have_thread : 1;
 	spinlock_t lock;
 	spinlock_t rx_lock;
 	chip_t chipset;
-	pid_t thr_pid;
-	wait_queue_head_t thr_wait;
-	struct completion thr_exited;
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
-	int time_to_die;
+
+	struct work_struct thread;
+
 	struct mii_if_info mii;
 	unsigned int regs_len;
 	unsigned long fifo_copy_timeout;
@@ -620,7 +620,7 @@ static int rtl8139_open (struct net_device *dev);
 static int mdio_read (struct net_device *dev, int phy_id, int location);
 static void mdio_write (struct net_device *dev, int phy_id, int location,
 			int val);
-static void rtl8139_start_thread(struct net_device *dev);
+static void rtl8139_start_thread(struct rtl8139_private *tp);
 static void rtl8139_tx_timeout (struct net_device *dev);
 static void rtl8139_init_ring (struct net_device *dev);
 static int rtl8139_start_xmit (struct sk_buff *skb,
@@ -637,6 +637,7 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
+static void rtl8139_thread (void *_data);
 static struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1007,8 +1008,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	init_waitqueue_head (&tp->thr_wait);
-	init_completion (&tp->thr_exited);
+	INIT_WORK(&tp->thread, rtl8139_thread, dev);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1345,7 +1345,7 @@ static int rtl8139_open (struct net_device *dev)
 			dev->irq, RTL_R8 (MediaStatus),
 			tp->mii.full_duplex ? "full" : "half");
 
-	rtl8139_start_thread(dev);
+	rtl8139_start_thread(tp);
 
 	return 0;
 }
@@ -1594,55 +1594,43 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
 		RTL_R8 (Config1));
 }
 
-static int rtl8139_thread (void *data)
+static void rtl8139_thread (void *_data)
 {
-	struct net_device *dev = data;
+	struct net_device *dev = _data;
 	struct rtl8139_private *tp = netdev_priv(dev);
-	unsigned long timeout;
-
-	daemonize("%s", dev->name);
-	allow_signal(SIGTERM);
-
-	while (1) {
-		timeout = next_tick;
-		do {
-			timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
-			/* make swsusp happy with our thread */
-			try_to_freeze();
-		} while (!signal_pending (current) && (timeout > 0));
-
-		if (signal_pending (current)) {
-			flush_signals(current);
-		}
+	unsigned long thr_delay;
 
-		if (tp->time_to_die)
-			break;
-
-		if (rtnl_lock_interruptible ())
-			break;
+	if (rtnl_shlock_nowait() == 0) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
+
+		thr_delay = next_tick;
+	} else {
+		/* unlikely race. mitigate with fast poll. */
+		thr_delay = HZ / 2;
 	}
 
-	complete_and_exit (&tp->thr_exited, 0);
+	schedule_delayed_work(&tp->thread, thr_delay);
 }
 
-static void rtl8139_start_thread(struct net_device *dev)
+static void rtl8139_start_thread(struct rtl8139_private *tp)
 {
-	struct rtl8139_private *tp = netdev_priv(dev);
-
-	tp->thr_pid = -1;
 	tp->twistie = 0;
-	tp->time_to_die = 0;
 	if (tp->chipset == CH_8139_K)
 		tp->twistie = 1;
 	else if (tp->drv_flags & HAS_LNK_CHNG)
 		return;
 
-	tp->thr_pid = kernel_thread(rtl8139_thread, dev, CLONE_FS|CLONE_FILES);
-	if (tp->thr_pid < 0) {
-		printk (KERN_WARNING "%s: unable to start kernel thread\n",
-			dev->name);
+	tp->have_thread = 1;
+
+	schedule_delayed_work(&tp->thread, next_tick);
+}
+
+static void rtl8139_stop_thread(struct rtl8139_private *tp)
+{
+	if (tp->have_thread) {
+		cancel_rearming_delayed_work(&tp->thread);
+		tp->have_thread = 0;
 	}
 }
1648 1636
@@ -2224,22 +2212,12 @@ static int rtl8139_close (struct net_device *dev)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	int ret = 0;
 	unsigned long flags;
 
 	netif_stop_queue (dev);
 
-	if (tp->thr_pid >= 0) {
-		tp->time_to_die = 1;
-		wmb();
-		ret = kill_proc (tp->thr_pid, SIGTERM, 1);
-		if (ret) {
-			printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
-			return ret;
-		}
-		wait_for_completion (&tp->thr_exited);
-	}
-
+	rtl8139_stop_thread(tp);
+
 	if (netif_msg_ifdown(tp))
 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n",
 		       dev->name, RTL_R16 (IntrStatus));
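
Note on the 8139too.c hunks above: the driver's link-tuning loop moves from a dedicated kernel thread (kernel_thread()/daemonize()/signals/completion) to a self-rearming work item. Below is a minimal sketch of that pattern against the pre-2.6.20 workqueue API used by this patch (three-argument INIT_WORK(), delayed work still carried in a plain struct work_struct); the my_* names are illustrative only, not part of the driver.

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct my_priv {
	struct work_struct thread;	/* rearming monitor work */
	unsigned int have_thread : 1;
};

static void my_thread(void *_dev)	/* old-style handler: context arrives as void * */
{
	struct net_device *dev = _dev;
	struct my_priv *tp = netdev_priv(dev);

	/* ... poll link/twister state here ... */

	schedule_delayed_work(&tp->thread, HZ);	/* rearm instead of looping in a thread */
}

static void my_start_thread(struct net_device *dev)
{
	struct my_priv *tp = netdev_priv(dev);

	INIT_WORK(&tp->thread, my_thread, dev);	/* normally done once at probe time */
	tp->have_thread = 1;
	schedule_delayed_work(&tp->thread, HZ);
}

static void my_stop_thread(struct my_priv *tp)
{
	if (tp->have_thread) {
		cancel_rearming_delayed_work(&tp->thread);	/* blocks until the work stops rearming */
		tp->have_thread = 0;
	}
}
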
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ebd7313d7fc1..e2fa29b612cd 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1901,6 +1901,8 @@ config E1000_NAPI
 
 	  If in doubt, say N.
 
+source "drivers/net/ixp2000/Kconfig"
+
 config MYRI_SBUS
 	tristate "MyriCOM Gigabit Ethernet support"
 	depends on SBUS
@@ -2008,7 +2010,18 @@ config SKGE
 
 	  It does not support the link failover and network management
 	  features that "portable" vendor supplied sk98lin driver does.
 
+
+config SKY2
+	tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2. This is recommended.
+
 config SK98LIN
 	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
 	depends on PCI
@@ -2120,7 +2133,7 @@ config BNX2
 
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
-	depends on PCI && PPC_BPA
+	depends on PCI && PPC_CELL
 	help
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4cffd34442aa..b74a7cb5bae6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -13,7 +13,10 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
-gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o
+gianfar_driver-objs := gianfar.o \
+		gianfar_ethtool.o \
+		gianfar_mii.o \
+		gianfar_sysfs.o
 
 #
 # link order important here
@@ -59,6 +62,7 @@ spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
@@ -202,6 +206,7 @@ obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
 obj-$(CONFIG_IRDA) += irda/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
 
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c53848f787eb..7aa49b974dc5 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -28,8 +28,8 @@
 
 #define DRV_MODULE_NAME "b44"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.96"
-#define DRV_MODULE_RELDATE "Nov 8, 2005"
+#define DRV_MODULE_VERSION "0.97"
+#define DRV_MODULE_RELDATE "Nov 30, 2005"
 
 #define B44_DEF_MSG_ENABLE \
 	(NETIF_MSG_DRV | \
@@ -1417,6 +1417,7 @@ static int b44_open(struct net_device *dev)
 	add_timer(&bp->timer);
 
 	b44_enable_ints(bp);
+	netif_start_queue(dev);
 out:
 	return err;
 }
@@ -1837,12 +1838,15 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct b44 *bp = netdev_priv(dev);
-	int err;
+	int err = -EINVAL;
+
+	if (!netif_running(dev))
+		goto out;
 
 	spin_lock_irq(&bp->lock);
 	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
 	spin_unlock_irq(&bp->lock);
-
+out:
 	return err;
 }
 
@@ -2113,6 +2117,7 @@ static int b44_resume(struct pci_dev *pdev)
 	add_timer(&bp->timer);
 
 	b44_enable_ints(bp);
+	netif_wake_queue(dev);
 	return 0;
 }
 
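
Note on the b44.c hunks above: the MII ioctl is now refused while the interface is down, and the TX queue is explicitly (re)started on open and resume. A short sketch of the same ioctl guard, with hypothetical my_* names and assuming the private struct carries the usual spinlock and mii_if_info:

static int my_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct my_priv *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))	/* PHY registers are not reachable while the NIC is down */
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}
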
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index cf50384b469e..5cdae2bc055a 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d2f34d5a8083..f3f5825469d6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -18,38 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
- * Amir Noam <amir.noam at intel dot com>
- * - Added support for lacp_rate module param.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Based on discussion on mailing list, changed locking scheme
- * to use lock/unlock or lock_bh/unlock_bh appropriately instead
- * of lock_irqsave/unlock_irqrestore. The new scheme helps exposing
- * hidden bugs and solves system hangs that occurred due to the fact
- * that holding lock_irqsave doesn't prevent softirqs from running.
- * This also increases total throughput since interrupts are not
- * blocked on each transmitted packets or monitor timeout.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Renamed bond_3ad_link_status_changed() to
- * bond_3ad_handle_link_change() for compatibility with TLB.
- *
- * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
- * - Fix long fail over time when releasing last slave of an active
- * aggregator - send LACPDU on unbind of slave to tell partner this
- * port is no longer aggregatable.
- *
- * 2003/06/25 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
- * - Send LACPDU as highest priority packet to further fix the above
- * problem on very high Tx traffic load where packets may get dropped
- * by the slave.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 //#define BONDING_DEBUG 1
@@ -1198,10 +1166,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 	// detect loopback situation
 	if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
 		// INFO_RECEIVED_LOOPBACK_FRAMES
-		printk(KERN_ERR DRV_NAME ": An illegal loopback occurred on adapter (%s)\n",
-		       port->slave->dev->name);
-		printk(KERN_ERR "Check the configuration to verify that all Adapters "
-		       "are connected to 802.3ad compliant switch ports\n");
+		printk(KERN_ERR DRV_NAME ": %s: An illegal loopback occurred on "
+		       "adapter (%s). Check the configuration to verify that all "
+		       "Adapters are connected to 802.3ad compliant switch ports\n",
+		       port->slave->dev->master->name, port->slave->dev->name);
 		__release_rx_machine_lock(port);
 		return;
 	}
@@ -1378,8 +1346,9 @@ static void ad_port_selection_logic(struct port *port)
 		}
 	}
 	if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
-		printk(KERN_WARNING DRV_NAME ": Warning: Port %d (on %s) was "
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: Port %d (on %s) was "
 		       "related to aggregator %d but was not on its port list\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name,
 		       port->aggregator->aggregator_identifier);
 	}
@@ -1450,7 +1419,8 @@ static void ad_port_selection_logic(struct port *port)
 
 		dprintk("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
 	} else {
-		printk(KERN_ERR DRV_NAME ": Port %d (on %s) did not find a suitable aggregator\n",
+		printk(KERN_ERR DRV_NAME ": %s: Port %d (on %s) did not find a suitable aggregator\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name);
 	}
 }
@@ -1582,8 +1552,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator)
 
 	// check if any partner replys
 	if (best_aggregator->is_individual) {
-		printk(KERN_WARNING DRV_NAME ": Warning: No 802.3ad response from the link partner "
-		       "for any adapters in the bond\n");
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: No 802.3ad response from "
+		       "the link partner for any adapters in the bond\n",
+		       best_aggregator->slave->dev->master->name);
 	}
 
 	// check if there are more than one aggregator
@@ -1915,7 +1886,8 @@ int bond_3ad_bind_slave(struct slave *slave)
 	struct aggregator *aggregator;
 
 	if (bond == NULL) {
-		printk(KERN_ERR "The slave %s is not attached to its bond\n", slave->dev->name);
+		printk(KERN_ERR DRV_NAME ": %s: The slave %s is not attached to its bond\n",
+		       slave->dev->master->name, slave->dev->name);
 		return -1;
 	}
 
@@ -1990,7 +1962,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Trying to unbind an uninitialized port on %s\n", slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: Trying to "
+		       "unbind an uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2021,7 +1995,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 			dprintk("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
 
 			if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
-				printk(KERN_INFO DRV_NAME ": Removing an active aggregator\n");
+				printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+				       aggregator->slave->dev->master->name);
 				// select new active aggregator
 				select_new_active_agg = 1;
 			}
@@ -2051,15 +2026,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				ad_agg_selection_logic(__get_first_agg(port));
 			}
 		} else {
-			printk(KERN_WARNING DRV_NAME ": Warning: unbinding aggregator, "
-			       "and could not find a new aggregator for its ports\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: unbinding aggregator, "
+			       "and could not find a new aggregator for its ports\n",
+			       slave->dev->master->name);
 		}
 	} else { // in case that the only port related to this aggregator is the one we want to remove
 		select_new_active_agg = aggregator->is_active;
 		// clear the aggregator
 		ad_clear_agg(aggregator);
 		if (select_new_active_agg) {
-			printk(KERN_INFO "Removing an active aggregator\n");
+			printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+			       slave->dev->master->name);
 			// select new active aggregator
 			ad_agg_selection_logic(__get_first_agg(port));
 		}
@@ -2085,7 +2062,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				// clear the aggregator
 				ad_clear_agg(temp_aggregator);
 				if (select_new_active_agg) {
-					printk(KERN_INFO "Removing an active aggregator\n");
+					printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+					       slave->dev->master->name);
 					// select new active aggregator
 					ad_agg_selection_logic(__get_first_agg(port));
 				}
@@ -2131,7 +2109,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// select the active aggregator for the bond
 	if ((port = __get_first_port(bond))) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: bond's first port is uninitialized\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: bond's first port is "
+			       "uninitialized\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2143,7 +2122,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// for each port run the state machines
 	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: Found an uninitialized port\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: Found an uninitialized "
+			       "port\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2184,7 +2164,8 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 	port = &(SLAVE_AD_INFO(slave).port);
 
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: port of slave %s is uninitialized\n", slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: port of slave %s is "
+		       "uninitialized\n", slave->dev->name, slave->dev->master->name);
 		return;
 	}
 
@@ -2230,8 +2211,9 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: speed changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: speed "
+		       "changed for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2257,8 +2239,9 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: duplex changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: duplex changed "
+		       "for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2285,8 +2268,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: link status changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: link status changed for "
+		       "uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2363,7 +2347,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-		printk(KERN_DEBUG "ERROR: bond_3ad_get_active_agg_info failed\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: "
+		       "bond_3ad_get_active_agg_info failed\n", dev->name);
 		goto out;
 	}
 
@@ -2372,7 +2357,9 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 
 	if (slaves_in_agg == 0) {
 		/*the aggregator is empty*/
-		printk(KERN_DEBUG "ERROR: active aggregator is empty\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: active "
+		       "aggregator is empty\n",
+		       dev->name);
 		goto out;
 	}
 
@@ -2390,7 +2377,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (slave_agg_no >= 0) {
-		printk(KERN_ERR DRV_NAME ": Error: Couldn't find a slave to tx on for aggregator ID %d\n", agg_id);
+		printk(KERN_ERR DRV_NAME ": %s: Error: Couldn't find a slave to tx on "
+		       "for aggregator ID %d\n", dev->name, agg_id);
 		goto out;
 	}
 
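
Note on the bond_3ad.c hunks above: every message is converted to one convention, DRV_NAME first, then the bond (master) device name, so output from multiple bonds can be told apart in the log. A hypothetical helper showing the resulting format (not part of the patch):

/* Produces e.g. "bonding: bond0: Warning: port of slave eth1 is uninitialized" */
static void my_warn_uninit_port(struct slave *slave)
{
	printk(KERN_WARNING DRV_NAME ": %s: Warning: port of slave %s is "
	       "uninitialized\n",
	       slave->dev->master->name, slave->dev->name);
}
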
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 673a30af5660..5ee2cef5b037 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -18,19 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
- * Amir Noam <amir.noam at intel dot com>
- * - Added support for lacp_rate module param.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Renamed bond_3ad_link_status_changed() to
- * bond_3ad_handle_link_change() for compatibility with TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 #ifndef __BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f8fce3961197..854ddfb90da1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -18,25 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/06/25 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Fixed signed/unsigned calculation errors that caused load sharing
- * to collapse to one slave under very heavy UDP Tx stress.
- *
- * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
- * - Add support for setting bond's MAC address with special
- * handling required for ALB/TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
- *
- * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
- * - Fixed: Cannot remove and re-enslave the original active slave.
- *
- * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Add capability to tag self generated packets in ALB/TLB modes.
  */
 
 //#define BONDING_DEBUG 1
@@ -198,20 +179,21 @@ static int tlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
+	struct tlb_client_info *new_hashtbl;
 	int i;
 
 	spin_lock_init(&(bond_info->tx_hashtbl_lock));
 
-	_lock_tx_hashtbl(bond);
-
-	bond_info->tx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->tx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate TLB hash table\n",
+		       ": %s: Error: Failed to allocate TLB hash table\n",
 		       bond->dev->name);
-		_unlock_tx_hashtbl(bond);
 		return -1;
 	}
+	_lock_tx_hashtbl(bond);
+
+	bond_info->tx_hashtbl = new_hashtbl;
 
 	memset(bond_info->tx_hashtbl, 0, size);
 
@@ -513,7 +495,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 			       client_info->mac_dst);
 		if (!skb) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: failed to create an ARP packet\n");
+			       ": %s: Error: failed to create an ARP packet\n",
+			       client_info->slave->dev->master->name);
 			continue;
 		}
 
@@ -523,7 +506,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 			skb = vlan_put_tag(skb, client_info->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       client_info->slave->dev->master->name);
 				continue;
 			}
 		}
@@ -606,8 +590,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
 
 		if (!client_info->slave) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: found a client with no channel in "
-			       "the client's hash table\n");
+			       ": %s: Error: found a client with no channel in "
+			       "the client's hash table\n",
+			       bond->dev->name);
 			continue;
 		}
 		/*update all clients using this src_ip, that are not assigned
@@ -797,21 +782,22 @@ static int rlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
+	struct rlb_client_info *new_hashtbl;
 	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
 	int i;
 
 	spin_lock_init(&(bond_info->rx_hashtbl_lock));
 
-	_lock_rx_hashtbl(bond);
-
-	bond_info->rx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->rx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate RLB hash table\n",
+		       ": %s: Error: Failed to allocate RLB hash table\n",
 		       bond->dev->name);
-		_unlock_rx_hashtbl(bond);
 		return -1;
 	}
+	_lock_rx_hashtbl(bond);
+
+	bond_info->rx_hashtbl = new_hashtbl;
 
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
@@ -927,7 +913,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 			skb = vlan_put_tag(skb, vlan->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       bond->dev->name);
 				continue;
 			}
 		}
@@ -956,11 +943,11 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
 	s_addr.sa_family = dev->type;
 	if (dev_set_mac_address(dev, &s_addr)) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: dev_set_mac_address of dev %s failed! ALB "
+		       ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
 		       "mode requires that the base driver support setting "
 		       "the hw address also when the network device's "
 		       "interface is open\n",
-		       dev->name);
+		       dev->master->name, dev->name);
 		return -EOPNOTSUPP;
 	}
 	return 0;
@@ -1153,16 +1140,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 			       bond->alb_info.rlb_enabled);
 
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: the hw address of slave %s is in use by "
+		       ": %s: Warning: the hw address of slave %s is in use by "
 		       "the bond; giving it the hw address of %s\n",
-		       slave->dev->name, free_mac_slave->dev->name);
+		       bond->dev->name, slave->dev->name, free_mac_slave->dev->name);
 
 	} else if (has_bond_addr) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: the hw address of slave %s is in use by the "
+		       ": %s: Error: the hw address of slave %s is in use by the "
 		       "bond; couldn't find a slave with a free hw address to "
 		       "give it (this should not have happened)\n",
-		       slave->dev->name);
+		       bond->dev->name, slave->dev->name);
 		return -EFAULT;
 	}
 
@@ -1250,6 +1237,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
 			tlb_deinitialize(bond);
 			return res;
 		}
+	} else {
+		bond->alb_info.rlb_enabled = 0;
 	}
 
 	return 0;
@@ -1409,7 +1398,7 @@ void bond_alb_monitor(struct bonding *bond)
 		read_lock(&bond->curr_slave_lock);
 
 		bond_for_each_slave(bond, slave, i) {
-			alb_send_learning_packets(slave,slave->dev->dev_addr);
+			alb_send_learning_packets(slave, slave->dev->dev_addr);
 		}
 
 		read_unlock(&bond->curr_slave_lock);
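
Note on the tlb_initialize()/rlb_initialize() hunks above: the GFP_KERNEL allocation is moved out from under the hash-table spinlock, since kmalloc(GFP_KERNEL) may sleep and sleeping is not allowed while a spinlock is held; the table pointer is then published under the lock. A minimal sketch of that allocate-then-lock pattern, with hypothetical my_* names:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct my_entry { unsigned long load; };

struct my_info {
	spinlock_t lock;
	struct my_entry *tbl;
};

static int my_table_init(struct my_info *info, int entries)
{
	struct my_entry *new_tbl;
	int size = entries * sizeof(struct my_entry);

	/* Sleeping allocation performed before the spinlock is taken. */
	new_tbl = kmalloc(size, GFP_KERNEL);
	if (!new_tbl)
		return -1;

	spin_lock_bh(&info->lock);
	info->tbl = new_tbl;		/* publish the table under the lock */
	memset(info->tbl, 0, size);
	spin_unlock_bh(&info->lock);

	return 0;
}
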
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e4091cd8d654..28f2a2fd1b5a 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -18,15 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
- * - Add support for setting bond's MAC address with special
- * handling required for ALB/TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 #ifndef __BOND_ALB_H__
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94cec3cf2a13..2582d98ef5c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -29,466 +29,6 @@
  * b: if a hw mac address already is there, eth0's hw mac address
  * will then be set from bond0.
  *
32 * v0.1 - first working version.
33 * v0.2 - changed stats to be calculated by summing slaves stats.
34 *
35 * Changes:
36 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
37 * - fix leaks on failure at bond_init
38 *
39 * 2000/09/30 - Willy Tarreau <willy at meta-x.org>
40 * - added trivial code to release a slave device.
41 * - fixed security bug (CAP_NET_ADMIN not checked)
42 * - implemented MII link monitoring to disable dead links :
43 * All MII capable slaves are checked every <miimon> milliseconds
44 * (100 ms seems good). This value can be changed by passing it to
45 * insmod. A value of zero disables the monitoring (default).
46 * - fixed an infinite loop in bond_xmit_roundrobin() when there's no
47 * good slave.
48 * - made the code hopefully SMP safe
49 *
50 * 2000/10/03 - Willy Tarreau <willy at meta-x.org>
51 * - optimized slave lists based on relevant suggestions from Thomas Davis
52 * - implemented active-backup method to obtain HA with two switches:
53 * stay as long as possible on the same active interface, while we
54 * also monitor the backup one (MII link status) because we want to know
55 * if we are able to switch at any time. ( pass "mode=1" to insmod )
56 * - lots of stress testings because we need it to be more robust than the
57 * wires ! :->
58 *
59 * 2000/10/09 - Willy Tarreau <willy at meta-x.org>
60 * - added up and down delays after link state change.
61 * - optimized the slaves chaining so that when we run forward, we never
62 * repass through the bond itself, but we can find it by searching
63 * backwards. Renders the deletion more difficult, but accelerates the
64 * scan.
65 * - smarter enslaving and releasing.
66 * - finer and more robust SMP locking
67 *
68 * 2000/10/17 - Willy Tarreau <willy at meta-x.org>
69 * - fixed two potential SMP race conditions
70 *
71 * 2000/10/18 - Willy Tarreau <willy at meta-x.org>
72 * - small fixes to the monitoring FSM in case of zero delays
73 * 2000/11/01 - Willy Tarreau <willy at meta-x.org>
74 * - fixed first slave not automatically used in trunk mode.
75 * 2000/11/10 : spelling of "EtherChannel" corrected.
76 * 2000/11/13 : fixed a race condition in case of concurrent accesses to ioctl().
77 * 2000/12/16 : fixed improper usage of rtnl_exlock_nowait().
78 *
79 * 2001/1/3 - Chad N. Tindel <ctindel at ieee dot org>
80 * - The bonding driver now simulates MII status monitoring, just like
81 * a normal network device. It will show that the link is down iff
82 * every slave in the bond shows that their links are down. If at least
83 * one slave is up, the bond's MII status will appear as up.
84 *
85 * 2001/2/7 - Chad N. Tindel <ctindel at ieee dot org>
86 * - Applications can now query the bond from user space to get
87 * information which may be useful. They do this by calling
88 * the BOND_INFO_QUERY ioctl. Once the app knows how many slaves
89 * are in the bond, it can call the BOND_SLAVE_INFO_QUERY ioctl to
90 * get slave specific information (# link failures, etc). See
91 * <linux/if_bonding.h> for more details. The structs of interest
92 * are ifbond and ifslave.
93 *
94 * 2001/4/5 - Chad N. Tindel <ctindel at ieee dot org>
95 * - Ported to 2.4 Kernel
96 *
97 * 2001/5/2 - Jeffrey E. Mast <jeff at mastfamily dot com>
98 * - When a device is detached from a bond, the slave device is no longer
99 * left thinking that is has a master.
100 *
101 * 2001/5/16 - Jeffrey E. Mast <jeff at mastfamily dot com>
102 * - memset did not appropriately initialized the bond rw_locks. Used
103 * rwlock_init to initialize to unlocked state to prevent deadlock when
104 * first attempting a lock
105 * - Called SET_MODULE_OWNER for bond device
106 *
107 * 2001/5/17 - Tim Anderson <tsa at mvista.com>
108 * - 2 paths for releasing for slave release; 1 through ioctl
109 * and 2) through close. Both paths need to release the same way.
110 * - the free slave in bond release is changing slave status before
111 * the free. The netdev_set_master() is intended to change slave state
112 * so it should not be done as part of the release process.
113 * - Simple rule for slave state at release: only the active in A/B and
114 * only one in the trunked case.
115 *
116 * 2001/6/01 - Tim Anderson <tsa at mvista.com>
117 * - Now call dev_close when releasing a slave so it doesn't screw up
118 * out routing table.
119 *
120 * 2001/6/01 - Chad N. Tindel <ctindel at ieee dot org>
121 * - Added /proc support for getting bond and slave information.
122 * Information is in /proc/net/<bond device>/info.
123 * - Changed the locking when calling bond_close to prevent deadlock.
124 *
125 * 2001/8/05 - Janice Girouard <girouard at us.ibm.com>
126 * - correct problem where refcnt of slave is not incremented in bond_ioctl
127 * so the system hangs when halting.
128 * - correct locking problem when unable to malloc in bond_enslave.
129 * - adding bond_xmit_xor logic.
130 * - adding multiple bond device support.
131 *
132 * 2001/8/13 - Erik Habbinga <erik_habbinga at hp dot com>
133 * - correct locking problem with rtnl_exlock_nowait
134 *
135 * 2001/8/23 - Janice Girouard <girouard at us.ibm.com>
136 * - bzero initial dev_bonds, to correct oops
137 * - convert SIOCDEVPRIVATE to new MII ioctl calls
138 *
139 * 2001/9/13 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
140 * - Add the BOND_CHANGE_ACTIVE ioctl implementation
141 *
142 * 2001/9/14 - Mark Huth <mhuth at mvista dot com>
143 * - Change MII_LINK_READY to not check for end of auto-negotiation,
144 * but only for an up link.
145 *
146 * 2001/9/20 - Chad N. Tindel <ctindel at ieee dot org>
147 * - Add the device field to bonding_t. Previously the net_device
148 * corresponding to a bond wasn't available from the bonding_t
149 * structure.
150 *
151 * 2001/9/25 - Janice Girouard <girouard at us.ibm.com>
152 * - add arp_monitor for active backup mode
153 *
154 * 2001/10/23 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
155 * - Various memory leak fixes
156 *
157 * 2001/11/5 - Mark Huth <mark dot huth at mvista dot com>
158 * - Don't take rtnl lock in bond_mii_monitor as it deadlocks under
159 * certain hotswap conditions.
160 * Note: this same change may be required in bond_arp_monitor ???
161 * - Remove possibility of calling bond_sethwaddr with NULL slave_dev ptr
162 * - Handle hot swap ethernet interface deregistration events to remove
163 * kernel oops following hot swap of enslaved interface
164 *
165 * 2002/1/2 - Chad N. Tindel <ctindel at ieee dot org>
166 * - Restore original slave flags at release time.
167 *
168 * 2002/02/18 - Erik Habbinga <erik_habbinga at hp dot com>
169 * - bond_release(): calling kfree on our_slave after call to
170 * bond_restore_slave_flags, not before
171 * - bond_enslave(): saving slave flags into original_flags before
172 * call to netdev_set_master, so the IFF_SLAVE flag doesn't end
173 * up in original_flags
174 *
175 * 2002/04/05 - Mark Smith <mark.smith at comdev dot cc> and
176 * Steve Mead <steve.mead at comdev dot cc>
177 * - Port Gleb Natapov's multicast support patchs from 2.4.12
178 * to 2.4.18 adding support for multicast.
179 *
180 * 2002/06/10 - Tony Cureington <tony.cureington * hp_com>
181 * - corrected uninitialized pointer (ifr.ifr_data) in bond_check_dev_link;
182 * actually changed function to use MIIPHY, then MIIREG, and finally
183 * ETHTOOL to determine the link status
184 * - fixed bad ifr_data pointer assignments in bond_ioctl
185 * - corrected mode 1 being reported as active-backup in bond_get_info;
186 * also added text to distinguish type of load balancing (rr or xor)
187 * - change arp_ip_target module param from "1-12s" (array of 12 ptrs)
188 * to "s" (a single ptr)
189 *
190 * 2002/08/30 - Jay Vosburgh <fubar at us dot ibm dot com>
191 * - Removed acquisition of xmit_lock in set_multicast_list; caused
192 * deadlock on SMP (lock is held by caller).
193 * - Revamped SIOCGMIIPHY, SIOCGMIIREG portion of bond_check_dev_link().
194 *
195 * 2002/09/18 - Jay Vosburgh <fubar at us dot ibm dot com>
196 * - Fixed up bond_check_dev_link() (and callers): removed some magic
197 * numbers, banished local MII_ defines, wrapped ioctl calls to
198 * prevent EFAULT errors
199 *
200 * 2002/9/30 - Jay Vosburgh <fubar at us dot ibm dot com>
201 * - make sure the ip target matches the arp_target before saving the
202 * hw address.
203 *
204 * 2002/9/30 - Dan Eisner <eisner at 2robots dot com>
205 * - make sure my_ip is set before taking down the link, since
206 * not all switches respond if the source ip is not set.
207 *
208 * 2002/10/8 - Janice Girouard <girouard at us dot ibm dot com>
209 * - read in the local ip address when enslaving a device
210 * - add primary support
211 * - make sure 2*arp_interval has passed when a new device
212 * is brought on-line before taking it down.
213 *
214 * 2002/09/11 - Philippe De Muyter <phdm at macqel dot be>
215 * - Added bond_xmit_broadcast logic.
216 * - Added bond_mode() support function.
217 *
218 * 2002/10/26 - Laurent Deniel <laurent.deniel at free.fr>
219 * - allow to register multicast addresses only on active slave
220 * (useful in active-backup mode)
221 * - add multicast module parameter
222 * - fix deletion of multicast groups after unloading module
223 *
224 * 2002/11/06 - Kameshwara Rayaprolu <kameshwara.rao * wipro_com>
225 * - Changes to prevent panic from closing the device twice; if we close
226 * the device in bond_release, we must set the original_flags to down
227 * so it won't be closed again by the network layer.
228 *
229 * 2002/11/07 - Tony Cureington <tony.cureington * hp_com>
230 * - Fix arp_target_hw_addr memory leak
231 * - Created activebackup_arp_monitor function to handle arp monitoring
232 * in active backup mode - the bond_arp_monitor had several problems...
233 * such as allowing slaves to tx arps sequentially without any delay
234 * for a response
235 * - Renamed bond_arp_monitor to loadbalance_arp_monitor and re-wrote
236 * this function to just handle arp monitoring in load-balancing mode;
237 * it is a lot more compact now
238 * - Changes to ensure one and only one slave transmits in active-backup
239 * mode
240 * - Robustesize parameters; warn users about bad combinations of
241 * parameters; also if miimon is specified and a network driver does
242 * not support MII or ETHTOOL, inform the user of this
243 * - Changes to support link_failure_count when in arp monitoring mode
244 * - Fix up/down delay reported in /proc
245 * - Added version; log version; make version available from "modinfo -d"
246 * - Fixed problem in bond_check_dev_link - if the first IOCTL (SIOCGMIIPH)
247 * failed, the ETHTOOL ioctl never got a chance
248 *
249 * 2002/11/16 - Laurent Deniel <laurent.deniel at free.fr>
250 * - fix multicast handling in activebackup_arp_monitor
251 * - remove one unnecessary and confusing curr_active_slave == slave test
252 * in activebackup_arp_monitor
253 *
254 * 2002/11/17 - Laurent Deniel <laurent.deniel at free.fr>
255 * - fix bond_slave_info_query when slave_id = num_slaves
256 *
257 * 2002/11/19 - Janice Girouard <girouard at us dot ibm dot com>
258 * - correct ifr_data reference. Update ifr_data reference
259 * to mii_ioctl_data struct values to avoid confusion.
260 *
261 * 2002/11/22 - Bert Barbe <bert.barbe at oracle dot com>
262 * - Add support for multiple arp_ip_target
263 *
264 * 2002/12/13 - Jay Vosburgh <fubar at us dot ibm dot com>
265 * - Changed to allow text strings for mode and multicast, e.g.,
266 * insmod bonding mode=active-backup. The numbers still work.
267 * One change: an invalid choice will cause module load failure,
268 * rather than the previous behavior of just picking one.
269 * - Minor cleanups; got rid of dup ctype stuff, atoi function
270 *
271 * 2003/02/07 - Jay Vosburgh <fubar at us dot ibm dot com>
272 * - Added use_carrier module parameter that causes miimon to
273 * use netif_carrier_ok() test instead of MII/ETHTOOL ioctls.
274 * - Minor cleanups; consolidated ioctl calls to one function.
275 *
276 * 2003/02/07 - Tony Cureington <tony.cureington * hp_com>
277 * - Fix bond_mii_monitor() logic error that could result in
278 * bonding round-robin mode ignoring links after failover/recovery
279 *
280 * 2003/03/17 - Jay Vosburgh <fubar at us dot ibm dot com>
281 * - kmalloc fix (GFP_KERNEL to GFP_ATOMIC) reported by
282 * Shmulik dot Hen at intel.com.
283 * - Based on discussion on mailing list, changed use of
284 * update_slave_cnt(), created wrapper functions for adding/removing
285 * slaves, changed bond_xmit_xor() to check slave_cnt instead of
286 * checking slave and slave->dev (which only worked by accident).
287 * - Misc code cleanup: get arp_send() prototype from header file,
288 * add max_bonds to bonding.txt.
289 *
290 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
291 * Shmulik Hen <shmulik.hen at intel dot com>
292 * - Make sure only bond_attach_slave() and bond_detach_slave() can
293 * manipulate the slave list, including slave_cnt, even when in
294 * bond_release_all().
295 * - Fixed hang in bond_release() with traffic running:
296 * netdev_set_master() must not be called from within the bond lock.
297 *
298 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
299 * Shmulik Hen <shmulik.hen at intel dot com>
300 * - Fixed hang in bond_enslave() with traffic running:
301 * netdev_set_master() must not be called from within the bond lock.
302 *
303 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>
304 * - Added support for getting slave's speed and duplex via ethtool.
305 * Needed for 802.3ad and other future modes.
306 *
307 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
308 * Shmulik Hen <shmulik.hen at intel dot com>
309 * - Enable support of modes that need to use the unique mac address of
310 * each slave.
311 * * bond_enslave(): Moved setting the slave's mac address, and
312 * openning it, from the application to the driver. This breaks
313 * backward comaptibility with old versions of ifenslave that open
314 * the slave before enalsving it !!!.
315 * * bond_release(): The driver also takes care of closing the slave
316 * and restoring its original mac address.
317 * - Removed the code that restores all base driver's flags.
318 * Flags are automatically restored once all undo stages are done
319 * properly.
320 * - Block possibility of enslaving before the master is up. This
321 * prevents putting the system in an unstable state.
322 *
323 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
324 * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
325 * Shmulik Hen <shmulik.hen at intel dot com>
326 * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
327 *
328 * 2003/05/01 - Amir Noam <amir.noam at intel dot com>
329 * - Added ABI version control to restore compatibility between
330 * new/old ifenslave and new/old bonding.
331 *
332 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
333 * - Fixed bug in bond_release_all(): save old value of curr_active_slave
334 * before setting it to NULL.
335 * - Changed driver versioning scheme to include version number instead
336 * of release date (that is already in another field). There are 3
337 * fields X.Y.Z where:
338 * X - Major version - big behavior changes
339 * Y - Minor version - addition of features
340 * Z - Extra version - minor changes and bug fixes
341 * The current version is 1.0.0 as a base line.
342 *
343 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
344 * Amir Noam <amir.noam at intel dot com>
345 * - Added support for lacp_rate module param.
346 * - Code beautification and style changes (mainly in comments).
347 * new version - 1.0.1
348 *
349 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
350 * - Based on discussion on mailing list, changed locking scheme
351 * to use lock/unlock or lock_bh/unlock_bh appropriately instead
352 * of lock_irqsave/unlock_irqrestore. The new scheme helps expose
353 * hidden bugs and solves system hangs that occurred because holding
354 * lock_irqsave doesn't prevent softirqs from running. This also
355 * increases total throughput since interrupts are not blocked on
356 * each transmitted packet or monitor timeout.
357 * new version - 2.0.0
358 *
359 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
360 * - Added support for Transmit load balancing mode.
361 * - Concentrate all assignments of curr_active_slave to a single point
362 * so specific modes can take actions when the primary adapter is
363 * changed.
364 * - Take the updelay parameter into consideration during bond_enslave
365 * since some adapters lose their link while the device is being set up.
366 * - Renamed bond_3ad_link_status_changed() to
367 * bond_3ad_handle_link_change() for compatibility with TLB.
368 * new version - 2.1.0
369 *
370 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
371 * - Added support for Adaptive load balancing mode which is
372 * equivalent to Transmit load balancing + Receive load balancing.
373 * new version - 2.2.0
374 *
375 * 2003/05/15 - Jay Vosburgh <fubar at us dot ibm dot com>
376 * - Applied fix to activebackup_arp_monitor posted to bonding-devel
377 * by Tony Cureington <tony.cureington * hp_com>. Fixes ARP
378 * monitor endless failover bug. Version to 2.2.10
379 *
380 * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
381 * - Fixed bug in ABI version control - Don't commit to a specific
382 * ABI version if receiving unsupported ioctl commands.
383 *
384 * 2003/05/22 - Jay Vosburgh <fubar at us dot ibm dot com>
385 * - Fix ifenslave -c causing bond to lose existing routes;
386 * added bond_set_mac_address() that doesn't require the
387 * bond to be down.
388 * - In conjunction with fix for ifenslave -c, in
389 * bond_change_active(), changing to the already active slave
390 * is no longer an error (it successfully does nothing).
391 *
392 * 2003/06/30 - Amir Noam <amir.noam at intel dot com>
393 * - Fixed bond_change_active() for ALB/TLB modes.
394 * Version to 2.2.14.
395 *
396 * 2003/07/29 - Amir Noam <amir.noam at intel dot com>
397 * - Fixed ARP monitoring bug.
398 * Version to 2.2.15.
399 *
400 * 2003/07/31 - Willy Tarreau <willy at ods dot org>
401 * - Fixed kernel panic when using ARP monitoring without
402 * setting bond's IP address.
403 * Version to 2.2.16.
404 *
405 * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
406 * - Back port from 2.6: use alloc_netdev(); fix /proc handling;
407 * made stats a part of bond struct so no need to allocate
408 * and free it separately; use standard list operations instead
409 * of pre-allocated array of bonds.
410 * Version to 2.3.0.
411 *
412 * 2003/08/07 - Jay Vosburgh <fubar at us dot ibm dot com>,
413 * Amir Noam <amir.noam at intel dot com> and
414 * Shmulik Hen <shmulik.hen at intel dot com>
415 * - Propagating master's settings: Distinguish between modes that
416 * use a primary slave from those that don't, and propagate settings
417 * accordingly; Consolidate change_active operations and add
418 * reselect_active and find_best operations; Decouple promiscuous
419 * handling from the multicast mode setting; Add support for changing
420 * HW address and MTU with proper unwind; Consolidate procfs code,
421 * add CHANGENAME handler; Enhance netdev notification handling.
422 * Version to 2.4.0.
423 *
424 * 2003/09/15 - Stephen Hemminger <shemminger at osdl dot org>,
425 * Amir Noam <amir.noam at intel dot com>
426 * - Convert /proc to seq_file interface.
427 * Change /proc/net/bondX/info to /proc/net/bonding/bondX.
428 * Set version to 2.4.1.
429 *
430 * 2003/11/20 - Amir Noam <amir.noam at intel dot com>
431 * - Fix /proc creation/destruction.
432 *
433 * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
434 * - Massive cleanup - Set version to 2.5.0
435 * Code changes:
436 * o Consolidate format of prints and debug prints.
437 * o Remove bonding_t/slave_t typedefs and consolidate all casts.
438 * o Remove dead code and unnecessary checks.
439 * o Consolidate starting/stopping timers.
440 * o Consolidate handling of primary module param throughout the code.
441 * o Removed multicast module param support - all settings are done
442 * according to mode.
443 * o Slave list iteration - bond is no longer part of the list,
444 * added cyclic list iteration macros.
445 * o Consolidate error handling in all xmit functions.
446 * Style changes:
447 * o Consolidate function naming and declarations.
448 * o Consolidate function params and local variables names.
449 * o Consolidate return values.
450 * o Consolidate curly braces.
451 * o Consolidate conditionals format.
452 * o Change struct member names and types.
453 * o Chomp trailing spaces, remove empty lines, fix indentations.
454 * o Re-organize code according to context.
455 *
456 * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
457 * - Fixed: Cannot remove and re-enslave the original active slave.
458 * - Fixed: Releasing the original active slave causes mac address
459 * duplication.
460 * - Add support for slaves that use ethtool_ops.
461 * Set version to 2.5.3.
462 *
463 * 2004/01/05 - Amir Noam <amir.noam at intel dot com>
464 * - Save bonding parameters per bond instead of using the global values.
465 * Set version to 2.5.4.
466 *
467 * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
468 * - Enhance VLAN support:
469 * * Add support for VLAN hardware acceleration capable slaves.
470 * * Add capability to tag self generated packets in ALB/TLB modes.
471 * Set version to 2.6.0.
472 * 2004/10/29 - Mitch Williams <mitch.a.williams at intel dot com>
473 * - Fixed bug when unloading module while using 802.3ad. If
474 * spinlock debugging is turned on, this causes a stack dump.
475 * Solution is to move call to dev_remove_pack outside of the
476 * spinlock.
477 * Set version to 2.6.1.
478 * 2005/06/05 - Jay Vosburgh <fubar@us.ibm.com>
479 * - Support for generating gratuitous ARPs in active-backup mode.
480 * Includes support for VLAN tagging all bonding-generated ARPs
481 * as needed. Set version to 2.6.2.
482 * 2005/06/08 - Jason Gabler <jygabler at lbl dot gov>
483 * - alternate hashing policy support for mode 2
484 * * Added kernel parameter "xmit_hash_policy" to allow the selection
485 * of different hashing policies for mode 2. The original mode 2
486 * policy is the default, now found in xmit_hash_policy_layer2().
487 * * Added xmit_hash_policy_layer34()
488 * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
489 * Set version to 2.6.3.
490 * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
491 * - Removed backwards compatibility for old ifenslaves. Version 2.6.4.
492 */ 32 */
493 33
494//#define BONDING_DEBUG 1 34//#define BONDING_DEBUG 1
@@ -557,6 +97,7 @@ static char *lacp_rate = NULL;
557static char *xmit_hash_policy = NULL; 97static char *xmit_hash_policy = NULL;
558static int arp_interval = BOND_LINK_ARP_INTERV; 98static int arp_interval = BOND_LINK_ARP_INTERV;
559static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, }; 99static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
100struct bond_params bonding_defaults;
560 101
561module_param(max_bonds, int, 0); 102module_param(max_bonds, int, 0);
562MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 103MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -565,17 +106,24 @@ MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
565module_param(updelay, int, 0); 106module_param(updelay, int, 0);
566MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds"); 107MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
567module_param(downdelay, int, 0); 108module_param(downdelay, int, 0);
568MODULE_PARM_DESC(downdelay, "Delay before considering link down, in milliseconds"); 109MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
110 "in milliseconds");
569module_param(use_carrier, int, 0); 111module_param(use_carrier, int, 0);
570MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; 0 for off, 1 for on (default)"); 112MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
113 "0 for off, 1 for on (default)");
571module_param(mode, charp, 0); 114module_param(mode, charp, 0);
572MODULE_PARM_DESC(mode, "Mode of operation : 0 for round robin, 1 for active-backup, 2 for xor"); 115MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
116 "1 for active-backup, 2 for balance-xor, "
117 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
118 "6 for balance-alb");
573module_param(primary, charp, 0); 119module_param(primary, charp, 0);
574MODULE_PARM_DESC(primary, "Primary network device to use"); 120MODULE_PARM_DESC(primary, "Primary network device to use");
575module_param(lacp_rate, charp, 0); 121module_param(lacp_rate, charp, 0);
576MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner (slow/fast)"); 122MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
123 "(slow/fast)");
577module_param(xmit_hash_policy, charp, 0); 124module_param(xmit_hash_policy, charp, 0);
578MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method : 0 for layer 2 (default), 1 for layer 3+4"); 125MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
126 ", 1 for layer 3+4");
579module_param(arp_interval, int, 0); 127module_param(arp_interval, int, 0);
580MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); 128MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
581module_param_array(arp_ip_target, charp, NULL, 0); 129module_param_array(arp_ip_target, charp, NULL, 0);
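The xmit_hash_policy parameter above selects between the two slave-selection hash functions named in the changelog, xmit_hash_policy_layer2() and xmit_hash_policy_layer34(). As a rough, standalone illustration of the two ideas only (not the driver's actual code; the struct and field names below are invented for the sketch), a layer-2 policy can hash on the Ethernet addresses alone, while a layer-3+4 policy folds in IP addresses and transport ports so that different flows between the same two hosts can spread across slaves:

#include <stdint.h>

/* Illustrative only: 'count' is the number of active slaves and is
 * assumed to be > 0.  The real policies operate on sk_buff data.
 */
struct pkt_info {
	uint8_t  src_mac[6], dst_mac[6];	/* layer 2 */
	uint32_t src_ip, dst_ip;		/* layer 3 (hypothetical fields) */
	uint16_t src_port, dst_port;		/* layer 4 (hypothetical fields) */
};

/* layer2: XOR the low bytes of the MAC addresses, modulo slave count */
static int hash_layer2(const struct pkt_info *p, int count)
{
	return (p->src_mac[5] ^ p->dst_mac[5]) % count;
}

/* layer3+4: mix ports and IP addresses so distinct flows spread out */
static int hash_layer34(const struct pkt_info *p, int count)
{
	uint32_t h = (uint32_t)(p->src_port ^ p->dst_port) ^
		     (p->src_ip ^ p->dst_ip);

	return (int)((h ^ (h >> 16) ^ (h >> 8)) % count);
}

With the layer-2 sketch, all traffic between one pair of hosts always lands on the same slave; the layer-3+4 variant lets separate TCP/UDP flows between those hosts use different slaves.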
@@ -586,30 +134,27 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
586static const char *version = 134static const char *version =
587 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 135 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
588 136
589static LIST_HEAD(bond_dev_list); 137LIST_HEAD(bond_dev_list);
590 138
591#ifdef CONFIG_PROC_FS 139#ifdef CONFIG_PROC_FS
592static struct proc_dir_entry *bond_proc_dir = NULL; 140static struct proc_dir_entry *bond_proc_dir = NULL;
593#endif 141#endif
594 142
143extern struct rw_semaphore bonding_rwsem;
595static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; 144static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
596static int arp_ip_count = 0; 145static int arp_ip_count = 0;
597static int bond_mode = BOND_MODE_ROUNDROBIN; 146static int bond_mode = BOND_MODE_ROUNDROBIN;
598static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; 147static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
599static int lacp_fast = 0; 148static int lacp_fast = 0;
600 149
601struct bond_parm_tbl {
602 char *modename;
603 int mode;
604};
605 150
606static struct bond_parm_tbl bond_lacp_tbl[] = { 151struct bond_parm_tbl bond_lacp_tbl[] = {
607{ "slow", AD_LACP_SLOW}, 152{ "slow", AD_LACP_SLOW},
608{ "fast", AD_LACP_FAST}, 153{ "fast", AD_LACP_FAST},
609{ NULL, -1}, 154{ NULL, -1},
610}; 155};
611 156
612static struct bond_parm_tbl bond_mode_tbl[] = { 157struct bond_parm_tbl bond_mode_tbl[] = {
613{ "balance-rr", BOND_MODE_ROUNDROBIN}, 158{ "balance-rr", BOND_MODE_ROUNDROBIN},
614{ "active-backup", BOND_MODE_ACTIVEBACKUP}, 159{ "active-backup", BOND_MODE_ACTIVEBACKUP},
615{ "balance-xor", BOND_MODE_XOR}, 160{ "balance-xor", BOND_MODE_XOR},
@@ -620,7 +165,7 @@ static struct bond_parm_tbl bond_mode_tbl[] = {
620{ NULL, -1}, 165{ NULL, -1},
621}; 166};
622 167
623static struct bond_parm_tbl xmit_hashtype_tbl[] = { 168struct bond_parm_tbl xmit_hashtype_tbl[] = {
624{ "layer2", BOND_XMIT_POLICY_LAYER2}, 169{ "layer2", BOND_XMIT_POLICY_LAYER2},
625{ "layer3+4", BOND_XMIT_POLICY_LAYER34}, 170{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
626{ NULL, -1}, 171{ NULL, -1},
@@ -628,12 +173,11 @@ static struct bond_parm_tbl xmit_hashtype_tbl[] = {
628 173
629/*-------------------------- Forward declarations ---------------------------*/ 174/*-------------------------- Forward declarations ---------------------------*/
630 175
631static inline void bond_set_mode_ops(struct bonding *bond, int mode);
632static void bond_send_gratuitous_arp(struct bonding *bond); 176static void bond_send_gratuitous_arp(struct bonding *bond);
633 177
634/*---------------------------- General routines -----------------------------*/ 178/*---------------------------- General routines -----------------------------*/
635 179
636static const char *bond_mode_name(int mode) 180const char *bond_mode_name(int mode)
637{ 181{
638 switch (mode) { 182 switch (mode) {
639 case BOND_MODE_ROUNDROBIN : 183 case BOND_MODE_ROUNDROBIN :
@@ -910,7 +454,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
910 res = bond_add_vlan(bond, vid); 454 res = bond_add_vlan(bond, vid);
911 if (res) { 455 if (res) {
912 printk(KERN_ERR DRV_NAME 456 printk(KERN_ERR DRV_NAME
913 ": %s: Failed to add vlan id %d\n", 457 ": %s: Error: Failed to add vlan id %d\n",
914 bond_dev->name, vid); 458 bond_dev->name, vid);
915 } 459 }
916} 460}
@@ -944,7 +488,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
944 res = bond_del_vlan(bond, vid); 488 res = bond_del_vlan(bond, vid);
945 if (res) { 489 if (res) {
946 printk(KERN_ERR DRV_NAME 490 printk(KERN_ERR DRV_NAME
947 ": %s: Failed to remove vlan id %d\n", 491 ": %s: Error: Failed to remove vlan id %d\n",
948 bond_dev->name, vid); 492 bond_dev->name, vid);
949 } 493 }
950} 494}
@@ -1449,7 +993,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1449 * 993 *
1450 * Warning: Caller must hold curr_slave_lock for writing. 994 * Warning: Caller must hold curr_slave_lock for writing.
1451 */ 995 */
1452static void bond_change_active_slave(struct bonding *bond, struct slave *new_active) 996void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1453{ 997{
1454 struct slave *old_active = bond->curr_active_slave; 998 struct slave *old_active = bond->curr_active_slave;
1455 999
@@ -1523,7 +1067,7 @@ static void bond_change_active_slave(struct bonding *bond, struct slave *new_act
1523 * 1067 *
1524 * Warning: Caller must hold curr_slave_lock for writing. 1068 * Warning: Caller must hold curr_slave_lock for writing.
1525 */ 1069 */
1526static void bond_select_active_slave(struct bonding *bond) 1070void bond_select_active_slave(struct bonding *bond)
1527{ 1071{
1528 struct slave *best_slave; 1072 struct slave *best_slave;
1529 1073
@@ -1591,7 +1135,7 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1591 1135
1592/*---------------------------------- IOCTL ----------------------------------*/ 1136/*---------------------------------- IOCTL ----------------------------------*/
1593 1137
1594static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev) 1138int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
1595{ 1139{
1596 dprintk("bond_dev=%p\n", bond_dev); 1140 dprintk("bond_dev=%p\n", bond_dev);
1597 dprintk("slave_dev=%p\n", slave_dev); 1141 dprintk("slave_dev=%p\n", slave_dev);
@@ -1631,7 +1175,7 @@ static int bond_compute_features(struct bonding *bond)
1631} 1175}
1632 1176
1633/* enslave device <slave> to bond device <master> */ 1177/* enslave device <slave> to bond device <master> */
1634static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1178int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1635{ 1179{
1636 struct bonding *bond = bond_dev->priv; 1180 struct bonding *bond = bond_dev->priv;
1637 struct slave *new_slave = NULL; 1181 struct slave *new_slave = NULL;
@@ -1644,8 +1188,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1644 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1188 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1645 slave_dev->do_ioctl == NULL) { 1189 slave_dev->do_ioctl == NULL) {
1646 printk(KERN_WARNING DRV_NAME 1190 printk(KERN_WARNING DRV_NAME
1647 ": Warning : no link monitoring support for %s\n", 1191 ": %s: Warning: no link monitoring support for %s\n",
1648 slave_dev->name); 1192 bond_dev->name, slave_dev->name);
1649 } 1193 }
1650 1194
1651 /* bond must be initialized by bond_open() before enslaving */ 1195 /* bond must be initialized by bond_open() before enslaving */
@@ -1666,17 +1210,17 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1666 dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1210 dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1667 if (!list_empty(&bond->vlan_list)) { 1211 if (!list_empty(&bond->vlan_list)) {
1668 printk(KERN_ERR DRV_NAME 1212 printk(KERN_ERR DRV_NAME
1669 ": Error: cannot enslave VLAN " 1213 ": %s: Error: cannot enslave VLAN "
1670 "challenged slave %s on VLAN enabled " 1214 "challenged slave %s on VLAN enabled "
1671 "bond %s\n", slave_dev->name, 1215 "bond %s\n", bond_dev->name, slave_dev->name,
1672 bond_dev->name); 1216 bond_dev->name);
1673 return -EPERM; 1217 return -EPERM;
1674 } else { 1218 } else {
1675 printk(KERN_WARNING DRV_NAME 1219 printk(KERN_WARNING DRV_NAME
1676 ": Warning: enslaved VLAN challenged " 1220 ": %s: Warning: enslaved VLAN challenged "
1677 "slave %s. Adding VLANs will be blocked as " 1221 "slave %s. Adding VLANs will be blocked as "
1678 "long as %s is part of bond %s\n", 1222 "long as %s is part of bond %s\n",
1679 slave_dev->name, slave_dev->name, 1223 bond_dev->name, slave_dev->name, slave_dev->name,
1680 bond_dev->name); 1224 bond_dev->name);
1681 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1225 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1682 } 1226 }
@@ -1706,12 +1250,11 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1706 1250
1707 if (slave_dev->set_mac_address == NULL) { 1251 if (slave_dev->set_mac_address == NULL) {
1708 printk(KERN_ERR DRV_NAME 1252 printk(KERN_ERR DRV_NAME
1709 ": Error: The slave device you specified does " 1253 ": %s: Error: The slave device you specified does "
1710 "not support setting the MAC address.\n"); 1254 "not support setting the MAC address. "
1711 printk(KERN_ERR 1255 "Your kernel likely does not support slave "
1712 "Your kernel likely does not support slave devices.\n"); 1256 "devices.\n", bond_dev->name);
1713 1257 res = -EOPNOTSUPP;
1714 res = -EOPNOTSUPP;
1715 goto err_undo_flags; 1258 goto err_undo_flags;
1716 } 1259 }
1717 1260
@@ -1827,21 +1370,21 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1827 * the messages for netif_carrier. 1370 * the messages for netif_carrier.
1828 */ 1371 */
1829 printk(KERN_WARNING DRV_NAME 1372 printk(KERN_WARNING DRV_NAME
1830 ": Warning: MII and ETHTOOL support not " 1373 ": %s: Warning: MII and ETHTOOL support not "
1831 "available for interface %s, and " 1374 "available for interface %s, and "
1832 "arp_interval/arp_ip_target module parameters " 1375 "arp_interval/arp_ip_target module parameters "
1833 "not specified, thus bonding will not detect " 1376 "not specified, thus bonding will not detect "
1834 "link failures! see bonding.txt for details.\n", 1377 "link failures! see bonding.txt for details.\n",
1835 slave_dev->name); 1378 bond_dev->name, slave_dev->name);
1836 } else if (link_reporting == -1) { 1379 } else if (link_reporting == -1) {
1837 /* unable to get link status using mii/ethtool */ 1380 /* unable to get link status using mii/ethtool */
1838 printk(KERN_WARNING DRV_NAME 1381 printk(KERN_WARNING DRV_NAME
1839 ": Warning: can't get link status from " 1382 ": %s: Warning: can't get link status from "
1840 "interface %s; the network driver associated " 1383 "interface %s; the network driver associated "
1841 "with this interface does not support MII or " 1384 "with this interface does not support MII or "
1842 "ETHTOOL link status reporting, thus miimon " 1385 "ETHTOOL link status reporting, thus miimon "
1843 "has no effect on this interface.\n", 1386 "has no effect on this interface.\n",
1844 slave_dev->name); 1387 bond_dev->name, slave_dev->name);
1845 } 1388 }
1846 } 1389 }
1847 1390
@@ -1868,15 +1411,15 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1868 if (bond_update_speed_duplex(new_slave) && 1411 if (bond_update_speed_duplex(new_slave) &&
1869 (new_slave->link != BOND_LINK_DOWN)) { 1412 (new_slave->link != BOND_LINK_DOWN)) {
1870 printk(KERN_WARNING DRV_NAME 1413 printk(KERN_WARNING DRV_NAME
1871 ": Warning: failed to get speed and duplex from %s, " 1414 ": %s: Warning: failed to get speed and duplex from %s, "
1872 "assumed to be 100Mb/sec and Full.\n", 1415 "assumed to be 100Mb/sec and Full.\n",
1873 new_slave->dev->name); 1416 bond_dev->name, new_slave->dev->name);
1874 1417
1875 if (bond->params.mode == BOND_MODE_8023AD) { 1418 if (bond->params.mode == BOND_MODE_8023AD) {
1876 printk(KERN_WARNING 1419 printk(KERN_WARNING DRV_NAME
1877 "Operation of 802.3ad mode requires ETHTOOL " 1420 ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
1878 "support in base driver for proper aggregator " 1421 "support in base driver for proper aggregator "
1879 "selection.\n"); 1422 "selection.\n", bond_dev->name);
1880 } 1423 }
1881 } 1424 }
1882 1425
@@ -1958,6 +1501,10 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1958 1501
1959 write_unlock_bh(&bond->lock); 1502 write_unlock_bh(&bond->lock);
1960 1503
1504 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1505 if (res)
1506 goto err_unset_master;
1507
1961 printk(KERN_INFO DRV_NAME 1508 printk(KERN_INFO DRV_NAME
1962 ": %s: enslaving %s as a%s interface with a%s link.\n", 1509 ": %s: enslaving %s as a%s interface with a%s link.\n",
1963 bond_dev->name, slave_dev->name, 1510 bond_dev->name, slave_dev->name,
@@ -1999,7 +1546,7 @@ err_undo_flags:
1999 * for Bonded connections: 1546 * for Bonded connections:
2000 * The first up interface should be left on and all others downed. 1547 * The first up interface should be left on and all others downed.
2001 */ 1548 */
2002static int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 1549int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2003{ 1550{
2004 struct bonding *bond = bond_dev->priv; 1551 struct bonding *bond = bond_dev->priv;
2005 struct slave *slave, *oldcurrent; 1552 struct slave *slave, *oldcurrent;
@@ -2010,7 +1557,7 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2010 if (!(slave_dev->flags & IFF_SLAVE) || 1557 if (!(slave_dev->flags & IFF_SLAVE) ||
2011 (slave_dev->master != bond_dev)) { 1558 (slave_dev->master != bond_dev)) {
2012 printk(KERN_ERR DRV_NAME 1559 printk(KERN_ERR DRV_NAME
2013 ": Error: %s: cannot release %s.\n", 1560 ": %s: Error: cannot release %s.\n",
2014 bond_dev->name, slave_dev->name); 1561 bond_dev->name, slave_dev->name);
2015 return -EINVAL; 1562 return -EINVAL;
2016 } 1563 }
@@ -2031,11 +1578,12 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2031 ETH_ALEN); 1578 ETH_ALEN);
2032 if (!mac_addr_differ && (bond->slave_cnt > 1)) { 1579 if (!mac_addr_differ && (bond->slave_cnt > 1)) {
2033 printk(KERN_WARNING DRV_NAME 1580 printk(KERN_WARNING DRV_NAME
2034 ": Warning: the permanent HWaddr of %s " 1581 ": %s: Warning: the permanent HWaddr of %s "
2035 "- %02X:%02X:%02X:%02X:%02X:%02X - is " 1582 "- %02X:%02X:%02X:%02X:%02X:%02X - is "
2036 "still in use by %s. Set the HWaddr of " 1583 "still in use by %s. Set the HWaddr of "
2037 "%s to a different address to avoid " 1584 "%s to a different address to avoid "
2038 "conflicts.\n", 1585 "conflicts.\n",
1586 bond_dev->name,
2039 slave_dev->name, 1587 slave_dev->name,
2040 slave->perm_hwaddr[0], 1588 slave->perm_hwaddr[0],
2041 slave->perm_hwaddr[1], 1589 slave->perm_hwaddr[1],
@@ -2111,24 +1659,28 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2111 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1659 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2112 } else { 1660 } else {
2113 printk(KERN_WARNING DRV_NAME 1661 printk(KERN_WARNING DRV_NAME
2114 ": Warning: clearing HW address of %s while it " 1662 ": %s: Warning: clearing HW address of %s while it "
2115 "still has VLANs.\n", 1663 "still has VLANs.\n",
2116 bond_dev->name); 1664 bond_dev->name, bond_dev->name);
2117 printk(KERN_WARNING DRV_NAME 1665 printk(KERN_WARNING DRV_NAME
2118 ": When re-adding slaves, make sure the bond's " 1666 ": %s: When re-adding slaves, make sure the bond's "
2119 "HW address matches its VLANs'.\n"); 1667 "HW address matches its VLANs'.\n",
1668 bond_dev->name);
2120 } 1669 }
2121 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 1670 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2122 !bond_has_challenged_slaves(bond)) { 1671 !bond_has_challenged_slaves(bond)) {
2123 printk(KERN_INFO DRV_NAME 1672 printk(KERN_INFO DRV_NAME
2124 ": last VLAN challenged slave %s " 1673 ": %s: last VLAN challenged slave %s "
2125 "left bond %s. VLAN blocking is removed\n", 1674 "left bond %s. VLAN blocking is removed\n",
2126 slave_dev->name, bond_dev->name); 1675 bond_dev->name, slave_dev->name, bond_dev->name);
2127 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; 1676 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
2128 } 1677 }
2129 1678
2130 write_unlock_bh(&bond->lock); 1679 write_unlock_bh(&bond->lock);
2131 1680
1681 /* must do this from outside any spinlocks */
1682 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1683
2132 bond_del_vlans_from_slave(bond, slave_dev); 1684 bond_del_vlans_from_slave(bond, slave_dev);
2133 1685
2134 /* If the mode USES_PRIMARY, then we should only remove its 1686 /* If the mode USES_PRIMARY, then we should only remove its
@@ -2220,6 +1772,7 @@ static int bond_release_all(struct net_device *bond_dev)
2220 */ 1772 */
2221 write_unlock_bh(&bond->lock); 1773 write_unlock_bh(&bond->lock);
2222 1774
1775 bond_destroy_slave_symlinks(bond_dev, slave_dev);
2223 bond_del_vlans_from_slave(bond, slave_dev); 1776 bond_del_vlans_from_slave(bond, slave_dev);
2224 1777
2225 /* If the mode USES_PRIMARY, then we should only remove its 1778 /* If the mode USES_PRIMARY, then we should only remove its
@@ -2274,12 +1827,13 @@ static int bond_release_all(struct net_device *bond_dev)
2274 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1827 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2275 } else { 1828 } else {
2276 printk(KERN_WARNING DRV_NAME 1829 printk(KERN_WARNING DRV_NAME
2277 ": Warning: clearing HW address of %s while it " 1830 ": %s: Warning: clearing HW address of %s while it "
2278 "still has VLANs.\n", 1831 "still has VLANs.\n",
2279 bond_dev->name); 1832 bond_dev->name, bond_dev->name);
2280 printk(KERN_WARNING DRV_NAME 1833 printk(KERN_WARNING DRV_NAME
2281 ": When re-adding slaves, make sure the bond's " 1834 ": %s: When re-adding slaves, make sure the bond's "
2282 "HW address matches its VLANs'.\n"); 1835 "HW address matches its VLANs'.\n",
1836 bond_dev->name);
2283 } 1837 }
2284 1838
2285 printk(KERN_INFO DRV_NAME 1839 printk(KERN_INFO DRV_NAME
@@ -2397,7 +1951,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2397/*-------------------------------- Monitoring -------------------------------*/ 1951/*-------------------------------- Monitoring -------------------------------*/
2398 1952
2399/* this function is called regularly to monitor each slave's link. */ 1953/* this function is called regularly to monitor each slave's link. */
2400static void bond_mii_monitor(struct net_device *bond_dev) 1954void bond_mii_monitor(struct net_device *bond_dev)
2401{ 1955{
2402 struct bonding *bond = bond_dev->priv; 1956 struct bonding *bond = bond_dev->priv;
2403 struct slave *slave, *oldcurrent; 1957 struct slave *slave, *oldcurrent;
@@ -2596,8 +2150,11 @@ static void bond_mii_monitor(struct net_device *bond_dev)
2596 break; 2150 break;
2597 default: 2151 default:
2598 /* Should not happen */ 2152 /* Should not happen */
2599 printk(KERN_ERR "bonding: Error: %s Illegal value (link=%d)\n", 2153 printk(KERN_ERR DRV_NAME
2600 slave->dev->name, slave->link); 2154 ": %s: Error: %s Illegal value (link=%d)\n",
2155 bond_dev->name,
2156 slave->dev->name,
2157 slave->link);
2601 goto out; 2158 goto out;
2602 } /* end of switch (slave->link) */ 2159 } /* end of switch (slave->link) */
2603 2160
@@ -2721,7 +2278,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2721 struct flowi fl; 2278 struct flowi fl;
2722 struct rtable *rt; 2279 struct rtable *rt;
2723 2280
2724 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2281 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
2282 if (!targets[i])
2283 continue;
2725 dprintk("basa: target %x\n", targets[i]); 2284 dprintk("basa: target %x\n", targets[i]);
2726 if (list_empty(&bond->vlan_list)) { 2285 if (list_empty(&bond->vlan_list)) {
2727 dprintk("basa: empty vlan: arp_send\n"); 2286 dprintk("basa: empty vlan: arp_send\n");
@@ -2825,7 +2384,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2825 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 2384 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
2826 * arp monitoring in active backup mode. 2385 * arp monitoring in active backup mode.
2827 */ 2386 */
2828static void bond_loadbalance_arp_mon(struct net_device *bond_dev) 2387void bond_loadbalance_arp_mon(struct net_device *bond_dev)
2829{ 2388{
2830 struct bonding *bond = bond_dev->priv; 2389 struct bonding *bond = bond_dev->priv;
2831 struct slave *slave, *oldcurrent; 2390 struct slave *slave, *oldcurrent;
@@ -2963,7 +2522,7 @@ out:
2963 * may have received. 2522 * may have received.
2964 * see loadbalance_arp_monitor for arp monitoring in load balancing mode 2523 * see loadbalance_arp_monitor for arp monitoring in load balancing mode
2965 */ 2524 */
2966static void bond_activebackup_arp_mon(struct net_device *bond_dev) 2525void bond_activebackup_arp_mon(struct net_device *bond_dev)
2967{ 2526{
2968 struct bonding *bond = bond_dev->priv; 2527 struct bonding *bond = bond_dev->priv;
2969 struct slave *slave; 2528 struct slave *slave;
@@ -3249,6 +2808,8 @@ static void bond_info_show_master(struct seq_file *seq)
3249{ 2808{
3250 struct bonding *bond = seq->private; 2809 struct bonding *bond = seq->private;
3251 struct slave *curr; 2810 struct slave *curr;
2811 int i;
2812 u32 target;
3252 2813
3253 read_lock(&bond->curr_slave_lock); 2814 read_lock(&bond->curr_slave_lock);
3254 curr = bond->curr_active_slave; 2815 curr = bond->curr_active_slave;
@@ -3257,10 +2818,17 @@ static void bond_info_show_master(struct seq_file *seq)
3257 seq_printf(seq, "Bonding Mode: %s\n", 2818 seq_printf(seq, "Bonding Mode: %s\n",
3258 bond_mode_name(bond->params.mode)); 2819 bond_mode_name(bond->params.mode));
3259 2820
2821 if (bond->params.mode == BOND_MODE_XOR ||
2822 bond->params.mode == BOND_MODE_8023AD) {
2823 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
2824 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
2825 bond->params.xmit_policy);
2826 }
2827
3260 if (USES_PRIMARY(bond->params.mode)) { 2828 if (USES_PRIMARY(bond->params.mode)) {
3261 seq_printf(seq, "Primary Slave: %s\n", 2829 seq_printf(seq, "Primary Slave: %s\n",
3262 (bond->params.primary[0]) ? 2830 (bond->primary_slave) ?
3263 bond->params.primary : "None"); 2831 bond->primary_slave->dev->name : "None");
3264 2832
3265 seq_printf(seq, "Currently Active Slave: %s\n", 2833 seq_printf(seq, "Currently Active Slave: %s\n",
3266 (curr) ? curr->dev->name : "None"); 2834 (curr) ? curr->dev->name : "None");
@@ -3273,6 +2841,27 @@ static void bond_info_show_master(struct seq_file *seq)
3273 seq_printf(seq, "Down Delay (ms): %d\n", 2841 seq_printf(seq, "Down Delay (ms): %d\n",
3274 bond->params.downdelay * bond->params.miimon); 2842 bond->params.downdelay * bond->params.miimon);
3275 2843
2844
2845 /* ARP information */
2846 if(bond->params.arp_interval > 0) {
2847 int printed=0;
2848 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
2849 bond->params.arp_interval);
2850
2851 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
2852
2853 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
2854 if (!bond->params.arp_targets[i])
2855 continue;
2856 if (printed)
2857 seq_printf(seq, ",");
2858 target = ntohl(bond->params.arp_targets[i]);
2859 seq_printf(seq, " %d.%d.%d.%d", HIPQUAD(target));
2860 printed = 1;
2861 }
2862 seq_printf(seq, "\n");
2863 }
2864
3276 if (bond->params.mode == BOND_MODE_8023AD) { 2865 if (bond->params.mode == BOND_MODE_8023AD) {
3277 struct ad_info ad_info; 2866 struct ad_info ad_info;
3278 2867
@@ -3478,7 +3067,10 @@ static int bond_event_changename(struct bonding *bond)
3478 bond_remove_proc_entry(bond); 3067 bond_remove_proc_entry(bond);
3479 bond_create_proc_entry(bond); 3068 bond_create_proc_entry(bond);
3480#endif 3069#endif
3481 3070 down_write(&(bonding_rwsem));
3071 bond_destroy_sysfs_entry(bond);
3072 bond_create_sysfs_entry(bond);
3073 up_write(&(bonding_rwsem));
3482 return NOTIFY_DONE; 3074 return NOTIFY_DONE;
3483} 3075}
3484 3076
@@ -3955,6 +3547,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3955 return -EPERM; 3547 return -EPERM;
3956 } 3548 }
3957 3549
3550 down_write(&(bonding_rwsem));
3958 slave_dev = dev_get_by_name(ifr->ifr_slave); 3551 slave_dev = dev_get_by_name(ifr->ifr_slave);
3959 3552
3960 dprintk("slave_dev=%p: \n", slave_dev); 3553 dprintk("slave_dev=%p: \n", slave_dev);
@@ -3987,6 +3580,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3987 dev_put(slave_dev); 3580 dev_put(slave_dev);
3988 } 3581 }
3989 3582
3583 up_write(&(bonding_rwsem));
3990 return res; 3584 return res;
3991} 3585}
3992 3586
@@ -4071,6 +3665,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4071 bond_for_each_slave(bond, slave, i) { 3665 bond_for_each_slave(bond, slave, i) {
4072 dprintk("s %p s->p %p c_m %p\n", slave, 3666 dprintk("s %p s->p %p c_m %p\n", slave,
4073 slave->prev, slave->dev->change_mtu); 3667 slave->prev, slave->dev->change_mtu);
3668
4074 res = dev_set_mtu(slave->dev, new_mtu); 3669 res = dev_set_mtu(slave->dev, new_mtu);
4075 3670
4076 if (res) { 3671 if (res) {
@@ -4397,8 +3992,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4397 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3992 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4398 if (!skb2) { 3993 if (!skb2) {
4399 printk(KERN_ERR DRV_NAME 3994 printk(KERN_ERR DRV_NAME
4400 ": Error: bond_xmit_broadcast(): " 3995 ": %s: Error: bond_xmit_broadcast(): "
4401 "skb_clone() failed\n"); 3996 "skb_clone() failed\n",
3997 bond_dev->name);
4402 continue; 3998 continue;
4403 } 3999 }
4404 4000
@@ -4431,7 +4027,7 @@ out:
4431/* 4027/*
4432 * set bond mode specific net device operations 4028 * set bond mode specific net device operations
4433 */ 4029 */
4434static inline void bond_set_mode_ops(struct bonding *bond, int mode) 4030void bond_set_mode_ops(struct bonding *bond, int mode)
4435{ 4031{
4436 struct net_device *bond_dev = bond->dev; 4032 struct net_device *bond_dev = bond->dev;
4437 4033
@@ -4467,7 +4063,8 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
4467 default: 4063 default:
4468 /* Should never happen, mode already checked */ 4064 /* Should never happen, mode already checked */
4469 printk(KERN_ERR DRV_NAME 4065 printk(KERN_ERR DRV_NAME
4470 ": Error: Unknown bonding mode %d\n", 4066 ": %s: Error: Unknown bonding mode %d\n",
4067 bond_dev->name,
4471 mode); 4068 mode);
4472 break; 4069 break;
4473 } 4070 }
@@ -4491,7 +4088,7 @@ static struct ethtool_ops bond_ethtool_ops = {
4491 * Does not allocate but creates a /proc entry. 4088 * Does not allocate but creates a /proc entry.
4492 * Allowed to fail. 4089 * Allowed to fail.
4493 */ 4090 */
4494static int __init bond_init(struct net_device *bond_dev, struct bond_params *params) 4091static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4495{ 4092{
4496 struct bonding *bond = bond_dev->priv; 4093 struct bonding *bond = bond_dev->priv;
4497 4094
@@ -4565,7 +4162,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
4565/* De-initialize device specific data. 4162/* De-initialize device specific data.
4566 * Caller must hold rtnl_lock. 4163 * Caller must hold rtnl_lock.
4567 */ 4164 */
4568static inline void bond_deinit(struct net_device *bond_dev) 4165void bond_deinit(struct net_device *bond_dev)
4569{ 4166{
4570 struct bonding *bond = bond_dev->priv; 4167 struct bonding *bond = bond_dev->priv;
4571 4168
@@ -4601,7 +4198,7 @@ static void bond_free_all(void)
4601 * Convert string input module parms. Accept either the 4198 * Convert string input module parms. Accept either the
4602 * number of the mode or its string name. 4199 * number of the mode or its string name.
4603 */ 4200 */
4604static inline int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl) 4201int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl)
4605{ 4202{
4606 int i; 4203 int i;
4607 4204
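Per the comment above, bond_parse_parm() accepts either a mode's name or its number and returns the value from the matching table entry; the tables end with a { NULL, -1 } sentinel, so an unrecognized string yields a negative result. The standalone userspace sketch below mirrors that lookup idea under those assumptions; the numeric values copy the numbering given in the mode parameter description earlier, and parse_parm()/mode_tbl are illustrative names, not the driver's symbols.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Table shaped like bond_parm_tbl: a name and its numeric mode value. */
struct parm_tbl { const char *modename; int mode; };

static const struct parm_tbl mode_tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ "balance-xor",   2 },
	{ NULL,           -1 },	/* sentinel */
};

/* Accept either the mode's string name or its decimal number. */
static int parse_parm(const char *arg, const struct parm_tbl *tbl)
{
	int i;

	for (i = 0; tbl[i].modename; i++) {
		if (strcmp(arg, tbl[i].modename) == 0 ||
		    (isdigit((unsigned char)arg[0]) &&
		     atoi(arg) == tbl[i].mode))
			return tbl[i].mode;
	}
	return -1;	/* unknown name or number */
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_parm("balance-rr", mode_tbl),	/* 0 */
	       parse_parm("1", mode_tbl),		/* 1 */
	       parse_parm("bogus", mode_tbl));		/* -1 */
	return 0;
}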
@@ -4670,7 +4267,7 @@ static int bond_check_params(struct bond_params *params)
4670 if (max_bonds < 1 || max_bonds > INT_MAX) { 4267 if (max_bonds < 1 || max_bonds > INT_MAX) {
4671 printk(KERN_WARNING DRV_NAME 4268 printk(KERN_WARNING DRV_NAME
4672 ": Warning: max_bonds (%d) not in range %d-%d, so it " 4269 ": Warning: max_bonds (%d) not in range %d-%d, so it "
4673 "was reset to BOND_DEFAULT_MAX_BONDS (%d)", 4270 "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4674 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS); 4271 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4675 max_bonds = BOND_DEFAULT_MAX_BONDS; 4272 max_bonds = BOND_DEFAULT_MAX_BONDS;
4676 } 4273 }
@@ -4881,81 +4478,96 @@ static int bond_check_params(struct bond_params *params)
4881 return 0; 4478 return 0;
4882} 4479}
4883 4480
4481/* Create a new bond based on the specified name and bonding parameters.
4482 * Caller must NOT hold rtnl_lock; we need to release it here before we
4483 * set up our sysfs entries.
4484 */
4485int bond_create(char *name, struct bond_params *params, struct bonding **newbond)
4486{
4487 struct net_device *bond_dev;
4488 int res;
4489
4490 rtnl_lock();
4491 bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup);
4492 if (!bond_dev) {
4493 printk(KERN_ERR DRV_NAME
4494 ": %s: eek! can't alloc netdev!\n",
4495 name);
4496 res = -ENOMEM;
4497 goto out_rtnl;
4498 }
4499
4500 /* bond_init() must be called after dev_alloc_name() (for the
4501 * /proc files), but before register_netdevice(), because we
4502 * need to set function pointers.
4503 */
4504
4505 res = bond_init(bond_dev, params);
4506 if (res < 0) {
4507 goto out_netdev;
4508 }
4509
4510 SET_MODULE_OWNER(bond_dev);
4511
4512 res = register_netdevice(bond_dev);
4513 if (res < 0) {
4514 goto out_bond;
4515 }
4516 if (newbond)
4517 *newbond = bond_dev->priv;
4518
4519 rtnl_unlock(); /* allows sysfs registration of net device */
4520 res = bond_create_sysfs_entry(bond_dev->priv);
4521 goto done;
4522out_bond:
4523 bond_deinit(bond_dev);
4524out_netdev:
4525 free_netdev(bond_dev);
4526out_rtnl:
4527 rtnl_unlock();
4528done:
4529 return res;
4530}
4531
4884static int __init bonding_init(void) 4532static int __init bonding_init(void)
4885{ 4533{
4886 struct bond_params params;
4887 int i; 4534 int i;
4888 int res; 4535 int res;
4536 char new_bond_name[8]; /* Enough room for 999 bonds at init. */
4889 4537
4890 printk(KERN_INFO "%s", version); 4538 printk(KERN_INFO "%s", version);
4891 4539
4892 res = bond_check_params(&params); 4540 res = bond_check_params(&bonding_defaults);
4893 if (res) { 4541 if (res) {
4894 return res; 4542 goto out;
4895 } 4543 }
4896 4544
4897 rtnl_lock();
4898
4899#ifdef CONFIG_PROC_FS 4545#ifdef CONFIG_PROC_FS
4900 bond_create_proc_dir(); 4546 bond_create_proc_dir();
4901#endif 4547#endif
4902
4903 for (i = 0; i < max_bonds; i++) { 4548 for (i = 0; i < max_bonds; i++) {
4904 struct net_device *bond_dev; 4549 sprintf(new_bond_name, "bond%d",i);
4905 4550 res = bond_create(new_bond_name,&bonding_defaults, NULL);
4906 bond_dev = alloc_netdev(sizeof(struct bonding), "", ether_setup); 4551 if (res)
4907 if (!bond_dev) { 4552 goto err;
4908 res = -ENOMEM;
4909 goto out_err;
4910 }
4911
4912 res = dev_alloc_name(bond_dev, "bond%d");
4913 if (res < 0) {
4914 free_netdev(bond_dev);
4915 goto out_err;
4916 }
4917
4918 /* bond_init() must be called after dev_alloc_name() (for the
4919 * /proc files), but before register_netdevice(), because we
4920 * need to set function pointers.
4921 */
4922 res = bond_init(bond_dev, &params);
4923 if (res < 0) {
4924 free_netdev(bond_dev);
4925 goto out_err;
4926 }
4927
4928 SET_MODULE_OWNER(bond_dev);
4929
4930 res = register_netdevice(bond_dev);
4931 if (res < 0) {
4932 bond_deinit(bond_dev);
4933 free_netdev(bond_dev);
4934 goto out_err;
4935 }
4936 } 4553 }
4937 4554
4938 rtnl_unlock(); 4555 res = bond_create_sysfs();
4556 if (res)
4557 goto err;
4558
4939 register_netdevice_notifier(&bond_netdev_notifier); 4559 register_netdevice_notifier(&bond_netdev_notifier);
4940 register_inetaddr_notifier(&bond_inetaddr_notifier); 4560 register_inetaddr_notifier(&bond_inetaddr_notifier);
4941 4561
4942 return 0; 4562 goto out;
4943 4563err:
4944out_err:
4945 /*
4946 * rtnl_unlock() will run netdev_run_todo(), putting the
4947 * thus-far-registered bonding devices into a state which
4948 * unregister_netdevice() will accept
4949 */
4950 rtnl_unlock();
4951 rtnl_lock(); 4564 rtnl_lock();
4952
4953 /* free and unregister all bonds that were successfully added */
4954 bond_free_all(); 4565 bond_free_all();
4955 4566 bond_destroy_sysfs();
4956 rtnl_unlock(); 4567 rtnl_unlock();
4957 4568out:
4958 return res; 4569 return res;
4570
4959} 4571}
4960 4572
4961static void __exit bonding_exit(void) 4573static void __exit bonding_exit(void)
@@ -4965,6 +4577,7 @@ static void __exit bonding_exit(void)
4965 4577
4966 rtnl_lock(); 4578 rtnl_lock();
4967 bond_free_all(); 4579 bond_free_all();
4580 bond_destroy_sysfs();
4968 rtnl_unlock(); 4581 rtnl_unlock();
4969} 4582}
4970 4583
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
new file mode 100644
index 000000000000..32d13da43a0b
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -0,0 +1,1358 @@
1
2/*
3 * Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 */
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/device.h>
28#include <linux/sysdev.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/string.h>
32#include <linux/netdevice.h>
33#include <linux/inetdevice.h>
34#include <linux/in.h>
35#include <linux/sysfs.h>
36#include <linux/string.h>
37#include <linux/ctype.h>
38#include <linux/inet.h>
39#include <linux/rtnetlink.h>
40
41/* #define BONDING_DEBUG 1 */
42#include "bonding.h"
43#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
44#define to_net_dev(class) container_of(class, struct net_device, class_dev)
45#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv))
46
47/*---------------------------- Declarations -------------------------------*/
48
49
50extern struct list_head bond_dev_list;
51extern struct bond_params bonding_defaults;
52extern struct bond_parm_tbl bond_mode_tbl[];
53extern struct bond_parm_tbl bond_lacp_tbl[];
54extern struct bond_parm_tbl xmit_hashtype_tbl[];
55
56static int expected_refcount = -1;
57static struct class *netdev_class;
58/*--------------------------- Data Structures -----------------------------*/
59
60/* Bonding sysfs lock. Why can't we just use the subsystem lock?
61 * Because kobject_register tries to acquire the subsystem lock. If
62 * we already hold the lock (which we would if the user was creating
63 * a new bond through the sysfs interface), we deadlock.
64 * This lock is only needed when deleting a bond - we need to make sure
65 * that we don't collide with an ongoing ioctl.
66 */
67
68struct rw_semaphore bonding_rwsem;
69
70
71
72
73/*------------------------------ Functions --------------------------------*/
74
75/*
76 * "show" function for the bond_masters attribute.
77 * The class parameter is ignored.
78 */
79static ssize_t bonding_show_bonds(struct class *cls, char *buffer)
80{
81 int res = 0;
82 struct bonding *bond;
83
84 down_read(&(bonding_rwsem));
85
86 list_for_each_entry(bond, &bond_dev_list, bond_list) {
87 if (res > (PAGE_SIZE - IFNAMSIZ)) {
88 /* not enough space for another interface name */
89 if ((PAGE_SIZE - res) > 10)
90 res = PAGE_SIZE - 10;
91 res += sprintf(buffer + res, "++more++");
92 break;
93 }
94 res += sprintf(buffer + res, "%s ",
95 bond->dev->name);
96 }
97 res += sprintf(buffer + res, "\n");
98 res++;
99 up_read(&(bonding_rwsem));
100 return res;
101}
102
103/*
104 * "store" function for the bond_masters attribute. This is what
105 * creates and deletes entire bonds.
106 *
107 * The class parameter is ignored.
108 *
109 */
110
111static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t count)
112{
113 char command[IFNAMSIZ + 1] = {0, };
114 char *ifname;
115 int res = count;
116 struct bonding *bond;
117 struct bonding *nxt;
118
119 down_write(&(bonding_rwsem));
120 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
121 ifname = command + 1;
122 if ((strlen(command) <= 1) ||
123 !dev_valid_name(ifname))
124 goto err_no_cmd;
125
126 if (command[0] == '+') {
127
128 /* Check to see if the bond already exists. */
129 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
130 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
131 printk(KERN_ERR DRV_NAME
132 ": cannot add bond %s; it already exists\n",
133 ifname);
134 res = -EPERM;
135 goto out;
136 }
137
138 printk(KERN_INFO DRV_NAME
139 ": %s is being created...\n", ifname);
140 if (bond_create(ifname, &bonding_defaults, &bond)) {
141 printk(KERN_INFO DRV_NAME
142 ": %s interface already exists. Bond creation failed.\n",
143 ifname);
144 res = -EPERM;
145 }
146 goto out;
147 }
148
149 if (command[0] == '-') {
150 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
151 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
152 rtnl_lock();
153 /* check the ref count on the bond's kobject.
154 * If it's > expected, then there's a file open,
155 * and we have to fail.
156 */
157 if (atomic_read(&bond->dev->class_dev.kobj.kref.refcount)
158 > expected_refcount){
159 rtnl_unlock();
160 printk(KERN_INFO DRV_NAME
161 ": Unable to remove bond %s due to open references.\n",
162 ifname);
163 res = -EPERM;
164 goto out;
165 }
166 printk(KERN_INFO DRV_NAME
167 ": %s is being deleted...\n",
168 bond->dev->name);
169 unregister_netdevice(bond->dev);
170 bond_deinit(bond->dev);
171 bond_destroy_sysfs_entry(bond);
172 rtnl_unlock();
173 goto out;
174 }
175
176 printk(KERN_ERR DRV_NAME
177 ": unable to delete non-existent bond %s\n", ifname);
178 res = -ENODEV;
179 goto out;
180 }
181
182err_no_cmd:
183 printk(KERN_ERR DRV_NAME
184 ": no command found in bonding_masters. Use +ifname or -ifname.\n");
185 res = -EPERM;
186
187 /* Always return either count or an error. If you return 0, you'll
188 * get called forever, which is bad.
189 */
190out:
191 up_write(&(bonding_rwsem));
192 return res;
193}
194/* class attribute for bond_masters file. This ends up in /sys/class/net */
195static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
196 bonding_show_bonds, bonding_store_bonds);
197
198int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave)
199{
200 char linkname[IFNAMSIZ+7];
201 int ret = 0;
202
203 /* first, create a link from the slave back to the master */
204 ret = sysfs_create_link(&(slave->class_dev.kobj), &(master->class_dev.kobj),
205 "master");
206 if (ret)
207 return ret;
208 /* next, create a link from the master to the slave */
209 sprintf(linkname,"slave_%s",slave->name);
210 ret = sysfs_create_link(&(master->class_dev.kobj), &(slave->class_dev.kobj),
211 linkname);
212 return ret;
213
214}
215
216void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave)
217{
218 char linkname[IFNAMSIZ+7];
219
220 sysfs_remove_link(&(slave->class_dev.kobj), "master");
221 sprintf(linkname,"slave_%s",slave->name);
222 sysfs_remove_link(&(master->class_dev.kobj), linkname);
223}
224
225
226/*
227 * Show the slaves in the current bond.
228 */
229static ssize_t bonding_show_slaves(struct class_device *cd, char *buf)
230{
231 struct slave *slave;
232 int i, res = 0;
233 struct bonding *bond = to_bond(cd);
234
235 read_lock_bh(&bond->lock);
236 bond_for_each_slave(bond, slave, i) {
237 if (res > (PAGE_SIZE - IFNAMSIZ)) {
238 /* not enough space for another interface name */
239 if ((PAGE_SIZE - res) > 10)
240 res = PAGE_SIZE - 10;
241 res += sprintf(buf + res, "++more++");
242 break;
243 }
244 res += sprintf(buf + res, "%s ", slave->dev->name);
245 }
246 read_unlock_bh(&bond->lock);
247 res += sprintf(buf + res, "\n");
248 res++;
249 return res;
250}
251
252/*
253 * Set the slaves in the current bond. The bond interface must be
254 * up for this to succeed.
255 * This function is largely the same flow as bonding_update_bonds().
256 */
257static ssize_t bonding_store_slaves(struct class_device *cd, const char *buffer, size_t count)
258{
259 char command[IFNAMSIZ + 1] = { 0, };
260 char *ifname;
261 int i, res, found, ret = count;
262 struct slave *slave;
263 struct net_device *dev = 0;
264 struct bonding *bond = to_bond(cd);
265
266 /* Quick sanity check -- is the bond interface up? */
267 if (!(bond->dev->flags & IFF_UP)) {
268 printk(KERN_ERR DRV_NAME
269 ": %s: Unable to update slaves because interface is down.\n",
270 bond->dev->name);
271 ret = -EPERM;
272 goto out;
273 }
274
275 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
276
277 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
278 ifname = command + 1;
279 if ((strlen(command) <= 1) ||
280 !dev_valid_name(ifname))
281 goto err_no_cmd;
282
283 if (command[0] == '+') {
284
285 /* Got a slave name in ifname. Is it already in the list? */
286 found = 0;
287 read_lock_bh(&bond->lock);
288 bond_for_each_slave(bond, slave, i)
289 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
290 printk(KERN_ERR DRV_NAME
291 ": %s: Interface %s is already enslaved!\n",
292 bond->dev->name, ifname);
293 ret = -EPERM;
294 read_unlock_bh(&bond->lock);
295 goto out;
296 }
297
298 read_unlock_bh(&bond->lock);
299 printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
300 bond->dev->name, ifname);
301 dev = dev_get_by_name(ifname);
302 if (!dev) {
303 printk(KERN_INFO DRV_NAME
304 ": %s: Interface %s does not exist!\n",
305 bond->dev->name, ifname);
306 ret = -EPERM;
307 goto out;
308 }
309 else
310 dev_put(dev);
311
312 if (dev->flags & IFF_UP) {
313 printk(KERN_ERR DRV_NAME
314 ": %s: Error: Unable to enslave %s "
315 "because it is already up.\n",
316 bond->dev->name, dev->name);
317 ret = -EPERM;
318 goto out;
319 }
320 /* If this is the first slave, then we need to set
321 the master's hardware address to be the same as the
322 slave's. */
323 if (!(*((u32 *) & (bond->dev->dev_addr[0])))) {
324 memcpy(bond->dev->dev_addr, dev->dev_addr,
325 dev->addr_len);
326 }
327
328 /* Set the slave's MTU to match the bond */
329 if (dev->mtu != bond->dev->mtu) {
330 if (dev->change_mtu) {
331 res = dev->change_mtu(dev,
332 bond->dev->mtu);
333 if (res) {
334 ret = res;
335 goto out;
336 }
337 } else {
338 dev->mtu = bond->dev->mtu;
339 }
340 }
341 rtnl_lock();
342 res = bond_enslave(bond->dev, dev);
343 rtnl_unlock();
344 if (res) {
345 ret = res;
346 }
347 goto out;
348 }
349
350 if (command[0] == '-') {
351 dev = NULL;
352 bond_for_each_slave(bond, slave, i)
353 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
354 dev = slave->dev;
355 break;
356 }
357 if (dev) {
358 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
359 bond->dev->name, dev->name);
360 rtnl_lock();
361 res = bond_release(bond->dev, dev);
362 rtnl_unlock();
363 if (res) {
364 ret = res;
365 goto out;
366 }
367 /* set the slave MTU to the default */
368 if (dev->change_mtu) {
369 dev->change_mtu(dev, 1500);
370 } else {
371 dev->mtu = 1500;
372 }
373 }
374 else {
375 printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n",
376 ifname, bond->dev->name);
377 ret = -ENODEV;
378 }
379 goto out;
380 }
381
382err_no_cmd:
383 printk(KERN_ERR DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
384 ret = -EPERM;
385
386out:
387 return ret;
388}
389
390static CLASS_DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
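Taken together, bonding_store_bonds() and bonding_store_slaves() above implement a small text protocol: a leading '+' or '-' followed by an interface name either creates/deletes a bond or enslaves/releases a slave. Assuming the attributes end up at the conventional sysfs locations (the class-level bonding_masters file and a per-bond bonding/ directory under /sys/class/net; the registration code is outside this excerpt), a userspace caller might drive them as sketched below, with bond1 and eth2 as example names only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a "+name" / "-name" command to a bonding sysfs file.
 * Paths and names are illustrative; they assume the conventional layout.
 */
static int sysfs_cmd(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Create bond1 via the masters file. */
	sysfs_cmd("/sys/class/net/bonding_masters", "+bond1");

	/* bonding_store_slaves() above refuses to work while the bond is
	 * down or the slave is up, so bring bond1 up and take eth2 down
	 * first (not shown here), then enslave it.
	 */
	sysfs_cmd("/sys/class/net/bond1/bonding/slaves", "+eth2");
	return 0;
}

Deletion is symmetric: writing "-eth2" to the slaves file releases the slave, and "-bond1" to bonding_masters tears the bond down, subject to the open-reference check in bonding_store_bonds() above.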
391
392/*
393 * Show and set the bonding mode. The bond interface must be down to
394 * change the mode.
395 */
396static ssize_t bonding_show_mode(struct class_device *cd, char *buf)
397{
398 struct bonding *bond = to_bond(cd);
399
400 return sprintf(buf, "%s %d\n",
401 bond_mode_tbl[bond->params.mode].modename,
402 bond->params.mode) + 1;
403}
404
405static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size_t count)
406{
407 int new_value, ret = count;
408 struct bonding *bond = to_bond(cd);
409
410 if (bond->dev->flags & IFF_UP) {
411 printk(KERN_ERR DRV_NAME
412 ": unable to update mode of %s because interface is up.\n",
413 bond->dev->name);
414 ret = -EPERM;
415 goto out;
416 }
417
418 new_value = bond_parse_parm((char *)buf, bond_mode_tbl);
419 if (new_value < 0) {
420 printk(KERN_ERR DRV_NAME
421 ": %s: Ignoring invalid mode value %.*s.\n",
422 bond->dev->name,
423 (int)strlen(buf) - 1, buf);
424 ret = -EINVAL;
425 goto out;
426 } else {
427 bond->params.mode = new_value;
428 bond_set_mode_ops(bond, bond->params.mode);
429 printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n",
430 bond->dev->name, bond_mode_tbl[new_value].modename, new_value);
431 }
432out:
433 return ret;
434}
435static CLASS_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
436
437/*
438 * Show and set the bonding transmit hash method. The bond interface must be down to
439 * change the xmit hash policy.
440 */
441static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf)
442{
443 int count;
444 struct bonding *bond = to_bond(cd);
445
446 if ((bond->params.mode != BOND_MODE_XOR) &&
447 (bond->params.mode != BOND_MODE_8023AD)) {
448 // Not Applicable
449 count = sprintf(buf, "NA\n") + 1;
450 } else {
451 count = sprintf(buf, "%s %d\n",
452 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
453 bond->params.xmit_policy) + 1;
454 }
455
456 return count;
457}
458
459static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf, size_t count)
460{
461 int new_value, ret = count;
462 struct bonding *bond = to_bond(cd);
463
464 if (bond->dev->flags & IFF_UP) {
465 printk(KERN_ERR DRV_NAME
466 "%s: Interface is up. Unable to update xmit policy.\n",
467 bond->dev->name);
468 ret = -EPERM;
469 goto out;
470 }
471
472 if ((bond->params.mode != BOND_MODE_XOR) &&
473 (bond->params.mode != BOND_MODE_8023AD)) {
474 printk(KERN_ERR DRV_NAME
475 "%s: Transmit hash policy is irrelevant in this mode.\n",
476 bond->dev->name);
477 ret = -EPERM;
478 goto out;
479 }
480
481 new_value = bond_parse_parm((char *)buf, xmit_hashtype_tbl);
482 if (new_value < 0) {
483 printk(KERN_ERR DRV_NAME
484 ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
485 bond->dev->name,
486 (int)strlen(buf) - 1, buf);
487 ret = -EINVAL;
488 goto out;
489 } else {
490 bond->params.xmit_policy = new_value;
491 bond_set_mode_ops(bond, bond->params.mode);
492 printk(KERN_INFO DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
493 bond->dev->name, xmit_hashtype_tbl[new_value].modename, new_value);
494 }
495out:
496 return ret;
497}
498static CLASS_DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
499
500/*
501 * Show and set the arp timer interval. There are two tricky bits
502 * here. First, if ARP monitoring is activated, then we must disable
503 * MII monitoring. Second, if the ARP timer isn't running, we must
504 * start it.
505 */
506static ssize_t bonding_show_arp_interval(struct class_device *cd, char *buf)
507{
508 struct bonding *bond = to_bond(cd);
509
510 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1;
511}
512
513static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *buf, size_t count)
514{
515 int new_value, ret = count;
516 struct bonding *bond = to_bond(cd);
517
518 if (sscanf(buf, "%d", &new_value) != 1) {
519 printk(KERN_ERR DRV_NAME
520 ": %s: no arp_interval value specified.\n",
521 bond->dev->name);
522 ret = -EINVAL;
523 goto out;
524 }
525 if (new_value < 0) {
526 printk(KERN_ERR DRV_NAME
527 ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
528 bond->dev->name, new_value, INT_MAX);
529 ret = -EINVAL;
530 goto out;
531 }
532
533 printk(KERN_INFO DRV_NAME
534 ": %s: Setting ARP monitoring interval to %d.\n",
535 bond->dev->name, new_value);
536 bond->params.arp_interval = new_value;
537 if (bond->params.miimon) {
538 printk(KERN_INFO DRV_NAME
539 ": %s: ARP monitoring cannot be used with MII monitoring. "
540 "%s Disabling MII monitoring.\n",
541 bond->dev->name, bond->dev->name);
542 bond->params.miimon = 0;
543 /* Kill MII timer, else it brings bond's link down */
544 if (bond->mii_timer.function) {
545 printk(KERN_INFO DRV_NAME
546 ": %s: Kill MII timer, else it brings bond's link down...\n",
547 bond->dev->name);
548 del_timer_sync(&bond->mii_timer);
549 }
550 }
551 if (!bond->params.arp_targets[0]) {
552 printk(KERN_INFO DRV_NAME
553 ": %s: ARP monitoring has been set up, "
554 "but no ARP targets have been specified.\n",
555 bond->dev->name);
556 }
557 if (bond->dev->flags & IFF_UP) {
558 /* If the interface is up, we may need to fire off
559 * the ARP timer. If the interface is down, the
560 * timer will get fired off when the open function
561 * is called.
562 */
563 if (bond->arp_timer.function) {
564 /* The timer's already set up, so fire it off */
565 mod_timer(&bond->arp_timer, jiffies + 1);
566 } else {
567 /* Set up the timer. */
568 init_timer(&bond->arp_timer);
569 bond->arp_timer.expires = jiffies + 1;
570 bond->arp_timer.data =
571 (unsigned long) bond->dev;
572 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
573 bond->arp_timer.function =
574 (void *)
575 &bond_activebackup_arp_mon;
576 } else {
577 bond->arp_timer.function =
578 (void *)
579 &bond_loadbalance_arp_mon;
580 }
581 add_timer(&bond->arp_timer);
582 }
583 }
584
585out:
586 return ret;
587}
588static CLASS_DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
589
590/*
591 * Show and set the arp targets.
592 */
593static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf)
594{
595 int i, res = 0;
596 struct bonding *bond = to_bond(cd);
597
598 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
599 if (bond->params.arp_targets[i])
600 res += sprintf(buf + res, "%u.%u.%u.%u ",
601 NIPQUAD(bond->params.arp_targets[i]));
602 }
603 if (res)
604 res--; /* eat the leftover space */
605 res += sprintf(buf + res, "\n");
606 res++;
607 return res;
608}
609
610static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *buf, size_t count)
611{
612 u32 newtarget;
613 int i = 0, done = 0, ret = count;
614 struct bonding *bond = to_bond(cd);
615 u32 *targets;
616
617 targets = bond->params.arp_targets;
618 newtarget = in_aton(buf + 1);
619 /* look for adds */
620 if (buf[0] == '+') {
621 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
622 printk(KERN_ERR DRV_NAME
623 ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n",
624 bond->dev->name, NIPQUAD(newtarget));
625 ret = -EINVAL;
626 goto out;
627 }
628 /* look for an empty slot to put the target in, and check for dupes */
629 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
630 if (targets[i] == newtarget) { /* duplicate */
631 printk(KERN_ERR DRV_NAME
632 ": %s: ARP target %u.%u.%u.%u is already present\n",
633 bond->dev->name, NIPQUAD(newtarget));
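/* a pre-existing copy was found after the target was already written
 * into an earlier empty slot; clear the older entry so it is not
 * listed twice before returning the error */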
634 if (done)
635 targets[i] = 0;
636 ret = -EINVAL;
637 goto out;
638 }
639 if (targets[i] == 0 && !done) {
640 printk(KERN_INFO DRV_NAME
641 ": %s: adding ARP target %d.%d.%d.%d.\n",
642 bond->dev->name, NIPQUAD(newtarget));
643 done = 1;
644 targets[i] = newtarget;
645 }
646 }
647 if (!done) {
648 printk(KERN_ERR DRV_NAME
649 ": %s: ARP target table is full!\n",
650 bond->dev->name);
651 ret = -EINVAL;
652 goto out;
653 }
654
655 }
656 else if (buf[0] == '-') {
657 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
658 printk(KERN_ERR DRV_NAME
659 ": %s: invalid ARP target %d.%d.%d.%d specified for removal\n",
660 bond->dev->name, NIPQUAD(newtarget));
661 ret = -EINVAL;
662 goto out;
663 }
664
665 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
666 if (targets[i] == newtarget) {
667 printk(KERN_INFO DRV_NAME
668 ": %s: removing ARP target %d.%d.%d.%d.\n",
669 bond->dev->name, NIPQUAD(newtarget));
670 targets[i] = 0;
671 done = 1;
672 }
673 }
674 if (!done) {
675 printk(KERN_INFO DRV_NAME
676 ": %s: unable to remove nonexistent ARP target %d.%d.%d.%d.\n",
677 bond->dev->name, NIPQUAD(newtarget));
678 ret = -EINVAL;
679 goto out;
680 }
681 }
682 else {
683 printk(KERN_ERR DRV_NAME ": no command found in arp_ip_target file for bond %s. Use +<addr> or -<addr>.\n",
684 bond->dev->name);
685 ret = -EPERM;
686 goto out;
687 }
688
689out:
690 return ret;
691}
692static CLASS_DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
693
694/*
695 * Show and set the up and down delays. These must be multiples of the
696 * MII monitoring value, and are stored internally as the multiplier.
697 * Thus, we must translate to milliseconds for the real world.
698 */
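/* e.g. with miimon=100, writing 250 to downdelay is stored as 2 and reads back as 200 (ms) */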
699static ssize_t bonding_show_downdelay(struct class_device *cd, char *buf)
700{
701 struct bonding *bond = to_bond(cd);
702
703 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1;
704}
705
706static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf, size_t count)
707{
708 int new_value, ret = count;
709 struct bonding *bond = to_bond(cd);
710
711 if (!(bond->params.miimon)) {
712 printk(KERN_ERR DRV_NAME
713 ": %s: Unable to set down delay as MII monitoring is disabled\n",
714 bond->dev->name);
715 ret = -EPERM;
716 goto out;
717 }
718
719 if (sscanf(buf, "%d", &new_value) != 1) {
720 printk(KERN_ERR DRV_NAME
721 ": %s: no down delay value specified.\n",
722 bond->dev->name);
723 ret = -EINVAL;
724 goto out;
725 }
726 if (new_value < 0) {
727 printk(KERN_ERR DRV_NAME
728 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
729 bond->dev->name, new_value, 1, INT_MAX);
730 ret = -EINVAL;
731 goto out;
732 } else {
733 if ((new_value % bond->params.miimon) != 0) {
734 printk(KERN_WARNING DRV_NAME
735 ": %s: Warning: down delay (%d) is not a multiple "
736 "of miimon (%d), delay rounded to %d ms\n",
737 bond->dev->name, new_value, bond->params.miimon,
738 (new_value / bond->params.miimon) *
739 bond->params.miimon);
740 }
741 bond->params.downdelay = new_value / bond->params.miimon;
742 printk(KERN_INFO DRV_NAME ": %s: Setting down delay to %d.\n",
743 bond->dev->name, bond->params.downdelay * bond->params.miimon);
744
745 }
746
747out:
748 return ret;
749}
750static CLASS_DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
751
752static ssize_t bonding_show_updelay(struct class_device *cd, char *buf)
753{
754 struct bonding *bond = to_bond(cd);
755
756 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1;
757
758}
759
760static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, size_t count)
761{
762 int new_value, ret = count;
763 struct bonding *bond = to_bond(cd);
764
765 if (!(bond->params.miimon)) {
766 printk(KERN_ERR DRV_NAME
767 ": %s: Unable to set up delay as MII monitoring is disabled\n",
768 bond->dev->name);
769 ret = -EPERM;
770 goto out;
771 }
772
773 if (sscanf(buf, "%d", &new_value) != 1) {
774 printk(KERN_ERR DRV_NAME
775 ": %s: no up delay value specified.\n",
776 bond->dev->name);
777 ret = -EINVAL;
778 goto out;
779 }
780 if (new_value < 0) {
781 printk(KERN_ERR DRV_NAME
782 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
783 bond->dev->name, new_value, 1, INT_MAX);
784 ret = -EINVAL;
785 goto out;
786 } else {
787 if ((new_value % bond->params.miimon) != 0) {
788 printk(KERN_WARNING DRV_NAME
789 ": %s: Warning: up delay (%d) is not a multiple "
790 "of miimon (%d), updelay rounded to %d ms\n",
791 bond->dev->name, new_value, bond->params.miimon,
792 (new_value / bond->params.miimon) *
793 bond->params.miimon);
794 }
795 bond->params.updelay = new_value / bond->params.miimon;
796 printk(KERN_INFO DRV_NAME ": %s: Setting up delay to %d.\n",
797 bond->dev->name, bond->params.updelay * bond->params.miimon);
798
799 }
800
801out:
802 return ret;
803}
804static CLASS_DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
805
806/*
807 * Show and set the LACP interval. Interface must be down, and the mode
808 * must be set to 802.3ad mode.
809 */
810static ssize_t bonding_show_lacp(struct class_device *cd, char *buf)
811{
812 struct bonding *bond = to_bond(cd);
813
814 return sprintf(buf, "%s %d\n",
815 bond_lacp_tbl[bond->params.lacp_fast].modename,
816 bond->params.lacp_fast) + 1;
817}
818
819static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size_t count)
820{
821 int new_value, ret = count;
822 struct bonding *bond = to_bond(cd);
823
824 if (bond->dev->flags & IFF_UP) {
825 printk(KERN_ERR DRV_NAME
826 ": %s: Unable to update LACP rate because interface is up.\n",
827 bond->dev->name);
828 ret = -EPERM;
829 goto out;
830 }
831
832 if (bond->params.mode != BOND_MODE_8023AD) {
833 printk(KERN_ERR DRV_NAME
834 ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
835 bond->dev->name);
836 ret = -EPERM;
837 goto out;
838 }
839
840 new_value = bond_parse_parm((char *)buf, bond_lacp_tbl);
841
842 if ((new_value == 1) || (new_value == 0)) {
843 bond->params.lacp_fast = new_value;
844 printk(KERN_INFO DRV_NAME
845 ": %s: Setting LACP rate to %s (%d).\n",
846 bond->dev->name, bond_lacp_tbl[new_value].modename, new_value);
847 } else {
848 printk(KERN_ERR DRV_NAME
849 ": %s: Ignoring invalid LACP rate value %.*s.\n",
850 bond->dev->name, (int)strlen(buf) - 1, buf);
851 ret = -EINVAL;
852 }
853out:
854 return ret;
855}
856static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
857
858/*
859 * Show and set the MII monitor interval. There are two tricky bits
860 * here. First, if MII monitoring is activated, then we must disable
861 * ARP monitoring. Second, if the timer isn't running, we must
862 * start it.
863 */
864static ssize_t bonding_show_miimon(struct class_device *cd, char *buf)
865{
866 struct bonding *bond = to_bond(cd);
867
868 return sprintf(buf, "%d\n", bond->params.miimon) + 1;
869}
870
871static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, size_t count)
872{
873 int new_value, ret = count;
874 struct bonding *bond = to_bond(cd);
875
876 if (sscanf(buf, "%d", &new_value) != 1) {
877 printk(KERN_ERR DRV_NAME
878 ": %s: no miimon value specified.\n",
879 bond->dev->name);
880 ret = -EINVAL;
881 goto out;
882 }
883 if (new_value < 0) {
884 printk(KERN_ERR DRV_NAME
885 ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
886 bond->dev->name, new_value, 1, INT_MAX);
887 ret = -EINVAL;
888 goto out;
889 } else {
890 printk(KERN_INFO DRV_NAME
891 ": %s: Setting MII monitoring interval to %d.\n",
892 bond->dev->name, new_value);
893 bond->params.miimon = new_value;
894 if (bond->params.updelay)
895 printk(KERN_INFO DRV_NAME
896 ": %s: Note: Updating updelay (to %d) "
897 "since it is stored as a multiple of the miimon value.\n",
898 bond->dev->name,
899 bond->params.updelay * bond->params.miimon);
900 if (bond->params.downdelay)
901 printk(KERN_INFO DRV_NAME
902 ": %s: Note: Updating downdelay (to %d) "
903 "since it is stored as a multiple of the miimon value.\n",
904 bond->dev->name,
905 bond->params.downdelay * bond->params.miimon);
906 if (bond->params.arp_interval) {
907 printk(KERN_INFO DRV_NAME
908 ": %s: MII monitoring cannot be used with "
909 "ARP monitoring. Disabling ARP monitoring...\n",
910 bond->dev->name);
911 bond->params.arp_interval = 0;
912 /* Kill ARP timer, else it brings bond's link down */
913 if (bond->arp_timer.function) {
914 printk(KERN_INFO DRV_NAME
915 ": %s: Kill ARP timer, else it brings bond's link down...\n",
916 bond->dev->name);
917 del_timer_sync(&bond->arp_timer);
918 }
919 }
920
921 if (bond->dev->flags & IFF_UP) {
922 /* If the interface is up, we may need to fire off
923 * the MII timer. If the interface is down, the
924 * timer will get fired off when the open function
925 * is called.
926 */
927 if (bond->mii_timer.function) {
928 /* The timer's already set up, so fire it off */
929 mod_timer(&bond->mii_timer, jiffies + 1);
930 } else {
931 /* Set up the timer. */
932 init_timer(&bond->mii_timer);
933 bond->mii_timer.expires = jiffies + 1;
934 bond->mii_timer.data =
935 (unsigned long) bond->dev;
936 bond->mii_timer.function =
937 (void *) &bond_mii_monitor;
938 add_timer(&bond->mii_timer);
939 }
940 }
941 }
942out:
943 return ret;
944}
945static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
946
947/*
948 * Show and set the primary slave. The store function is much
949 * simpler than the bonding_store_slaves function because it only needs
950 * to handle one interface name.
951 * The bond must be in a mode that supports a primary slave for this to
952 * be set.
953 */
954static ssize_t bonding_show_primary(struct class_device *cd, char *buf)
955{
956 int count = 0;
957 struct bonding *bond = to_bond(cd);
958
959 if (bond->primary_slave)
960 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1;
961 else
962 count = sprintf(buf, "\n") + 1;
963
964 return count;
965}
966
967static ssize_t bonding_store_primary(struct class_device *cd, const char *buf, size_t count)
968{
969 int i;
970 struct slave *slave;
971 struct bonding *bond = to_bond(cd);
972
973 write_lock_bh(&bond->lock);
974 if (!USES_PRIMARY(bond->params.mode)) {
975 printk(KERN_INFO DRV_NAME
976 ": %s: Unable to set primary slave; %s is in mode %d\n",
977 bond->dev->name, bond->dev->name, bond->params.mode);
978 } else {
979 bond_for_each_slave(bond, slave, i) {
980 if (strnicmp
981 (slave->dev->name, buf,
982 strlen(slave->dev->name)) == 0) {
983 printk(KERN_INFO DRV_NAME
984 ": %s: Setting %s as primary slave.\n",
985 bond->dev->name, slave->dev->name);
986 bond->primary_slave = slave;
987 bond_select_active_slave(bond);
988 goto out;
989 }
990 }
991
992 /* if we got here, then we didn't match the name of any slave */
993
994 if (strlen(buf) == 0 || buf[0] == '\n') {
995 printk(KERN_INFO DRV_NAME
996 ": %s: Setting primary slave to None.\n",
997 bond->dev->name);
998 bond->primary_slave = NULL;
999 bond_select_active_slave(bond);
1000 } else {
1001 printk(KERN_INFO DRV_NAME
1002 ": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
1003 bond->dev->name, (int)strlen(buf) - 1, buf);
1004 }
1005 }
1006out:
1007 write_unlock_bh(&bond->lock);
1008 return count;
1009}
1010static CLASS_DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
1011
1012/*
1013 * Show and set the use_carrier flag.
1014 */
1015static ssize_t bonding_show_carrier(struct class_device *cd, char *buf)
1016{
1017 struct bonding *bond = to_bond(cd);
1018
1019 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1;
1020}
1021
1022static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, size_t count)
1023{
1024 int new_value, ret = count;
1025 struct bonding *bond = to_bond(cd);
1026
1027
1028 if (sscanf(buf, "%d", &new_value) != 1) {
1029 printk(KERN_ERR DRV_NAME
1030 ": %s: no use_carrier value specified.\n",
1031 bond->dev->name);
1032 ret = -EINVAL;
1033 goto out;
1034 }
1035 if ((new_value == 0) || (new_value == 1)) {
1036 bond->params.use_carrier = new_value;
1037 printk(KERN_INFO DRV_NAME ": %s: Setting use_carrier to %d.\n",
1038 bond->dev->name, new_value);
1039 } else {
1040 printk(KERN_INFO DRV_NAME
1041 ": %s: Ignoring invalid use_carrier value %d.\n",
1042 bond->dev->name, new_value);
1043 }
1044out:
1045 return count;
1046}
1047static CLASS_DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
1048
1049
1050/*
1051 * Show and set currently active_slave.
1052 */
1053static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf)
1054{
1055 struct slave *curr;
1056 struct bonding *bond = to_bond(cd);
1057 int count;
1058
1059
1060 read_lock(&bond->curr_slave_lock);
1061 curr = bond->curr_active_slave;
1062 read_unlock(&bond->curr_slave_lock);
1063
1064 if (USES_PRIMARY(bond->params.mode) && curr)
1065 count = sprintf(buf, "%s\n", curr->dev->name) + 1;
1066 else
1067 count = sprintf(buf, "\n") + 1;
1068 return count;
1069}
1070
1071static ssize_t bonding_store_active_slave(struct class_device *cd, const char *buf, size_t count)
1072{
1073 int i;
1074 struct slave *slave;
1075 struct slave *old_active = NULL;
1076 struct slave *new_active = NULL;
1077 struct bonding *bond = to_bond(cd);
1078
1079 write_lock_bh(&bond->lock);
1080 if (!USES_PRIMARY(bond->params.mode)) {
1081 printk(KERN_INFO DRV_NAME
1082 ": %s: Unable to change active slave; %s is in mode %d\n",
1083 bond->dev->name, bond->dev->name, bond->params.mode);
1084 } else {
1085 bond_for_each_slave(bond, slave, i) {
1086 if (strnicmp
1087 (slave->dev->name, buf,
1088 strlen(slave->dev->name)) == 0) {
1089 old_active = bond->curr_active_slave;
1090 new_active = slave;
1091 if (new_active && (new_active == old_active)) {
1092 /* do nothing */
1093 printk(KERN_INFO DRV_NAME
1094 ": %s: %s is already the current active slave.\n",
1095 bond->dev->name, slave->dev->name);
1096 goto out;
1097 }
1098 else {
1099 if ((new_active) &&
1100 (old_active) &&
1101 (new_active->link == BOND_LINK_UP) &&
1102 IS_UP(new_active->dev)) {
1103 printk(KERN_INFO DRV_NAME
1104 ": %s: Setting %s as active slave.\n",
1105 bond->dev->name, slave->dev->name);
1106 bond_change_active_slave(bond, new_active);
1107 }
1108 else {
1109 printk(KERN_INFO DRV_NAME
1110 ": %s: Could not set %s as active slave; "
1111 "either %s is down or the link is down.\n",
1112 bond->dev->name, slave->dev->name,
1113 slave->dev->name);
1114 }
1115 goto out;
1116 }
1117 }
1118 }
1119
1120 /* if we got here, then we didn't match the name of any slave */
1121
1122 if (strlen(buf) == 0 || buf[0] == '\n') {
1123 printk(KERN_INFO DRV_NAME
1124 ": %s: Setting active slave to None.\n",
1125 bond->dev->name);
1126 bond->primary_slave = NULL;
1127 bond_select_active_slave(bond);
1128 } else {
1129 printk(KERN_INFO DRV_NAME
1130 ": %s: Unable to set %.*s as active slave as it is not a slave.\n",
1131 bond->dev->name, (int)strlen(buf) - 1, buf);
1132 }
1133 }
1134out:
1135 write_unlock_bh(&bond->lock);
1136 return count;
1137
1138}
1139static CLASS_DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
1140
1141
1142/*
1143 * Show link status of the bond interface.
1144 */
1145static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf)
1146{
1147 struct slave *curr;
1148 struct bonding *bond = to_bond(cd);
1149
1150 read_lock(&bond->curr_slave_lock);
1151 curr = bond->curr_active_slave;
1152 read_unlock(&bond->curr_slave_lock);
1153
1154 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1;
1155}
1156static CLASS_DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1157
1158
1159/*
1160 * Show current 802.3ad aggregator ID.
1161 */
1162static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf)
1163{
1164 int count = 0;
1165 struct bonding *bond = to_bond(cd);
1166
1167 if (bond->params.mode == BOND_MODE_8023AD) {
1168 struct ad_info ad_info;
1169 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id) + 1;
1170 }
1171 else
1172 count = sprintf(buf, "\n") + 1;
1173
1174 return count;
1175}
1176static CLASS_DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
1177
1178
1179/*
1180 * Show number of active 802.3ad ports.
1181 */
1182static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf)
1183{
1184 int count = 0;
1185 struct bonding *bond = to_bond(cd);
1186
1187 if (bond->params.mode == BOND_MODE_8023AD) {
1188 struct ad_info ad_info;
1189 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports) + 1;
1190 }
1191 else
1192 count = sprintf(buf, "\n") + 1;
1193
1194 return count;
1195}
1196static CLASS_DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
1197
1198
1199/*
1200 * Show current 802.3ad actor key.
1201 */
1202static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf)
1203{
1204 int count = 0;
1205 struct bonding *bond = to_bond(cd);
1206
1207 if (bond->params.mode == BOND_MODE_8023AD) {
1208 struct ad_info ad_info;
1209 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key) + 1;
1210 }
1211 else
1212 count = sprintf(buf, "\n") + 1;
1213
1214 return count;
1215}
1216static CLASS_DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
1217
1218
1219/*
1220 * Show current 802.3ad partner key.
1221 */
1222static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf)
1223{
1224 int count = 0;
1225 struct bonding *bond = to_bond(cd);
1226
1227 if (bond->params.mode == BOND_MODE_8023AD) {
1228 struct ad_info ad_info;
1229 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key) + 1;
1230 }
1231 else
1232 count = sprintf(buf, "\n") + 1;
1233
1234 return count;
1235}
1236static CLASS_DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
1237
1238
1239/*
1240 * Show current 802.3ad partner mac.
1241 */
1242static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf)
1243{
1244 int count = 0;
1245 struct bonding *bond = to_bond(cd);
1246
1247 if (bond->params.mode == BOND_MODE_8023AD) {
1248 struct ad_info ad_info;
1249 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) {
1250 count = sprintf(buf,"%02x:%02x:%02x:%02x:%02x:%02x\n",
1251 ad_info.partner_system[0],
1252 ad_info.partner_system[1],
1253 ad_info.partner_system[2],
1254 ad_info.partner_system[3],
1255 ad_info.partner_system[4],
1256 ad_info.partner_system[5]) + 1;
1257 }
1258 }
1259 else
1260 count = sprintf(buf, "\n") + 1;
1261
1262 return count;
1263}
1264static CLASS_DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1265
1266
1267
1268static struct attribute *per_bond_attrs[] = {
1269 &class_device_attr_slaves.attr,
1270 &class_device_attr_mode.attr,
1271 &class_device_attr_arp_interval.attr,
1272 &class_device_attr_arp_ip_target.attr,
1273 &class_device_attr_downdelay.attr,
1274 &class_device_attr_updelay.attr,
1275 &class_device_attr_lacp_rate.attr,
1276 &class_device_attr_xmit_hash_policy.attr,
1277 &class_device_attr_miimon.attr,
1278 &class_device_attr_primary.attr,
1279 &class_device_attr_use_carrier.attr,
1280 &class_device_attr_active_slave.attr,
1281 &class_device_attr_mii_status.attr,
1282 &class_device_attr_ad_aggregator.attr,
1283 &class_device_attr_ad_num_ports.attr,
1284 &class_device_attr_ad_actor_key.attr,
1285 &class_device_attr_ad_partner_key.attr,
1286 &class_device_attr_ad_partner_mac.attr,
1287 NULL,
1288};
1289
1290static struct attribute_group bonding_group = {
1291 .name = "bonding",
1292 .attrs = per_bond_attrs,
1293};
1294
1295/*
1296 * Initialize sysfs. This sets up the bonding_masters file in
1297 * /sys/class/net.
1298 */
1299int bond_create_sysfs(void)
1300{
1301 int ret = 0;
1302 struct bonding *firstbond;
1303
1304 init_rwsem(&bonding_rwsem);
1305
1306 /* get the netdev class pointer */
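/* no handle to the net class is exported to modules, so borrow it from
 * the class_dev embedded in the first bonding device on the list */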
1307 firstbond = container_of(bond_dev_list.next, struct bonding, bond_list);
1308 if (!firstbond)
1309 return -ENODEV;
1310
1311 netdev_class = firstbond->dev->class_dev.class;
1312 if (!netdev_class)
1313 return -ENODEV;
1314
1315 ret = class_create_file(netdev_class, &class_attr_bonding_masters);
1316
1317 return ret;
1318
1319}
1320
1321/*
1322 * Remove /sys/class/net/bonding_masters.
1323 */
1324void bond_destroy_sysfs(void)
1325{
1326 if (netdev_class)
1327 class_remove_file(netdev_class, &class_attr_bonding_masters);
1328}
1329
1330/*
1331 * Initialize sysfs for each bond. This sets up and registers
1332 * the 'bonding' attribute group for each individual bond under /sys/class/net/<bond name>/.
1333 */
1334int bond_create_sysfs_entry(struct bonding *bond)
1335{
1336 struct net_device *dev = bond->dev;
1337 int err;
1338
1339 err = sysfs_create_group(&(dev->class_dev.kobj), &bonding_group);
1340 if (err) {
1341 printk(KERN_ERR DRV_NAME ": %s: failed to create bonding sysfs attribute group\n", dev->name);
1342 }
1343
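/* record the kobject refcount observed right after the first group is
 * created; this baseline is compared against later when deciding whether
 * a bond still has outstanding references */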
1344 if (expected_refcount < 1)
1345 expected_refcount = atomic_read(&bond->dev->class_dev.kobj.kref.refcount);
1346
1347 return err;
1348}
1349/*
1350 * Remove sysfs entries for each bond.
1351 */
1352void bond_destroy_sysfs_entry(struct bonding *bond)
1353{
1354 struct net_device *dev = bond->dev;
1355
1356 sysfs_remove_group(&(dev->class_dev.kobj), &bonding_group);
1357}
1358
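A minimal userspace sketch of driving the sysfs interface added above (bond0, eth0 and the ARP target address are only example names; the interface takes the same "+name"/"-name" commands that the store handlers parse):

/* illustrative only: enslave eth0 to bond0, add an ARP target and set the
 * ARP monitoring interval by writing to the per-bond sysfs files */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));	/* e.g. "+eth0" adds a slave */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	sysfs_write("/sys/class/net/bond0/bonding/slaves", "+eth0");
	sysfs_write("/sys/class/net/bond0/bonding/arp_ip_target", "+192.168.0.1");
	sysfs_write("/sys/class/net/bond0/bonding/arp_interval", "500");
	return 0;
}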
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 1433e91db0f7..015c7f1d1bc0 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -10,25 +10,6 @@
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU Public License, incorporated herein by reference. 11 * of the GNU Public License, incorporated herein by reference.
12 * 12 *
13 *
14 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
15 * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
16 * Shmulik Hen <shmulik.hen at intel dot com>
17 * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
18 *
19 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
20 * Amir Noam <amir.noam at intel dot com>
21 * - Code beautification and style changes (mainly in comments).
22 *
23 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
24 * - Added support for Transmit load balancing mode.
25 *
26 * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
27 * - Code cleanup and style changes
28 *
29 * 2005/05/05 - Jason Gabler <jygabler at lbl dot gov>
30 * - added "xmit_policy" kernel parameter for alternate hashing policy
31 * support for mode 2
32 */ 13 */
33 14
34#ifndef _LINUX_BONDING_H 15#ifndef _LINUX_BONDING_H
@@ -37,11 +18,12 @@
37#include <linux/timer.h> 18#include <linux/timer.h>
38#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
39#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/kobject.h>
40#include "bond_3ad.h" 22#include "bond_3ad.h"
41#include "bond_alb.h" 23#include "bond_alb.h"
42 24
43#define DRV_VERSION "2.6.5" 25#define DRV_VERSION "3.0.0"
44#define DRV_RELDATE "November 4, 2005" 26#define DRV_RELDATE "November 8, 2005"
45#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
46#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
47 29
@@ -152,6 +134,11 @@ struct bond_params {
152 u32 arp_targets[BOND_MAX_ARP_TARGETS]; 134 u32 arp_targets[BOND_MAX_ARP_TARGETS];
153}; 135};
154 136
137struct bond_parm_tbl {
138 char *modename;
139 int mode;
140};
141
155struct vlan_entry { 142struct vlan_entry {
156 struct list_head vlan_list; 143 struct list_head vlan_list;
157 u32 vlan_ip; 144 u32 vlan_ip;
@@ -159,7 +146,7 @@ struct vlan_entry {
159}; 146};
160 147
161struct slave { 148struct slave {
162 struct net_device *dev; /* first - usefull for panic debug */ 149 struct net_device *dev; /* first - useful for panic debug */
163 struct slave *next; 150 struct slave *next;
164 struct slave *prev; 151 struct slave *prev;
165 s16 delay; 152 s16 delay;
@@ -185,7 +172,7 @@ struct slave {
185 * beforehand. 172 * beforehand.
186 */ 173 */
187struct bonding { 174struct bonding {
188 struct net_device *dev; /* first - usefull for panic debug */ 175 struct net_device *dev; /* first - useful for panic debug */
189 struct slave *first_slave; 176 struct slave *first_slave;
190 struct slave *curr_active_slave; 177 struct slave *curr_active_slave;
191 struct slave *current_arp_slave; 178 struct slave *current_arp_slave;
@@ -255,6 +242,25 @@ extern inline void bond_set_slave_active_flags(struct slave *slave)
255 242
256struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 243struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
257int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 244int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
245int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
246void bond_deinit(struct net_device *bond_dev);
247int bond_create_sysfs(void);
248void bond_destroy_sysfs(void);
249void bond_destroy_sysfs_entry(struct bonding *bond);
250int bond_create_sysfs_entry(struct bonding *bond);
251int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
252void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
253int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
254int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
255int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev);
256void bond_mii_monitor(struct net_device *bond_dev);
257void bond_loadbalance_arp_mon(struct net_device *bond_dev);
258void bond_activebackup_arp_mon(struct net_device *bond_dev);
259void bond_set_mode_ops(struct bonding *bond, int mode);
260int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
261const char *bond_mode_name(int mode);
262void bond_select_active_slave(struct bonding *bond);
263void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
258 264
259#endif /* _LINUX_BONDING_H */ 265#endif /* _LINUX_BONDING_H */
260 266
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 53b41d99b00b..2c5b849b7ba4 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1332,8 +1332,8 @@ intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1332 * 1332 *
1333 * This runs with softirqs disabled. 1333 * This runs with softirqs disabled.
1334 */ 1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, 1335static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev) 1336 unsigned int qid, struct net_device *dev)
1337{ 1337{
1338 struct sge *sge = adapter->sge; 1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid]; 1339 struct cmdQ *q = &sge->cmdQ[qid];
@@ -1352,9 +1352,10 @@ unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1352 set_bit(dev->if_port, &sge->stopped_tx_queues); 1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++; 1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock); 1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n", 1355 if (!netif_queue_stopped(dev))
1356 adapter->name); 1356 CH_ERR("%s: Tx ring full while queue awake!\n",
1357 return 1; 1357 adapter->name);
1358 return NETDEV_TX_BUSY;
1358 } 1359 }
1359 if (unlikely(credits - count < q->stop_thres)) { 1360 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++; 1361 sge->stats.cmdQ_full[3]++;
@@ -1389,7 +1390,7 @@ unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1390 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 } 1391 }
1391 } 1392 }
1392 return 0; 1393 return NETDEV_TX_OK;
1393} 1394}
1394 1395
1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) 1396#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
@@ -1449,7 +1450,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1449 if (unlikely(skb->len < ETH_HLEN || 1450 if (unlikely(skb->len < ETH_HLEN ||
1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) { 1451 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1451 dev_kfree_skb_any(skb); 1452 dev_kfree_skb_any(skb);
1452 return NET_XMIT_SUCCESS; 1453 return NETDEV_TX_OK;
1453 } 1454 }
1454 1455
1455 /* 1456 /*
@@ -1467,7 +1468,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl)); 1468 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb); 1469 dev_kfree_skb_any(orig_skb);
1469 if (!skb) 1470 if (!skb)
1470 return -ENOMEM; 1471 return NETDEV_TX_OK;
1471 } 1472 }
1472 1473
1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1474 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
@@ -1475,7 +1476,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1475 skb->nh.iph->protocol == IPPROTO_UDP) 1476 skb->nh.iph->protocol == IPPROTO_UDP)
1476 if (unlikely(skb_checksum_help(skb, 0))) { 1477 if (unlikely(skb_checksum_help(skb, 0))) {
1477 dev_kfree_skb_any(skb); 1478 dev_kfree_skb_any(skb);
1478 return -ENOMEM; 1479 return NETDEV_TX_OK;
1479 } 1480 }
1480 1481
1481 /* Hmmm, assuming to catch the gratious arp... and we'll use 1482 /* Hmmm, assuming to catch the gratious arp... and we'll use
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 434b25586851..6d0d24a6364f 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -89,8 +89,6 @@ int t1_sge_configure(struct sge *, struct sge_params *);
89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); 89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
90void t1_sge_destroy(struct sge *); 90void t1_sge_destroy(struct sge *);
91intr_handler_t t1_select_intr_handler(adapter_t *adapter); 91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); 92int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
95void t1_set_vlan_accel(struct adapter *adapter, int on_off); 93void t1_set_vlan_accel(struct adapter *adapter, int on_off);
96void t1_sge_start(struct sge *); 94void t1_sge_start(struct sge *);
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c
index 2a290cc397ad..70b47e4c4e9c 100644
--- a/drivers/net/dgrs.c
+++ b/drivers/net/dgrs.c
@@ -1458,6 +1458,8 @@ static struct pci_driver dgrs_pci_driver = {
1458 .probe = dgrs_pci_probe, 1458 .probe = dgrs_pci_probe,
1459 .remove = __devexit_p(dgrs_pci_remove), 1459 .remove = __devexit_p(dgrs_pci_remove),
1460}; 1460};
1461#else
1462static struct pci_driver dgrs_pci_driver = {};
1461#endif 1463#endif
1462 1464
1463 1465
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3f653a93e1bc..e02e9ba2e18b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -188,11 +188,13 @@ struct e1000_tx_ring {
188 /* array of buffer information structs */ 188 /* array of buffer information structs */
189 struct e1000_buffer *buffer_info; 189 struct e1000_buffer *buffer_info;
190 190
191 struct e1000_buffer previous_buffer_info;
192 spinlock_t tx_lock; 191 spinlock_t tx_lock;
193 uint16_t tdh; 192 uint16_t tdh;
194 uint16_t tdt; 193 uint16_t tdt;
195 uint64_t pkt; 194 uint64_t pkt;
195
196 boolean_t last_tx_tso;
197
196}; 198};
197 199
198struct e1000_rx_ring { 200struct e1000_rx_ring {
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 8eae8ba27e84..c88f1a3c1b1d 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -562,10 +562,29 @@ e1000_get_drvinfo(struct net_device *netdev,
562 struct ethtool_drvinfo *drvinfo) 562 struct ethtool_drvinfo *drvinfo)
563{ 563{
564 struct e1000_adapter *adapter = netdev_priv(netdev); 564 struct e1000_adapter *adapter = netdev_priv(netdev);
565 char firmware_version[32];
566 uint16_t eeprom_data;
565 567
566 strncpy(drvinfo->driver, e1000_driver_name, 32); 568 strncpy(drvinfo->driver, e1000_driver_name, 32);
567 strncpy(drvinfo->version, e1000_driver_version, 32); 569 strncpy(drvinfo->version, e1000_driver_version, 32);
568 strncpy(drvinfo->fw_version, "N/A", 32); 570
571 /* EEPROM image version # is reported as firmware version # for
572 * 8257{1|2|3} controllers */
573 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
574 switch (adapter->hw.mac_type) {
575 case e1000_82571:
576 case e1000_82572:
577 case e1000_82573:
578 sprintf(firmware_version, "%d.%d-%d",
579 (eeprom_data & 0xF000) >> 12,
580 (eeprom_data & 0x0FF0) >> 4,
581 eeprom_data & 0x000F);
582 break;
583 default:
584 sprintf(firmware_version, "n/a");
585 }
586
587 strncpy(drvinfo->fw_version, firmware_version, 32);
569 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 588 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
570 drvinfo->n_stats = E1000_STATS_LEN; 589 drvinfo->n_stats = E1000_STATS_LEN;
571 drvinfo->testinfo_len = E1000_TEST_LEN; 590 drvinfo->testinfo_len = E1000_TEST_LEN;
@@ -960,13 +979,21 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
960 } 979 }
961 } 980 }
962 981
963 if(txdr->desc) 982 if(txdr->desc) {
964 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); 983 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
965 if(rxdr->desc) 984 txdr->desc = NULL;
985 }
986 if(rxdr->desc) {
966 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 987 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
988 rxdr->desc = NULL;
989 }
967 990
968 kfree(txdr->buffer_info); 991 kfree(txdr->buffer_info);
992 txdr->buffer_info = NULL;
993
969 kfree(rxdr->buffer_info); 994 kfree(rxdr->buffer_info);
995 rxdr->buffer_info = NULL;
996
970 return; 997 return;
971} 998}
972 999
@@ -1301,21 +1328,32 @@ static int
1301e1000_setup_loopback_test(struct e1000_adapter *adapter) 1328e1000_setup_loopback_test(struct e1000_adapter *adapter)
1302{ 1329{
1303 uint32_t rctl; 1330 uint32_t rctl;
1331 struct e1000_hw *hw = &adapter->hw;
1304 1332
1305 if(adapter->hw.media_type == e1000_media_type_fiber || 1333 if (hw->media_type == e1000_media_type_fiber ||
1306 adapter->hw.media_type == e1000_media_type_internal_serdes) { 1334 hw->media_type == e1000_media_type_internal_serdes) {
1307 if(adapter->hw.mac_type == e1000_82545 || 1335 switch (hw->mac_type) {
1308 adapter->hw.mac_type == e1000_82546 || 1336 case e1000_82545:
1309 adapter->hw.mac_type == e1000_82545_rev_3 || 1337 case e1000_82546:
1310 adapter->hw.mac_type == e1000_82546_rev_3) 1338 case e1000_82545_rev_3:
1339 case e1000_82546_rev_3:
1311 return e1000_set_phy_loopback(adapter); 1340 return e1000_set_phy_loopback(adapter);
1312 else { 1341 break;
1313 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1342 case e1000_82571:
1343 case e1000_82572:
1344#define E1000_SERDES_LB_ON 0x410
1345 e1000_set_phy_loopback(adapter);
1346 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON);
1347 msec_delay(10);
1348 return 0;
1349 break;
1350 default:
1351 rctl = E1000_READ_REG(hw, RCTL);
1314 rctl |= E1000_RCTL_LBM_TCVR; 1352 rctl |= E1000_RCTL_LBM_TCVR;
1315 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1353 E1000_WRITE_REG(hw, RCTL, rctl);
1316 return 0; 1354 return 0;
1317 } 1355 }
1318 } else if(adapter->hw.media_type == e1000_media_type_copper) 1356 } else if (hw->media_type == e1000_media_type_copper)
1319 return e1000_set_phy_loopback(adapter); 1357 return e1000_set_phy_loopback(adapter);
1320 1358
1321 return 7; 1359 return 7;
@@ -1326,25 +1364,36 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
1326{ 1364{
1327 uint32_t rctl; 1365 uint32_t rctl;
1328 uint16_t phy_reg; 1366 uint16_t phy_reg;
1367 struct e1000_hw *hw = &adapter->hw;
1329 1368
1330 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1369 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1331 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1370 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1332 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1371 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1333 1372
1334 if(adapter->hw.media_type == e1000_media_type_copper || 1373 switch (hw->mac_type) {
1335 ((adapter->hw.media_type == e1000_media_type_fiber || 1374 case e1000_82571:
1336 adapter->hw.media_type == e1000_media_type_internal_serdes) && 1375 case e1000_82572:
1337 (adapter->hw.mac_type == e1000_82545 || 1376 if (hw->media_type == e1000_media_type_fiber ||
1338 adapter->hw.mac_type == e1000_82546 || 1377 hw->media_type == e1000_media_type_internal_serdes){
1339 adapter->hw.mac_type == e1000_82545_rev_3 || 1378#define E1000_SERDES_LB_OFF 0x400
1340 adapter->hw.mac_type == e1000_82546_rev_3))) { 1379 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
1341 adapter->hw.autoneg = TRUE; 1380 msec_delay(10);
1342 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); 1381 break;
1343 if(phy_reg & MII_CR_LOOPBACK) { 1382 }
1383 /* fall thru for Cu adapters */
1384 case e1000_82545:
1385 case e1000_82546:
1386 case e1000_82545_rev_3:
1387 case e1000_82546_rev_3:
1388 default:
1389 hw->autoneg = TRUE;
1390 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
1391 if (phy_reg & MII_CR_LOOPBACK) {
1344 phy_reg &= ~MII_CR_LOOPBACK; 1392 phy_reg &= ~MII_CR_LOOPBACK;
1345 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg); 1393 e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
1346 e1000_phy_reset(&adapter->hw); 1394 e1000_phy_reset(hw);
1347 } 1395 }
1396 break;
1348 } 1397 }
1349} 1398}
1350 1399
@@ -1440,9 +1489,11 @@ static int
1440e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data) 1489e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
1441{ 1490{
1442 if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback; 1491 if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
1443 if((*data = e1000_setup_loopback_test(adapter))) goto err_loopback; 1492 if((*data = e1000_setup_loopback_test(adapter)))
1493 goto err_loopback_setup;
1444 *data = e1000_run_loopback_test(adapter); 1494 *data = e1000_run_loopback_test(adapter);
1445 e1000_loopback_cleanup(adapter); 1495 e1000_loopback_cleanup(adapter);
1496err_loopback_setup:
1446 e1000_free_desc_rings(adapter); 1497 e1000_free_desc_rings(adapter);
1447err_loopback: 1498err_loopback:
1448 return *data; 1499 return *data;
@@ -1671,6 +1722,14 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1671 msleep_interruptible(data * 1000); 1722 msleep_interruptible(data * 1000);
1672 del_timer_sync(&adapter->blink_timer); 1723 del_timer_sync(&adapter->blink_timer);
1673 } 1724 }
1725 else if(adapter->hw.mac_type < e1000_82573) {
1726 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
1727 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
1728 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1729 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
1730 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
1731 msleep_interruptible(data * 1000);
1732 }
1674 else { 1733 else {
1675 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | 1734 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
1676 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | 1735 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index a267c5235fc0..136fc031e4ad 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -563,11 +563,13 @@ e1000_reset_hw(struct e1000_hw *hw)
563 msec_delay(20); 563 msec_delay(20);
564 break; 564 break;
565 case e1000_82573: 565 case e1000_82573:
566 udelay(10); 566 if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
567 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 567 udelay(10);
568 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 568 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
569 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 569 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
570 E1000_WRITE_FLUSH(hw); 570 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
571 E1000_WRITE_FLUSH(hw);
572 }
571 /* fall through */ 573 /* fall through */
572 case e1000_82571: 574 case e1000_82571:
573 case e1000_82572: 575 case e1000_82572:
@@ -844,19 +846,27 @@ e1000_setup_link(struct e1000_hw *hw)
844 * control setting, then the variable hw->fc will 846 * control setting, then the variable hw->fc will
845 * be initialized based on a value in the EEPROM. 847 * be initialized based on a value in the EEPROM.
846 */ 848 */
847 if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data)) { 849 if (hw->fc == e1000_fc_default) {
848 DEBUGOUT("EEPROM Read Error\n"); 850 switch (hw->mac_type) {
849 return -E1000_ERR_EEPROM; 851 case e1000_82573:
850 }
851
852 if(hw->fc == e1000_fc_default) {
853 if((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
854 hw->fc = e1000_fc_none;
855 else if((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
856 EEPROM_WORD0F_ASM_DIR)
857 hw->fc = e1000_fc_tx_pause;
858 else
859 hw->fc = e1000_fc_full; 852 hw->fc = e1000_fc_full;
853 break;
854 default:
855 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
856 1, &eeprom_data);
857 if (ret_val) {
858 DEBUGOUT("EEPROM Read Error\n");
859 return -E1000_ERR_EEPROM;
860 }
861 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
862 hw->fc = e1000_fc_none;
863 else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
864 EEPROM_WORD0F_ASM_DIR)
865 hw->fc = e1000_fc_tx_pause;
866 else
867 hw->fc = e1000_fc_full;
868 break;
869 }
860 } 870 }
861 871
862 /* We want to save off the original Flow Control configuration just 872 /* We want to save off the original Flow Control configuration just
@@ -2962,13 +2972,22 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2962 if(hw->mac_type > e1000_82543) { 2972 if(hw->mac_type > e1000_82543) {
2963 /* Read the device control register and assert the E1000_CTRL_PHY_RST 2973 /* Read the device control register and assert the E1000_CTRL_PHY_RST
2964 * bit. Then, take it out of reset. 2974 * bit. Then, take it out of reset.
2975 * For pre-e1000_82571 hardware, we delay for 10ms between the assert
2976 * and deassert. For e1000_82571 hardware and later, we instead delay
2977 * for 10ms after the deassertion.
2965 */ 2978 */
2966 ctrl = E1000_READ_REG(hw, CTRL); 2979 ctrl = E1000_READ_REG(hw, CTRL);
2967 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); 2980 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
2968 E1000_WRITE_FLUSH(hw); 2981 E1000_WRITE_FLUSH(hw);
2969 msec_delay(10); 2982
2983 if (hw->mac_type < e1000_82571)
2984 msec_delay(10);
2985
2970 E1000_WRITE_REG(hw, CTRL, ctrl); 2986 E1000_WRITE_REG(hw, CTRL, ctrl);
2971 E1000_WRITE_FLUSH(hw); 2987 E1000_WRITE_FLUSH(hw);
2988
2989 if (hw->mac_type >= e1000_82571)
2990 msec_delay(10);
2972 } else { 2991 } else {
2973 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 2992 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
2974 * bit to put the PHY into reset. Then, take it out of reset. 2993 * bit to put the PHY into reset. Then, take it out of reset.
@@ -5278,11 +5297,15 @@ e1000_get_bus_info(struct e1000_hw *hw)
5278 hw->bus_speed = e1000_bus_speed_unknown; 5297 hw->bus_speed = e1000_bus_speed_unknown;
5279 hw->bus_width = e1000_bus_width_unknown; 5298 hw->bus_width = e1000_bus_width_unknown;
5280 break; 5299 break;
5281 case e1000_82571:
5282 case e1000_82572: 5300 case e1000_82572:
5283 case e1000_82573: 5301 case e1000_82573:
5284 hw->bus_type = e1000_bus_type_pci_express; 5302 hw->bus_type = e1000_bus_type_pci_express;
5285 hw->bus_speed = e1000_bus_speed_2500; 5303 hw->bus_speed = e1000_bus_speed_2500;
5304 hw->bus_width = e1000_bus_width_pciex_1;
5305 break;
5306 case e1000_82571:
5307 hw->bus_type = e1000_bus_type_pci_express;
5308 hw->bus_speed = e1000_bus_speed_2500;
5286 hw->bus_width = e1000_bus_width_pciex_4; 5309 hw->bus_width = e1000_bus_width_pciex_4;
5287 break; 5310 break;
5288 default: 5311 default:
@@ -6650,6 +6673,12 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
6650 break; 6673 break;
6651 } 6674 }
6652 6675
6676 /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
6677 * Need to wait for PHY configuration completion before accessing NVM
6678 * and PHY. */
6679 if (hw->mac_type == e1000_82573)
6680 msec_delay(25);
6681
6653 return E1000_SUCCESS; 6682 return E1000_SUCCESS;
6654} 6683}
6655 6684
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 76ce12809a11..7caa35748cea 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -123,6 +123,7 @@ typedef enum {
123 e1000_bus_width_32, 123 e1000_bus_width_32,
124 e1000_bus_width_64, 124 e1000_bus_width_64,
125 e1000_bus_width_pciex_1, 125 e1000_bus_width_pciex_1,
126 e1000_bus_width_pciex_2,
126 e1000_bus_width_pciex_4, 127 e1000_bus_width_pciex_4,
127 e1000_bus_width_reserved 128 e1000_bus_width_reserved
128} e1000_bus_width; 129} e1000_bus_width;
@@ -149,6 +150,7 @@ typedef enum {
149 e1000_igp_cable_length_90 = 90, 150 e1000_igp_cable_length_90 = 90,
150 e1000_igp_cable_length_100 = 100, 151 e1000_igp_cable_length_100 = 100,
151 e1000_igp_cable_length_110 = 110, 152 e1000_igp_cable_length_110 = 110,
153 e1000_igp_cable_length_115 = 115,
152 e1000_igp_cable_length_120 = 120, 154 e1000_igp_cable_length_120 = 120,
153 e1000_igp_cable_length_130 = 130, 155 e1000_igp_cable_length_130 = 130,
154 e1000_igp_cable_length_140 = 140, 156 e1000_igp_cable_length_140 = 140,
@@ -1457,6 +1459,7 @@ struct e1000_hw {
1457#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 1459#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
1458#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ 1460#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
1459#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 1461#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
1462#define E1000_EECD_SECVAL_SHIFT 22
1460#define E1000_STM_OPCODE 0xDB00 1463#define E1000_STM_OPCODE 0xDB00
1461#define E1000_HICR_FW_RESET 0xC0 1464#define E1000_HICR_FW_RESET 0xC0
1462 1465
@@ -1951,7 +1954,6 @@ struct e1000_host_command_info {
1951 1954
1952#define E1000_MDALIGN 4096 1955#define E1000_MDALIGN 4096
1953 1956
1954#define E1000_GCR_BEM32 0x00400000
1955#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 1957#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1956/* Function Active and Power State to MNG */ 1958/* Function Active and Power State to MNG */
1957#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1959#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8b207f0e139e..438a931fd55d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -711,6 +711,7 @@ e1000_probe(struct pci_dev *pdev,
711 break; 711 break;
712 case e1000_82546: 712 case e1000_82546:
713 case e1000_82546_rev_3: 713 case e1000_82546_rev_3:
714 case e1000_82571:
714 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) 715 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
715 && (adapter->hw.media_type == e1000_media_type_copper)) { 716 && (adapter->hw.media_type == e1000_media_type_copper)) {
716 e1000_read_eeprom(&adapter->hw, 717 e1000_read_eeprom(&adapter->hw,
@@ -1158,7 +1159,6 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1158 return -ENOMEM; 1159 return -ENOMEM;
1159 } 1160 }
1160 memset(txdr->buffer_info, 0, size); 1161 memset(txdr->buffer_info, 0, size);
1161 memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
1162 1162
1163 /* round up to nearest 4K */ 1163 /* round up to nearest 4K */
1164 1164
@@ -1813,11 +1813,6 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1813 1813
1814 /* Free all the Tx ring sk_buffs */ 1814 /* Free all the Tx ring sk_buffs */
1815 1815
1816 if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
1817 e1000_unmap_and_free_tx_resource(adapter,
1818 &tx_ring->previous_buffer_info);
1819 }
1820
1821 for(i = 0; i < tx_ring->count; i++) { 1816 for(i = 0; i < tx_ring->count; i++) {
1822 buffer_info = &tx_ring->buffer_info[i]; 1817 buffer_info = &tx_ring->buffer_info[i];
1823 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1818 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -1832,6 +1827,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1832 1827
1833 tx_ring->next_to_use = 0; 1828 tx_ring->next_to_use = 0;
1834 tx_ring->next_to_clean = 0; 1829 tx_ring->next_to_clean = 0;
1830 tx_ring->last_tx_tso = 0;
1835 1831
1836 writel(0, adapter->hw.hw_addr + tx_ring->tdh); 1832 writel(0, adapter->hw.hw_addr + tx_ring->tdh);
1837 writel(0, adapter->hw.hw_addr + tx_ring->tdt); 1833 writel(0, adapter->hw.hw_addr + tx_ring->tdt);
@@ -2437,6 +2433,16 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2437 buffer_info = &tx_ring->buffer_info[i]; 2433 buffer_info = &tx_ring->buffer_info[i];
2438 size = min(len, max_per_txd); 2434 size = min(len, max_per_txd);
2439#ifdef NETIF_F_TSO 2435#ifdef NETIF_F_TSO
2436 /* Workaround for Controller erratum --
2437 * descriptor for non-tso packet in a linear SKB that follows a
2438 * tso gets written back prematurely before the data is fully
2439 * DMAd to the controller */
2440 if (!skb->data_len && tx_ring->last_tx_tso &&
2441 !skb_shinfo(skb)->tso_size) {
2442 tx_ring->last_tx_tso = 0;
2443 size -= 4;
2444 }
2445
2440 /* Workaround for premature desc write-backs 2446 /* Workaround for premature desc write-backs
2441 * in TSO mode. Append 4-byte sentinel desc */ 2447 * in TSO mode. Append 4-byte sentinel desc */
2442 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2448 if(unlikely(mss && !nr_frags && size == len && size > 8))
@@ -2621,19 +2627,7 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2621 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2627 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2622 return 0; 2628 return 0;
2623 } 2629 }
2624 if(htons(ETH_P_IP) == skb->protocol) { 2630 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2625 const struct iphdr *ip = skb->nh.iph;
2626 if(IPPROTO_UDP == ip->protocol) {
2627 struct udphdr *udp = (struct udphdr *)(skb->h.uh);
2628 if(ntohs(udp->dest) == 67) {
2629 offset = (uint8_t *)udp + 8 - skb->data;
2630 length = skb->len - offset;
2631
2632 return e1000_mng_write_dhcp_info(hw,
2633 (uint8_t *)udp + 8, length);
2634 }
2635 }
2636 } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2637 struct ethhdr *eth = (struct ethhdr *) skb->data; 2631 struct ethhdr *eth = (struct ethhdr *) skb->data;
2638 if((htons(ETH_P_IP) == eth->h_proto)) { 2632 if((htons(ETH_P_IP) == eth->h_proto)) {
2639 const struct iphdr *ip = 2633 const struct iphdr *ip =
@@ -2705,6 +2699,14 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2705 if(skb->ip_summed == CHECKSUM_HW) 2699 if(skb->ip_summed == CHECKSUM_HW)
2706 count++; 2700 count++;
2707#endif 2701#endif
2702
2703#ifdef NETIF_F_TSO
2704 /* Controller Erratum workaround */
2705 if (!skb->data_len && tx_ring->last_tx_tso &&
2706 !skb_shinfo(skb)->tso_size)
2707 count++;
2708#endif
2709
2708 count += TXD_USE_COUNT(len, max_txd_pwr); 2710 count += TXD_USE_COUNT(len, max_txd_pwr);
2709 2711
2710 if(adapter->pcix_82544) 2712 if(adapter->pcix_82544)
@@ -2786,9 +2788,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2786 return NETDEV_TX_OK; 2788 return NETDEV_TX_OK;
2787 } 2789 }
2788 2790
2789 if (likely(tso)) 2791 if (likely(tso)) {
2792 tx_ring->last_tx_tso = 1;
2790 tx_flags |= E1000_TX_FLAGS_TSO; 2793 tx_flags |= E1000_TX_FLAGS_TSO;
2791 else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 2794 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
2792 tx_flags |= E1000_TX_FLAGS_CSUM; 2795 tx_flags |= E1000_TX_FLAGS_CSUM;
2793 2796
2794 /* Old method was to assume IPv4 packet by default if TSO was enabled. 2797 /* Old method was to assume IPv4 packet by default if TSO was enabled.
@@ -3239,37 +3242,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3239 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3242 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3240 3243
3241 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3244 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3242 /* Premature writeback of Tx descriptors clear (free buffers
3243 * and unmap pci_mapping) previous_buffer_info */
3244 if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
3245 e1000_unmap_and_free_tx_resource(adapter,
3246 &tx_ring->previous_buffer_info);
3247 }
3248
3249 for(cleaned = FALSE; !cleaned; ) { 3245 for(cleaned = FALSE; !cleaned; ) {
3250 tx_desc = E1000_TX_DESC(*tx_ring, i); 3246 tx_desc = E1000_TX_DESC(*tx_ring, i);
3251 buffer_info = &tx_ring->buffer_info[i]; 3247 buffer_info = &tx_ring->buffer_info[i];
3252 cleaned = (i == eop); 3248 cleaned = (i == eop);
3253 3249
3254#ifdef NETIF_F_TSO 3250 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3255 if (!(netdev->features & NETIF_F_TSO)) {
3256#endif
3257 e1000_unmap_and_free_tx_resource(adapter,
3258 buffer_info);
3259#ifdef NETIF_F_TSO
3260 } else {
3261 if (cleaned) {
3262 memcpy(&tx_ring->previous_buffer_info,
3263 buffer_info,
3264 sizeof(struct e1000_buffer));
3265 memset(buffer_info, 0,
3266 sizeof(struct e1000_buffer));
3267 } else {
3268 e1000_unmap_and_free_tx_resource(
3269 adapter, buffer_info);
3270 }
3271 }
3272#endif
3273 3251
3274 tx_desc->buffer_addr = 0; 3252 tx_desc->buffer_addr = 0;
3275 tx_desc->lower.data = 0; 3253 tx_desc->lower.data = 0;
@@ -3330,12 +3308,6 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3330 netif_stop_queue(netdev); 3308 netif_stop_queue(netdev);
3331 } 3309 }
3332 } 3310 }
3333#ifdef NETIF_F_TSO
3334 if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3335 time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
3336 e1000_unmap_and_free_tx_resource(
3337 adapter, &tx_ring->previous_buffer_info);
3338#endif
3339 return cleaned; 3311 return cleaned;
3340} 3312}
3341 3313
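The e1000 hunks above add a workaround for a controller erratum: the first non-TSO descriptor queued after a TSO frame can be written back before its data has finished DMAing, so the driver shortens that first descriptor by 4 bytes and reserves one extra descriptor for the frame. A minimal standalone sketch of that accounting, with the ring state reduced to a single flag; tx_state and first_seg_size are illustrative names, not driver symbols.

struct tx_state {
	int last_tx_tso;	/* previous queued frame used TSO */
};

/* Return the length to use for the first segment of a frame. */
static unsigned int first_seg_size(struct tx_state *ring,
				   unsigned int size,
				   int frame_is_linear, int frame_is_tso)
{
	if (frame_is_linear && ring->last_tx_tso && !frame_is_tso) {
		/* Erratum case: shave 4 bytes off the first descriptor
		 * (the remainder goes into the next descriptor). */
		ring->last_tx_tso = 0;
		size -= 4;
	}
	if (frame_is_tso)
		ring->last_tx_tso = 1;
	return size;
}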
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 525624fc03b4..c39344adecce 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -10,7 +10,7 @@
10 * trademarks of NVIDIA Corporation in the United States and other 10 * trademarks of NVIDIA Corporation in the United States and other
11 * countries. 11 * countries.
12 * 12 *
13 * Copyright (C) 2003,4 Manfred Spraul 13 * Copyright (C) 2003,4,5 Manfred Spraul
14 * Copyright (C) 2004 Andrew de Quincey (wol support) 14 * Copyright (C) 2004 Andrew de Quincey (wol support)
15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane 15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
16 * IRQ rate fixes, bigendian fixes, cleanups, verification) 16 * IRQ rate fixes, bigendian fixes, cleanups, verification)
@@ -100,6 +100,7 @@
100 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check 100 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
101 * 0.46: 20 Oct 2005: Add irq optimization modes. 101 * 0.46: 20 Oct 2005: Add irq optimization modes.
102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. 102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
103 * 104 *
104 * Known bugs: 105 * Known bugs:
105 * We suspect that on some hardware no TX done interrupts are generated. 106 * We suspect that on some hardware no TX done interrupts are generated.
@@ -111,7 +112,7 @@
111 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 112 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
112 * superfluous timer interrupts from the nic. 113 * superfluous timer interrupts from the nic.
113 */ 114 */
114#define FORCEDETH_VERSION "0.47" 115#define FORCEDETH_VERSION "0.48"
115#define DRV_NAME "forcedeth" 116#define DRV_NAME "forcedeth"
116 117
117#include <linux/module.h> 118#include <linux/module.h>
@@ -871,8 +872,8 @@ static int nv_alloc_rx(struct net_device *dev)
871 } else { 872 } else {
872 skb = np->rx_skbuff[nr]; 873 skb = np->rx_skbuff[nr];
873 } 874 }
874 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len, 875 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
875 PCI_DMA_FROMDEVICE); 876 skb->end-skb->data, PCI_DMA_FROMDEVICE);
876 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 877 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
877 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 878 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
878 wmb(); 879 wmb();
@@ -999,7 +1000,7 @@ static void nv_drain_rx(struct net_device *dev)
999 wmb(); 1000 wmb();
1000 if (np->rx_skbuff[i]) { 1001 if (np->rx_skbuff[i]) {
1001 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1002 pci_unmap_single(np->pci_dev, np->rx_dma[i],
1002 np->rx_skbuff[i]->len, 1003 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
1003 PCI_DMA_FROMDEVICE); 1004 PCI_DMA_FROMDEVICE);
1004 dev_kfree_skb(np->rx_skbuff[i]); 1005 dev_kfree_skb(np->rx_skbuff[i]);
1005 np->rx_skbuff[i] = NULL; 1006 np->rx_skbuff[i] = NULL;
@@ -1334,7 +1335,7 @@ static void nv_rx_process(struct net_device *dev)
1334 * the performance. 1335 * the performance.
1335 */ 1336 */
1336 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1337 pci_unmap_single(np->pci_dev, np->rx_dma[i],
1337 np->rx_skbuff[i]->len, 1338 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
1338 PCI_DMA_FROMDEVICE); 1339 PCI_DMA_FROMDEVICE);
1339 1340
1340 { 1341 {
@@ -2455,7 +2456,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2455 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 2456 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
2456 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2457 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2457#ifdef NETIF_F_TSO 2458#ifdef NETIF_F_TSO
2458 dev->features |= NETIF_F_TSO; 2459 /* disabled dev->features |= NETIF_F_TSO; */
2459#endif 2460#endif
2460 } 2461 }
2461 2462
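The forcedeth change above fixes the receive DMA mapping length: a freshly allocated receive skb carries little or no data yet, so skb->len understates the area the NIC may write into, while skb->end - skb->data covers the whole allocated buffer. A sketch of the corrected mapping, assuming the 2.6-era sk_buff layout in which end is a pointer; map_rx_skb is an illustrative helper name, not a driver function.

#include <linux/skbuff.h>
#include <linux/pci.h>

static dma_addr_t map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb)
{
	/* Map the full data area, not skb->len (too small for a fresh skb). */
	return pci_map_single(pdev, skb->data, skb->end - skb->data,
			      PCI_DMA_FROMDEVICE);
}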
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0f030b73cbb3..146f9513aea5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2,7 +2,8 @@
2 * drivers/net/gianfar.c 2 * drivers/net/gianfar.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c 7 * Based on 8260_io/fcc_enet.c
7 * 8 *
8 * Author: Andy Fleming 9 * Author: Andy Fleming
@@ -22,8 +23,6 @@
22 * B-V +1.62 23 * B-V +1.62
23 * 24 *
24 * Theory of operation 25 * Theory of operation
25 * This driver is designed for the non-CPM ethernet controllers
26 * on the 85xx and 83xx family of integrated processors
27 * 26 *
28 * The driver is initialized through platform_device. Structures which 27 * The driver is initialized through platform_device. Structures which
29 * define the configuration needed by the board are defined in a 28 * define the configuration needed by the board are defined in a
@@ -110,7 +109,7 @@
110#endif 109#endif
111 110
112const char gfar_driver_name[] = "Gianfar Ethernet"; 111const char gfar_driver_name[] = "Gianfar Ethernet";
113const char gfar_driver_version[] = "1.2"; 112const char gfar_driver_version[] = "1.3";
114 113
115static int gfar_enet_open(struct net_device *dev); 114static int gfar_enet_open(struct net_device *dev);
116static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -139,6 +138,10 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l
139static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
140 struct vlan_group *grp); 139 struct vlan_group *grp);
141static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 140static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
141void gfar_halt(struct net_device *dev);
142void gfar_start(struct net_device *dev);
143static void gfar_clear_exact_match(struct net_device *dev);
144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
142 145
143extern struct ethtool_ops gfar_ethtool_ops; 146extern struct ethtool_ops gfar_ethtool_ops;
144 147
@@ -146,12 +149,10 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
146MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
147MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
148 151
149int gfar_uses_fcb(struct gfar_private *priv) 152/* Returns 1 if incoming frames use an FCB */
153static inline int gfar_uses_fcb(struct gfar_private *priv)
150{ 154{
151 if (priv->vlan_enable || priv->rx_csum_enable) 155 return (priv->vlan_enable || priv->rx_csum_enable);
152 return 1;
153 else
154 return 0;
155} 156}
156 157
157/* Set up the ethernet device structure, private data, 158/* Set up the ethernet device structure, private data,
@@ -320,15 +321,10 @@ static int gfar_probe(struct platform_device *pdev)
320 else 321 else
321 priv->padding = 0; 322 priv->padding = 0;
322 323
323 dev->hard_header_len += priv->padding;
324
325 if (dev->features & NETIF_F_IP_CSUM) 324 if (dev->features & NETIF_F_IP_CSUM)
326 dev->hard_header_len += GMAC_FCB_LEN; 325 dev->hard_header_len += GMAC_FCB_LEN;
327 326
328 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 327 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
329#ifdef CONFIG_GFAR_BUFSTASH
330 priv->rx_stash_size = STASH_LENGTH;
331#endif
332 priv->tx_ring_size = DEFAULT_TX_RING_SIZE; 328 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
333 priv->rx_ring_size = DEFAULT_RX_RING_SIZE; 329 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
334 330
@@ -350,6 +346,9 @@ static int gfar_probe(struct platform_device *pdev)
350 goto register_fail; 346 goto register_fail;
351 } 347 }
352 348
349 /* Create all the sysfs files */
350 gfar_init_sysfs(dev);
351
353 /* Print out the device info */ 352 /* Print out the device info */
354 printk(KERN_INFO DEVICE_NAME, dev->name); 353 printk(KERN_INFO DEVICE_NAME, dev->name);
355 for (idx = 0; idx < 6; idx++) 354 for (idx = 0; idx < 6; idx++)
@@ -357,8 +356,7 @@ static int gfar_probe(struct platform_device *pdev)
357 printk("\n"); 356 printk("\n");
358 357
359 /* Even more device info helps when determining which kernel */ 358 /* Even more device info helps when determining which kernel */
360 /* provided which set of benchmarks. Since this is global for all */ 359 /* provided which set of benchmarks. */
361 /* devices, we only print it once */
362#ifdef CONFIG_GFAR_NAPI 360#ifdef CONFIG_GFAR_NAPI
363 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 361 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
364#else 362#else
@@ -463,19 +461,9 @@ static void init_registers(struct net_device *dev)
463 /* Initialize the max receive buffer length */ 461 /* Initialize the max receive buffer length */
464 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 462 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
465 463
466#ifdef CONFIG_GFAR_BUFSTASH
467 /* If we are stashing buffers, we need to set the
468 * extraction length to the size of the buffer */
469 gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
470#endif
471
472 /* Initialize the Minimum Frame Length Register */ 464 /* Initialize the Minimum Frame Length Register */
473 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 465 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
474 466
475 /* Setup Attributes so that snooping is on for rx */
476 gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
477 gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
478
479 /* Assign the TBI an address which won't conflict with the PHYs */ 467 /* Assign the TBI an address which won't conflict with the PHYs */
480 gfar_write(&priv->regs->tbipa, TBIPA_VALUE); 468 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
481} 469}
@@ -577,8 +565,7 @@ static void free_skb_resources(struct gfar_private *priv)
577 for (i = 0; i < priv->rx_ring_size; i++) { 565 for (i = 0; i < priv->rx_ring_size; i++) {
578 if (priv->rx_skbuff[i]) { 566 if (priv->rx_skbuff[i]) {
579 dma_unmap_single(NULL, rxbdp->bufPtr, 567 dma_unmap_single(NULL, rxbdp->bufPtr,
580 priv->rx_buffer_size 568 priv->rx_buffer_size,
581 + RXBUF_ALIGNMENT,
582 DMA_FROM_DEVICE); 569 DMA_FROM_DEVICE);
583 570
584 dev_kfree_skb_any(priv->rx_skbuff[i]); 571 dev_kfree_skb_any(priv->rx_skbuff[i]);
@@ -636,6 +623,7 @@ int startup_gfar(struct net_device *dev)
636 struct gfar *regs = priv->regs; 623 struct gfar *regs = priv->regs;
637 int err = 0; 624 int err = 0;
638 u32 rctrl = 0; 625 u32 rctrl = 0;
626 u32 attrs = 0;
639 627
640 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 628 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
641 629
@@ -795,18 +783,50 @@ int startup_gfar(struct net_device *dev)
795 if (priv->rx_csum_enable) 783 if (priv->rx_csum_enable)
796 rctrl |= RCTRL_CHECKSUMMING; 784 rctrl |= RCTRL_CHECKSUMMING;
797 785
798 if (priv->extended_hash) 786 if (priv->extended_hash) {
799 rctrl |= RCTRL_EXTHASH; 787 rctrl |= RCTRL_EXTHASH;
800 788
789 gfar_clear_exact_match(dev);
790 rctrl |= RCTRL_EMEN;
791 }
792
801 if (priv->vlan_enable) 793 if (priv->vlan_enable)
802 rctrl |= RCTRL_VLAN; 794 rctrl |= RCTRL_VLAN;
803 795
796 if (priv->padding) {
797 rctrl &= ~RCTRL_PAL_MASK;
798 rctrl |= RCTRL_PADDING(priv->padding);
799 }
800
804 /* Init rctrl based on our settings */ 801 /* Init rctrl based on our settings */
805 gfar_write(&priv->regs->rctrl, rctrl); 802 gfar_write(&priv->regs->rctrl, rctrl);
806 803
807 if (dev->features & NETIF_F_IP_CSUM) 804 if (dev->features & NETIF_F_IP_CSUM)
808 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); 805 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
809 806
807 /* Set the extraction length and index */
808 attrs = ATTRELI_EL(priv->rx_stash_size) |
809 ATTRELI_EI(priv->rx_stash_index);
810
811 gfar_write(&priv->regs->attreli, attrs);
812
813 /* Start with defaults, and add stashing or locking
 814 * depending on the appropriate variables */
815 attrs = ATTR_INIT_SETTINGS;
816
817 if (priv->bd_stash_en)
818 attrs |= ATTR_BDSTASH;
819
820 if (priv->rx_stash_size != 0)
821 attrs |= ATTR_BUFSTASH;
822
823 gfar_write(&priv->regs->attr, attrs);
824
825 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
826 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
827 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
828
829 /* Start the controller */
810 gfar_start(dev); 830 gfar_start(dev);
811 831
812 return 0; 832 return 0;
@@ -851,34 +871,32 @@ static int gfar_enet_open(struct net_device *dev)
851 return err; 871 return err;
852} 872}
853 873
854static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp) 874static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
855{ 875{
856 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN); 876 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
857 877
858 memset(fcb, 0, GMAC_FCB_LEN); 878 memset(fcb, 0, GMAC_FCB_LEN);
859 879
860 /* Flag the bd so the controller looks for the FCB */
861 bdp->status |= TXBD_TOE;
862
863 return fcb; 880 return fcb;
864} 881}
865 882
866static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) 883static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
867{ 884{
868 int len; 885 u8 flags = 0;
869 886
 870 /* If we're here, it's an IP packet with a TCP or UDP 887 /* If we're here, it's an IP packet with a TCP or UDP
871 * payload. We set it to checksum, using a pseudo-header 888 * payload. We set it to checksum, using a pseudo-header
872 * we provide 889 * we provide
873 */ 890 */
874 fcb->ip = 1; 891 flags = TXFCB_DEFAULT;
875 fcb->tup = 1;
876 fcb->ctu = 1;
877 fcb->nph = 1;
878 892
879 /* Notify the controller what the protocol is */ 893 /* Tell the controller what the protocol is */
880 if (skb->nh.iph->protocol == IPPROTO_UDP) 894 /* And provide the already calculated phcs */
881 fcb->udp = 1; 895 if (skb->nh.iph->protocol == IPPROTO_UDP) {
896 flags |= TXFCB_UDP;
897 fcb->phcs = skb->h.uh->check;
898 } else
899 fcb->phcs = skb->h.th->check;
882 900
883 /* l3os is the distance between the start of the 901 /* l3os is the distance between the start of the
884 * frame (skb->data) and the start of the IP hdr. 902 * frame (skb->data) and the start of the IP hdr.
@@ -887,17 +905,12 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
887 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); 905 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
888 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); 906 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
889 907
890 len = skb->nh.iph->tot_len - fcb->l4os; 908 fcb->flags = flags;
891
892 /* Provide the pseudoheader csum */
893 fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
894 skb->nh.iph->daddr, len,
895 skb->nh.iph->protocol, 0);
896} 909}
897 910
 898void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 911inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
899{ 912{
900 fcb->vln = 1; 913 fcb->flags |= TXFCB_VLN;
901 fcb->vlctl = vlan_tx_tag_get(skb); 914 fcb->vlctl = vlan_tx_tag_get(skb);
902} 915}
903 916
@@ -908,6 +921,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
908 struct gfar_private *priv = netdev_priv(dev); 921 struct gfar_private *priv = netdev_priv(dev);
909 struct txfcb *fcb = NULL; 922 struct txfcb *fcb = NULL;
910 struct txbd8 *txbdp; 923 struct txbd8 *txbdp;
924 u16 status;
911 925
912 /* Update transmit stats */ 926 /* Update transmit stats */
913 priv->stats.tx_bytes += skb->len; 927 priv->stats.tx_bytes += skb->len;
@@ -919,19 +933,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
919 txbdp = priv->cur_tx; 933 txbdp = priv->cur_tx;
920 934
921 /* Clear all but the WRAP status flags */ 935 /* Clear all but the WRAP status flags */
922 txbdp->status &= TXBD_WRAP; 936 status = txbdp->status & TXBD_WRAP;
923 937
924 /* Set up checksumming */ 938 /* Set up checksumming */
925 if ((dev->features & NETIF_F_IP_CSUM) 939 if (likely((dev->features & NETIF_F_IP_CSUM)
926 && (CHECKSUM_HW == skb->ip_summed)) { 940 && (CHECKSUM_HW == skb->ip_summed))) {
927 fcb = gfar_add_fcb(skb, txbdp); 941 fcb = gfar_add_fcb(skb, txbdp);
942 status |= TXBD_TOE;
928 gfar_tx_checksum(skb, fcb); 943 gfar_tx_checksum(skb, fcb);
929 } 944 }
930 945
931 if (priv->vlan_enable && 946 if (priv->vlan_enable &&
932 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) { 947 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
933 if (NULL == fcb) 948 if (unlikely(NULL == fcb)) {
934 fcb = gfar_add_fcb(skb, txbdp); 949 fcb = gfar_add_fcb(skb, txbdp);
950 status |= TXBD_TOE;
951 }
935 952
936 gfar_tx_vlan(skb, fcb); 953 gfar_tx_vlan(skb, fcb);
937 } 954 }
@@ -949,14 +966,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
949 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 966 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
950 967
951 /* Flag the BD as interrupt-causing */ 968 /* Flag the BD as interrupt-causing */
952 txbdp->status |= TXBD_INTERRUPT; 969 status |= TXBD_INTERRUPT;
953 970
954 /* Flag the BD as ready to go, last in frame, and */ 971 /* Flag the BD as ready to go, last in frame, and */
955 /* in need of CRC */ 972 /* in need of CRC */
956 txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC); 973 status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
957 974
958 dev->trans_start = jiffies; 975 dev->trans_start = jiffies;
959 976
977 txbdp->status = status;
978
960 /* If this was the last BD in the ring, the next one */ 979 /* If this was the last BD in the ring, the next one */
961 /* is at the beginning of the ring */ 980 /* is at the beginning of the ring */
962 if (txbdp->status & TXBD_WRAP) 981 if (txbdp->status & TXBD_WRAP)
@@ -1010,21 +1029,7 @@ static struct net_device_stats * gfar_get_stats(struct net_device *dev)
1010/* Changes the mac address if the controller is not running. */ 1029/* Changes the mac address if the controller is not running. */
1011int gfar_set_mac_address(struct net_device *dev) 1030int gfar_set_mac_address(struct net_device *dev)
1012{ 1031{
1013 struct gfar_private *priv = netdev_priv(dev); 1032 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1014 int i;
1015 char tmpbuf[MAC_ADDR_LEN];
1016 u32 tempval;
1017
1018 /* Now copy it into the mac registers backwards, cuz */
1019 /* little endian is silly */
1020 for (i = 0; i < MAC_ADDR_LEN; i++)
1021 tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
1022
1023 gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
1024
1025 tempval = *((u32 *) (tmpbuf + 4));
1026
1027 gfar_write(&priv->regs->macstnaddr2, tempval);
1028 1033
1029 return 0; 1034 return 0;
1030} 1035}
@@ -1110,7 +1115,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1110 INCREMENTAL_BUFFER_SIZE; 1115 INCREMENTAL_BUFFER_SIZE;
1111 1116
1112 /* Only stop and start the controller if it isn't already 1117 /* Only stop and start the controller if it isn't already
1113 * stopped */ 1118 * stopped, and we changed something */
1114 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 1119 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1115 stop_gfar(dev); 1120 stop_gfar(dev);
1116 1121
@@ -1220,6 +1225,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1220 1225
1221struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) 1226struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1222{ 1227{
1228 unsigned int alignamount;
1223 struct gfar_private *priv = netdev_priv(dev); 1229 struct gfar_private *priv = netdev_priv(dev);
1224 struct sk_buff *skb = NULL; 1230 struct sk_buff *skb = NULL;
1225 unsigned int timeout = SKB_ALLOC_TIMEOUT; 1231 unsigned int timeout = SKB_ALLOC_TIMEOUT;
@@ -1231,18 +1237,18 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1231 if (NULL == skb) 1237 if (NULL == skb)
1232 return NULL; 1238 return NULL;
1233 1239
1240 alignamount = RXBUF_ALIGNMENT -
1241 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));
1242
1234 /* We need the data buffer to be aligned properly. We will reserve 1243 /* We need the data buffer to be aligned properly. We will reserve
1235 * as many bytes as needed to align the data properly 1244 * as many bytes as needed to align the data properly
1236 */ 1245 */
1237 skb_reserve(skb, 1246 skb_reserve(skb, alignamount);
1238 RXBUF_ALIGNMENT -
1239 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
1240 1247
1241 skb->dev = dev; 1248 skb->dev = dev;
1242 1249
1243 bdp->bufPtr = dma_map_single(NULL, skb->data, 1250 bdp->bufPtr = dma_map_single(NULL, skb->data,
1244 priv->rx_buffer_size + RXBUF_ALIGNMENT, 1251 priv->rx_buffer_size, DMA_FROM_DEVICE);
1245 DMA_FROM_DEVICE);
1246 1252
1247 bdp->length = 0; 1253 bdp->length = 0;
1248 1254
@@ -1350,7 +1356,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1350 /* If valid headers were found, and valid sums 1356 /* If valid headers were found, and valid sums
1351 * were verified, then we tell the kernel that no 1357 * were verified, then we tell the kernel that no
1352 * checksumming is necessary. Otherwise, it is */ 1358 * checksumming is necessary. Otherwise, it is */
1353 if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu) 1359 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1354 skb->ip_summed = CHECKSUM_UNNECESSARY; 1360 skb->ip_summed = CHECKSUM_UNNECESSARY;
1355 else 1361 else
1356 skb->ip_summed = CHECKSUM_NONE; 1362 skb->ip_summed = CHECKSUM_NONE;
@@ -1401,7 +1407,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1401 skb->protocol = eth_type_trans(skb, dev); 1407 skb->protocol = eth_type_trans(skb, dev);
1402 1408
1403 /* Send the packet up the stack */ 1409 /* Send the packet up the stack */
1404 if (unlikely(priv->vlgrp && fcb->vln)) 1410 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1405 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl); 1411 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1406 else 1412 else
1407 ret = RECEIVE(skb); 1413 ret = RECEIVE(skb);
@@ -1620,6 +1626,7 @@ static void adjust_link(struct net_device *dev)
1620 spin_lock_irqsave(&priv->lock, flags); 1626 spin_lock_irqsave(&priv->lock, flags);
1621 if (phydev->link) { 1627 if (phydev->link) {
1622 u32 tempval = gfar_read(&regs->maccfg2); 1628 u32 tempval = gfar_read(&regs->maccfg2);
1629 u32 ecntrl = gfar_read(&regs->ecntrl);
1623 1630
1624 /* Now we make sure that we can be in full duplex mode. 1631 /* Now we make sure that we can be in full duplex mode.
1625 * If not, we operate in half-duplex mode. */ 1632 * If not, we operate in half-duplex mode. */
@@ -1644,6 +1651,13 @@ static void adjust_link(struct net_device *dev)
1644 case 10: 1651 case 10:
1645 tempval = 1652 tempval =
1646 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 1653 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1654
1655 /* Reduced mode distinguishes
1656 * between 10 and 100 */
1657 if (phydev->speed == SPEED_100)
1658 ecntrl |= ECNTRL_R100;
1659 else
1660 ecntrl &= ~(ECNTRL_R100);
1647 break; 1661 break;
1648 default: 1662 default:
1649 if (netif_msg_link(priv)) 1663 if (netif_msg_link(priv))
@@ -1657,6 +1671,7 @@ static void adjust_link(struct net_device *dev)
1657 } 1671 }
1658 1672
1659 gfar_write(&regs->maccfg2, tempval); 1673 gfar_write(&regs->maccfg2, tempval);
1674 gfar_write(&regs->ecntrl, ecntrl);
1660 1675
1661 if (!priv->oldlink) { 1676 if (!priv->oldlink) {
1662 new_state = 1; 1677 new_state = 1;
@@ -1721,6 +1736,9 @@ static void gfar_set_multi(struct net_device *dev)
1721 gfar_write(&regs->gaddr6, 0xffffffff); 1736 gfar_write(&regs->gaddr6, 0xffffffff);
1722 gfar_write(&regs->gaddr7, 0xffffffff); 1737 gfar_write(&regs->gaddr7, 0xffffffff);
1723 } else { 1738 } else {
1739 int em_num;
1740 int idx;
1741
1724 /* zero out the hash */ 1742 /* zero out the hash */
1725 gfar_write(&regs->igaddr0, 0x0); 1743 gfar_write(&regs->igaddr0, 0x0);
1726 gfar_write(&regs->igaddr1, 0x0); 1744 gfar_write(&regs->igaddr1, 0x0);
@@ -1739,18 +1757,47 @@ static void gfar_set_multi(struct net_device *dev)
1739 gfar_write(&regs->gaddr6, 0x0); 1757 gfar_write(&regs->gaddr6, 0x0);
1740 gfar_write(&regs->gaddr7, 0x0); 1758 gfar_write(&regs->gaddr7, 0x0);
1741 1759
1760 /* If we have extended hash tables, we need to
1761 * clear the exact match registers to prepare for
1762 * setting them */
1763 if (priv->extended_hash) {
1764 em_num = GFAR_EM_NUM + 1;
1765 gfar_clear_exact_match(dev);
1766 idx = 1;
1767 } else {
1768 idx = 0;
1769 em_num = 0;
1770 }
1771
1742 if(dev->mc_count == 0) 1772 if(dev->mc_count == 0)
1743 return; 1773 return;
1744 1774
1745 /* Parse the list, and set the appropriate bits */ 1775 /* Parse the list, and set the appropriate bits */
1746 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 1776 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1747 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); 1777 if (idx < em_num) {
1778 gfar_set_mac_for_addr(dev, idx,
1779 mc_ptr->dmi_addr);
1780 idx++;
1781 } else
1782 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1748 } 1783 }
1749 } 1784 }
1750 1785
1751 return; 1786 return;
1752} 1787}
1753 1788
1789
1790/* Clears each of the exact match registers to zero, so they
1791 * don't interfere with normal reception */
1792static void gfar_clear_exact_match(struct net_device *dev)
1793{
1794 int idx;
1795 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
1796
1797 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
1798 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1799}
1800
1754/* Set the appropriate hash bit for the given addr */ 1801/* Set the appropriate hash bit for the given addr */
1755/* The algorithm works like so: 1802/* The algorithm works like so:
1756 * 1) Take the Destination Address (ie the multicast address), and 1803 * 1) Take the Destination Address (ie the multicast address), and
@@ -1781,6 +1828,32 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1781 return; 1828 return;
1782} 1829}
1783 1830
1831
1832/* There are multiple MAC Address register pairs on some controllers
1833 * This function sets the numth pair to a given address
1834 */
1835static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1836{
1837 struct gfar_private *priv = netdev_priv(dev);
1838 int idx;
1839 char tmpbuf[MAC_ADDR_LEN];
1840 u32 tempval;
1841 u32 *macptr = &priv->regs->macstnaddr1;
1842
1843 macptr += num*2;
1844
 1845 /* Copy the address into the MAC registers in reversed */
 1846 /* byte order, since the registers are little-endian */
1847 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1848 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
1849
1850 gfar_write(macptr, *((u32 *) (tmpbuf)));
1851
1852 tempval = *((u32 *) (tmpbuf + 4));
1853
1854 gfar_write(macptr+1, tempval);
1855}
1856
1784/* GFAR error interrupt handler */ 1857/* GFAR error interrupt handler */
1785static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs) 1858static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1786{ 1859{
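gfar_set_mac_for_addr() above writes the 6-byte station address into a MACSTNADDR register pair in reversed byte order. A standalone illustration of that packing; pack_mac is a made-up helper, and an 8-byte scratch buffer is used here so both 32-bit loads stay in bounds.

#include <stdint.h>
#include <string.h>

static void pack_mac(const uint8_t addr[6], uint32_t regs[2])
{
	uint8_t tmp[8] = { 0 };
	int i;

	/* Reverse the bytes, as the hardware expects them. */
	for (i = 0; i < 6; i++)
		tmp[5 - i] = addr[i];

	memcpy(&regs[0], &tmp[0], 4);	/* value for MACSTNADDRx1 */
	memcpy(&regs[1], &tmp[4], 4);	/* value for MACSTNADDRx2 */
}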
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 5065ba82cb76..94a91da84fbb 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -90,12 +90,26 @@ extern const char gfar_driver_version[];
90#define GFAR_RX_MAX_RING_SIZE 256 90#define GFAR_RX_MAX_RING_SIZE 256
91#define GFAR_TX_MAX_RING_SIZE 256 91#define GFAR_TX_MAX_RING_SIZE 256
92 92
93#define GFAR_MAX_FIFO_THRESHOLD 511
94#define GFAR_MAX_FIFO_STARVE 511
95#define GFAR_MAX_FIFO_STARVE_OFF 511
96
93#define DEFAULT_RX_BUFFER_SIZE 1536 97#define DEFAULT_RX_BUFFER_SIZE 1536
94#define TX_RING_MOD_MASK(size) (size-1) 98#define TX_RING_MOD_MASK(size) (size-1)
95#define RX_RING_MOD_MASK(size) (size-1) 99#define RX_RING_MOD_MASK(size) (size-1)
96#define JUMBO_BUFFER_SIZE 9728 100#define JUMBO_BUFFER_SIZE 9728
97#define JUMBO_FRAME_SIZE 9600 101#define JUMBO_FRAME_SIZE 9600
98 102
103#define DEFAULT_FIFO_TX_THR 0x100
104#define DEFAULT_FIFO_TX_STARVE 0x40
105#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
106#define DEFAULT_BD_STASH 1
107#define DEFAULT_STASH_LENGTH 64
108#define DEFAULT_STASH_INDEX 0
109
110/* The number of Exact Match registers */
111#define GFAR_EM_NUM 15
112
99/* Latency of interface clock in nanoseconds */ 113/* Latency of interface clock in nanoseconds */
 100/* Interface clock latency, in this case, means the 114/* Interface clock latency, in this case, means the
101 * time described by a value of 1 in the interrupt 115 * time described by a value of 1 in the interrupt
@@ -112,11 +126,11 @@ extern const char gfar_driver_version[];
112 126
113#define DEFAULT_TX_COALESCE 1 127#define DEFAULT_TX_COALESCE 1
114#define DEFAULT_TXCOUNT 16 128#define DEFAULT_TXCOUNT 16
115#define DEFAULT_TXTIME 400 129#define DEFAULT_TXTIME 4
116 130
117#define DEFAULT_RX_COALESCE 1 131#define DEFAULT_RX_COALESCE 1
118#define DEFAULT_RXCOUNT 16 132#define DEFAULT_RXCOUNT 16
119#define DEFAULT_RXTIME 400 133#define DEFAULT_RXTIME 4
120 134
121#define TBIPA_VALUE 0x1f 135#define TBIPA_VALUE 0x1f
122#define MIIMCFG_INIT_VALUE 0x00000007 136#define MIIMCFG_INIT_VALUE 0x00000007
@@ -147,6 +161,7 @@ extern const char gfar_driver_version[];
147 161
148#define ECNTRL_INIT_SETTINGS 0x00001000 162#define ECNTRL_INIT_SETTINGS 0x00001000
149#define ECNTRL_TBI_MODE 0x00000020 163#define ECNTRL_TBI_MODE 0x00000020
164#define ECNTRL_R100 0x00000008
150 165
151#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE 166#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
152 167
@@ -181,10 +196,12 @@ extern const char gfar_driver_version[];
181#define RCTRL_PRSDEP_MASK 0x000000c0 196#define RCTRL_PRSDEP_MASK 0x000000c0
182#define RCTRL_PRSDEP_INIT 0x000000c0 197#define RCTRL_PRSDEP_INIT 0x000000c0
183#define RCTRL_PROM 0x00000008 198#define RCTRL_PROM 0x00000008
199#define RCTRL_EMEN 0x00000002
184#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \ 200#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \
185 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT) 201 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT)
186#define RCTRL_EXTHASH (RCTRL_GHTX) 202#define RCTRL_EXTHASH (RCTRL_GHTX)
187#define RCTRL_VLAN (RCTRL_PRSDEP_INIT) 203#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
204#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
188 205
189 206
190#define RSTAT_CLEAR_RHALT 0x00800000 207#define RSTAT_CLEAR_RHALT 0x00800000
@@ -251,28 +268,26 @@ extern const char gfar_driver_version[];
251 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 268 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
252 | IMASK_PERR) 269 | IMASK_PERR)
253 270
271/* Fifo management */
272#define FIFO_TX_THR_MASK 0x01ff
273#define FIFO_TX_STARVE_MASK 0x01ff
274#define FIFO_TX_STARVE_OFF_MASK 0x01ff
254 275
255/* Attribute fields */ 276/* Attribute fields */
256 277
257/* This enables rx snooping for buffers and descriptors */ 278/* This enables rx snooping for buffers and descriptors */
258#ifdef CONFIG_GFAR_BDSTASH
259#define ATTR_BDSTASH 0x00000800 279#define ATTR_BDSTASH 0x00000800
260#else
261#define ATTR_BDSTASH 0x00000000
262#endif
263 280
264#ifdef CONFIG_GFAR_BUFSTASH
265#define ATTR_BUFSTASH 0x00004000 281#define ATTR_BUFSTASH 0x00004000
266#define STASH_LENGTH 64
267#else
268#define ATTR_BUFSTASH 0x00000000
269#endif
270 282
271#define ATTR_SNOOPING 0x000000c0 283#define ATTR_SNOOPING 0x000000c0
272#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \ 284#define ATTR_INIT_SETTINGS ATTR_SNOOPING
273 | ATTR_BDSTASH | ATTR_BUFSTASH)
274 285
275#define ATTRELI_INIT_SETTINGS 0x0 286#define ATTRELI_INIT_SETTINGS 0x0
287#define ATTRELI_EL_MASK 0x3fff0000
288#define ATTRELI_EL(x) (x << 16)
289#define ATTRELI_EI_MASK 0x00003fff
290#define ATTRELI_EI(x) (x)
276 291
277 292
278/* TxBD status field bits */ 293/* TxBD status field bits */
@@ -328,6 +343,7 @@ extern const char gfar_driver_version[];
328#define RXFCB_CTU 0x0400 343#define RXFCB_CTU 0x0400
329#define RXFCB_EIP 0x0200 344#define RXFCB_EIP 0x0200
330#define RXFCB_ETU 0x0100 345#define RXFCB_ETU 0x0100
346#define RXFCB_CSUM_MASK 0x0f00
331#define RXFCB_PERR_MASK 0x000c 347#define RXFCB_PERR_MASK 0x000c
332#define RXFCB_PERR_BADL3 0x0008 348#define RXFCB_PERR_BADL3 0x0008
333 349
@@ -339,14 +355,7 @@ struct txbd8
339}; 355};
340 356
341struct txfcb { 357struct txfcb {
342 u8 vln:1, 358 u8 flags;
343 ip:1,
344 ip6:1,
345 tup:1,
346 udp:1,
347 cip:1,
348 ctu:1,
349 nph:1;
350 u8 reserved; 359 u8 reserved;
351 u8 l4os; /* Level 4 Header Offset */ 360 u8 l4os; /* Level 4 Header Offset */
352 u8 l3os; /* Level 3 Header Offset */ 361 u8 l3os; /* Level 3 Header Offset */
@@ -362,14 +371,7 @@ struct rxbd8
362}; 371};
363 372
364struct rxfcb { 373struct rxfcb {
365 u16 vln:1, 374 u16 flags;
366 ip:1,
367 ip6:1,
368 tup:1,
369 cip:1,
370 ctu:1,
371 eip:1,
372 etu:1;
373 u8 rq; /* Receive Queue index */ 375 u8 rq; /* Receive Queue index */
374 u8 pro; /* Layer 4 Protocol */ 376 u8 pro; /* Layer 4 Protocol */
375 u16 reserved; 377 u16 reserved;
@@ -688,12 +690,17 @@ struct gfar_private {
688 spinlock_t lock; 690 spinlock_t lock;
689 unsigned int rx_buffer_size; 691 unsigned int rx_buffer_size;
690 unsigned int rx_stash_size; 692 unsigned int rx_stash_size;
693 unsigned int rx_stash_index;
691 unsigned int tx_ring_size; 694 unsigned int tx_ring_size;
692 unsigned int rx_ring_size; 695 unsigned int rx_ring_size;
696 unsigned int fifo_threshold;
697 unsigned int fifo_starve;
698 unsigned int fifo_starve_off;
693 699
694 unsigned char vlan_enable:1, 700 unsigned char vlan_enable:1,
695 rx_csum_enable:1, 701 rx_csum_enable:1,
696 extended_hash:1; 702 extended_hash:1,
703 bd_stash_en:1;
697 unsigned short padding; 704 unsigned short padding;
698 struct vlan_group *vlgrp; 705 struct vlan_group *vlgrp;
699 /* Info structure initialized by board setup code */ 706 /* Info structure initialized by board setup code */
@@ -731,6 +738,6 @@ extern void stop_gfar(struct net_device *dev);
731extern void gfar_halt(struct net_device *dev); 738extern void gfar_halt(struct net_device *dev);
732extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 739extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
733 int enable, u32 regnum, u32 read); 740 int enable, u32 regnum, u32 read);
734void gfar_setup_stashing(struct net_device *dev); 741void gfar_init_sysfs(struct net_device *dev);
735 742
736#endif /* __GIANFAR_H */ 743#endif /* __GIANFAR_H */
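The ATTRELI_EL()/ATTRELI_EI() macros added above pack the buffer-stash extraction length and index into a single register value. A small worked example using the defaults declared in this header; the macros are repeated (with extra parentheses) so the snippet stands on its own, and gfar_default_attreli is an illustrative name.

typedef unsigned int u32;

#define ATTRELI_EL(x)	((x) << 16)	/* extraction length */
#define ATTRELI_EI(x)	(x)		/* extraction index  */

static u32 gfar_default_attreli(void)
{
	/* 64-byte stash at index 0 -> 0x00400000 */
	return ATTRELI_EL(64) | ATTRELI_EI(0);
}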
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index cfa3cd7c91a0..765e810620fe 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -125,7 +125,7 @@ static char stat_gstrings[][ETH_GSTRING_LEN] = {
125static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) 125static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
126{ 126{
127 struct gfar_private *priv = netdev_priv(dev); 127 struct gfar_private *priv = netdev_priv(dev);
128 128
129 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 129 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
131 else 131 else
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index e85eb216fb5b..d527cf2f9c1d 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -24,6 +24,7 @@
24#define MII_READ_COMMAND 0x00000001 24#define MII_READ_COMMAND 0x00000001
25 25
26#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ 26#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
27 | SUPPORTED_10baseT_Full \
27 | SUPPORTED_100baseT_Half \ 28 | SUPPORTED_100baseT_Half \
28 | SUPPORTED_100baseT_Full \ 29 | SUPPORTED_100baseT_Full \
29 | SUPPORTED_Autoneg \ 30 | SUPPORTED_Autoneg \
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
new file mode 100644
index 000000000000..10d34cb19192
--- /dev/null
+++ b/drivers/net/gianfar_sysfs.c
@@ -0,0 +1,311 @@
1/*
2 * drivers/net/gianfar_sysfs.c
3 *
4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
11 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 *
19 * Sysfs file creation and management
20 */
21
22#include <linux/config.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/string.h>
26#include <linux/errno.h>
27#include <linux/unistd.h>
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/etherdevice.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/device.h>
35
36#include <asm/uaccess.h>
37#include <linux/module.h>
38#include <linux/version.h>
39
40#include "gianfar.h"
41
42#define GFAR_ATTR(_name) \
43static ssize_t gfar_show_##_name(struct class_device *cdev, char *buf); \
44static ssize_t gfar_set_##_name(struct class_device *cdev, \
45 const char *buf, size_t count); \
46static CLASS_DEVICE_ATTR(_name, 0644, gfar_show_##_name, gfar_set_##_name)
47
48#define GFAR_CREATE_FILE(_dev, _name) \
49 class_device_create_file(&_dev->class_dev, &class_device_attr_##_name)
50
51GFAR_ATTR(bd_stash);
52GFAR_ATTR(rx_stash_size);
53GFAR_ATTR(rx_stash_index);
54GFAR_ATTR(fifo_threshold);
55GFAR_ATTR(fifo_starve);
56GFAR_ATTR(fifo_starve_off);
57
58#define to_net_dev(cd) container_of(cd, struct net_device, class_dev)
59
60static ssize_t gfar_show_bd_stash(struct class_device *cdev, char *buf)
61{
62 struct net_device *dev = to_net_dev(cdev);
63 struct gfar_private *priv = netdev_priv(dev);
64
65 return sprintf(buf, "%s\n", priv->bd_stash_en? "on" : "off");
66}
67
68static ssize_t gfar_set_bd_stash(struct class_device *cdev,
69 const char *buf, size_t count)
70{
71 struct net_device *dev = to_net_dev(cdev);
72 struct gfar_private *priv = netdev_priv(dev);
73 int new_setting = 0;
74 u32 temp;
75 unsigned long flags;
76
77 /* Find out the new setting */
78 if (!strncmp("on", buf, count-1) || !strncmp("1", buf, count-1))
79 new_setting = 1;
80 else if (!strncmp("off", buf, count-1) || !strncmp("0", buf, count-1))
81 new_setting = 0;
82 else
83 return count;
84
85 spin_lock_irqsave(&priv->lock, flags);
86
87 /* Set the new stashing value */
88 priv->bd_stash_en = new_setting;
89
90 temp = gfar_read(&priv->regs->attr);
91
92 if (new_setting)
93 temp |= ATTR_BDSTASH;
94 else
95 temp &= ~(ATTR_BDSTASH);
96
97 gfar_write(&priv->regs->attr, temp);
98
99 spin_unlock_irqrestore(&priv->lock, flags);
100
101 return count;
102}
103
104static ssize_t gfar_show_rx_stash_size(struct class_device *cdev, char *buf)
105{
106 struct net_device *dev = to_net_dev(cdev);
107 struct gfar_private *priv = netdev_priv(dev);
108
109 return sprintf(buf, "%d\n", priv->rx_stash_size);
110}
111
112static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
113 const char *buf, size_t count)
114{
115 struct net_device *dev = to_net_dev(cdev);
116 struct gfar_private *priv = netdev_priv(dev);
117 unsigned int length = simple_strtoul(buf, NULL, 0);
118 u32 temp;
119 unsigned long flags;
120
 121 if (length > priv->rx_buffer_size)
 122 return count;
 123
 124 if (length == priv->rx_stash_size)
 125 return count;
 126
 127 spin_lock_irqsave(&priv->lock, flags);
 128 priv->rx_stash_size = length;
129
130 temp = gfar_read(&priv->regs->attreli);
131 temp &= ~ATTRELI_EL_MASK;
132 temp |= ATTRELI_EL(length);
133 gfar_write(&priv->regs->attreli, temp);
134
135 /* Turn stashing on/off as appropriate */
136 temp = gfar_read(&priv->regs->attr);
137
138 if (length)
139 temp |= ATTR_BUFSTASH;
140 else
141 temp &= ~(ATTR_BUFSTASH);
142
143 gfar_write(&priv->regs->attr, temp);
144
145 spin_unlock_irqrestore(&priv->lock, flags);
146
147 return count;
148}
149
150
151/* Stashing will only be enabled when rx_stash_size != 0 */
152static ssize_t gfar_show_rx_stash_index(struct class_device *cdev, char *buf)
153{
154 struct net_device *dev = to_net_dev(cdev);
155 struct gfar_private *priv = netdev_priv(dev);
156
157 return sprintf(buf, "%d\n", priv->rx_stash_index);
158}
159
160static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
161 const char *buf, size_t count)
162{
163 struct net_device *dev = to_net_dev(cdev);
164 struct gfar_private *priv = netdev_priv(dev);
165 unsigned short index = simple_strtoul(buf, NULL, 0);
166 u32 temp;
167 unsigned long flags;
168
 169 if (index > priv->rx_stash_size)
 170 return count;
 171
 172 if (index == priv->rx_stash_index)
 173 return count;
 174
 175 spin_lock_irqsave(&priv->lock, flags);
 176 priv->rx_stash_index = index;
177
178 temp = gfar_read(&priv->regs->attreli);
179 temp &= ~ATTRELI_EI_MASK;
180 temp |= ATTRELI_EI(index);
 181 gfar_write(&priv->regs->attreli, temp);
182
183 spin_unlock_irqrestore(&priv->lock, flags);
184
185 return count;
186}
187
188static ssize_t gfar_show_fifo_threshold(struct class_device *cdev, char *buf)
189{
190 struct net_device *dev = to_net_dev(cdev);
191 struct gfar_private *priv = netdev_priv(dev);
192
193 return sprintf(buf, "%d\n", priv->fifo_threshold);
194}
195
196static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
197 const char *buf, size_t count)
198{
199 struct net_device *dev = to_net_dev(cdev);
200 struct gfar_private *priv = netdev_priv(dev);
201 unsigned int length = simple_strtoul(buf, NULL, 0);
202 u32 temp;
203 unsigned long flags;
204
205 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count;
207
208 spin_lock_irqsave(&priv->lock, flags);
209
210 priv->fifo_threshold = length;
211
212 temp = gfar_read(&priv->regs->fifo_tx_thr);
213 temp &= ~FIFO_TX_THR_MASK;
214 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp);
216
217 spin_unlock_irqrestore(&priv->lock, flags);
218
219 return count;
220}
221
222static ssize_t gfar_show_fifo_starve(struct class_device *cdev, char *buf)
223{
224 struct net_device *dev = to_net_dev(cdev);
225 struct gfar_private *priv = netdev_priv(dev);
226
227 return sprintf(buf, "%d\n", priv->fifo_starve);
228}
229
230
231static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
232 const char *buf, size_t count)
233{
234 struct net_device *dev = to_net_dev(cdev);
235 struct gfar_private *priv = netdev_priv(dev);
236 unsigned int num = simple_strtoul(buf, NULL, 0);
237 u32 temp;
238 unsigned long flags;
239
240 if (num > GFAR_MAX_FIFO_STARVE)
241 return count;
242
243 spin_lock_irqsave(&priv->lock, flags);
244
245 priv->fifo_starve = num;
246
247 temp = gfar_read(&priv->regs->fifo_tx_starve);
248 temp &= ~FIFO_TX_STARVE_MASK;
249 temp |= num;
250 gfar_write(&priv->regs->fifo_tx_starve, temp);
251
252 spin_unlock_irqrestore(&priv->lock, flags);
253
254 return count;
255}
256
257static ssize_t gfar_show_fifo_starve_off(struct class_device *cdev, char *buf)
258{
259 struct net_device *dev = to_net_dev(cdev);
260 struct gfar_private *priv = netdev_priv(dev);
261
262 return sprintf(buf, "%d\n", priv->fifo_starve_off);
263}
264
265static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
266 const char *buf, size_t count)
267{
268 struct net_device *dev = to_net_dev(cdev);
269 struct gfar_private *priv = netdev_priv(dev);
270 unsigned int num = simple_strtoul(buf, NULL, 0);
271 u32 temp;
272 unsigned long flags;
273
274 if (num > GFAR_MAX_FIFO_STARVE_OFF)
275 return count;
276
277 spin_lock_irqsave(&priv->lock, flags);
278
279 priv->fifo_starve_off = num;
280
281 temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff);
282 temp &= ~FIFO_TX_STARVE_OFF_MASK;
283 temp |= num;
284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
285
286 spin_unlock_irqrestore(&priv->lock, flags);
287
288 return count;
289}
290
291void gfar_init_sysfs(struct net_device *dev)
292{
293 struct gfar_private *priv = netdev_priv(dev);
294
295 /* Initialize the default values */
296 priv->rx_stash_size = DEFAULT_STASH_LENGTH;
297 priv->rx_stash_index = DEFAULT_STASH_INDEX;
298 priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
299 priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
300 priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
301 priv->bd_stash_en = DEFAULT_BD_STASH;
302
303 /* Create our sysfs files */
304 GFAR_CREATE_FILE(dev, bd_stash);
305 GFAR_CREATE_FILE(dev, rx_stash_size);
306 GFAR_CREATE_FILE(dev, rx_stash_index);
307 GFAR_CREATE_FILE(dev, fifo_threshold);
308 GFAR_CREATE_FILE(dev, fifo_starve);
309 GFAR_CREATE_FILE(dev, fifo_starve_off);
310
311}
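For reference, the GFAR_ATTR() macro used in gianfar_sysfs.c above turns each attribute name into a show/store prototype pair plus a class_device attribute. Roughly what GFAR_ATTR(bd_stash) produces once expanded (whitespace added; this is an expansion reference, not a separately buildable unit):

static ssize_t gfar_show_bd_stash(struct class_device *cdev, char *buf);
static ssize_t gfar_set_bd_stash(struct class_device *cdev,
				 const char *buf, size_t count);
static CLASS_DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash,
			 gfar_set_bd_stash);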
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index eb7d69478715..1da8a66f91e1 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -65,7 +65,7 @@
65 */ 65 */
66 66
67#define DRV_NAME "emac" 67#define DRV_NAME "emac"
68#define DRV_VERSION "3.53" 68#define DRV_VERSION "3.54"
69#define DRV_DESC "PPC 4xx OCP EMAC driver" 69#define DRV_DESC "PPC 4xx OCP EMAC driver"
70 70
71MODULE_DESCRIPTION(DRV_DESC); 71MODULE_DESCRIPTION(DRV_DESC);
@@ -158,6 +158,14 @@ static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
158#define PHY_POLL_LINK_ON HZ 158#define PHY_POLL_LINK_ON HZ
159#define PHY_POLL_LINK_OFF (HZ / 5) 159#define PHY_POLL_LINK_OFF (HZ / 5)
160 160
161/* Graceful stop timeouts in us.
162 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
163 */
164#define STOP_TIMEOUT_10 1230
165#define STOP_TIMEOUT_100 124
166#define STOP_TIMEOUT_1000 13
167#define STOP_TIMEOUT_1000_JUMBO 73
168
161/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */ 169/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
162static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = { 170static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
163 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum", 171 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
@@ -222,10 +230,12 @@ static void emac_tx_disable(struct ocp_enet_private *dev)
222 230
223 r = in_be32(&p->mr0); 231 r = in_be32(&p->mr0);
224 if (r & EMAC_MR0_TXE) { 232 if (r & EMAC_MR0_TXE) {
225 int n = 300; 233 int n = dev->stop_timeout;
226 out_be32(&p->mr0, r & ~EMAC_MR0_TXE); 234 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
227 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) 235 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
236 udelay(1);
228 --n; 237 --n;
238 }
229 if (unlikely(!n)) 239 if (unlikely(!n))
230 emac_report_timeout_error(dev, "TX disable timeout"); 240 emac_report_timeout_error(dev, "TX disable timeout");
231 } 241 }
@@ -248,9 +258,11 @@ static void emac_rx_enable(struct ocp_enet_private *dev)
248 if (!(r & EMAC_MR0_RXE)) { 258 if (!(r & EMAC_MR0_RXE)) {
249 if (unlikely(!(r & EMAC_MR0_RXI))) { 259 if (unlikely(!(r & EMAC_MR0_RXI))) {
250 /* Wait if previous async disable is still in progress */ 260 /* Wait if previous async disable is still in progress */
251 int n = 100; 261 int n = dev->stop_timeout;
252 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) 262 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
263 udelay(1);
253 --n; 264 --n;
265 }
254 if (unlikely(!n)) 266 if (unlikely(!n))
255 emac_report_timeout_error(dev, 267 emac_report_timeout_error(dev,
256 "RX disable timeout"); 268 "RX disable timeout");
@@ -273,10 +285,12 @@ static void emac_rx_disable(struct ocp_enet_private *dev)
273 285
274 r = in_be32(&p->mr0); 286 r = in_be32(&p->mr0);
275 if (r & EMAC_MR0_RXE) { 287 if (r & EMAC_MR0_RXE) {
276 int n = 300; 288 int n = dev->stop_timeout;
277 out_be32(&p->mr0, r & ~EMAC_MR0_RXE); 289 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
278 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) 290 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
291 udelay(1);
279 --n; 292 --n;
293 }
280 if (unlikely(!n)) 294 if (unlikely(!n))
281 emac_report_timeout_error(dev, "RX disable timeout"); 295 emac_report_timeout_error(dev, "RX disable timeout");
282 } 296 }
@@ -395,6 +409,7 @@ static int emac_configure(struct ocp_enet_private *dev)
395 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST; 409 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
396 if (dev->phy.duplex == DUPLEX_FULL) 410 if (dev->phy.duplex == DUPLEX_FULL)
397 r |= EMAC_MR1_FDE; 411 r |= EMAC_MR1_FDE;
412 dev->stop_timeout = STOP_TIMEOUT_10;
398 switch (dev->phy.speed) { 413 switch (dev->phy.speed) {
399 case SPEED_1000: 414 case SPEED_1000:
400 if (emac_phy_gpcs(dev->phy.mode)) { 415 if (emac_phy_gpcs(dev->phy.mode)) {
@@ -409,12 +424,16 @@ static int emac_configure(struct ocp_enet_private *dev)
409 r |= EMAC_MR1_MF_1000; 424 r |= EMAC_MR1_MF_1000;
410 r |= EMAC_MR1_RFS_16K; 425 r |= EMAC_MR1_RFS_16K;
411 gige = 1; 426 gige = 1;
412 427
413 if (dev->ndev->mtu > ETH_DATA_LEN) 428 if (dev->ndev->mtu > ETH_DATA_LEN) {
414 r |= EMAC_MR1_JPSM; 429 r |= EMAC_MR1_JPSM;
430 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
431 } else
432 dev->stop_timeout = STOP_TIMEOUT_1000;
415 break; 433 break;
416 case SPEED_100: 434 case SPEED_100:
417 r |= EMAC_MR1_MF_100; 435 r |= EMAC_MR1_MF_100;
436 dev->stop_timeout = STOP_TIMEOUT_100;
418 /* Fall through */ 437 /* Fall through */
419 default: 438 default:
420 r |= EMAC_MR1_RFS_4K; 439 r |= EMAC_MR1_RFS_4K;
@@ -2048,6 +2067,7 @@ static int __init emac_probe(struct ocp_device *ocpdev)
2048 dev->phy.duplex = DUPLEX_FULL; 2067 dev->phy.duplex = DUPLEX_FULL;
2049 dev->phy.autoneg = AUTONEG_DISABLE; 2068 dev->phy.autoneg = AUTONEG_DISABLE;
2050 dev->phy.pause = dev->phy.asym_pause = 0; 2069 dev->phy.pause = dev->phy.asym_pause = 0;
2070 dev->stop_timeout = STOP_TIMEOUT_100;
2051 init_timer(&dev->link_timer); 2071 init_timer(&dev->link_timer);
2052 dev->link_timer.function = emac_link_timer; 2072 dev->link_timer.function = emac_link_timer;
2053 dev->link_timer.data = (unsigned long)dev; 2073 dev->link_timer.data = (unsigned long)dev;
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
index e9b44d030ac3..911abbaf471b 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.h
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -189,6 +189,8 @@ struct ocp_enet_private {
189 struct timer_list link_timer; 189 struct timer_list link_timer;
190 int reset_failed; 190 int reset_failed;
191 191
192 int stop_timeout; /* in us */
193
192 struct ibm_emac_error_stats estats; 194 struct ibm_emac_error_stats estats;
193 struct net_device_stats nstats; 195 struct net_device_stats nstats;
194 196
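The STOP_TIMEOUT_* values added to ibm_emac above follow the "one frame time" rule stated in the comment: a maximum frame on the wire (payload, header/FCS, preamble and inter-frame gap, roughly 1538 bytes, or about 9 KB with jumbo frames) converted to microseconds at each link speed. A small sketch of that arithmetic; the driver's constants are rounded, so the results only approximately match.

/* One frame time in microseconds at a given link speed. */
static unsigned int frame_time_us(unsigned int frame_bytes,
				  unsigned int mbit_per_sec)
{
	/* bits on the wire divided by bits per microsecond, rounded up */
	return (frame_bytes * 8 + mbit_per_sec - 1) / mbit_per_sec;
}

/*
 * frame_time_us(1538,   10) -> 1231  (STOP_TIMEOUT_10 is 1230)
 * frame_time_us(1538,  100) ->  124  (STOP_TIMEOUT_100)
 * frame_time_us(1538, 1000) ->   13  (STOP_TIMEOUT_1000)
 * frame_time_us(9038, 1000) ->   73  (STOP_TIMEOUT_1000_JUMBO, ~9 KB frame)
 */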
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index c22c0517883c..fa176ffb4ad5 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1539,7 +1539,6 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1539 * USB device callbacks 1539 * USB device callbacks
1540 */ 1540 */
1541static struct usb_driver irda_driver = { 1541static struct usb_driver irda_driver = {
1542 .owner = THIS_MODULE,
1543 .name = "irda-usb", 1542 .name = "irda-usb",
1544 .probe = irda_usb_probe, 1543 .probe = irda_usb_probe,
1545 .disconnect = irda_usb_disconnect, 1544 .disconnect = irda_usb_disconnect,
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 3961a754e920..31867e4b891b 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -1152,7 +1152,6 @@ static int stir_resume(struct usb_interface *intf)
1152 * USB device callbacks 1152 * USB device callbacks
1153 */ 1153 */
1154static struct usb_driver irda_driver = { 1154static struct usb_driver irda_driver = {
1155 .owner = THIS_MODULE,
1156 .name = "stir4200", 1155 .name = "stir4200",
1157 .probe = stir_probe, 1156 .probe = stir_probe,
1158 .disconnect = stir_disconnect, 1157 .disconnect = stir_disconnect,
diff --git a/drivers/net/ixp2000/Kconfig b/drivers/net/ixp2000/Kconfig
new file mode 100644
index 000000000000..2fec2415651f
--- /dev/null
+++ b/drivers/net/ixp2000/Kconfig
@@ -0,0 +1,6 @@
1config ENP2611_MSF_NET
2 tristate "Radisys ENP2611 MSF network interface support"
3 depends on ARCH_ENP2611
4 help
5 This is a driver for the MSF network interface unit in
6 the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ixp2000/Makefile b/drivers/net/ixp2000/Makefile
new file mode 100644
index 000000000000..fd38351ceaa7
--- /dev/null
+++ b/drivers/net/ixp2000/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
2
3enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ixp2000/caleb.c b/drivers/net/ixp2000/caleb.c
new file mode 100644
index 000000000000..3595e107df22
--- /dev/null
+++ b/drivers/net/ixp2000/caleb.c
@@ -0,0 +1,137 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <asm/io.h>
16#include "caleb.h"
17
18#define CALEB_IDLO 0x00
19#define CALEB_IDHI 0x01
20#define CALEB_RID 0x02
21#define CALEB_RESET 0x03
22#define CALEB_INTREN0 0x04
23#define CALEB_INTREN1 0x05
24#define CALEB_INTRSTAT0 0x06
25#define CALEB_INTRSTAT1 0x07
26#define CALEB_PORTEN 0x08
27#define CALEB_BURST 0x09
28#define CALEB_PORTPAUS 0x0A
29#define CALEB_PORTPAUSD 0x0B
30#define CALEB_PHY0RX 0x10
31#define CALEB_PHY1RX 0x11
32#define CALEB_PHY0TX 0x12
33#define CALEB_PHY1TX 0x13
34#define CALEB_IXPRX_HI_CNTR 0x15
35#define CALEB_PHY0RX_HI_CNTR 0x16
36#define CALEB_PHY1RX_HI_CNTR 0x17
37#define CALEB_IXPRX_CNTR 0x18
38#define CALEB_PHY0RX_CNTR 0x19
39#define CALEB_PHY1RX_CNTR 0x1A
40#define CALEB_IXPTX_CNTR 0x1B
41#define CALEB_PHY0TX_CNTR 0x1C
42#define CALEB_PHY1TX_CNTR 0x1D
43#define CALEB_DEBUG0 0x1E
44#define CALEB_DEBUG1 0x1F
45
46
47static u8 caleb_reg_read(int reg)
48{
49 u8 value;
50
51 value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
52
53// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
54
55 return value;
56}
57
58static void caleb_reg_write(int reg, u8 value)
59{
60 u8 dummy;
61
62// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
63
64 *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
65
66 dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
67 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
68}
69
70
71void caleb_reset(void)
72{
73 /*
74 * Perform a chip reset.
75 */
76 caleb_reg_write(CALEB_RESET, 0x02);
77 udelay(1);
78
79 /*
80 * Enable all interrupt sources. This is needed to get
 81 * meaningful results out of the status bits (registers 6
 82 * and 7).
83 */
84 caleb_reg_write(CALEB_INTREN0, 0xff);
85 caleb_reg_write(CALEB_INTREN1, 0x07);
86
87 /*
88 * Set RX and TX FIFO thresholds to 1.5kb.
89 */
90 caleb_reg_write(CALEB_PHY0RX, 0x11);
91 caleb_reg_write(CALEB_PHY1RX, 0x11);
92 caleb_reg_write(CALEB_PHY0TX, 0x11);
93 caleb_reg_write(CALEB_PHY1TX, 0x11);
94
95 /*
96 * Program SPI-3 burst size.
97 */
98 caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
99// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
100// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
101}
102
103void caleb_enable_rx(int port)
104{
105 u8 temp;
106
107 temp = caleb_reg_read(CALEB_PORTEN);
108 temp |= 1 << port;
109 caleb_reg_write(CALEB_PORTEN, temp);
110}
111
112void caleb_disable_rx(int port)
113{
114 u8 temp;
115
116 temp = caleb_reg_read(CALEB_PORTEN);
117 temp &= ~(1 << port);
118 caleb_reg_write(CALEB_PORTEN, temp);
119}
120
121void caleb_enable_tx(int port)
122{
123 u8 temp;
124
125 temp = caleb_reg_read(CALEB_PORTEN);
126 temp |= 1 << (port + 4);
127 caleb_reg_write(CALEB_PORTEN, temp);
128}
129
130void caleb_disable_tx(int port)
131{
132 u8 temp;
133
134 temp = caleb_reg_read(CALEB_PORTEN);
135 temp &= ~(1 << (port + 4));
136 caleb_reg_write(CALEB_PORTEN, temp);
137}
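The four enable/disable helpers above all poke the same CALEB_PORTEN register; from the shifts used, the RX enables for the SPI-3 ports sit in bits 0-3 and the TX enables in bits 4-7. The helper below is not part of the patch, only a sketch that makes that bit layout explicit:

/*
 * Illustrative only: bit layout inferred from caleb_enable_rx() and
 * caleb_enable_tx() above.
 */
#define CALEB_PORTEN_RX(port)	(1 << (port))		/* bits 0..3 */
#define CALEB_PORTEN_TX(port)	(1 << ((port) + 4))	/* bits 4..7 */

/* Hypothetical helper: enable or disable both directions of one port. */
static void caleb_set_port_enable(int port, int enable)
{
	u8 temp;

	temp = caleb_reg_read(CALEB_PORTEN);
	if (enable)
		temp |= CALEB_PORTEN_RX(port) | CALEB_PORTEN_TX(port);
	else
		temp &= ~(CALEB_PORTEN_RX(port) | CALEB_PORTEN_TX(port));
	caleb_reg_write(CALEB_PORTEN, temp);
}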
diff --git a/drivers/net/ixp2000/caleb.h b/drivers/net/ixp2000/caleb.h
new file mode 100644
index 000000000000..e93a1ef5b8a3
--- /dev/null
+++ b/drivers/net/ixp2000/caleb.h
@@ -0,0 +1,22 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __CALEB_H
13#define __CALEB_H
14
15void caleb_reset(void);
16void caleb_enable_rx(int port);
17void caleb_disable_rx(int port);
18void caleb_enable_tx(int port);
19void caleb_disable_tx(int port);
20
21
22#endif
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
new file mode 100644
index 000000000000..d82651a97bae
--- /dev/null
+++ b/drivers/net/ixp2000/enp2611.c
@@ -0,0 +1,245 @@
1/*
2 * IXP2400 MSF network device driver for the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/init.h>
18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h>
20#include <asm/mach-types.h>
21#include <asm/io.h>
22#include "ixpdev.h"
23#include "caleb.h"
24#include "ixp2400-msf.h"
25#include "pm3386.h"
26
27/***********************************************************************
28 * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
29 * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
30 * to the IXP2400.
31 *
32 * +-------------+
33 * SFP GBIC #0 ---+ | +---------+
34 * | PM3386 #0 +-------+ |
35 * SFP GBIC #1 ---+ | | "Caleb" | +---------+
36 * +-------------+ | | | |
37 * | SPI-3 +---------+ IXP2400 |
38 * +-------------+ | bridge | | |
39 * SFP GBIC #2 ---+ | | FPGA | +---------+
40 * | PM3386 #1 +-------+ |
41 * | | +---------+
42 * +-------------+
43 * ^ ^ ^
44 * | 1.25Gbaud | 104MHz | 104MHz
45 * | SERDES ea. | SPI-3 ea. | SPI-3
46 *
47 ***********************************************************************/
48static struct ixp2400_msf_parameters enp2611_msf_parameters =
49{
50 .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
51 IXP2400_RX_MODE_1x32 |
52 IXP2400_RX_MODE_MPHY |
53 IXP2400_RX_MODE_MPHY_32 |
54 IXP2400_RX_MODE_MPHY_POLLED_STATUS |
55 IXP2400_RX_MODE_MPHY_LEVEL3 |
56 IXP2400_RX_MODE_RBUF_SIZE_64,
57
58 .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
59
60 .rx_poll_ports = 3,
61
62 .rx_channel_mode = {
63 IXP2400_PORT_RX_MODE_MASTER |
64 IXP2400_PORT_RX_MODE_POS_PHY |
65 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
66 IXP2400_PORT_RX_MODE_ODD_PARITY |
67 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
68
69 IXP2400_PORT_RX_MODE_MASTER |
70 IXP2400_PORT_RX_MODE_POS_PHY |
71 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
72 IXP2400_PORT_RX_MODE_ODD_PARITY |
73 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
74
75 IXP2400_PORT_RX_MODE_MASTER |
76 IXP2400_PORT_RX_MODE_POS_PHY |
77 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
78 IXP2400_PORT_RX_MODE_ODD_PARITY |
79 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
80
81 IXP2400_PORT_RX_MODE_MASTER |
82 IXP2400_PORT_RX_MODE_POS_PHY |
83 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
84 IXP2400_PORT_RX_MODE_ODD_PARITY |
85 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
86 },
87
88 .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
89 IXP2400_TX_MODE_1x32 |
90 IXP2400_TX_MODE_MPHY |
91 IXP2400_TX_MODE_MPHY_32 |
92 IXP2400_TX_MODE_MPHY_POLLED_STATUS |
93 IXP2400_TX_MODE_MPHY_LEVEL3 |
94 IXP2400_TX_MODE_TBUF_SIZE_64,
95
96 .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
97
98 .tx_poll_ports = 3,
99
100 .tx_channel_mode = {
101 IXP2400_PORT_TX_MODE_MASTER |
102 IXP2400_PORT_TX_MODE_POS_PHY |
103 IXP2400_PORT_TX_MODE_ODD_PARITY |
104 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
105
106 IXP2400_PORT_TX_MODE_MASTER |
107 IXP2400_PORT_TX_MODE_POS_PHY |
108 IXP2400_PORT_TX_MODE_ODD_PARITY |
109 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
110
111 IXP2400_PORT_TX_MODE_MASTER |
112 IXP2400_PORT_TX_MODE_POS_PHY |
113 IXP2400_PORT_TX_MODE_ODD_PARITY |
114 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
115
116 IXP2400_PORT_TX_MODE_MASTER |
117 IXP2400_PORT_TX_MODE_POS_PHY |
118 IXP2400_PORT_TX_MODE_ODD_PARITY |
119 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
120 }
121};
122
123struct enp2611_ixpdev_priv
124{
125 struct ixpdev_priv ixpdev_priv;
126 struct net_device_stats stats;
127};
128
129static struct net_device *nds[3];
130static struct timer_list link_check_timer;
131
132static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
133{
134 struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
135
136 pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
137
138 return &(ip->stats);
139}
140
141/* @@@ Poll the SFP moddef0 line too. */
142/* @@@ Try to use the pm3386 DOOL interrupt as well. */
143static void enp2611_check_link_status(unsigned long __dummy)
144{
145 int i;
146
147 for (i = 0; i < 3; i++) {
148 struct net_device *dev;
149 int status;
150
151 dev = nds[i];
152
153 status = pm3386_is_link_up(i);
154 if (status && !netif_carrier_ok(dev)) {
155 /* @@@ Should report autonegotiation status. */
156 printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
157
158 pm3386_enable_tx(i);
159 caleb_enable_tx(i);
160 netif_carrier_on(dev);
161 } else if (!status && netif_carrier_ok(dev)) {
162 printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
163
164 netif_carrier_off(dev);
165 caleb_disable_tx(i);
166 pm3386_disable_tx(i);
167 }
168 }
169
170 link_check_timer.expires = jiffies + HZ / 10;
171 add_timer(&link_check_timer);
172}
173
174static void enp2611_set_port_admin_status(int port, int up)
175{
176 if (up) {
177 caleb_enable_rx(port);
178
179 pm3386_set_carrier(port, 1);
180 pm3386_enable_rx(port);
181 } else {
182 caleb_disable_tx(port);
183 pm3386_disable_tx(port);
184 /* @@@ Flush out pending packets. */
185 pm3386_set_carrier(port, 0);
186
187 pm3386_disable_rx(port);
188 caleb_disable_rx(port);
189 }
190}
191
192static int __init enp2611_init_module(void)
193{
194 int i;
195
196 if (!machine_is_enp2611())
197 return -ENODEV;
198
199 caleb_reset();
200 pm3386_reset();
201
202 for (i = 0; i < 3; i++) {
203 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
204 if (nds[i] == NULL) {
205 while (--i >= 0)
206 free_netdev(nds[i]);
207 return -ENOMEM;
208 }
209
210 SET_MODULE_OWNER(nds[i]);
211 nds[i]->get_stats = enp2611_get_stats;
212 pm3386_init_port(i);
213 pm3386_get_mac(i, nds[i]->dev_addr);
214 }
215
216 ixp2400_msf_init(&enp2611_msf_parameters);
217
218 if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
219 for (i = 0; i < 3; i++)
220 free_netdev(nds[i]);
221 return -EINVAL;
222 }
223
224 init_timer(&link_check_timer);
225 link_check_timer.function = enp2611_check_link_status;
226 link_check_timer.expires = jiffies;
227 add_timer(&link_check_timer);
228
229 return 0;
230}
231
232static void __exit enp2611_cleanup_module(void)
233{
234 int i;
235
236 del_timer_sync(&link_check_timer);
237
238 ixpdev_deinit();
239 for (i = 0; i < 3; i++)
240 free_netdev(nds[i]);
241}
242
243module_init(enp2611_init_module);
244module_exit(enp2611_cleanup_module);
245MODULE_LICENSE("GPL");
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ixp2000/ixp2400-msf.c
new file mode 100644
index 000000000000..48a3a891d3a4
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400-msf.c
@@ -0,0 +1,213 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <asm/hardware.h>
18#include <asm/arch/ixp2000-regs.h>
19#include <asm/delay.h>
20#include <asm/io.h>
21#include "ixp2400-msf.h"
22
23/*
24 * This is the Intel recommended PLL init procedure as described on
25 * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
26 */
27static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
28{
29 int rx_dual_clock;
30 int tx_dual_clock;
31 u32 value;
32
33 /*
34 * If the RX mode is not 1x32, we have to enable both RX PLLs
35 * (#0 and #1.) The same thing for the TX direction.
36 */
37 rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
38 tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
39
40 /*
41 * Read initial value.
42 */
43 value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
44
45 /*
46 * Put PLLs in powerdown and bypass mode.
47 */
48 value |= 0x0000f0f0;
49 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
50
51 /*
52 * Set single or dual clock mode bits.
53 */
54 value &= ~0x03000000;
55 value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
56
57 /*
58 * Set multipliers.
59 */
60 value &= ~0x00ff0000;
61 value |= mp->rxclk01_multiplier << 16;
62 value |= mp->rxclk23_multiplier << 18;
63 value |= mp->txclk01_multiplier << 20;
64 value |= mp->txclk23_multiplier << 22;
65
66 /*
67 * And write value.
68 */
69 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
70
71 /*
72 * Disable PLL bypass mode.
73 */
74 value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
75 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
76
77 /*
78 * Turn on PLLs.
79 */
80 value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
81 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
82
83 /*
84 * Wait for PLLs to lock. There are lock status bits, but IXP2400
85 * erratum #65 says that these lock bits should not be relied upon
86 * as they might not accurately reflect the true state of the PLLs.
87 */
88 udelay(100);
89}
90
91/*
92 * Needed according to p480 of Programmer's Reference Manual.
93 */
94static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
95{
96 int size_bits;
97 int i;
98
99 /*
100 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
101 * corruption) in the Intel-recommended way: do not add the RBUF
102 * elements susceptible to corruption to the freelist.
103 */
104 size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
105 if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
106 for (i = 1; i < 128; i++) {
107 if (i == 9 || i == 18 || i == 27)
108 continue;
109 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
110 }
111 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
112 for (i = 1; i < 64; i++) {
113 if (i == 4 || i == 9 || i == 13)
114 continue;
115 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
116 }
117 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
118 for (i = 1; i < 32; i++) {
119 if (i == 2 || i == 4 || i == 6)
120 continue;
121 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
122 }
123 }
124}
125
126static u32 ixp2400_msf_valid_channels(u32 reg)
127{
128 u32 channels;
129
130 channels = 0;
131 switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
132 case IXP2400_RX_MODE_1x32:
133 channels = 0x1;
134 if (reg & IXP2400_RX_MODE_MPHY &&
135 !(reg & IXP2400_RX_MODE_MPHY_32))
136 channels = 0xf;
137 break;
138
139 case IXP2400_RX_MODE_2x16:
140 channels = 0x5;
141 break;
142
143 case IXP2400_RX_MODE_4x8:
144 channels = 0xf;
145 break;
146
147 case IXP2400_RX_MODE_1x16_2x8:
148 channels = 0xd;
149 break;
150 }
151
152 return channels;
153}
154
155static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
156{
157 u32 value;
158
159 value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
160 value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
161 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
162}
163
164static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
165{
166 u32 value;
167
168 value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
169 value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
170 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
171}
172
173
174void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
175{
176 u32 value;
177 int i;
178
179 /*
180 * Init the RX/TX PLLs based on the passed parameter block.
181 */
182 ixp2400_pll_init(mp);
183
184 /*
185 * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
186 */
187 value = ixp2000_reg_read(IXP2000_RESET0);
188 ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
189 ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
190
191 /*
192 * Initialise the RX section.
193 */
194 ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
195 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
196 for (i = 0; i < 4; i++) {
197 ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
198 mp->rx_channel_mode[i]);
199 }
200 ixp2400_msf_free_rbuf_entries(mp);
201 ixp2400_msf_enable_rx(mp);
202
203 /*
204 * Initialise the TX section.
205 */
206 ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
207 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
208 for (i = 0; i < 4; i++) {
209 ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
210 mp->tx_channel_mode[i]);
211 }
212 ixp2400_msf_enable_tx(mp);
213}
diff --git a/drivers/net/ixp2000/ixp2400-msf.h b/drivers/net/ixp2000/ixp2400-msf.h
new file mode 100644
index 000000000000..3ac1af2771da
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400-msf.h
@@ -0,0 +1,115 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#ifndef __IXP2400_MSF_H
15#define __IXP2400_MSF_H
16
17struct ixp2400_msf_parameters
18{
19 u32 rx_mode;
20 unsigned rxclk01_multiplier:2;
21 unsigned rxclk23_multiplier:2;
22 unsigned rx_poll_ports:6;
23 u32 rx_channel_mode[4];
24
25 u32 tx_mode;
26 unsigned txclk01_multiplier:2;
27 unsigned txclk23_multiplier:2;
28 unsigned tx_poll_ports:6;
29 u32 tx_channel_mode[4];
30};
31
32void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
33
34#define IXP2400_PLL_MULTIPLIER_48 0x00
35#define IXP2400_PLL_MULTIPLIER_24 0x01
36#define IXP2400_PLL_MULTIPLIER_16 0x02
37#define IXP2400_PLL_MULTIPLIER_12 0x03
38
39#define IXP2400_RX_MODE_CSIX 0x00400000
40#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
41#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
42#define IXP2400_RX_MODE_1x16_2x8 0x00300000
43#define IXP2400_RX_MODE_4x8 0x00200000
44#define IXP2400_RX_MODE_2x16 0x00100000
45#define IXP2400_RX_MODE_1x32 0x00000000
46#define IXP2400_RX_MODE_MPHY 0x00080000
47#define IXP2400_RX_MODE_SPHY 0x00000000
48#define IXP2400_RX_MODE_MPHY_32 0x00040000
49#define IXP2400_RX_MODE_MPHY_4 0x00000000
50#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
51#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
52#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
53#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
54#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
55#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
56#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
57#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
58#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
59#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
60#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
61#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
62#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
63#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
64
65#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
66#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
67#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
68#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
69#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
70#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
71#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
72#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
73#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
74#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
75#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
76#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
77#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
78
79#define IXP2400_TX_MODE_CSIX 0x00400000
80#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
81#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
82#define IXP2400_TX_MODE_1x16_2x8 0x00300000
83#define IXP2400_TX_MODE_4x8 0x00200000
84#define IXP2400_TX_MODE_2x16 0x00100000
85#define IXP2400_TX_MODE_1x32 0x00000000
86#define IXP2400_TX_MODE_MPHY 0x00080000
87#define IXP2400_TX_MODE_SPHY 0x00000000
88#define IXP2400_TX_MODE_MPHY_32 0x00040000
89#define IXP2400_TX_MODE_MPHY_4 0x00000000
90#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
91#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
92#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
93#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
94#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
95#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
96#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
97#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
98#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
99#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
100#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
101#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
102
103#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
104#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
105#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
106#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
107#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
108#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
109#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
110#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
111#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
112#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
113
114
115#endif
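The header above only declares the parameter block and the individual mode bits; enp2611.c earlier in this patch shows how they compose for that board's 3-port MPHY setup. For the simpler single-PHY case that the RX/TX microcode below assumes (1x32 SPHY with 64-byte RBUF/TBUF elements), a parameter block might look roughly like the sketch below. This is illustrative only, not a tested configuration, and the PLL multiplier and poll-port counts are assumed values:

static struct ixp2400_msf_parameters example_sphy_msf_parameters = {
	/* 32-bit single-PHY POS-PHY interface, 64-byte RBUF elements. */
	.rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
		   IXP2400_RX_MODE_1x32 |
		   IXP2400_RX_MODE_SPHY |
		   IXP2400_RX_MODE_RBUF_SIZE_64,

	.rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,	/* assumed */
	.rx_poll_ports = 1,		/* presumably unused in SPHY mode */

	.rx_channel_mode = {
		IXP2400_PORT_RX_MODE_MASTER |
		IXP2400_PORT_RX_MODE_POS_PHY |
		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
	},

	.tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
		   IXP2400_TX_MODE_1x32 |
		   IXP2400_TX_MODE_SPHY |
		   IXP2400_TX_MODE_TBUF_SIZE_64,

	.txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,	/* assumed */
	.tx_poll_ports = 1,

	.tx_channel_mode = {
		IXP2400_PORT_TX_MODE_MASTER |
		IXP2400_PORT_TX_MODE_POS_PHY |
		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
	},
};

/* A board driver would pass this to ixp2400_msf_init() during probe. */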
diff --git a/drivers/net/ixp2000/ixp2400_rx.uc b/drivers/net/ixp2000/ixp2400_rx.uc
new file mode 100644
index 000000000000..42a73e357afa
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_rx.uc
@@ -0,0 +1,408 @@
1/*
2 * RX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one full element list is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The RBUF uses 64-byte mpackets.
17 * - RX descriptors reside in SRAM, and have the following format:
18 * struct rx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 buf_length;
23 *
24 * // from uengine
25 * u32 channel;
26 * u32 pkt_length;
27 * };
28 * - Packet data resides in DRAM.
29 * - Packet buffer addresses are 8-byte aligned.
30 * - Scratch ring 0 is rx_pending.
31 * - Scratch ring 1 is rx_done, and has status condition 'full'.
32 * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
33 * - This code is run on all eight threads of the microengine it runs on.
34 *
35 * Local memory is used for per-channel RX state.
36 */
37
38#define RX_THREAD_FREELIST_0 0x0030
39#define RBUF_ELEMENT_DONE 0x0044
40
41#define CHANNEL_FLAGS *l$index0[0]
42#define CHANNEL_FLAG_RECEIVING 1
43#define PACKET_LENGTH *l$index0[1]
44#define PACKET_CHECKSUM *l$index0[2]
45#define BUFFER_HANDLE *l$index0[3]
46#define BUFFER_START *l$index0[4]
47#define BUFFER_LENGTH *l$index0[5]
48
49#define CHANNEL_STATE_SIZE 24 // in bytes
50#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
51
52
53 .sig volatile sig1
54 .sig volatile sig2
55 .sig volatile sig3
56
57 .sig mpacket_arrived
58 .reg add_to_rx_freelist
59 .reg read $rsw0, $rsw1
60 .xfer_order $rsw0 $rsw1
61
62 .reg zero
63
64 /*
65 * Initialise add_to_rx_freelist.
66 */
67 .begin
68 .reg temp
69 .reg temp2
70
71 immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
72 immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
73
74 local_csr_rd[ACTIVE_CTX_STS]
75 immed[temp, 0]
76 alu[temp2, temp, and, 0x1f]
77 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
78 alu[temp2, temp, and, 0x80]
79 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
80 .end
81
82 immed[zero, 0]
83
84 /*
85 * Skip context 0 initialisation?
86 */
87 .begin
88 br!=ctx[0, mpacket_receive_loop#]
89 .end
90
91 /*
92 * Initialise local memory.
93 */
94 .begin
95 .reg addr
96 .reg temp
97
98 immed[temp, 0]
99 init_local_mem_loop#:
100 alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
101 local_csr_wr[ACTIVE_LM_ADDR_0, addr]
102 nop
103 nop
104 nop
105
106 immed[CHANNEL_FLAGS, 0]
107
108 alu[temp, temp, +, 1]
109 alu[--, temp, and, 0x20]
110 beq[init_local_mem_loop#]
111 .end
112
113 /*
114 * Initialise signal pipeline.
115 */
116 .begin
117 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
118 .set_sig sig1
119
120 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
121 .set_sig sig2
122
123 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
124 .set_sig sig3
125 .end
126
127mpacket_receive_loop#:
128 /*
129 * Synchronise and wait for mpacket.
130 */
131 .begin
132 ctx_arb[sig1]
133 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
134
135 msf[fast_wr, --, add_to_rx_freelist, 0]
136 .set_sig mpacket_arrived
137 ctx_arb[mpacket_arrived]
138 .set $rsw0 $rsw1
139 .end
140
141 /*
142 * We halt if we see {inbparerr,parerr,null,soperror}.
143 */
144 .begin
145 alu_shf[--, 0x1b, and, $rsw0, >>8]
146 bne[abort_rswerr#]
147 .end
148
149 /*
150 * Point local memory pointer to this channel's state area.
151 */
152 .begin
153 .reg chanaddr
154
155 alu[chanaddr, $rsw0, and, 0x1f]
156 alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
157 local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
158 nop
159 nop
160 nop
161 .end
162
163 /*
164 * Check whether we received a SOP mpacket while we were already
165 * working on a packet, or a non-SOP mpacket while there was no
166 * packet pending. (SOP == RECEIVING -> abort) If everything's
167 * okay, update the RECEIVING flag to reflect our new state.
168 */
169 .begin
170 .reg temp
171 .reg eop
172
173 #if CHANNEL_FLAG_RECEIVING != 1
174 #error CHANNEL_FLAG_RECEIVING is not 1
175 #endif
176
177 alu_shf[temp, 1, and, $rsw0, >>15]
178 alu[temp, temp, xor, CHANNEL_FLAGS]
179 alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
180 beq[abort_proterr#]
181
182 alu_shf[eop, 1, and, $rsw0, >>14]
183 alu[CHANNEL_FLAGS, temp, xor, eop]
184 .end
185
186 /*
187 * Copy the mpacket into the right spot, and in case of EOP,
188 * write back the descriptor and pass the packet on.
189 */
190 .begin
191 .reg buffer_offset
192 .reg _packet_length
193 .reg _packet_checksum
194 .reg _buffer_handle
195 .reg _buffer_start
196 .reg _buffer_length
197
198 /*
199 * Determine buffer_offset, _packet_length and
200 * _packet_checksum.
201 */
202 .begin
203 .reg temp
204
205 alu[--, 1, and, $rsw0, >>15]
206 beq[not_sop#]
207
208 immed[PACKET_LENGTH, 0]
209 immed[PACKET_CHECKSUM, 0]
210
211 not_sop#:
212 alu[buffer_offset, --, b, PACKET_LENGTH]
213 alu_shf[temp, 0xff, and, $rsw0, >>16]
214 alu[_packet_length, buffer_offset, +, temp]
215 alu[PACKET_LENGTH, --, b, _packet_length]
216
217 immed[temp, 0xffff]
218 alu[temp, $rsw1, and, temp]
219 alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
220 alu[PACKET_CHECKSUM, --, b, _packet_checksum]
221 .end
222
223 /*
224 * Allocate buffer in case of SOP.
225 */
226 .begin
227 .reg temp
228
229 alu[temp, 1, and, $rsw0, >>15]
230 beq[skip_buffer_alloc#]
231
232 .begin
233 .sig zzz
234 .reg read $stemp $stemp2
235 .xfer_order $stemp $stemp2
236
237 rx_nobufs#:
238 scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
239 alu[_buffer_handle, --, b, $stemp]
240 beq[rx_nobufs#]
241
242 sram[read, $stemp, _buffer_handle, 0, 2],
243 ctx_swap[zzz]
244 alu[_buffer_start, --, b, $stemp]
245 alu[_buffer_length, --, b, $stemp2]
246 .end
247
248 skip_buffer_alloc#:
249 .end
250
251 /*
252 * Resynchronise.
253 */
254 .begin
255 ctx_arb[sig2]
256 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
257 .end
258
259 /*
260 * Synchronise buffer state.
261 */
262 .begin
263 .reg temp
264
265 alu[temp, 1, and, $rsw0, >>15]
266 beq[copy_from_local_mem#]
267
268 alu[BUFFER_HANDLE, --, b, _buffer_handle]
269 alu[BUFFER_START, --, b, _buffer_start]
270 alu[BUFFER_LENGTH, --, b, _buffer_length]
271 br[sync_state_done#]
272
273 copy_from_local_mem#:
274 alu[_buffer_handle, --, b, BUFFER_HANDLE]
275 alu[_buffer_start, --, b, BUFFER_START]
276 alu[_buffer_length, --, b, BUFFER_LENGTH]
277
278 sync_state_done#:
279 .end
280
281#if 0
282 /*
283 * Debug buffer state management.
284 */
285 .begin
286 .reg temp
287
288 alu[temp, 1, and, $rsw0, >>14]
289 beq[no_poison#]
290 immed[BUFFER_HANDLE, 0xdead]
291 immed[BUFFER_START, 0xdead]
292 immed[BUFFER_LENGTH, 0xdead]
293 no_poison#:
294
295 immed[temp, 0xdead]
296 alu[--, _buffer_handle, -, temp]
297 beq[state_corrupted#]
298 alu[--, _buffer_start, -, temp]
299 beq[state_corrupted#]
300 alu[--, _buffer_length, -, temp]
301 beq[state_corrupted#]
302 .end
303#endif
304
305 /*
306 * Check buffer length.
307 */
308 .begin
309 alu[--, _buffer_length, -, _packet_length]
310 blo[buffer_overflow#]
311 .end
312
313 /*
314 * Copy the mpacket and give back the RBUF element.
315 */
316 .begin
317 .reg element
318 .reg xfer_size
319 .reg temp
320 .sig copy_sig
321
322 alu_shf[element, 0x7f, and, $rsw0, >>24]
323 alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
324
325 alu[xfer_size, xfer_size, -, 1]
326 alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
327 alu_shf[temp, 0x10, or, xfer_size, <<21]
328 alu_shf[temp, temp, or, element, <<11]
329 alu_shf[--, temp, or, 1, <<18]
330
331 dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
332 indirect_ref, sig_done[copy_sig]
333 ctx_arb[copy_sig]
334
335 alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
336 msf[fast_wr, --, temp, 0]
337 .end
338
339 /*
340 * If EOP, write back the packet descriptor.
341 */
342 .begin
343 .reg write $stemp $stemp2
344 .xfer_order $stemp $stemp2
345 .sig zzz
346
347 alu_shf[--, 1, and, $rsw0, >>14]
348 beq[no_writeback#]
349
350 alu[$stemp, $rsw0, and, 0x1f]
351 alu[$stemp2, --, b, _packet_length]
352 sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
353
354 no_writeback#:
355 .end
356
357 /*
358 * Resynchronise.
359 */
360 .begin
361 ctx_arb[sig3]
362 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
363 .end
364
365 /*
366 * If EOP, put the buffer back onto the scratch ring.
367 */
368 .begin
369 .reg write $stemp
370 .sig zzz
371
372 br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
373
374 alu_shf[--, 1, and, $rsw0, >>14]
375 beq[mpacket_receive_loop#]
376
377 alu[--, 1, and, $rsw0, >>10]
378 bne[rxerr#]
379
380 alu[$stemp, --, b, _buffer_handle]
381 scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
382 cap[fast_wr, 0, XSCALE_INT_A]
383 br[mpacket_receive_loop#]
384
385 rxerr#:
386 alu[$stemp, --, b, _buffer_handle]
387 scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
388 br[mpacket_receive_loop#]
389 .end
390 .end
391
392
393abort_rswerr#:
394 halt
395
396abort_proterr#:
397 halt
398
399state_corrupted#:
400 halt
401
402buffer_overflow#:
403 halt
404
405rx_done_ring_overflow#:
406 halt
407
408
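One detail worth connecting back to C: in the EOP path above, "sram[write, $stemp, _buffer_handle, 8, 2]" stores two words at byte offset 8 of the RX descriptor, which are the channel and pkt_length fields marked "from uengine" in the header comment (and correspond to struct ixpdev_rx_desc in ixpdev_priv.h later in this patch). A host-side paraphrase of that writeback, purely for illustration:

/* Illustrative paraphrase of the ucode's EOP descriptor writeback. */
static void rx_writeback_example(struct ixpdev_rx_desc *desc,
				 u32 rsw0, u32 total_length)
{
	/* buf_phys_addr and buf_length (offsets 0 and 4) stay host-owned. */
	desc->channel = rsw0 & 0x1f;		/* source channel from the status word */
	desc->pkt_length = total_length;	/* reassembled packet length */
}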
diff --git a/drivers/net/ixp2000/ixp2400_rx.ucode b/drivers/net/ixp2000/ixp2400_rx.ucode
new file mode 100644
index 000000000000..e8aee2f81aad
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_rx.ucode
@@ -0,0 +1,130 @@
1static struct ixp2000_uengine_code ixp2400_rx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 109,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x0c, 0xc0, 0x05,
21 0xf4, 0x44, 0x0c, 0x00, 0x05,
22 0xfc, 0x04, 0x4c, 0x00, 0x00,
23 0xf0, 0x00, 0x00, 0x3b, 0x00,
24 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
25 0x8a, 0xc0, 0x50, 0x3e, 0x05,
26 0xb4, 0x40, 0xf0, 0x3b, 0x80,
27 0x9a, 0xe0, 0x00, 0x3e, 0x05,
28 0xf0, 0x00, 0x00, 0x07, 0x00,
29 0xd8, 0x05, 0xc0, 0x00, 0x11,
30 0xf0, 0x00, 0x00, 0x0f, 0x00,
31 0x91, 0xb0, 0x20, 0x0e, 0x00,
32 0xfc, 0x06, 0x60, 0x0b, 0x00,
33 0xf0, 0x00, 0x0c, 0x03, 0x00,
34 0xf0, 0x00, 0x0c, 0x03, 0x00,
35 0xf0, 0x00, 0x0c, 0x03, 0x00,
36 0xf0, 0x00, 0x0c, 0x02, 0x00,
37 0xb0, 0xc0, 0x30, 0x0f, 0x01,
38 0xa4, 0x70, 0x00, 0x0f, 0x20,
39 0xd8, 0x02, 0xc0, 0x01, 0x00,
40 0xfc, 0x10, 0xac, 0x23, 0x08,
41 0xfc, 0x10, 0xac, 0x43, 0x10,
42 0xfc, 0x10, 0xac, 0x63, 0x18,
43 0xe0, 0x00, 0x00, 0x00, 0x02,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0x3d, 0x00, 0x04, 0x03, 0x20,
46 0xe0, 0x00, 0x00, 0x00, 0x10,
47 0x84, 0x82, 0x02, 0x01, 0x3b,
48 0xd8, 0x1a, 0x00, 0x01, 0x01,
49 0xb4, 0x00, 0x8c, 0x7d, 0x80,
50 0x91, 0xb0, 0x80, 0x22, 0x00,
51 0xfc, 0x06, 0x60, 0x23, 0x00,
52 0xf0, 0x00, 0x0c, 0x03, 0x00,
53 0xf0, 0x00, 0x0c, 0x03, 0x00,
54 0xf0, 0x00, 0x0c, 0x03, 0x00,
55 0x94, 0xf0, 0x92, 0x01, 0x21,
56 0xac, 0x40, 0x60, 0x26, 0x00,
57 0xa4, 0x30, 0x0c, 0x04, 0x06,
58 0xd8, 0x1a, 0x40, 0x01, 0x00,
59 0x94, 0xe0, 0xa2, 0x01, 0x21,
60 0xac, 0x20, 0x00, 0x28, 0x06,
61 0x84, 0xf2, 0x02, 0x01, 0x21,
62 0xd8, 0x0b, 0x40, 0x01, 0x00,
63 0xf0, 0x00, 0x0c, 0x02, 0x01,
64 0xf0, 0x00, 0x0c, 0x02, 0x02,
65 0xa0, 0x00, 0x08, 0x04, 0x00,
66 0x95, 0x00, 0xc6, 0x01, 0xff,
67 0xa0, 0x80, 0x10, 0x30, 0x00,
68 0xa0, 0x60, 0x1c, 0x00, 0x01,
69 0xf0, 0x0f, 0xf0, 0x33, 0xff,
70 0xb4, 0x00, 0xc0, 0x31, 0x81,
71 0xb0, 0x80, 0xb0, 0x32, 0x02,
72 0xa0, 0x20, 0x20, 0x2c, 0x00,
73 0x94, 0xf0, 0xd2, 0x01, 0x21,
74 0xd8, 0x0f, 0x40, 0x01, 0x00,
75 0x19, 0x40, 0x10, 0x04, 0x20,
76 0xa0, 0x00, 0x26, 0x04, 0x00,
77 0xd8, 0x0d, 0xc0, 0x01, 0x00,
78 0x00, 0x42, 0x10, 0x80, 0x02,
79 0xb0, 0x00, 0x46, 0x04, 0x00,
80 0xb0, 0x00, 0x56, 0x08, 0x00,
81 0xe0, 0x00, 0x00, 0x00, 0x04,
82 0xfc, 0x10, 0xae, 0x43, 0x90,
83 0x84, 0xf0, 0x32, 0x01, 0x21,
84 0xd8, 0x11, 0x40, 0x01, 0x00,
85 0xa0, 0x60, 0x3c, 0x00, 0x02,
86 0xa0, 0x20, 0x40, 0x10, 0x00,
87 0xa0, 0x20, 0x50, 0x14, 0x00,
88 0xd8, 0x12, 0x00, 0x00, 0x18,
89 0xa0, 0x00, 0x28, 0x0c, 0x00,
90 0xb0, 0x00, 0x48, 0x10, 0x00,
91 0xb0, 0x00, 0x58, 0x14, 0x00,
92 0xaa, 0xf0, 0x00, 0x14, 0x01,
93 0xd8, 0x1a, 0xc0, 0x01, 0x05,
94 0x85, 0x80, 0x42, 0x01, 0xff,
95 0x95, 0x00, 0x66, 0x01, 0xff,
96 0xba, 0xc0, 0x60, 0x1b, 0x01,
97 0x9a, 0x30, 0x60, 0x19, 0x30,
98 0x9a, 0xb0, 0x70, 0x1a, 0x30,
99 0x9b, 0x50, 0x78, 0x1e, 0x04,
100 0x8a, 0xe2, 0x08, 0x1e, 0x21,
101 0x6a, 0x4e, 0x00, 0x13, 0x00,
102 0xe0, 0x00, 0x00, 0x00, 0x30,
103 0x9b, 0x00, 0x7a, 0x92, 0x04,
104 0x3d, 0x00, 0x04, 0x1f, 0x20,
105 0x84, 0xe2, 0x02, 0x01, 0x21,
106 0xd8, 0x16, 0x80, 0x01, 0x00,
107 0xa4, 0x18, 0x0c, 0x7d, 0x80,
108 0xa0, 0x58, 0x1c, 0x00, 0x01,
109 0x01, 0x42, 0x00, 0xa0, 0x02,
110 0xe0, 0x00, 0x00, 0x00, 0x08,
111 0xfc, 0x10, 0xae, 0x63, 0x98,
112 0xd8, 0x1b, 0x00, 0xc2, 0x14,
113 0x84, 0xe2, 0x02, 0x01, 0x21,
114 0xd8, 0x05, 0xc0, 0x01, 0x00,
115 0x84, 0xa2, 0x02, 0x01, 0x21,
116 0xd8, 0x19, 0x40, 0x01, 0x01,
117 0xa0, 0x58, 0x0c, 0x00, 0x02,
118 0x1a, 0x40, 0x00, 0x04, 0x24,
119 0x33, 0x00, 0x01, 0x2f, 0x20,
120 0xd8, 0x05, 0xc0, 0x00, 0x18,
121 0xa0, 0x58, 0x0c, 0x00, 0x02,
122 0x1a, 0x40, 0x00, 0x04, 0x20,
123 0xd8, 0x05, 0xc0, 0x00, 0x18,
124 0xe0, 0x00, 0x02, 0x00, 0x00,
125 0xe0, 0x00, 0x02, 0x00, 0x00,
126 0xe0, 0x00, 0x02, 0x00, 0x00,
127 0xe0, 0x00, 0x02, 0x00, 0x00,
128 0xe0, 0x00, 0x02, 0x00, 0x00,
129 }
130};
diff --git a/drivers/net/ixp2000/ixp2400_tx.uc b/drivers/net/ixp2000/ixp2400_tx.uc
new file mode 100644
index 000000000000..d090d1884fb7
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_tx.uc
@@ -0,0 +1,272 @@
1/*
2 * TX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one TBUF partition is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The TBUF uses 64-byte mpackets.
17 * - TX descriptors reside in SRAM, and have the following format:
18 * struct tx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 pkt_length;
23 * u32 channel;
24 * };
25 * - Packet data resides in DRAM.
26 * - Packet buffer addresses are 8-byte aligned.
27 * - Scratch ring 2 is tx_pending.
28 * - Scratch ring 3 is tx_done, and has status condition 'full'.
29 * - This code is run on all eight threads of the microengine it runs on.
30 */
31
32#define TX_SEQUENCE_0 0x0060
33#define TBUF_CTRL 0x1800
34
35#define PARTITION_SIZE 128
36#define PARTITION_THRESH 96
37
38
39 .sig volatile sig1
40 .sig volatile sig2
41 .sig volatile sig3
42
43 .reg @old_tx_seq_0
44 .reg @mpkts_in_flight
45 .reg @next_tbuf_mpacket
46
47 .reg @buffer_handle
48 .reg @buffer_start
49 .reg @packet_length
50 .reg @channel
51 .reg @packet_offset
52
53 .reg zero
54
55 immed[zero, 0]
56
57 /*
58 * Skip context 0 initialisation?
59 */
60 .begin
61 br!=ctx[0, mpacket_tx_loop#]
62 .end
63
64 /*
65 * Wait until all pending TBUF elements have been transmitted.
66 */
67 .begin
68 .reg read $tx
69 .sig zzz
70
71 loop_empty#:
72 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
73 alu_shf[--, --, b, $tx, >>31]
74 beq[loop_empty#]
75
76 alu[@old_tx_seq_0, --, b, $tx]
77 .end
78
79 immed[@mpkts_in_flight, 0]
80 alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
81
82 immed[@buffer_handle, 0]
83
84 /*
85 * Initialise signal pipeline.
86 */
87 .begin
88 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
89 .set_sig sig1
90
91 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
92 .set_sig sig2
93
94 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
95 .set_sig sig3
96 .end
97
98mpacket_tx_loop#:
99 .begin
100 .reg tbuf_element_index
101 .reg buffer_handle
102 .reg sop_eop
103 .reg packet_data
104 .reg channel
105 .reg mpacket_size
106
107 /*
108 * If there is no packet currently being transmitted,
109 * dequeue the next TX descriptor, and fetch the buffer
110 * address, packet length and destination channel number.
111 */
112 .begin
113 .reg read $stemp $stemp2 $stemp3
114 .xfer_order $stemp $stemp2 $stemp3
115 .sig zzz
116
117 ctx_arb[sig1]
118
119 alu[--, --, b, @buffer_handle]
120 bne[already_got_packet#]
121
122 tx_nobufs#:
123 scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
124 alu[@buffer_handle, --, b, $stemp]
125 beq[tx_nobufs#]
126
127 sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
128 alu[@buffer_start, --, b, $stemp]
129 alu[@packet_length, --, b, $stemp2]
130 beq[zero_byte_packet#]
131 alu[@channel, --, b, $stemp3]
132 immed[@packet_offset, 0]
133
134 already_got_packet#:
135 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
136 .end
137
138 /*
139 * Determine tbuf element index, SOP/EOP flags, mpacket
140 * offset and mpacket size and cache buffer_handle and
141 * channel number.
142 */
143 .begin
144 alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
145 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
146 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
147 (PARTITION_SIZE - 1)]
148
149 alu[buffer_handle, --, b, @buffer_handle]
150 immed[@buffer_handle, 0]
151
152 immed[sop_eop, 1]
153
154 alu[packet_data, --, b, @packet_offset]
155 bne[no_sop#]
156 alu[sop_eop, sop_eop, or, 2]
157 no_sop#:
158 alu[packet_data, packet_data, +, @buffer_start]
159
160 alu[channel, --, b, @channel]
161
162 alu[mpacket_size, @packet_length, -, @packet_offset]
163 alu[--, 64, -, mpacket_size]
164 bhs[eop#]
165 alu[@buffer_handle, --, b, buffer_handle]
166 immed[mpacket_size, 64]
167 alu[sop_eop, sop_eop, and, 2]
168 eop#:
169
170 alu[@packet_offset, @packet_offset, +, mpacket_size]
171 .end
172
173 /*
174 * Wait until there's enough space in the TBUF.
175 */
176 .begin
177 .reg read $tx
178 .reg temp
179 .sig zzz
180
181 ctx_arb[sig2]
182
183 br[test_space#]
184
185 loop_space#:
186 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
187
188 alu[temp, $tx, -, @old_tx_seq_0]
189 alu[temp, temp, and, 0xff]
190 alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
191
192 alu[@old_tx_seq_0, --, b, $tx]
193
194 test_space#:
195 alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
196 blo[loop_space#]
197
198 alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
199
200 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
201 .end
202
203 /*
204 * Copy the packet data to the TBUF.
205 */
206 .begin
207 .reg temp
208 .sig copy_sig
209
210 alu[temp, mpacket_size, -, 1]
211 alu_shf[temp, 0x10, or, temp, >>3]
212 alu_shf[temp, 0x10, or, temp, <<21]
213 alu_shf[temp, temp, or, tbuf_element_index, <<11]
214 alu_shf[--, temp, or, 1, <<18]
215
216 dram[tbuf_wr, --, packet_data, 0, max_8],
217 indirect_ref, sig_done[copy_sig]
218 ctx_arb[copy_sig]
219 .end
220
221 /*
222 * Mark TBUF element as ready-to-be-transmitted.
223 */
224 .begin
225 .reg write $tsw $tsw2
226 .xfer_order $tsw $tsw2
227 .reg temp
228 .sig zzz
229
230 alu_shf[temp, channel, or, mpacket_size, <<24]
231 alu_shf[$tsw, temp, or, sop_eop, <<8]
232 immed[$tsw2, 0]
233
234 immed[temp, TBUF_CTRL]
235 alu_shf[temp, temp, or, tbuf_element_index, <<3]
236 msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
237 .end
238
239 /*
240 * Resynchronise.
241 */
242 .begin
243 ctx_arb[sig3]
244 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
245 .end
246
247 /*
248 * If this was an EOP mpacket, recycle the TX buffer
249 * and signal the host.
250 */
251 .begin
252 .reg write $stemp
253 .sig zzz
254
255 alu[--, sop_eop, and, 1]
256 beq[mpacket_tx_loop#]
257
258 tx_done_ring_full#:
259 br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
260
261 alu[$stemp, --, b, buffer_handle]
262 scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
263 cap[fast_wr, 0, XSCALE_INT_A]
264 br[mpacket_tx_loop#]
265 .end
266 .end
267
268
269zero_byte_packet#:
270 halt
271
272
diff --git a/drivers/net/ixp2000/ixp2400_tx.ucode b/drivers/net/ixp2000/ixp2400_tx.ucode
new file mode 100644
index 000000000000..a433e24b0a51
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_tx.ucode
@@ -0,0 +1,98 @@
1static struct ixp2000_uengine_code ixp2400_tx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 77,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x00, 0x07, 0x00,
21 0xd8, 0x03, 0x00, 0x00, 0x11,
22 0x3c, 0x40, 0x00, 0x04, 0xe0,
23 0x81, 0xf2, 0x02, 0x01, 0x00,
24 0xd8, 0x00, 0x80, 0x01, 0x00,
25 0xb0, 0x08, 0x06, 0x00, 0x00,
26 0xf0, 0x00, 0x0c, 0x00, 0x80,
27 0xb4, 0x49, 0x02, 0x03, 0x7f,
28 0xf0, 0x00, 0x02, 0x83, 0x00,
29 0xfc, 0x10, 0xac, 0x23, 0x08,
30 0xfc, 0x10, 0xac, 0x43, 0x10,
31 0xfc, 0x10, 0xac, 0x63, 0x18,
32 0xe0, 0x00, 0x00, 0x00, 0x02,
33 0xa0, 0x30, 0x02, 0x80, 0x00,
34 0xd8, 0x06, 0x00, 0x01, 0x01,
35 0x19, 0x40, 0x00, 0x04, 0x28,
36 0xb0, 0x0a, 0x06, 0x00, 0x00,
37 0xd8, 0x03, 0xc0, 0x01, 0x00,
38 0x00, 0x44, 0x00, 0x80, 0x80,
39 0xa0, 0x09, 0x06, 0x00, 0x00,
40 0xb0, 0x0b, 0x06, 0x04, 0x00,
41 0xd8, 0x13, 0x00, 0x01, 0x00,
42 0xb0, 0x0c, 0x06, 0x08, 0x00,
43 0xf0, 0x00, 0x0c, 0x00, 0xa0,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0xa0, 0x00, 0x12, 0x40, 0x00,
46 0xb0, 0xc9, 0x02, 0x43, 0x01,
47 0xb4, 0x49, 0x02, 0x43, 0x7f,
48 0xb0, 0x00, 0x22, 0x80, 0x00,
49 0xf0, 0x00, 0x02, 0x83, 0x00,
50 0xf0, 0x00, 0x0c, 0x04, 0x02,
51 0xb0, 0x40, 0x6c, 0x00, 0xa0,
52 0xd8, 0x08, 0x80, 0x01, 0x01,
53 0xaa, 0x00, 0x2c, 0x08, 0x02,
54 0xa0, 0xc0, 0x30, 0x18, 0x90,
55 0xa0, 0x00, 0x43, 0x00, 0x00,
56 0xba, 0xc0, 0x32, 0xc0, 0xa0,
57 0xaa, 0xb0, 0x00, 0x0f, 0x40,
58 0xd8, 0x0a, 0x80, 0x01, 0x04,
59 0xb0, 0x0a, 0x00, 0x08, 0x00,
60 0xf0, 0x00, 0x00, 0x0f, 0x40,
61 0xa4, 0x00, 0x2c, 0x08, 0x02,
62 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
63 0xe0, 0x00, 0x00, 0x00, 0x04,
64 0xd8, 0x0c, 0x80, 0x00, 0x18,
65 0x3c, 0x40, 0x00, 0x04, 0xe0,
66 0xba, 0x80, 0x42, 0x01, 0x80,
67 0xb4, 0x40, 0x40, 0x13, 0xff,
68 0xaa, 0x88, 0x00, 0x10, 0x80,
69 0xb0, 0x08, 0x06, 0x00, 0x00,
70 0xaa, 0xf0, 0x0d, 0x80, 0x80,
71 0xd8, 0x0b, 0x40, 0x01, 0x05,
72 0xa0, 0x88, 0x0c, 0x04, 0x80,
73 0xfc, 0x10, 0xae, 0x43, 0x90,
74 0xba, 0xc0, 0x50, 0x0f, 0x01,
75 0x9a, 0x30, 0x50, 0x15, 0x30,
76 0x9a, 0xb0, 0x50, 0x16, 0x30,
77 0x9b, 0x50, 0x58, 0x16, 0x01,
78 0x8a, 0xe2, 0x08, 0x16, 0x21,
79 0x6b, 0x4e, 0x00, 0x83, 0x03,
80 0xe0, 0x00, 0x00, 0x00, 0x30,
81 0x9a, 0x80, 0x70, 0x0e, 0x04,
82 0x8b, 0x88, 0x08, 0x1e, 0x02,
83 0xf0, 0x00, 0x0c, 0x01, 0x81,
84 0xf0, 0x01, 0x80, 0x1f, 0x00,
85 0x9b, 0xd0, 0x78, 0x1e, 0x01,
86 0x3d, 0x42, 0x00, 0x1c, 0x20,
87 0xe0, 0x00, 0x00, 0x00, 0x08,
88 0xfc, 0x10, 0xae, 0x63, 0x98,
89 0xa4, 0x30, 0x0c, 0x04, 0x02,
90 0xd8, 0x03, 0x00, 0x01, 0x00,
91 0xd8, 0x11, 0xc1, 0x42, 0x14,
92 0xa0, 0x18, 0x00, 0x08, 0x00,
93 0x1a, 0x40, 0x00, 0x04, 0x2c,
94 0x33, 0x00, 0x01, 0x2f, 0x20,
95 0xd8, 0x03, 0x00, 0x00, 0x18,
96 0xe0, 0x00, 0x02, 0x00, 0x00,
97 }
98};
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
new file mode 100644
index 000000000000..09f03f493bea
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -0,0 +1,421 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/init.h>
18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h>
20#include <asm/mach-types.h>
21#include <asm/io.h>
22#include "ixp2400_rx.ucode"
23#include "ixp2400_tx.ucode"
24#include "ixpdev_priv.h"
25#include "ixpdev.h"
26
27#define DRV_MODULE_VERSION "0.2"
28
29static int nds_count;
30static struct net_device **nds;
31static int nds_open;
32static void (*set_port_admin_status)(int port, int up);
33
34static struct ixpdev_rx_desc * const rx_desc =
35 (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
36static struct ixpdev_tx_desc * const tx_desc =
37 (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
38static int tx_pointer;
39
40
41static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
42{
43 struct ixpdev_priv *ip = netdev_priv(dev);
44 struct ixpdev_tx_desc *desc;
45 int entry;
46
47 if (unlikely(skb->len > PAGE_SIZE)) {
48 /* @@@ Count drops. */
49 dev_kfree_skb(skb);
50 return 0;
51 }
52
53 entry = tx_pointer;
54 tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
55
56 desc = tx_desc + entry;
57 desc->pkt_length = skb->len;
58 desc->channel = ip->channel;
59
60 skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
61 dev_kfree_skb(skb);
62
63 ixp2000_reg_write(RING_TX_PENDING,
64 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
65
66 dev->trans_start = jiffies;
67
68 local_irq_disable();
69 ip->tx_queue_entries++;
70 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
71 netif_stop_queue(dev);
72 local_irq_enable();
73
74 return 0;
75}
76
77
78static int ixpdev_rx(struct net_device *dev, int *budget)
79{
80 while (*budget > 0) {
81 struct ixpdev_rx_desc *desc;
82 struct sk_buff *skb;
83 void *buf;
84 u32 _desc;
85
86 _desc = ixp2000_reg_read(RING_RX_DONE);
87 if (_desc == 0)
88 return 0;
89
90 desc = rx_desc +
91 ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
92 buf = phys_to_virt(desc->buf_addr);
93
94 if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
95 printk(KERN_ERR "ixp2000: rx err, length %d\n",
96 desc->pkt_length);
97 goto err;
98 }
99
100 if (desc->channel < 0 || desc->channel >= nds_count) {
101 printk(KERN_ERR "ixp2000: rx err, channel %d\n",
102 desc->channel);
103 goto err;
104 }
105
106 /* @@@ Make FCS stripping configurable. */
107 desc->pkt_length -= 4;
108
109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err;
111
112 skb = dev_alloc_skb(desc->pkt_length + 2);
113 if (likely(skb != NULL)) {
114 skb->dev = nds[desc->channel];
115 skb_reserve(skb, 2);
116 eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
117 skb_put(skb, desc->pkt_length);
118 skb->protocol = eth_type_trans(skb, skb->dev);
119
120 skb->dev->last_rx = jiffies;
121
122 netif_receive_skb(skb);
123 }
124
125err:
126 ixp2000_reg_write(RING_RX_PENDING, _desc);
127 dev->quota--;
128 (*budget)--;
129 }
130
131 return 1;
132}
133
134/* dev always points to nds[0]. */
135static int ixpdev_poll(struct net_device *dev, int *budget)
136{
137 /* @@@ Have to stop polling when nds[0] is administratively
138 * downed while we are polling. */
139 do {
140 ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
141
142 if (ixpdev_rx(dev, budget))
143 return 1;
144 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
145
146 netif_rx_complete(dev);
147 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
148
149 return 0;
150}
151
152static void ixpdev_tx_complete(void)
153{
154 int channel;
155 u32 wake;
156
157 wake = 0;
158 while (1) {
159 struct ixpdev_priv *ip;
160 u32 desc;
161 int entry;
162
163 desc = ixp2000_reg_read(RING_TX_DONE);
164 if (desc == 0)
165 break;
166
167 /* @@@ Check whether entries come back in order. */
168 entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
169 channel = tx_desc[entry].channel;
170
171 if (channel < 0 || channel >= nds_count) {
172 printk(KERN_ERR "ixp2000: txcomp channel index "
173 "out of bounds (%d, %.8i, %d)\n",
174 channel, (unsigned int)desc, entry);
175 continue;
176 }
177
178 ip = netdev_priv(nds[channel]);
179 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
180 wake |= 1 << channel;
181 ip->tx_queue_entries--;
182 }
183
184 for (channel = 0; wake != 0; channel++) {
185 if (wake & (1 << channel)) {
186 netif_wake_queue(nds[channel]);
187 wake &= ~(1 << channel);
188 }
189 }
190}
191
192static irqreturn_t ixpdev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
193{
194 u32 status;
195
196 status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
197 if (status == 0)
198 return IRQ_NONE;
199
200 /*
201 * Any of the eight receive units signaled RX?
202 */
203 if (status & 0x00ff) {
204 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
205 if (likely(__netif_rx_schedule_prep(nds[0]))) {
206 __netif_rx_schedule(nds[0]);
207 } else {
208 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
209 }
210 }
211
212 /*
213 * Any of the eight transmit units signaled TXdone?
214 */
215 if (status & 0xff00) {
216 ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
217 ixpdev_tx_complete();
218 }
219
220 return IRQ_HANDLED;
221}
222
223#ifdef CONFIG_NET_POLL_CONTROLLER
224static void ixpdev_poll_controller(struct net_device *dev)
225{
226 disable_irq(IRQ_IXP2000_THDA0);
227 ixpdev_interrupt(IRQ_IXP2000_THDA0, dev, NULL);
228 enable_irq(IRQ_IXP2000_THDA0);
229}
230#endif
231
232static int ixpdev_open(struct net_device *dev)
233{
234 struct ixpdev_priv *ip = netdev_priv(dev);
235 int err;
236
237 if (!nds_open++) {
238 err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
239 SA_SHIRQ, "ixp2000_eth", nds);
240 if (err) {
241 nds_open--;
242 return err;
243 }
244
245 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
246 }
247
248 set_port_admin_status(ip->channel, 1);
249 netif_start_queue(dev);
250
251 return 0;
252}
253
254static int ixpdev_close(struct net_device *dev)
255{
256 struct ixpdev_priv *ip = netdev_priv(dev);
257
258 netif_stop_queue(dev);
259 set_port_admin_status(ip->channel, 0);
260
261 if (!--nds_open) {
262 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
263 free_irq(IRQ_IXP2000_THDA0, nds);
264 }
265
266 return 0;
267}
268
269struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
270{
271 struct net_device *dev;
272 struct ixpdev_priv *ip;
273
274 dev = alloc_etherdev(sizeof_priv);
275 if (dev == NULL)
276 return NULL;
277
278 dev->hard_start_xmit = ixpdev_xmit;
279 dev->poll = ixpdev_poll;
280 dev->open = ixpdev_open;
281 dev->stop = ixpdev_close;
282#ifdef CONFIG_NET_POLL_CONTROLLER
283 dev->poll_controller = ixpdev_poll_controller;
284#endif
285
286 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
287 dev->weight = 64;
288
289 ip = netdev_priv(dev);
290 ip->channel = channel;
291 ip->tx_queue_entries = 0;
292
293 return dev;
294}
295
296int ixpdev_init(int __nds_count, struct net_device **__nds,
297 void (*__set_port_admin_status)(int port, int up))
298{
299 int i;
300 int err;
301
302 if (RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192) {
303 static void __too_many_rx_or_tx_buffers(void);
304 __too_many_rx_or_tx_buffers();
305 }
306
307 printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
308
309 nds_count = __nds_count;
310 nds = __nds;
311 set_port_admin_status = __set_port_admin_status;
312
313 for (i = 0; i < RX_BUF_COUNT; i++) {
314 void *buf;
315
316 buf = (void *)get_zeroed_page(GFP_KERNEL);
317 if (buf == NULL) {
318 err = -ENOMEM;
319 while (--i >= 0)
320 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
321 goto err_out;
322 }
323 rx_desc[i].buf_addr = virt_to_phys(buf);
324 rx_desc[i].buf_length = PAGE_SIZE;
325 }
326
327 /* @@@ Maybe we shouldn't be preallocating TX buffers. */
328 for (i = 0; i < TX_BUF_COUNT; i++) {
329 void *buf;
330
331 buf = (void *)get_zeroed_page(GFP_KERNEL);
332 if (buf == NULL) {
333 err = -ENOMEM;
334 while (--i >= 0)
335 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
336 goto err_free_rx;
337 }
338 tx_desc[i].buf_addr = virt_to_phys(buf);
339 }
340
341 /* 256 entries, ring status set means 'empty', base address 0x0000. */
342 ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
343 ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
344 ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
345
346 /* 256 entries, ring status set means 'full', base address 0x0400. */
347 ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
348 ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
349 ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
350
351 for (i = 0; i < RX_BUF_COUNT; i++) {
352 ixp2000_reg_write(RING_RX_PENDING,
353 RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
354 }
355
356 ixp2000_uengine_load(0, &ixp2400_rx);
357 ixp2000_uengine_start_contexts(0, 0xff);
358
359 /* 256 entries, ring status set means 'empty', base address 0x0800. */
360 ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
361 ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
362 ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
363
364 /* 256 entries, ring status set means 'full', base address 0x0c00. */
365 ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
366 ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
367 ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
368
369 ixp2000_uengine_load(1, &ixp2400_tx);
370 ixp2000_uengine_start_contexts(1, 0xff);
371
372 for (i = 0; i < nds_count; i++) {
373 err = register_netdev(nds[i]);
374 if (err) {
375 while (--i >= 0)
376 unregister_netdev(nds[i]);
377 goto err_free_tx;
378 }
379 }
380
381 for (i = 0; i < nds_count; i++) {
382 printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), "
383 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", nds[i]->name, i,
384 nds[i]->dev_addr[0], nds[i]->dev_addr[1],
385 nds[i]->dev_addr[2], nds[i]->dev_addr[3],
386 nds[i]->dev_addr[4], nds[i]->dev_addr[5]);
387 }
388
389 return 0;
390
391err_free_tx:
392 for (i = 0; i < TX_BUF_COUNT; i++)
393 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
394
395err_free_rx:
396 for (i = 0; i < RX_BUF_COUNT; i++)
397 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
398
399err_out:
400 return err;
401}
402
403void ixpdev_deinit(void)
404{
405 int i;
406
407 /* @@@ Flush out pending packets. */
408
409 for (i = 0; i < nds_count; i++)
410 unregister_netdev(nds[i]);
411
412 ixp2000_uengine_stop_contexts(1, 0xff);
413 ixp2000_uengine_stop_contexts(0, 0xff);
414 ixp2000_uengine_reset(0x3);
415
416 for (i = 0; i < TX_BUF_COUNT; i++)
417 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
418
419 for (i = 0; i < RX_BUF_COUNT; i++)
420 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
421}
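Note that the scratch rings carry SRAM byte offsets of descriptors rather than indices: ixpdev_rx() and ixpdev_tx_complete() convert a ring entry back into a descriptor by subtracting the descriptor base and dividing by the descriptor size. As a worked example with the 16-byte descriptors from ixpdev_priv.h, an rx_done entry of 0x1020 maps to rx_desc[2], and writing the same value back to RING_RX_PENDING recycles that buffer. The helper below is hypothetical, just the open-coded conversion given a name:

/* Hypothetical helper equivalent to the conversion open-coded in ixpdev_rx(). */
static inline struct ixpdev_rx_desc *rx_ring_entry_to_desc(u32 ring_entry)
{
	/* e.g. (0x1020 - RX_BUF_DESC_BASE) / 16 == 2  ->  &rx_desc[2] */
	return rx_desc + (ring_entry - RX_BUF_DESC_BASE) /
			 sizeof(struct ixpdev_rx_desc);
}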
diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ixp2000/ixpdev.h
new file mode 100644
index 000000000000..bd686cb63058
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev.h
@@ -0,0 +1,27 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_H
13#define __IXPDEV_H
14
15struct ixpdev_priv
16{
17 int channel;
18 int tx_queue_entries;
19};
20
21struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
22int ixpdev_init(int num_ports, struct net_device **nds,
23 void (*set_port_admin_status)(int port, int up));
24void ixpdev_deinit(void);
25
26
27#endif
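A hedged sketch of how a board-support file might consume this interface; the port count, the admin-status callback and the (missing) error unwinding are illustrative, not taken from the ENP2611 code in this series, and the snippet only makes sense in the driver's own kernel context.

#define EXAMPLE_NR_PORTS	3	/* invented */

static struct net_device *example_nds[EXAMPLE_NR_PORTS];

static void example_set_port_admin_status(int port, int up)
{
	/* Board-specific: bring the PHY/MAC for 'port' up or down. */
}

static int __init example_board_init(void)
{
	int i;

	for (i = 0; i < EXAMPLE_NR_PORTS; i++) {
		example_nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
		if (example_nds[i] == NULL)
			return -ENOMEM;	/* real code would unwind the earlier ports */
		/* Fill in example_nds[i]->dev_addr from board storage here. */
	}

	/* Registers the netdevices and starts the RX/TX microengines. */
	return ixpdev_init(EXAMPLE_NR_PORTS, example_nds,
			   example_set_port_admin_status);
}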
diff --git a/drivers/net/ixp2000/ixpdev_priv.h b/drivers/net/ixp2000/ixpdev_priv.h
new file mode 100644
index 000000000000..86aa08ea0c33
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev_priv.h
@@ -0,0 +1,57 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_PRIV_H
13#define __IXPDEV_PRIV_H
14
15#define RX_BUF_DESC_BASE 0x00001000
16#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
17#define TX_BUF_DESC_BASE 0x00002000
18#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
19#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
20
21#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
22#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
23#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
24#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
25
26#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
27#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
28#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
29#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
30#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
31#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
32#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
33#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
34#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
35#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
36#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
37#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
38#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
39
40struct ixpdev_rx_desc
41{
42 u32 buf_addr;
43 u32 buf_length;
44 u32 channel;
45 u32 pkt_length;
46};
47
48struct ixpdev_tx_desc
49{
50 u32 buf_addr;
51 u32 pkt_length;
52 u32 channel;
53 u32 unused;
54};
55
56
57#endif
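A worked check of the descriptor-count arithmetic above; it assumes 4 KiB pages and the 16-byte descriptors defined in this header, and compiles stand-alone.

#include <stdio.h>

struct ixpdev_rx_desc { unsigned int buf_addr, buf_length, channel, pkt_length; };
struct ixpdev_tx_desc { unsigned int buf_addr, pkt_length, channel, unused; };

#define PAGE_SIZE	4096UL		/* assumption */

int main(void)
{
	unsigned long rx = (3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc));
	unsigned long tx = (3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc));

	/* Prints 192, 192 and 48 under the assumptions above. */
	printf("RX_BUF_COUNT=%lu TX_BUF_COUNT=%lu per-chan=%lu\n",
	       rx, tx, tx / 4);
	return 0;
}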
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
new file mode 100644
index 000000000000..5c7ab7564053
--- /dev/null
+++ b/drivers/net/ixp2000/pm3386.c
@@ -0,0 +1,334 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <linux/netdevice.h>
16#include <asm/io.h>
17#include "pm3386.h"
18
19/*
20 * Read from register 'reg' of PM3386 device 'pm'.
21 */
22static u16 pm3386_reg_read(int pm, int reg)
23{
24 void *_reg;
25 u16 value;
26
27 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
28 if (pm == 1)
29 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
30
31 value = *((volatile u16 *)(_reg + (reg << 1)));
32
33// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
34
35 return value;
36}
37
38/*
39 * Write to register 'reg' of PM3386 device 'pm', and perform
40 * a readback from the identification register.
41 */
42static void pm3386_reg_write(int pm, int reg, u16 value)
43{
44 void *_reg;
45 u16 dummy;
46
47// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
48
49 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
50 if (pm == 1)
51 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
52
53 *((volatile u16 *)(_reg + (reg << 1))) = value;
54
55 dummy = *((volatile u16 *)_reg);
56 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
57}
58
59/*
60 * Read from port 'port' register 'reg', where the registers
61 * for the different ports are 'spacing' registers apart.
62 */
63static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
64{
65 int reg;
66
67 reg = _reg;
68 if (port & 1)
69 reg += spacing;
70
71 return pm3386_reg_read(port >> 1, reg);
72}
73
74/*
75 * Write to port 'port' register 'reg', where the registers
76 * for the different ports are 'spacing' registers apart.
77 */
78static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
79{
80 int reg;
81
82 reg = _reg;
83 if (port & 1)
84 reg += spacing;
85
86 pm3386_reg_write(port >> 1, reg, value);
87}
88
89
90void pm3386_reset(void)
91{
92 u8 mac[3][6];
93
94 /* Save programmed MAC addresses. */
95 pm3386_get_mac(0, mac[0]);
96 pm3386_get_mac(1, mac[1]);
97 pm3386_get_mac(2, mac[2]);
98
99 /* Assert analog and digital reset. */
100 pm3386_reg_write(0, 0x002, 0x0060);
101 pm3386_reg_write(1, 0x002, 0x0060);
102 mdelay(1);
103
104 /* Deassert analog reset. */
105 pm3386_reg_write(0, 0x002, 0x0062);
106 pm3386_reg_write(1, 0x002, 0x0062);
107 mdelay(10);
108
109 /* Deassert digital reset. */
110 pm3386_reg_write(0, 0x002, 0x0063);
111 pm3386_reg_write(1, 0x002, 0x0063);
112 mdelay(10);
113
114 /* Restore programmed MAC addresses. */
115 pm3386_set_mac(0, mac[0]);
116 pm3386_set_mac(1, mac[1]);
117 pm3386_set_mac(2, mac[2]);
118
119 /* Disable carrier on all ports. */
120 pm3386_set_carrier(0, 0);
121 pm3386_set_carrier(1, 0);
122 pm3386_set_carrier(2, 0);
123}
124
125static u16 swaph(u16 x)
126{
127 return ((x << 8) | (x >> 8)) & 0xffff;
128}
129
130void pm3386_init_port(int port)
131{
132 int pm = port >> 1;
133
134 /*
135 * Work around ENP2611 bootloader programming MAC address
136 * in reverse.
137 */
138 if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
139 (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
140 u16 temp[3];
141
142 temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
143 temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
144 temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
145 pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
146 pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
147 pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
148 }
149
150 /*
151 * Initialise narrowbanding mode. See application note 2010486
152 * for more information. (@@@ We also need to issue a reset
153 * when ROOL or DOOL are detected.)
154 */
155 pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
156 udelay(500);
157 pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
158
159 /*
160 * SPI-3 ingress block. Set 64 bytes SPI-3 burst size
161 * towards SPI-3 bridge.
162 */
163 pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
164
165 /*
166 * Enable ingress protocol checking, and soft reset the
167 * SPI-3 ingress block.
168 */
169 pm3386_reg_write(pm, 0x103, 0x0003);
170 while (!(pm3386_reg_read(pm, 0x103) & 0x80))
171 ;
172
173 /*
174 * SPI-3 egress block. Gather 12288 bytes of the current
175 * packet in the TX fifo before initiating transmit on the
176 * SERDES interface. (Prevents TX underflows.)
177 */
178 pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
179
180 /*
181 * Enforce odd parity from the SPI-3 bridge, and soft reset
182 * the SPI-3 egress block.
183 */
184 pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
185 while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
186 ;
187
188 /*
189 * EGMAC block. Set this channel to reject long preambles,
190 * not send or transmit PAUSE frames, enable preamble checking,
191 * disable frame length checking, enable FCS appending, enable
192 * TX frame padding.
193 */
194 pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
195
196 /*
197 * Soft reset the EGMAC block.
198 */
199 pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
200 pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
201
202 /*
203 * Auto-sense autonegotiation status.
204 */
205 pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
206
207 /*
208 * Allow reception of jumbo frames.
209 */
210 pm3386_port_reg_write(port, 0x310, 0x100, 9018);
211
212 /*
213 * Allow transmission of jumbo frames.
214 */
215 pm3386_port_reg_write(port, 0x336, 0x100, 9018);
216
217 /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
218
219 /*
220 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
221 */
222 pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
223
224 /*
225 * Enable and restart autonegotiation.
226 */
227 pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
228 pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
229}
230
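A stand-alone check of swaph() and the reversed-MAC fixup in pm3386_init_port(); the example MAC (00:00:50:12:34:56) is invented and merely matches the detection heuristic used above.

#include <stdio.h>

static unsigned short swaph(unsigned short x)
{
	return ((x << 8) | (x >> 8)) & 0xffff;
}

int main(void)
{
	/* Registers 0x308/0x309/0x30a as a bootloader that programmed
	 * the MAC in reverse byte order would leave them. */
	unsigned short reg[3] = { 0x3456, 0x5012, 0x0000 };

	if (reg[2] == 0x0000 && (reg[1] & 0xff00) == 0x5000) {
		unsigned short t0 = reg[0], t1 = reg[1], t2 = reg[2];

		reg[0] = swaph(t2);
		reg[1] = swaph(t1);
		reg[2] = swaph(t0);
	}

	/* Prints 00:00:50:12:34:56. */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       reg[0] & 0xff, reg[0] >> 8,
	       reg[1] & 0xff, reg[1] >> 8,
	       reg[2] & 0xff, reg[2] >> 8);
	return 0;
}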
231void pm3386_get_mac(int port, u8 *mac)
232{
233 u16 temp;
234
235 temp = pm3386_port_reg_read(port, 0x308, 0x100);
236 mac[0] = temp & 0xff;
237 mac[1] = (temp >> 8) & 0xff;
238
239 temp = pm3386_port_reg_read(port, 0x309, 0x100);
240 mac[2] = temp & 0xff;
241 mac[3] = (temp >> 8) & 0xff;
242
243 temp = pm3386_port_reg_read(port, 0x30a, 0x100);
244 mac[4] = temp & 0xff;
245 mac[5] = (temp >> 8) & 0xff;
246}
247
248void pm3386_set_mac(int port, u8 *mac)
249{
250 pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
251 pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
252 pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
253}
254
255static u32 pm3386_get_stat(int port, u16 base)
256{
257 u32 value;
258
259 value = pm3386_port_reg_read(port, base, 0x100);
260 value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
261
262 return value;
263}
264
265void pm3386_get_stats(int port, struct net_device_stats *stats)
266{
267 /*
268 * Snapshot statistics counters.
269 */
270 pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
271 while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
272 ;
273
274 memset(stats, 0, sizeof(*stats));
275
276 stats->rx_packets = pm3386_get_stat(port, 0x510);
277 stats->tx_packets = pm3386_get_stat(port, 0x590);
278 stats->rx_bytes = pm3386_get_stat(port, 0x514);
279 stats->tx_bytes = pm3386_get_stat(port, 0x594);
280 /* @@@ Add other stats. */
281}
282
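The hardware exposes each 32-bit counter as a low/high pair of 16-bit registers; a trivial stand-alone check of the stitching pm3386_get_stat() performs (the register values are invented).

#include <stdio.h>
#include <stdint.h>

static uint32_t stitch(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
	/* e.g. a packet counter of 0x0002a0f3 read back as two halves */
	printf("0x%08x\n", stitch(0xa0f3, 0x0002));
	return 0;
}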
283void pm3386_set_carrier(int port, int state)
284{
285 pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
286}
287
288int pm3386_is_link_up(int port)
289{
290 u16 temp;
291
292 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
293 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
294
295 return !!(temp & 0x0002);
296}
297
298void pm3386_enable_rx(int port)
299{
300 u16 temp;
301
302 temp = pm3386_port_reg_read(port, 0x303, 0x100);
303 temp |= 0x1000;
304 pm3386_port_reg_write(port, 0x303, 0x100, temp);
305}
306
307void pm3386_disable_rx(int port)
308{
309 u16 temp;
310
311 temp = pm3386_port_reg_read(port, 0x303, 0x100);
312 temp &= 0xefff;
313 pm3386_port_reg_write(port, 0x303, 0x100, temp);
314}
315
316void pm3386_enable_tx(int port)
317{
318 u16 temp;
319
320 temp = pm3386_port_reg_read(port, 0x303, 0x100);
321 temp |= 0x4000;
322 pm3386_port_reg_write(port, 0x303, 0x100, temp);
323}
324
325void pm3386_disable_tx(int port)
326{
327 u16 temp;
328
329 temp = pm3386_port_reg_read(port, 0x303, 0x100);
330 temp &= 0xbfff;
331 pm3386_port_reg_write(port, 0x303, 0x100, temp);
332}
333
334MODULE_LICENSE("GPL");
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
new file mode 100644
index 000000000000..fe92bb056ac4
--- /dev/null
+++ b/drivers/net/ixp2000/pm3386.h
@@ -0,0 +1,28 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __PM3386_H
13#define __PM3386_H
14
15void pm3386_reset(void);
16void pm3386_init_port(int port);
17void pm3386_get_mac(int port, u8 *mac);
18void pm3386_set_mac(int port, u8 *mac);
19void pm3386_get_stats(int port, struct net_device_stats *stats);
20void pm3386_set_carrier(int port, int state);
21int pm3386_is_link_up(int port);
22void pm3386_enable_rx(int port);
23void pm3386_disable_rx(int port);
24void pm3386_enable_tx(int port);
25void pm3386_disable_tx(int port);
26
27
28#endif
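One plausible bring-up order for a single port using only the helpers declared above; this is a hedged sketch, not the actual ENP2611 sequence from this series.

static void example_bring_up_port(int port, u8 *mac)
{
	pm3386_set_mac(port, mac);
	pm3386_init_port(port);

	pm3386_enable_rx(port);
	pm3386_enable_tx(port);

	/* Reflect the current link state on the carrier output. */
	pm3386_set_carrier(port, pm3386_is_link_up(port));
}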
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index b039bd89ceb9..272d331d29cd 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -296,7 +296,7 @@ static int __init jazz_sonic_init_module(void)
296 } 296 }
297 297
298 jazz_sonic_device = platform_device_alloc(jazz_sonic_string, 0); 298 jazz_sonic_device = platform_device_alloc(jazz_sonic_string, 0);
299 if (!jazz_sonnic_device) 299 if (!jazz_sonic_device)
300 goto out_unregister; 300 goto out_unregister;
301 301
302 if (platform_device_add(jazz_sonic_device)) { 302 if (platform_device_add(jazz_sonic_device)) {
@@ -307,7 +307,7 @@ static int __init jazz_sonic_init_module(void)
307 return 0; 307 return 0;
308 308
309out_unregister: 309out_unregister:
310 driver_unregister(&jazz_sonic_driver); 310 platform_driver_unregister(&jazz_sonic_driver);
311 311
312 return -ENOMEM; 312 return -ENOMEM;
313} 313}
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
index 878535953cb1..026c732024c9 100644
--- a/drivers/net/mipsnet.h
+++ b/drivers/net/mipsnet.h
@@ -1,28 +1,8 @@
1// 1/*
2// <COPYRIGHT CLASS="1B" YEAR="2005"> 2 * This file is subject to the terms and conditions of the GNU General Public
3// Unpublished work (c) MIPS Technologies, Inc. All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4// Unpublished rights reserved under the copyright laws of the U.S.A. and 4 * for more details.
5// other countries. 5 */
6//
7// PROPRIETARY / SECRET CONFIDENTIAL INFORMATION OF MIPS TECHNOLOGIES, INC.
8// FOR INTERNAL USE ONLY.
9//
10// Under no circumstances (contract or otherwise) may this information be
11// disclosed to, or copied, modified or used by anyone other than employees
12// or contractors of MIPS Technologies having a need to know.
13// </COPYRIGHT>
14//
15//++
16// File: MIPS_Net.h
17//
18// Description:
19// The definition of the emulated MIPSNET device's interface.
20//
21// Notes: This include file needs to work from a Linux device drivers.
22//
23//--
24//
25
26#ifndef __MIPSNET_H 6#ifndef __MIPSNET_H
27#define __MIPSNET_H 7#define __MIPSNET_H
28 8
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index f857ae94d261..b0c3b6ab6263 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -115,6 +115,7 @@
115#include <linux/ethtool.h> 115#include <linux/ethtool.h>
116#include <linux/timer.h> 116#include <linux/timer.h>
117#include <linux/if_vlan.h> 117#include <linux/if_vlan.h>
118#include <linux/rtnetlink.h>
118 119
119#include <asm/io.h> 120#include <asm/io.h>
120#include <asm/uaccess.h> 121#include <asm/uaccess.h>
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 384a736a0d2f..356f50909222 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -131,10 +131,9 @@ typedef struct local_info_t {
131 u_short tx_queue_len; 131 u_short tx_queue_len;
132 cardtype_t cardtype; 132 cardtype_t cardtype;
133 u_short sent; 133 u_short sent;
134 u_char mc_filter[8];
135} local_info_t; 134} local_info_t;
136 135
137#define MC_FILTERBREAK 8 136#define MC_FILTERBREAK 64
138 137
139/*====================================================================*/ 138/*====================================================================*/
140/* 139/*
@@ -1005,15 +1004,8 @@ static void fjn_reset(struct net_device *dev)
1005 for (i = 0; i < 6; i++) 1004 for (i = 0; i < 6; i++)
1006 outb(dev->dev_addr[i], ioaddr + NODE_ID + i); 1005 outb(dev->dev_addr[i], ioaddr + NODE_ID + i);
1007 1006
1008 /* Switch to bank 1 */ 1007 /* (re)initialize the multicast table */
1009 if (lp->cardtype == MBH10302) 1008 set_rx_mode(dev);
1010 outb(BANK_1, ioaddr + CONFIG_1);
1011 else
1012 outb(BANK_1U, ioaddr + CONFIG_1);
1013
1014 /* set the multicast table to accept none. */
1015 for (i = 0; i < 8; i++)
1016 outb(0x00, ioaddr + MAR_ADR + i);
1017 1009
1018 /* Switch to bank 2 (runtime mode) */ 1010 /* Switch to bank 2 (runtime mode) */
1019 if (lp->cardtype == MBH10302) 1011 if (lp->cardtype == MBH10302)
@@ -1264,11 +1256,11 @@ static struct net_device_stats *fjn_get_stats(struct net_device *dev)
1264static void set_rx_mode(struct net_device *dev) 1256static void set_rx_mode(struct net_device *dev)
1265{ 1257{
1266 kio_addr_t ioaddr = dev->base_addr; 1258 kio_addr_t ioaddr = dev->base_addr;
1267 struct local_info_t *lp = netdev_priv(dev);
1268 u_char mc_filter[8]; /* Multicast hash filter */ 1259 u_char mc_filter[8]; /* Multicast hash filter */
1269 u_long flags; 1260 u_long flags;
1270 int i; 1261 int i;
1271 1262
1263 int saved_bank;
1272 int saved_config_0 = inb(ioaddr + CONFIG_0); 1264 int saved_config_0 = inb(ioaddr + CONFIG_0);
1273 1265
1274 local_irq_save(flags); 1266 local_irq_save(flags);
@@ -1306,15 +1298,13 @@ static void set_rx_mode(struct net_device *dev)
1306 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 1298 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
1307 } 1299 }
1308 1300
1309 if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) { 1301 /* Switch to bank 1 and set the multicast table. */
1310 int saved_bank = inb(ioaddr + CONFIG_1); 1302 saved_bank = inb(ioaddr + CONFIG_1);
1311 /* Switch to bank 1 and set the multicast table. */ 1303 outb(0xe4, ioaddr + CONFIG_1);
1312 outb(0xe4, ioaddr + CONFIG_1); 1304
1313 for (i = 0; i < 8; i++) 1305 for (i = 0; i < 8; i++)
1314 outb(mc_filter[i], ioaddr + MAR_ADR + i); 1306 outb(mc_filter[i], ioaddr + MAR_ADR + i);
1315 memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter)); 1307 outb(saved_bank, ioaddr + CONFIG_1);
1316 outb(saved_bank, ioaddr + CONFIG_1);
1317 }
1318 1308
1319 outb(saved_config_0, ioaddr + CONFIG_0); 1309 outb(saved_config_0, ioaddr + CONFIG_0);
1320 1310
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index be319229f543..8f6cf8c896a4 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1251,12 +1251,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1251 1251
1252 if (memcmp(promaddr, dev->dev_addr, 6) 1252 if (memcmp(promaddr, dev->dev_addr, 6)
1253 || !is_valid_ether_addr(dev->dev_addr)) { 1253 || !is_valid_ether_addr(dev->dev_addr)) {
1254#ifndef __powerpc__
1255 if (is_valid_ether_addr(promaddr)) { 1254 if (is_valid_ether_addr(promaddr)) {
1256#else
1257 if (!is_valid_ether_addr(dev->dev_addr)
1258 && is_valid_ether_addr(promaddr)) {
1259#endif
1260 if (pcnet32_debug & NETIF_MSG_PROBE) { 1255 if (pcnet32_debug & NETIF_MSG_PROBE) {
1261 printk(" warning: CSR address invalid,\n"); 1256 printk(" warning: CSR address invalid,\n");
1262 printk(KERN_INFO " using instead PROM address of"); 1257 printk(KERN_INFO " using instead PROM address of");
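With the powerpc special case gone, every architecture now falls back to the PROM address whenever the CSR-provided address is invalid or disagrees with the PROM while the PROM address is valid. A stand-alone illustration of that selection, where valid_ether_addr() is a simplified stand-in for the kernel's is_valid_ether_addr() and the addresses are invented:

#include <stdio.h>
#include <string.h>

static int valid_ether_addr(const unsigned char *a)
{
	static const unsigned char zero[6];

	/* Reject multicast/broadcast (bit 0 of first octet) and all-zero. */
	return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
}

static void choose_addr(unsigned char *dev_addr, const unsigned char *promaddr)
{
	if ((memcmp(promaddr, dev_addr, 6) || !valid_ether_addr(dev_addr)) &&
	    valid_ether_addr(promaddr))
		memcpy(dev_addr, promaddr, 6);	/* fall back to the PROM address */
}

int main(void)
{
	unsigned char csr[6]  = { 0 };			/* invalid CSR address */
	unsigned char prom[6] = { 0x00, 0xe0, 0x4c, 0x01, 0x02, 0x03 };

	choose_addr(csr, prom);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       csr[0], csr[1], csr[2], csr[3], csr[4], csr[5]);
	return 0;
}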
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16bebe7a7ce1..7da0e3dd5fe3 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -38,6 +38,10 @@
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40 40
41MODULE_DESCRIPTION("PHY library");
42MODULE_AUTHOR("Andy Fleming");
43MODULE_LICENSE("GPL");
44
41static struct phy_driver genphy_driver; 45static struct phy_driver genphy_driver;
42extern int mdio_bus_init(void); 46extern int mdio_bus_init(void);
43extern void mdio_bus_exit(void); 47extern void mdio_bus_exit(void);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 50430f79f8cf..1c6d328165bb 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -524,9 +524,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
524 if (copy_from_user(&uprog, arg, sizeof(uprog))) 524 if (copy_from_user(&uprog, arg, sizeof(uprog)))
525 return -EFAULT; 525 return -EFAULT;
526 526
527 if (uprog.len > BPF_MAXINSNS)
528 return -EINVAL;
529
530 if (!uprog.len) { 527 if (!uprog.len) {
531 *p = NULL; 528 *p = NULL;
532 return 0; 529 return 0;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index a842ecc60a34..9369f811075d 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -85,7 +85,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
85static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); 85static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
86static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 86static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
87 87
88static struct proto_ops pppoe_ops; 88static const struct proto_ops pppoe_ops;
89static DEFINE_RWLOCK(pppoe_hash_lock); 89static DEFINE_RWLOCK(pppoe_hash_lock);
90 90
91static struct ppp_channel_ops pppoe_chan_ops; 91static struct ppp_channel_ops pppoe_chan_ops;
@@ -383,8 +383,6 @@ static int pppoe_rcv(struct sk_buff *skb,
383{ 383{
384 struct pppoe_hdr *ph; 384 struct pppoe_hdr *ph;
385 struct pppox_sock *po; 385 struct pppox_sock *po;
386 struct sock *sk;
387 int ret;
388 386
389 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 387 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
390 goto drop; 388 goto drop;
@@ -395,24 +393,8 @@ static int pppoe_rcv(struct sk_buff *skb,
395 ph = (struct pppoe_hdr *) skb->nh.raw; 393 ph = (struct pppoe_hdr *) skb->nh.raw;
396 394
397 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source); 395 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
398 if (!po) 396 if (po != NULL)
399 goto drop; 397 return sk_receive_skb(sk_pppox(po), skb);
400
401 sk = sk_pppox(po);
402 bh_lock_sock(sk);
403
404 /* Socket state is unknown, must put skb into backlog. */
405 if (sock_owned_by_user(sk) != 0) {
406 sk_add_backlog(sk, skb);
407 ret = NET_RX_SUCCESS;
408 } else {
409 ret = pppoe_rcv_core(sk, skb);
410 }
411
412 bh_unlock_sock(sk);
413 sock_put(sk);
414
415 return ret;
416drop: 398drop:
417 kfree_skb(skb); 399 kfree_skb(skb);
418out: 400out:
@@ -1081,9 +1063,7 @@ static int __init pppoe_proc_init(void)
1081static inline int pppoe_proc_init(void) { return 0; } 1063static inline int pppoe_proc_init(void) { return 0; }
1082#endif /* CONFIG_PROC_FS */ 1064#endif /* CONFIG_PROC_FS */
1083 1065
1084/* ->ioctl are set at pppox_create */ 1066static const struct proto_ops pppoe_ops = {
1085
1086static struct proto_ops pppoe_ops = {
1087 .family = AF_PPPOX, 1067 .family = AF_PPPOX,
1088 .owner = THIS_MODULE, 1068 .owner = THIS_MODULE,
1089 .release = pppoe_release, 1069 .release = pppoe_release,
@@ -1099,7 +1079,8 @@ static struct proto_ops pppoe_ops = {
1099 .getsockopt = sock_no_getsockopt, 1079 .getsockopt = sock_no_getsockopt,
1100 .sendmsg = pppoe_sendmsg, 1080 .sendmsg = pppoe_sendmsg,
1101 .recvmsg = pppoe_recvmsg, 1081 .recvmsg = pppoe_recvmsg,
1102 .mmap = sock_no_mmap 1082 .mmap = sock_no_mmap,
1083 .ioctl = pppox_ioctl,
1103}; 1084};
1104 1085
1105static struct pppox_proto pppoe_proto = { 1086static struct pppox_proto pppoe_proto = {
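The lines removed above open-coded the receive serialization that sk_receive_skb() now provides: take the socket lock from BH context, defer to the backlog if a process-context user owns the socket, otherwise process immediately, then drop the socket reference. A sketch of that pattern, mirroring the deleted code (illustrative only, not part of this patch):

static int deliver_to_sock(struct sock *sk, struct sk_buff *skb)
{
	int ret;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket busy in process context: defer to the backlog. */
		sk_add_backlog(sk, skb);
		ret = NET_RX_SUCCESS;
	} else {
		ret = pppoe_rcv_core(sk, skb);
	}
	bh_unlock_sock(sk);
	sock_put(sk);

	return ret;
}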
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 0c1e114527fb..9315046b3f55 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -68,8 +68,7 @@ EXPORT_SYMBOL(register_pppox_proto);
68EXPORT_SYMBOL(unregister_pppox_proto); 68EXPORT_SYMBOL(unregister_pppox_proto);
69EXPORT_SYMBOL(pppox_unbind_sock); 69EXPORT_SYMBOL(pppox_unbind_sock);
70 70
71static int pppox_ioctl(struct socket* sock, unsigned int cmd, 71int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
72 unsigned long arg)
73{ 72{
74 struct sock *sk = sock->sk; 73 struct sock *sk = sock->sk;
75 struct pppox_sock *po = pppox_sk(sk); 74 struct pppox_sock *po = pppox_sk(sk);
@@ -105,6 +104,7 @@ static int pppox_ioctl(struct socket* sock, unsigned int cmd,
105 return rc; 104 return rc;
106} 105}
107 106
107EXPORT_SYMBOL(pppox_ioctl);
108 108
109static int pppox_create(struct socket *sock, int protocol) 109static int pppox_create(struct socket *sock, int protocol)
110{ 110{
@@ -119,11 +119,7 @@ static int pppox_create(struct socket *sock, int protocol)
119 goto out; 119 goto out;
120 120
121 rc = pppox_protos[protocol]->create(sock); 121 rc = pppox_protos[protocol]->create(sock);
122 if (!rc) { 122
123 /* We get to set the ioctl handler. */
124 /* For everything else, pppox is just a shell. */
125 sock->ops->ioctl = pppox_ioctl;
126 }
127 module_put(pppox_protos[protocol]->owner); 123 module_put(pppox_protos[protocol]->owner);
128out: 124out:
129 return rc; 125 return rc;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e57df8dfe6b4..89c46787676c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -66,7 +66,7 @@
66#include "s2io.h" 66#include "s2io.h"
67#include "s2io-regs.h" 67#include "s2io-regs.h"
68 68
69#define DRV_VERSION "Version 2.0.9.3" 69#define DRV_VERSION "Version 2.0.9.4"
70 70
71/* S2io Driver name & version. */ 71/* S2io Driver name & version. */
72static char s2io_driver_name[] = "Neterion"; 72static char s2io_driver_name[] = "Neterion";
@@ -412,7 +412,7 @@ static int init_shared_mem(struct s2io_nic *nic)
412 config->tx_cfg[i].fifo_len - 1; 412 config->tx_cfg[i].fifo_len - 1;
413 mac_control->fifos[i].fifo_no = i; 413 mac_control->fifos[i].fifo_no = i;
414 mac_control->fifos[i].nic = nic; 414 mac_control->fifos[i].nic = nic;
415 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1; 415 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
416 416
417 for (j = 0; j < page_num; j++) { 417 for (j = 0; j < page_num; j++) {
418 int k = 0; 418 int k = 0;
@@ -459,6 +459,10 @@ static int init_shared_mem(struct s2io_nic *nic)
459 } 459 }
460 } 460 }
461 461
462 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
463 if (!nic->ufo_in_band_v)
464 return -ENOMEM;
465
462 /* Allocation and initialization of RXDs in Rings */ 466 /* Allocation and initialization of RXDs in Rings */
463 size = 0; 467 size = 0;
464 for (i = 0; i < config->rx_ring_num; i++) { 468 for (i = 0; i < config->rx_ring_num; i++) {
@@ -731,6 +735,8 @@ static void free_shared_mem(struct s2io_nic *nic)
731 mac_control->stats_mem, 735 mac_control->stats_mem,
732 mac_control->stats_mem_phy); 736 mac_control->stats_mem_phy);
733 } 737 }
738 if (nic->ufo_in_band_v)
739 kfree(nic->ufo_in_band_v);
734} 740}
735 741
736/** 742/**
@@ -2003,6 +2009,49 @@ static int start_nic(struct s2io_nic *nic)
2003 2009
2004 return SUCCESS; 2010 return SUCCESS;
2005} 2011}
2012/**
2013 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2014 */
2015static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2016{
2017 nic_t *nic = fifo_data->nic;
2018 struct sk_buff *skb;
2019 TxD_t *txds;
2020 u16 j, frg_cnt;
2021
2022 txds = txdlp;
2023 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2024 pci_unmap_single(nic->pdev, (dma_addr_t)
2025 txds->Buffer_Pointer, sizeof(u64),
2026 PCI_DMA_TODEVICE);
2027 txds++;
2028 }
2029
2030 skb = (struct sk_buff *) ((unsigned long)
2031 txds->Host_Control);
2032 if (!skb) {
2033 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2034 return NULL;
2035 }
2036 pci_unmap_single(nic->pdev, (dma_addr_t)
2037 txds->Buffer_Pointer,
2038 skb->len - skb->data_len,
2039 PCI_DMA_TODEVICE);
2040 frg_cnt = skb_shinfo(skb)->nr_frags;
2041 if (frg_cnt) {
2042 txds++;
2043 for (j = 0; j < frg_cnt; j++, txds++) {
2044 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2045 if (!txds->Buffer_Pointer)
2046 break;
2047 pci_unmap_page(nic->pdev, (dma_addr_t)
2048 txds->Buffer_Pointer,
2049 frag->size, PCI_DMA_TODEVICE);
2050 }
2051 }
2052 txdlp->Host_Control = 0;
2053 return(skb);
2054}
2006 2055
2007/** 2056/**
2008 * free_tx_buffers - Free all queued Tx buffers 2057 * free_tx_buffers - Free all queued Tx buffers
@@ -2020,7 +2069,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2020 int i, j; 2069 int i, j;
2021 mac_info_t *mac_control; 2070 mac_info_t *mac_control;
2022 struct config_param *config; 2071 struct config_param *config;
2023 int cnt = 0, frg_cnt; 2072 int cnt = 0;
2024 2073
2025 mac_control = &nic->mac_control; 2074 mac_control = &nic->mac_control;
2026 config = &nic->config; 2075 config = &nic->config;
@@ -2029,38 +2078,11 @@ static void free_tx_buffers(struct s2io_nic *nic)
2029 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2078 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2030 txdp = (TxD_t *) mac_control->fifos[i].list_info[j]. 2079 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2031 list_virt_addr; 2080 list_virt_addr;
2032 skb = 2081 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2033 (struct sk_buff *) ((unsigned long) txdp-> 2082 if (skb) {
2034 Host_Control); 2083 dev_kfree_skb(skb);
2035 if (skb == NULL) { 2084 cnt++;
2036 memset(txdp, 0, sizeof(TxD_t) *
2037 config->max_txds);
2038 continue;
2039 } 2085 }
2040 frg_cnt = skb_shinfo(skb)->nr_frags;
2041 pci_unmap_single(nic->pdev, (dma_addr_t)
2042 txdp->Buffer_Pointer,
2043 skb->len - skb->data_len,
2044 PCI_DMA_TODEVICE);
2045 if (frg_cnt) {
2046 TxD_t *temp;
2047 temp = txdp;
2048 txdp++;
2049 for (j = 0; j < frg_cnt; j++, txdp++) {
2050 skb_frag_t *frag =
2051 &skb_shinfo(skb)->frags[j];
2052 pci_unmap_page(nic->pdev,
2053 (dma_addr_t)
2054 txdp->
2055 Buffer_Pointer,
2056 frag->size,
2057 PCI_DMA_TODEVICE);
2058 }
2059 txdp = temp;
2060 }
2061 dev_kfree_skb(skb);
2062 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2063 cnt++;
2064 } 2086 }
2065 DBG_PRINT(INTR_DBG, 2087 DBG_PRINT(INTR_DBG,
2066 "%s:forcibly freeing %d skbs on FIFO%d\n", 2088 "%s:forcibly freeing %d skbs on FIFO%d\n",
@@ -2661,7 +2683,6 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2661 tx_curr_get_info_t get_info, put_info; 2683 tx_curr_get_info_t get_info, put_info;
2662 struct sk_buff *skb; 2684 struct sk_buff *skb;
2663 TxD_t *txdlp; 2685 TxD_t *txdlp;
2664 u16 j, frg_cnt;
2665 2686
2666 get_info = fifo_data->tx_curr_get_info; 2687 get_info = fifo_data->tx_curr_get_info;
2667 put_info = fifo_data->tx_curr_put_info; 2688 put_info = fifo_data->tx_curr_put_info;
@@ -2684,8 +2705,7 @@ to loss of link\n");
2684 } 2705 }
2685 } 2706 }
2686 2707
2687 skb = (struct sk_buff *) ((unsigned long) 2708 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2688 txdlp->Host_Control);
2689 if (skb == NULL) { 2709 if (skb == NULL) {
2690 DBG_PRINT(ERR_DBG, "%s: Null skb ", 2710 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2691 __FUNCTION__); 2711 __FUNCTION__);
@@ -2693,34 +2713,6 @@ to loss of link\n");
2693 return; 2713 return;
2694 } 2714 }
2695 2715
2696 frg_cnt = skb_shinfo(skb)->nr_frags;
2697 nic->tx_pkt_count++;
2698
2699 pci_unmap_single(nic->pdev, (dma_addr_t)
2700 txdlp->Buffer_Pointer,
2701 skb->len - skb->data_len,
2702 PCI_DMA_TODEVICE);
2703 if (frg_cnt) {
2704 TxD_t *temp;
2705 temp = txdlp;
2706 txdlp++;
2707 for (j = 0; j < frg_cnt; j++, txdlp++) {
2708 skb_frag_t *frag =
2709 &skb_shinfo(skb)->frags[j];
2710 if (!txdlp->Buffer_Pointer)
2711 break;
2712 pci_unmap_page(nic->pdev,
2713 (dma_addr_t)
2714 txdlp->
2715 Buffer_Pointer,
2716 frag->size,
2717 PCI_DMA_TODEVICE);
2718 }
2719 txdlp = temp;
2720 }
2721 memset(txdlp, 0,
2722 (sizeof(TxD_t) * fifo_data->max_txds));
2723
2724 /* Updating the statistics block */ 2716 /* Updating the statistics block */
2725 nic->stats.tx_bytes += skb->len; 2717 nic->stats.tx_bytes += skb->len;
2726 dev_kfree_skb_irq(skb); 2718 dev_kfree_skb_irq(skb);
@@ -3078,7 +3070,7 @@ int s2io_set_swapper(nic_t * sp)
3078 3070
3079static int wait_for_msix_trans(nic_t *nic, int i) 3071static int wait_for_msix_trans(nic_t *nic, int i)
3080{ 3072{
3081 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3073 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3082 u64 val64; 3074 u64 val64;
3083 int ret = 0, cnt = 0; 3075 int ret = 0, cnt = 0;
3084 3076
@@ -3099,7 +3091,7 @@ static int wait_for_msix_trans(nic_t *nic, int i)
3099 3091
3100void restore_xmsi_data(nic_t *nic) 3092void restore_xmsi_data(nic_t *nic)
3101{ 3093{
3102 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3094 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3103 u64 val64; 3095 u64 val64;
3104 int i; 3096 int i;
3105 3097
@@ -3117,7 +3109,7 @@ void restore_xmsi_data(nic_t *nic)
3117 3109
3118static void store_xmsi_data(nic_t *nic) 3110static void store_xmsi_data(nic_t *nic)
3119{ 3111{
3120 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3112 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3121 u64 val64, addr, data; 3113 u64 val64, addr, data;
3122 int i; 3114 int i;
3123 3115
@@ -3140,7 +3132,7 @@ static void store_xmsi_data(nic_t *nic)
3140 3132
3141int s2io_enable_msi(nic_t *nic) 3133int s2io_enable_msi(nic_t *nic)
3142{ 3134{
3143 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3135 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3144 u16 msi_ctrl, msg_val; 3136 u16 msi_ctrl, msg_val;
3145 struct config_param *config = &nic->config; 3137 struct config_param *config = &nic->config;
3146 struct net_device *dev = nic->dev; 3138 struct net_device *dev = nic->dev;
@@ -3190,7 +3182,7 @@ int s2io_enable_msi(nic_t *nic)
3190 3182
3191int s2io_enable_msi_x(nic_t *nic) 3183int s2io_enable_msi_x(nic_t *nic)
3192{ 3184{
3193 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3185 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3194 u64 tx_mat, rx_mat; 3186 u64 tx_mat, rx_mat;
3195 u16 msi_control; /* Temp variable */ 3187 u16 msi_control; /* Temp variable */
3196 int ret, i, j, msix_indx = 1; 3188 int ret, i, j, msix_indx = 1;
@@ -3331,7 +3323,7 @@ failed\n", dev->name);
3331 s2io_msix_fifo_handle, 0, sp->desc1, 3323 s2io_msix_fifo_handle, 0, sp->desc1,
3332 sp->s2io_entries[i].arg); 3324 sp->s2io_entries[i].arg);
3333 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, 3325 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3334 sp->msix_info[i].addr); 3326 (unsigned long long)sp->msix_info[i].addr);
3335 } else { 3327 } else {
3336 sprintf(sp->desc2, "%s:MSI-X-%d-RX", 3328 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3337 dev->name, i); 3329 dev->name, i);
@@ -3339,7 +3331,7 @@ failed\n", dev->name);
3339 s2io_msix_ring_handle, 0, sp->desc2, 3331 s2io_msix_ring_handle, 0, sp->desc2,
3340 sp->s2io_entries[i].arg); 3332 sp->s2io_entries[i].arg);
3341 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, 3333 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3342 sp->msix_info[i].addr); 3334 (unsigned long long)sp->msix_info[i].addr);
3343 } 3335 }
3344 if (err) { 3336 if (err) {
3345 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \ 3337 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
@@ -3527,6 +3519,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3527 return 0; 3519 return 0;
3528 } 3520 }
3529 3521
3522 txdp->Control_1 = 0;
3523 txdp->Control_2 = 0;
3530#ifdef NETIF_F_TSO 3524#ifdef NETIF_F_TSO
3531 mss = skb_shinfo(skb)->tso_size; 3525 mss = skb_shinfo(skb)->tso_size;
3532 if (mss) { 3526 if (mss) {
@@ -3534,19 +3528,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3534 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3528 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3535 } 3529 }
3536#endif 3530#endif
3537
3538 frg_cnt = skb_shinfo(skb)->nr_frags;
3539 frg_len = skb->len - skb->data_len;
3540
3541 txdp->Buffer_Pointer = pci_map_single
3542 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3543 txdp->Host_Control = (unsigned long) skb;
3544 if (skb->ip_summed == CHECKSUM_HW) { 3531 if (skb->ip_summed == CHECKSUM_HW) {
3545 txdp->Control_2 |= 3532 txdp->Control_2 |=
3546 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3533 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3547 TXD_TX_CKO_UDP_EN); 3534 TXD_TX_CKO_UDP_EN);
3548 } 3535 }
3549 3536 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3537 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3550 txdp->Control_2 |= config->tx_intr_type; 3538 txdp->Control_2 |= config->tx_intr_type;
3551 3539
3552 if (sp->vlgrp && vlan_tx_tag_present(skb)) { 3540 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
@@ -3554,10 +3542,40 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3554 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 3542 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3555 } 3543 }
3556 3544
3557 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) | 3545 frg_len = skb->len - skb->data_len;
3558 TXD_GATHER_CODE_FIRST); 3546 if (skb_shinfo(skb)->ufo_size) {
3559 txdp->Control_1 |= TXD_LIST_OWN_XENA; 3547 int ufo_size;
3548
3549 ufo_size = skb_shinfo(skb)->ufo_size;
3550 ufo_size &= ~7;
3551 txdp->Control_1 |= TXD_UFO_EN;
3552 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3553 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3554#ifdef __BIG_ENDIAN
3555 sp->ufo_in_band_v[put_off] =
3556 (u64)skb_shinfo(skb)->ip6_frag_id;
3557#else
3558 sp->ufo_in_band_v[put_off] =
3559 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3560#endif
3561 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3562 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3563 sp->ufo_in_band_v,
3564 sizeof(u64), PCI_DMA_TODEVICE);
3565 txdp++;
3566 txdp->Control_1 = 0;
3567 txdp->Control_2 = 0;
3568 }
3560 3569
3570 txdp->Buffer_Pointer = pci_map_single
3571 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3572 txdp->Host_Control = (unsigned long) skb;
3573 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3574
3575 if (skb_shinfo(skb)->ufo_size)
3576 txdp->Control_1 |= TXD_UFO_EN;
3577
3578 frg_cnt = skb_shinfo(skb)->nr_frags;
3561 /* For fragmented SKB. */ 3579 /* For fragmented SKB. */
3562 for (i = 0; i < frg_cnt; i++) { 3580 for (i = 0; i < frg_cnt; i++) {
3563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3569,9 +3587,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3569 (sp->pdev, frag->page, frag->page_offset, 3587 (sp->pdev, frag->page, frag->page_offset,
3570 frag->size, PCI_DMA_TODEVICE); 3588 frag->size, PCI_DMA_TODEVICE);
3571 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size); 3589 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3590 if (skb_shinfo(skb)->ufo_size)
3591 txdp->Control_1 |= TXD_UFO_EN;
3572 } 3592 }
3573 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3593 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3574 3594
3595 if (skb_shinfo(skb)->ufo_size)
3596 frg_cnt++; /* as Txd0 was used for inband header */
3597
3575 tx_fifo = mac_control->tx_FIFO_start[queue]; 3598 tx_fifo = mac_control->tx_FIFO_start[queue];
3576 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr; 3599 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3577 writeq(val64, &tx_fifo->TxDL_Pointer); 3600 writeq(val64, &tx_fifo->TxDL_Pointer);
@@ -3583,6 +3606,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3583 if (mss) 3606 if (mss)
3584 val64 |= TX_FIFO_SPECIAL_FUNC; 3607 val64 |= TX_FIFO_SPECIAL_FUNC;
3585#endif 3608#endif
3609 if (skb_shinfo(skb)->ufo_size)
3610 val64 |= TX_FIFO_SPECIAL_FUNC;
3586 writeq(val64, &tx_fifo->List_Control); 3611 writeq(val64, &tx_fifo->List_Control);
3587 3612
3588 mmiowb(); 3613 mmiowb();
@@ -4721,7 +4746,10 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4721 fail = 1; 4746 fail = 1;
4722 4747
4723 if (ret_data != 0x012345) { 4748 if (ret_data != 0x012345) {
4724 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 4749 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
4750 "Data written %llx Data read %llx\n",
4751 dev->name, (unsigned long long)0x12345,
4752 (unsigned long long)ret_data);
4725 fail = 1; 4753 fail = 1;
4726 } 4754 }
4727 4755
@@ -4740,7 +4768,10 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4740 fail = 1; 4768 fail = 1;
4741 4769
4742 if (ret_data != 0x012345) { 4770 if (ret_data != 0x012345) {
4743 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 4771 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
4772 "Data written %llx Data read %llx\n",
4773 dev->name, (unsigned long long)0x12345,
4774 (unsigned long long)ret_data);
4744 fail = 1; 4775 fail = 1;
4745 } 4776 }
4746 4777
@@ -5190,6 +5221,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5190 .get_tso = ethtool_op_get_tso, 5221 .get_tso = ethtool_op_get_tso,
5191 .set_tso = ethtool_op_set_tso, 5222 .set_tso = ethtool_op_set_tso,
5192#endif 5223#endif
5224 .get_ufo = ethtool_op_get_ufo,
5225 .set_ufo = ethtool_op_set_ufo,
5193 .self_test_count = s2io_ethtool_self_test_count, 5226 .self_test_count = s2io_ethtool_self_test_count,
5194 .self_test = s2io_ethtool_test, 5227 .self_test = s2io_ethtool_test,
5195 .get_strings = s2io_ethtool_get_strings, 5228 .get_strings = s2io_ethtool_get_strings,
@@ -5941,7 +5974,8 @@ Defaulting to INTA\n");
5941 break; 5974 break;
5942 } 5975 }
5943 } 5976 }
5944 config->max_txds = MAX_SKB_FRAGS + 1; 5977 /* + 2 because one Txd for skb->data and one Txd for UFO */
5978 config->max_txds = MAX_SKB_FRAGS + 2;
5945 5979
5946 /* Rx side parameters. */ 5980 /* Rx side parameters. */
5947 if (rx_ring_sz[0] == 0) 5981 if (rx_ring_sz[0] == 0)
@@ -6035,6 +6069,10 @@ Defaulting to INTA\n");
6035#ifdef NETIF_F_TSO 6069#ifdef NETIF_F_TSO
6036 dev->features |= NETIF_F_TSO; 6070 dev->features |= NETIF_F_TSO;
6037#endif 6071#endif
6072 if (sp->device_type & XFRAME_II_DEVICE) {
6073 dev->features |= NETIF_F_UFO;
6074 dev->features |= NETIF_F_HW_CSUM;
6075 }
6038 6076
6039 dev->tx_timeout = &s2io_tx_watchdog; 6077 dev->tx_timeout = &s2io_tx_watchdog;
6040 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 6078 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
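With UFO a transmit list can now need one descriptor for the 8-byte in-band header (carrying ip6_frag_id), one for skb->data and one per page fragment, which is where the MAX_SKB_FRAGS + 2 budget above comes from. A worked example with an invented fragment count; MAX_SKB_FRAGS of 18 assumes 4 KiB pages.

#include <stdio.h>

#define MAX_SKB_FRAGS	18	/* assumption: 4 KiB pages */

int main(void)
{
	int nr_frags = 3;		/* example skb */
	int txds_used = 1 /* in-band header */ + 1 /* skb->data */ + nr_frags;

	printf("descriptors used: %d (budget %d)\n",
	       txds_used, MAX_SKB_FRAGS + 2);
	return 0;
}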
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 419aad7f10e7..852a6a899d07 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -393,7 +393,9 @@ typedef struct _TxD {
393#define TXD_GATHER_CODE_LAST BIT(23) 393#define TXD_GATHER_CODE_LAST BIT(23)
394#define TXD_TCP_LSO_EN BIT(30) 394#define TXD_TCP_LSO_EN BIT(30)
395#define TXD_UDP_COF_EN BIT(31) 395#define TXD_UDP_COF_EN BIT(31)
396#define TXD_UFO_EN BIT(31) | BIT(30)
396#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14) 397#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
398#define TXD_UFO_MSS(val) vBIT(val,34,14)
397#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16) 399#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
398 400
399 u64 Control_2; 401 u64 Control_2;
@@ -789,6 +791,7 @@ struct s2io_nic {
789 791
790 spinlock_t rx_lock; 792 spinlock_t rx_lock;
791 atomic_t isr_cnt; 793 atomic_t isr_cnt;
794 u64 *ufo_in_band_v;
792}; 795};
793 796
794#define RESET_ERROR 1; 797#define RESET_ERROR 1;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 1d4d88680db1..3d95fa20cd88 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. 1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
2 Copyright 1999 Silicon Integrated System Corporation 2 Copyright 1999 Silicon Integrated System Corporation
3 Revision: 1.08.08 Jan. 22 2005 3 Revision: 1.08.09 Sep. 19 2005
4 4
5 Modified from the driver which is originally written by Donald Becker. 5 Modified from the driver which is originally written by Donald Becker.
6 6
@@ -17,6 +17,7 @@
17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, 17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
18 preliminary Rev. 1.0 Jan. 18, 1998 18 preliminary Rev. 1.0 Jan. 18, 1998
19 19
20 Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
20 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages 21 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
21 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support 22 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
22 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support 23 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
@@ -76,7 +77,7 @@
76#include "sis900.h" 77#include "sis900.h"
77 78
78#define SIS900_MODULE_NAME "sis900" 79#define SIS900_MODULE_NAME "sis900"
79#define SIS900_DRV_VERSION "v1.08.08 Jan. 22 2005" 80#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
80 81
81static char version[] __devinitdata = 82static char version[] __devinitdata =
82KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; 83KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -538,6 +539,11 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
538 printk("%2.2x:", (u8)net_dev->dev_addr[i]); 539 printk("%2.2x:", (u8)net_dev->dev_addr[i]);
539 printk("%2.2x.\n", net_dev->dev_addr[i]); 540 printk("%2.2x.\n", net_dev->dev_addr[i]);
540 541
542 /* Detect Wake on Lan support */
543 ret = inl(CFGPMC & PMESP);
544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
546
541 return 0; 547 return 0;
542 548
543 err_unmap_rx: 549 err_unmap_rx:
@@ -2015,6 +2021,67 @@ static int sis900_nway_reset(struct net_device *net_dev)
2015 return mii_nway_restart(&sis_priv->mii_info); 2021 return mii_nway_restart(&sis_priv->mii_info);
2016} 2022}
2017 2023
2024/**
2025 * sis900_set_wol - Set up Wake on Lan registers
2026 * @net_dev: the net device to probe
2027 * @wol: container for info passed to the driver
2028 *
2029 * Process ethtool command "wol" to setup wake on lan features.
2030 * SiS900 supports sending WoL events if a correct packet is received,
2031 * but there is no simple way to filter them to only a subset (broadcast,
2032 * multicast, unicast or arp).
2033 */
2034
2035static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2036{
2037 struct sis900_private *sis_priv = net_dev->priv;
2038 long pmctrl_addr = net_dev->base_addr + pmctrl;
2039 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2040
2041 if (wol->wolopts == 0) {
2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2043 cfgpmcsr &= ~PME_EN;
2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2045 outl(pmctrl_bits, pmctrl_addr);
2046 if (netif_msg_wol(sis_priv))
2047 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2048 return 0;
2049 }
2050
2051 if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
2052 | WAKE_BCAST | WAKE_ARP))
2053 return -EINVAL;
2054
2055 if (wol->wolopts & WAKE_MAGIC)
2056 pmctrl_bits |= MAGICPKT;
2057 if (wol->wolopts & WAKE_PHY)
2058 pmctrl_bits |= LINKON;
2059
2060 outl(pmctrl_bits, pmctrl_addr);
2061
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr |= PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 if (netif_msg_wol(sis_priv))
2066 printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);
2067
2068 return 0;
2069}
2070
2071static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2072{
2073 long pmctrl_addr = net_dev->base_addr + pmctrl;
2074 u32 pmctrl_bits;
2075
2076 pmctrl_bits = inl(pmctrl_addr);
2077 if (pmctrl_bits & MAGICPKT)
2078 wol->wolopts |= WAKE_MAGIC;
2079 if (pmctrl_bits & LINKON)
2080 wol->wolopts |= WAKE_PHY;
2081
2082 wol->supported = (WAKE_PHY | WAKE_MAGIC);
2083}
2084
2018static struct ethtool_ops sis900_ethtool_ops = { 2085static struct ethtool_ops sis900_ethtool_ops = {
2019 .get_drvinfo = sis900_get_drvinfo, 2086 .get_drvinfo = sis900_get_drvinfo,
2020 .get_msglevel = sis900_get_msglevel, 2087 .get_msglevel = sis900_get_msglevel,
@@ -2023,6 +2090,8 @@ static struct ethtool_ops sis900_ethtool_ops = {
2023 .get_settings = sis900_get_settings, 2090 .get_settings = sis900_get_settings,
2024 .set_settings = sis900_set_settings, 2091 .set_settings = sis900_set_settings,
2025 .nway_reset = sis900_nway_reset, 2092 .nway_reset = sis900_nway_reset,
2093 .get_wol = sis900_get_wol,
2094 .set_wol = sis900_set_wol
2026}; 2095};
2027 2096
2028/** 2097/**
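Arming or disarming wake-up in sis900_set_wol() is a read-modify-write of the CFGPMCSR dword, and only the PME_EN bit should change. A stand-alone illustration of that bit handling with an invented starting value:

#include <stdio.h>
#include <stdint.h>

#define PME_EN	0x00000100

int main(void)
{
	uint32_t cfgpmcsr = 0x00006003;		/* example register value */

	cfgpmcsr |= PME_EN;			/* enable: wake-up armed */
	printf("enabled:  0x%08x\n", cfgpmcsr);

	cfgpmcsr &= ~PME_EN;			/* disable: clear only PME_EN */
	printf("disabled: 0x%08x\n", cfgpmcsr);
	return 0;
}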
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index de3c06735d15..4233ea55670f 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -33,6 +33,7 @@ enum sis900_registers {
33 rxcfg=0x34, //Receive Configuration Register 33 rxcfg=0x34, //Receive Configuration Register
34 flctrl=0x38, //Flow Control Register 34 flctrl=0x38, //Flow Control Register
35 rxlen=0x3c, //Receive Packet Length Register 35 rxlen=0x3c, //Receive Packet Length Register
36 cfgpmcsr=0x44, //Configuration Power Management Control/Status Register
36 rfcr=0x48, //Receive Filter Control Register 37 rfcr=0x48, //Receive Filter Control Register
37 rfdr=0x4C, //Receive Filter Data Register 38 rfdr=0x4C, //Receive Filter Data Register
38 pmctrl=0xB0, //Power Management Control Register 39 pmctrl=0xB0, //Power Management Control Register
@@ -140,6 +141,50 @@ enum sis96x_eeprom_command {
140 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100 141 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
141}; 142};
142 143
144/* PCI Registers */
145enum sis900_pci_registers {
146 CFGPMC = 0x40,
147 CFGPMCSR = 0x44
148};
149
150/* Power management capabilities bits */
151enum sis900_cfgpmc_register_bits {
152 PMVER = 0x00070000,
153 DSI = 0x00100000,
154 PMESP = 0xf8000000
155};
156
157enum sis900_pmesp_bits {
158 PME_D0 = 0x1,
159 PME_D1 = 0x2,
160 PME_D2 = 0x4,
161 PME_D3H = 0x8,
162 PME_D3C = 0x10
163};
164
165/* Power management control/status bits */
166enum sis900_cfgpmcsr_register_bits {
167 PMESTS = 0x00004000,
168 PME_EN = 0x00000100, // Power management enable
169 PWR_STA = 0x00000003 // Current power state
170};
171
172/* Wake-on-LAN support. */
173enum sis900_power_management_control_register_bits {
174 LINKLOSS = 0x00000001,
175 LINKON = 0x00000002,
176 MAGICPKT = 0x00000400,
177 ALGORITHM = 0x00000800,
178 FRM1EN = 0x00100000,
179 FRM2EN = 0x00200000,
180 FRM3EN = 0x00400000,
181 FRM1ACS = 0x01000000,
182 FRM2ACS = 0x02000000,
183 FRM3ACS = 0x04000000,
184 WAKEALL = 0x40000000,
185 GATECLK = 0x80000000
186};
187
143/* Management Data I/O (mdio) frame */ 188/* Management Data I/O (mdio) frame */
144#define MIIread 0x6000 189#define MIIread 0x6000
145#define MIIwrite 0x5002 190#define MIIwrite 0x5002
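The PMESP field above sits in bits 27-31 of CFGPMC, so deciding which power states can assert PME# means masking and shifting before testing the PME_Dx bits. A stand-alone example with an invented register value:

#include <stdio.h>
#include <stdint.h>

#define PMESP	0xf8000000
#define PME_D3H	0x8
#define PME_D3C	0x10

int main(void)
{
	uint32_t cfgpmc = 0xc8000000;		/* example: PME from D0, D3hot, D3cold */
	uint32_t pmesp = (cfgpmc & PMESP) >> 27;

	printf("PME from D3hot:  %s\n", (pmesp & PME_D3H) ? "yes" : "no");
	printf("PME from D3cold: %s\n", (pmesp & PME_D3C) ? "yes" : "no");
	return 0;
}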
diff --git a/drivers/net/sk98lin/Makefile b/drivers/net/sk98lin/Makefile
index 6783039ffb75..afd900d5d730 100644
--- a/drivers/net/sk98lin/Makefile
+++ b/drivers/net/sk98lin/Makefile
@@ -26,9 +26,7 @@ sk98lin-objs := \
26 skrlmt.o \ 26 skrlmt.o \
27 sktimer.o \ 27 sktimer.o \
28 skvpd.o \ 28 skvpd.o \
29 skxmac2.o \ 29 skxmac2.o
30 skproc.o \
31 skcsum.o
32 30
33# DBGDEF = \ 31# DBGDEF = \
34# -DDEBUG 32# -DDEBUG
@@ -77,7 +75,7 @@ endif
77# SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupts sources 75# SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupts sources
78# SK_DBGCAT_DRV_EVENT 0x08000000 driver events 76# SK_DBGCAT_DRV_EVENT 0x08000000 driver events
79 77
80EXTRA_CFLAGS += -Idrivers/net/sk98lin -DSK_DIAG_SUPPORT -DSK_USE_CSUM -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM) 78EXTRA_CFLAGS += -Idrivers/net/sk98lin -DSK_DIAG_SUPPORT -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM)
81 79
82clean: 80clean:
83 rm -f core *.o *.a *.s 81 rm -f core *.o *.a *.s
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
index 542cec57f86a..778d9e618ebd 100644
--- a/drivers/net/sk98lin/h/skdrv2nd.h
+++ b/drivers/net/sk98lin/h/skdrv2nd.h
@@ -60,7 +60,6 @@ extern SK_U64 SkOsGetTime(SK_AC*);
60extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*); 60extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*);
61extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*); 61extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*);
62extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*); 62extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*);
63extern int SkPciWriteCfgDWord(SK_AC*, int, SK_U32);
64extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16); 63extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16);
65extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8); 64extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8);
66extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA); 65extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA);
@@ -268,8 +267,6 @@ typedef struct s_DevNet DEV_NET;
268struct s_DevNet { 267struct s_DevNet {
269 int PortNr; 268 int PortNr;
270 int NetNr; 269 int NetNr;
271 int Mtu;
272 int Up;
273 SK_AC *pAC; 270 SK_AC *pAC;
274}; 271};
275 272
@@ -298,6 +295,7 @@ struct s_RxPort {
298 RXD *pRxdRingTail; /* Tail of Rx rings */ 295 RXD *pRxdRingTail; /* Tail of Rx rings */
299 RXD *pRxdRingPrev; /* descriptor given to BMU previously */ 296 RXD *pRxdRingPrev; /* descriptor given to BMU previously */
300 int RxdRingFree; /* # of free entrys */ 297 int RxdRingFree; /* # of free entrys */
298 int RxCsum; /* use receive checksum hardware */
301 spinlock_t RxDesRingLock; /* serialize descriptor accesses */ 299 spinlock_t RxDesRingLock; /* serialize descriptor accesses */
302 int RxFillLimit; /* limit for buffers in ring */ 300 int RxFillLimit; /* limit for buffers in ring */
303 SK_IOC HwAddr; /* bmu registers address */ 301 SK_IOC HwAddr; /* bmu registers address */
@@ -390,12 +388,10 @@ struct s_AC {
390 388
391 SK_IOC IoBase; /* register set of adapter */ 389 SK_IOC IoBase; /* register set of adapter */
392 int BoardLevel; /* level of active hw init (0-2) */ 390 int BoardLevel; /* level of active hw init (0-2) */
393 char DeviceStr[80]; /* adapter string from vpd */ 391
394 SK_U32 AllocFlag; /* flag allocation of resources */ 392 SK_U32 AllocFlag; /* flag allocation of resources */
395 struct pci_dev *PciDev; /* for access to pci config space */ 393 struct pci_dev *PciDev; /* for access to pci config space */
396 SK_U32 PciDevId; /* pci device id */
397 struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */ 394 struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */
398 char Name[30]; /* driver name */
399 395
400 int RxBufSize; /* length of receive buffers */ 396 int RxBufSize; /* length of receive buffers */
401 struct net_device_stats stats; /* linux 'netstat -i' statistics */ 397 struct net_device_stats stats; /* linux 'netstat -i' statistics */
@@ -425,16 +421,11 @@ struct s_AC {
425 TX_PORT TxPort[SK_MAX_MACS][2]; 421 TX_PORT TxPort[SK_MAX_MACS][2];
426 RX_PORT RxPort[SK_MAX_MACS]; 422 RX_PORT RxPort[SK_MAX_MACS];
427 423
428 unsigned int CsOfs1; /* for checksum calculation */
429 unsigned int CsOfs2; /* for checksum calculation */
430 SK_U32 CsOfs; /* for checksum calculation */
431
432 SK_BOOL CheckQueue; /* check event queue soon */ 424 SK_BOOL CheckQueue; /* check event queue soon */
433 SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */ 425 SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */
434 DIM_INFO DynIrqModInfo; /* all data related to DIM */ 426 DIM_INFO DynIrqModInfo; /* all data related to DIM */
435 427
436 /* Only for tests */ 428 /* Only for tests */
437 int PortUp;
438 int PortDown; 429 int PortDown;
439 int ChipsetType; /* Chipset family type 430 int ChipsetType; /* Chipset family type
440 * 0 == Genesis family support 431 * 0 == Genesis family support
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h
index bdc1a5eaaae9..daa9a8d154fc 100644
--- a/drivers/net/sk98lin/h/skvpd.h
+++ b/drivers/net/sk98lin/h/skvpd.h
@@ -130,14 +130,12 @@ typedef struct s_vpd_key {
130#ifndef VPD_DO_IO 130#ifndef VPD_DO_IO
131#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val) 131#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val)
132#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val) 132#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val)
133#define VPD_OUT32(pAC,IoC,Addr,Val) (void)SkPciWriteCfgDWord(pAC,Addr,Val)
134#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal) 133#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal)
135#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal) 134#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal)
136#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal) 135#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal)
137#else /* VPD_DO_IO */ 136#else /* VPD_DO_IO */
138#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val) 137#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val)
139#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val) 138#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val)
140#define VPD_OUT32(pAC,IoC,Addr,Val) SK_OUT32(IoC,PCI_C(Addr),Val)
141#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal) 139#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal)
142#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal) 140#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal)
143#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal) 141#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal)
@@ -155,12 +153,6 @@ typedef struct s_vpd_key {
155 else \ 153 else \
156 SK_OUT16(pAC,PCI_C(Addr),Val); \ 154 SK_OUT16(pAC,PCI_C(Addr),Val); \
157 } 155 }
158#define VPD_OUT32(pAC,Ioc,Addr,Val) { \
159 if ((pAC)->DgT.DgUseCfgCycle) \
160 SkPciWriteCfgDWord(pAC,Addr,Val); \
161 else \
162 SK_OUT32(pAC,PCI_C(Addr),Val); \
163 }
164#define VPD_IN8(pAC,Ioc,Addr,pVal) { \ 156#define VPD_IN8(pAC,Ioc,Addr,pVal) { \
165 if ((pAC)->DgT.DgUseCfgCycle) \ 157 if ((pAC)->DgT.DgUseCfgCycle) \
166 SkPciReadCfgByte(pAC,Addr,pVal); \ 158 SkPciReadCfgByte(pAC,Addr,pVal); \
diff --git a/drivers/net/sk98lin/skcsum.c b/drivers/net/sk98lin/skcsum.c
deleted file mode 100644
index 38a6e7a631f3..000000000000
--- a/drivers/net/sk98lin/skcsum.c
+++ /dev/null
@@ -1,871 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skcsum.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.12 $
6 * Date: $Date: 2003/08/20 13:55:53 $
7 * Purpose: Store/verify Internet checksum in send/receive packets.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2003 SysKonnect GmbH.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * The information in this file is provided "AS IS" without warranty.
21 *
22 ******************************************************************************/
23
24#ifdef SK_USE_CSUM /* Check if CSUM is to be used. */
25
26#ifndef lint
27static const char SysKonnectFileId[] =
28 "@(#) $Id: skcsum.c,v 1.12 2003/08/20 13:55:53 mschmid Exp $ (C) SysKonnect.";
29#endif /* !lint */
30
31/******************************************************************************
32 *
33 * Description:
34 *
35 * This is the "GEnesis" common module "CSUM".
36 *
37 * This module contains the code necessary to calculate, store, and verify the
38 * Internet Checksum of IP, TCP, and UDP frames.
39 *
40 * "GEnesis" is an abbreviation of "Gigabit Ethernet Network System in Silicon"
41 * and is the code name of this SysKonnect project.
42 *
43 * Compilation Options:
44 *
45 * SK_USE_CSUM - Define if CSUM is to be used. Otherwise, CSUM will be an
46 * empty module.
47 *
48 * SKCS_OVERWRITE_PROTO - Define to overwrite the default protocol id
49 * definitions. In this case, all SKCS_PROTO_xxx definitions must be made
50 * external.
51 *
52 * SKCS_OVERWRITE_STATUS - Define to overwrite the default return status
53 * definitions. In this case, all SKCS_STATUS_xxx definitions must be made
54 * external.
55 *
56 * Include File Hierarchy:
57 *
58 * "h/skdrv1st.h"
59 * "h/skcsum.h"
60 * "h/sktypes.h"
61 * "h/skqueue.h"
62 * "h/skdrv2nd.h"
63 *
64 ******************************************************************************/
65
66#include "h/skdrv1st.h"
67#include "h/skcsum.h"
68#include "h/skdrv2nd.h"
69
70/* defines ********************************************************************/
71
72/* The size of an Ethernet MAC header. */
73#define SKCS_ETHERNET_MAC_HEADER_SIZE (6+6+2)
74
75/* The size of the used topology's MAC header. */
76#define SKCS_MAC_HEADER_SIZE SKCS_ETHERNET_MAC_HEADER_SIZE
77
78/* The size of the IP header without any option fields. */
79#define SKCS_IP_HEADER_SIZE 20
80
81/*
82 * Field offsets within the IP header.
83 */
84
85/* "Internet Header Version" and "Length". */
86#define SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH 0
87
88/* "Total Length". */
89#define SKCS_OFS_IP_TOTAL_LENGTH 2
90
91/* "Flags" "Fragment Offset". */
92#define SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET 6
93
94/* "Next Level Protocol" identifier. */
95#define SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL 9
96
97/* Source IP address. */
98#define SKCS_OFS_IP_SOURCE_ADDRESS 12
99
100/* Destination IP address. */
101#define SKCS_OFS_IP_DESTINATION_ADDRESS 16
102
103
104/*
105 * Field offsets within the UDP header.
106 */
107
108/* UDP checksum. */
109#define SKCS_OFS_UDP_CHECKSUM 6
110
111/* IP "Next Level Protocol" identifiers (see RFC 790). */
112#define SKCS_PROTO_ID_TCP 6 /* Transmission Control Protocol */
113#define SKCS_PROTO_ID_UDP 17 /* User Datagram Protocol */
114
115/* IP "Don't Fragment" bit. */
116#define SKCS_IP_DONT_FRAGMENT SKCS_HTON16(0x4000)
117
118/* Add a byte offset to a pointer. */
119#define SKCS_IDX(pPtr, Ofs) ((void *) ((char *) (pPtr) + (Ofs)))
120
121/*
122 * Macros that convert host to network representation and vice versa, i.e.
123 * little/big endian conversion on little endian machines only.
124 */
125#ifdef SK_LITTLE_ENDIAN
126#define SKCS_HTON16(Val16) (((unsigned) (Val16) >> 8) | (((Val16) & 0xff) << 8))
127#endif /* SK_LITTLE_ENDIAN */
128#ifdef SK_BIG_ENDIAN
129#define SKCS_HTON16(Val16) (Val16)
130#endif /* SK_BIG_ENDIAN */
131#define SKCS_NTOH16(Val16) SKCS_HTON16(Val16)
132
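For a concrete feel for the byte-order macro above, here is a tiny standalone check (ordinary user-space C, not part of the driver; a little-endian host is assumed) that applies the same swap to the "Don't Fragment" flag value used further down:

	#include <stdio.h>
	#include <stdint.h>

	/* Same expression as the driver's little-endian SKCS_HTON16(). */
	#define HTON16(Val16) (((unsigned)(Val16) >> 8) | (((Val16) & 0xff) << 8))

	int main(void)
	{
		uint16_t df_host = 0x4000;          /* IP "Don't Fragment" flag */
		uint16_t df_net  = HTON16(df_host); /* 0x0040 on little endian  */

		printf("host 0x%04x -> network 0x%04x\n", df_host, df_net);
		return 0;
	}

On big-endian machines the driver defines the macro as a no-op, so both values would be identical there.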
133/* typedefs *******************************************************************/
134
135/* function prototypes ********************************************************/
136
137/******************************************************************************
138 *
139 * SkCsGetSendInfo - get checksum information for a send packet
140 *
141 * Description:
142 * Get all checksum information necessary to send a TCP or UDP packet. The
143 * function checks the IP header passed to it. If the high-level protocol
144 * is either TCP or UDP the pseudo header checksum is calculated and
145 * returned.
146 *
147 * The function returns the total length of the IP header (including any
148 * IP option fields), which is the same as the start offset of the IP data
149 * which in turn is the start offset of the TCP or UDP header.
150 *
151 * The function also returns the TCP or UDP pseudo header checksum, which
152 * should be used as the start value for the hardware checksum calculation.
153 * (Note that any actual pseudo header checksum can never calculate to
154 * zero.)
155 *
156 * Note:
157 * There is a bug in the GENESIS ASIC which may lead to wrong checksums.
158 *
159 * Arguments:
160 * pAc - A pointer to the adapter context struct.
161 *
162 * pIpHeader - Pointer to IP header. Must be at least the IP header *not*
163 * including any option fields, i.e. at least 20 bytes.
164 *
165 * Note: This pointer will be used to address 8-, 16-, and 32-bit
166 * variables with the respective alignment offsets relative to the pointer.
167 * Thus, the pointer should point to a 32-bit aligned address. If the
168 * target system cannot address 32-bit variables on non 32-bit aligned
169 * addresses, then the pointer *must* point to a 32-bit aligned address.
170 *
171 * pPacketInfo - A pointer to the packet information structure for this
 172 * packet. Before calling SkCsGetSendInfo(), the following field must
173 * be initialized:
174 *
175 * ProtocolFlags - Initialize with any combination of
176 * SKCS_PROTO_XXX bit flags. SkCsGetSendInfo() will only work on
177 * the protocols specified here. Any protocol(s) not specified
178 * here will be ignored.
179 *
180 * Note: Only one checksum can be calculated in hardware. Thus, if
181 * SKCS_PROTO_IP is specified in the 'ProtocolFlags',
182 * SkCsGetSendInfo() must calculate the IP header checksum in
183 * software. It might be a better idea to have the calling
184 * protocol stack calculate the IP header checksum.
185 *
186 * Returns: N/A
187 * On return, the following fields in 'pPacketInfo' may or may not have
188 * been filled with information, depending on the protocol(s) found in the
189 * packet:
190 *
191 * ProtocolFlags - Returns the SKCS_PROTO_XXX bit flags of the protocol(s)
192 * that were both requested by the caller and actually found in the packet.
193 * Protocol(s) not specified by the caller and/or not found in the packet
194 * will have their respective SKCS_PROTO_XXX bit flags reset.
195 *
196 * Note: For IP fragments, TCP and UDP packet information is ignored.
197 *
198 * IpHeaderLength - The total length in bytes of the complete IP header
199 * including any option fields is returned here. This is the start offset
200 * of the IP data, i.e. the TCP or UDP header if present.
201 *
202 * IpHeaderChecksum - If IP has been specified in the 'ProtocolFlags', the
203 * 16-bit Internet Checksum of the IP header is returned here. This value
204 * is to be stored into the packet's 'IP Header Checksum' field.
205 *
206 * PseudoHeaderChecksum - If this is a TCP or UDP packet and if TCP or UDP
207 * has been specified in the 'ProtocolFlags', the 16-bit Internet Checksum
208 * of the TCP or UDP pseudo header is returned here.
209 */
210void SkCsGetSendInfo(
211SK_AC *pAc, /* Adapter context struct. */
212void *pIpHeader, /* IP header. */
213SKCS_PACKET_INFO *pPacketInfo, /* Packet information struct. */
214int NetNumber) /* Net number */
215{
216 /* Internet Header Version found in IP header. */
217 unsigned InternetHeaderVersion;
218
219 /* Length of the IP header as found in IP header. */
220 unsigned IpHeaderLength;
221
222 /* Bit field specifiying the desired/found protocols. */
223 unsigned ProtocolFlags;
224
225 /* Next level protocol identifier found in IP header. */
226 unsigned NextLevelProtocol;
227
228 /* Length of IP data portion. */
229 unsigned IpDataLength;
230
231 /* TCP/UDP pseudo header checksum. */
232 unsigned long PseudoHeaderChecksum;
233
234 /* Pointer to next level protocol statistics structure. */
235 SKCS_PROTO_STATS *NextLevelProtoStats;
236
237 /* Temporary variable. */
238 unsigned Tmp;
239
240 Tmp = *(SK_U8 *)
241 SKCS_IDX(pIpHeader, SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH);
242
243 /* Get the Internet Header Version (IHV). */
244 /* Note: The IHV is stored in the upper four bits. */
245
246 InternetHeaderVersion = Tmp >> 4;
247
248 /* Check the Internet Header Version. */
249 /* Note: We currently only support IP version 4. */
250
251 if (InternetHeaderVersion != 4) { /* IPv4? */
252 SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_TX,
253 ("Tx: Unknown Internet Header Version %u.\n",
254 InternetHeaderVersion));
255 pPacketInfo->ProtocolFlags = 0;
256 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxUnableCts++;
257 return;
258 }
259
260 /* Get the IP header length (IHL). */
261 /*
262 * Note: The IHL is stored in the lower four bits as the number of
263 * 4-byte words.
264 */
265
266 IpHeaderLength = (Tmp & 0xf) * 4;
267 pPacketInfo->IpHeaderLength = IpHeaderLength;
268
269 /* Check the IP header length. */
270
271 /* 04-Aug-1998 sw - Really check the IHL? Necessary? */
272
273 if (IpHeaderLength < 5*4) {
274 SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_TX,
275 ("Tx: Invalid IP Header Length %u.\n", IpHeaderLength));
276 pPacketInfo->ProtocolFlags = 0;
277 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxUnableCts++;
278 return;
279 }
280
281 /* This is an IPv4 frame with a header of valid length. */
282
283 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxOkCts++;
284
285 /* Check if we should calculate the IP header checksum. */
286
287 ProtocolFlags = pPacketInfo->ProtocolFlags;
288
289 if (ProtocolFlags & SKCS_PROTO_IP) {
290 pPacketInfo->IpHeaderChecksum =
291 SkCsCalculateChecksum(pIpHeader, IpHeaderLength);
292 }
293
294 /* Get the next level protocol identifier. */
295
296 NextLevelProtocol =
297 *(SK_U8 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL);
298
299 /*
300 * Check if this is a TCP or UDP frame and if we should calculate the
301 * TCP/UDP pseudo header checksum.
302 *
303 * Also clear all protocol bit flags of protocols not present in the
304 * frame.
305 */
306
307 if ((ProtocolFlags & SKCS_PROTO_TCP) != 0 &&
308 NextLevelProtocol == SKCS_PROTO_ID_TCP) {
309 /* TCP/IP frame. */
310 ProtocolFlags &= SKCS_PROTO_TCP | SKCS_PROTO_IP;
311 NextLevelProtoStats =
312 &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_TCP];
313 }
314 else if ((ProtocolFlags & SKCS_PROTO_UDP) != 0 &&
315 NextLevelProtocol == SKCS_PROTO_ID_UDP) {
316 /* UDP/IP frame. */
317 ProtocolFlags &= SKCS_PROTO_UDP | SKCS_PROTO_IP;
318 NextLevelProtoStats =
319 &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_UDP];
320 }
321 else {
322 /*
323 * Either not a TCP or UDP frame and/or TCP/UDP processing not
324 * specified.
325 */
326 pPacketInfo->ProtocolFlags = ProtocolFlags & SKCS_PROTO_IP;
327 return;
328 }
329
330 /* Check if this is an IP fragment. */
331
332 /*
333 * Note: An IP fragment has a non-zero "Fragment Offset" field and/or
334 * the "More Fragments" bit set. Thus, if both the "Fragment Offset"
335 * and the "More Fragments" are zero, it is *not* a fragment. We can
336 * easily check both at the same time since they are in the same 16-bit
337 * word.
338 */
339
340 if ((*(SK_U16 *)
341 SKCS_IDX(pIpHeader, SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET) &
342 ~SKCS_IP_DONT_FRAGMENT) != 0) {
343 /* IP fragment; ignore all other protocols. */
344 pPacketInfo->ProtocolFlags = ProtocolFlags & SKCS_PROTO_IP;
345 NextLevelProtoStats->TxUnableCts++;
346 return;
347 }
348
349 /*
350 * Calculate the TCP/UDP pseudo header checksum.
351 */
352
353 /* Get total length of IP header and data. */
354
355 IpDataLength =
356 *(SK_U16 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_TOTAL_LENGTH);
357
358 /* Get length of IP data portion. */
359
360 IpDataLength = SKCS_NTOH16(IpDataLength) - IpHeaderLength;
361
362 /* Calculate the sum of all pseudo header fields (16-bit). */
363
364 PseudoHeaderChecksum =
365 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
366 SKCS_OFS_IP_SOURCE_ADDRESS + 0) +
367 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
368 SKCS_OFS_IP_SOURCE_ADDRESS + 2) +
369 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
370 SKCS_OFS_IP_DESTINATION_ADDRESS + 0) +
371 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
372 SKCS_OFS_IP_DESTINATION_ADDRESS + 2) +
373 (unsigned long) SKCS_HTON16(NextLevelProtocol) +
374 (unsigned long) SKCS_HTON16(IpDataLength);
375
376 /* Add-in any carries. */
377
378 SKCS_OC_ADD(PseudoHeaderChecksum, PseudoHeaderChecksum, 0);
379
380 /* Add-in any new carry. */
381
382 SKCS_OC_ADD(pPacketInfo->PseudoHeaderChecksum, PseudoHeaderChecksum, 0);
383
384 pPacketInfo->ProtocolFlags = ProtocolFlags;
385 NextLevelProtoStats->TxOkCts++; /* Success. */
386} /* SkCsGetSendInfo */
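The pseudo-header arithmetic documented above can be reproduced in isolation. The sketch below is illustrative only: it sums the raw header bytes in network order, whereas SkCsGetSendInfo() sums native 16-bit words, so the two results are equivalent one's-complement sums but not bit-identical on little-endian hosts. The helper names are invented for the example.

	#include <stdint.h>

	/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
	static uint16_t fold16(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/*
	 * TCP/UDP pseudo-header checksum over an IPv4 header: source address,
	 * destination address, protocol id and the length of the IP payload,
	 * exactly the fields SkCsGetSendInfo() adds up.  'ip' points at the
	 * first byte of the IPv4 header.
	 */
	static uint16_t pseudo_hdr_csum(const uint8_t *ip)
	{
		uint32_t sum = 0;
		unsigned ihl = (ip[0] & 0x0f) * 4;             /* header length  */
		unsigned tot = ((unsigned)ip[2] << 8) | ip[3]; /* total length   */
		int i;

		for (i = 12; i < 20; i += 2)                   /* src + dst addr */
			sum += ((unsigned)ip[i] << 8) | ip[i + 1];

		sum += ip[9];                                  /* protocol id    */
		sum += tot - ihl;                              /* IP data length */

		return fold16(sum);
	}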
387
388
389/******************************************************************************
390 *
391 * SkCsGetReceiveInfo - verify checksum information for a received packet
392 *
393 * Description:
394 * Verify a received frame's checksum. The function returns a status code
395 * reflecting the result of the verification.
396 *
397 * Note:
398 * Before calling this function you have to verify that the frame is
 399 * not padded and that Checksum1 and Checksum2 are both greater than 1.
400 *
401 * Arguments:
402 * pAc - Pointer to adapter context struct.
403 *
404 * pIpHeader - Pointer to IP header. Must be at least the length in bytes
405 * of the received IP header including any option fields. For UDP packets,
406 * 8 additional bytes are needed to access the UDP checksum.
407 *
408 * Note: The actual length of the IP header is stored in the lower four
409 * bits of the first octet of the IP header as the number of 4-byte words,
410 * so it must be multiplied by four to get the length in bytes. Thus, the
411 * maximum IP header length is 15 * 4 = 60 bytes.
412 *
413 * Checksum1 - The first 16-bit Internet Checksum calculated by the
414 * hardware starting at the offset returned by SkCsSetReceiveFlags().
415 *
416 * Checksum2 - The second 16-bit Internet Checksum calculated by the
417 * hardware starting at the offset returned by SkCsSetReceiveFlags().
418 *
419 * Returns:
420 * SKCS_STATUS_UNKNOWN_IP_VERSION - Not an IP v4 frame.
421 * SKCS_STATUS_IP_CSUM_ERROR - IP checksum error.
422 * SKCS_STATUS_IP_CSUM_ERROR_TCP - IP checksum error in TCP frame.
 423 * SKCS_STATUS_IP_CSUM_ERROR_UDP - IP checksum error in UDP frame.
424 * SKCS_STATUS_IP_FRAGMENT - IP fragment (IP checksum ok).
425 * SKCS_STATUS_IP_CSUM_OK - IP checksum ok (not a TCP or UDP frame).
426 * SKCS_STATUS_TCP_CSUM_ERROR - TCP checksum error (IP checksum ok).
427 * SKCS_STATUS_UDP_CSUM_ERROR - UDP checksum error (IP checksum ok).
428 * SKCS_STATUS_TCP_CSUM_OK - IP and TCP checksum ok.
429 * SKCS_STATUS_UDP_CSUM_OK - IP and UDP checksum ok.
430 * SKCS_STATUS_IP_CSUM_OK_NO_UDP - IP checksum OK and no UDP checksum.
431 *
432 * Note: If SKCS_OVERWRITE_STATUS is defined, the SKCS_STATUS_XXX values
433 * returned here can be defined in some header file by the module using CSUM.
434 * In this way, the calling module can assign return values for its own needs,
435 * e.g. by assigning bit flags to the individual protocols.
436 */
437SKCS_STATUS SkCsGetReceiveInfo(
438SK_AC *pAc, /* Adapter context struct. */
439void *pIpHeader, /* IP header. */
440unsigned Checksum1, /* Hardware checksum 1. */
441unsigned Checksum2, /* Hardware checksum 2. */
442int NetNumber) /* Net number */
443{
444 /* Internet Header Version found in IP header. */
445 unsigned InternetHeaderVersion;
446
447 /* Length of the IP header as found in IP header. */
448 unsigned IpHeaderLength;
449
450 /* Length of IP data portion. */
451 unsigned IpDataLength;
452
453 /* IP header checksum. */
454 unsigned IpHeaderChecksum;
455
456 /* IP header options checksum, if any. */
457 unsigned IpOptionsChecksum;
458
459 /* IP data checksum, i.e. TCP/UDP checksum. */
460 unsigned IpDataChecksum;
461
462 /* Next level protocol identifier found in IP header. */
463 unsigned NextLevelProtocol;
464
465 /* The checksum of the "next level protocol", i.e. TCP or UDP. */
466 unsigned long NextLevelProtocolChecksum;
467
468 /* Pointer to next level protocol statistics structure. */
469 SKCS_PROTO_STATS *NextLevelProtoStats;
470
471 /* Temporary variable. */
472 unsigned Tmp;
473
474 Tmp = *(SK_U8 *)
475 SKCS_IDX(pIpHeader, SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH);
476
477 /* Get the Internet Header Version (IHV). */
478 /* Note: The IHV is stored in the upper four bits. */
479
480 InternetHeaderVersion = Tmp >> 4;
481
482 /* Check the Internet Header Version. */
483 /* Note: We currently only support IP version 4. */
484
485 if (InternetHeaderVersion != 4) { /* IPv4? */
486 SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_RX,
487 ("Rx: Unknown Internet Header Version %u.\n",
488 InternetHeaderVersion));
489 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxUnableCts++;
490 return (SKCS_STATUS_UNKNOWN_IP_VERSION);
491 }
492
493 /* Get the IP header length (IHL). */
494 /*
495 * Note: The IHL is stored in the lower four bits as the number of
496 * 4-byte words.
497 */
498
499 IpHeaderLength = (Tmp & 0xf) * 4;
500
501 /* Check the IP header length. */
502
503 /* 04-Aug-1998 sw - Really check the IHL? Necessary? */
504
505 if (IpHeaderLength < 5*4) {
506 SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_RX,
507 ("Rx: Invalid IP Header Length %u.\n", IpHeaderLength));
508 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxErrCts++;
509 return (SKCS_STATUS_IP_CSUM_ERROR);
510 }
511
512 /* This is an IPv4 frame with a header of valid length. */
513
514 /* Get the IP header and data checksum. */
515
516 IpDataChecksum = Checksum2;
517
518 /*
519 * The IP header checksum is calculated as follows:
520 *
521 * IpHeaderChecksum = Checksum1 - Checksum2
522 */
523
524 SKCS_OC_SUB(IpHeaderChecksum, Checksum1, Checksum2);
525
526 /* Check if any IP header options. */
527
528 if (IpHeaderLength > SKCS_IP_HEADER_SIZE) {
529
530 /* Get the IP options checksum. */
531
532 IpOptionsChecksum = SkCsCalculateChecksum(
533 SKCS_IDX(pIpHeader, SKCS_IP_HEADER_SIZE),
534 IpHeaderLength - SKCS_IP_HEADER_SIZE);
535
536 /* Adjust the IP header and IP data checksums. */
537
538 SKCS_OC_ADD(IpHeaderChecksum, IpHeaderChecksum, IpOptionsChecksum);
539
540 SKCS_OC_SUB(IpDataChecksum, IpDataChecksum, IpOptionsChecksum);
541 }
542
543 /*
544 * Check if the IP header checksum is ok.
545 *
546 * NOTE: We must check the IP header checksum even if the caller just wants
547 * us to check upper-layer checksums, because we cannot do any further
548 * processing of the packet without a valid IP checksum.
549 */
550
551 /* Get the next level protocol identifier. */
552
553 NextLevelProtocol = *(SK_U8 *)
554 SKCS_IDX(pIpHeader, SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL);
555
556 if (IpHeaderChecksum != 0xffff) {
557 pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxErrCts++;
558 /* the NDIS tester wants to know the upper level protocol too */
559 if (NextLevelProtocol == SKCS_PROTO_ID_TCP) {
560 return(SKCS_STATUS_IP_CSUM_ERROR_TCP);
561 }
562 else if (NextLevelProtocol == SKCS_PROTO_ID_UDP) {
563 return(SKCS_STATUS_IP_CSUM_ERROR_UDP);
564 }
565 return (SKCS_STATUS_IP_CSUM_ERROR);
566 }
567
568 /*
569 * Check if this is a TCP or UDP frame and if we should calculate the
570 * TCP/UDP pseudo header checksum.
571 *
572 * Also clear all protocol bit flags of protocols not present in the
573 * frame.
574 */
575
576 if ((pAc->Csum.ReceiveFlags[NetNumber] & SKCS_PROTO_TCP) != 0 &&
577 NextLevelProtocol == SKCS_PROTO_ID_TCP) {
578 /* TCP/IP frame. */
579 NextLevelProtoStats =
580 &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_TCP];
581 }
582 else if ((pAc->Csum.ReceiveFlags[NetNumber] & SKCS_PROTO_UDP) != 0 &&
583 NextLevelProtocol == SKCS_PROTO_ID_UDP) {
584 /* UDP/IP frame. */
585 NextLevelProtoStats =
586 &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_UDP];
587 }
588 else {
589 /*
590 * Either not a TCP or UDP frame and/or TCP/UDP processing not
591 * specified.
592 */
593 return (SKCS_STATUS_IP_CSUM_OK);
594 }
595
596 /* Check if this is an IP fragment. */
597
598 /*
599 * Note: An IP fragment has a non-zero "Fragment Offset" field and/or
600 * the "More Fragments" bit set. Thus, if both the "Fragment Offset"
601 * and the "More Fragments" are zero, it is *not* a fragment. We can
602 * easily check both at the same time since they are in the same 16-bit
603 * word.
604 */
605
606 if ((*(SK_U16 *)
607 SKCS_IDX(pIpHeader, SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET) &
608 ~SKCS_IP_DONT_FRAGMENT) != 0) {
609 /* IP fragment; ignore all other protocols. */
610 NextLevelProtoStats->RxUnableCts++;
611 return (SKCS_STATUS_IP_FRAGMENT);
612 }
613
614 /*
615 * 08-May-2000 ra
616 *
617 * From RFC 768 (UDP)
618 * If the computed checksum is zero, it is transmitted as all ones (the
619 * equivalent in one's complement arithmetic). An all zero transmitted
620 * checksum value means that the transmitter generated no checksum (for
621 * debugging or for higher level protocols that don't care).
622 */
623
624 if (NextLevelProtocol == SKCS_PROTO_ID_UDP &&
625 *(SK_U16*)SKCS_IDX(pIpHeader, IpHeaderLength + 6) == 0x0000) {
626
627 NextLevelProtoStats->RxOkCts++;
628
629 return (SKCS_STATUS_IP_CSUM_OK_NO_UDP);
630 }
631
632 /*
633 * Calculate the TCP/UDP checksum.
634 */
635
636 /* Get total length of IP header and data. */
637
638 IpDataLength =
639 *(SK_U16 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_TOTAL_LENGTH);
640
641 /* Get length of IP data portion. */
642
643 IpDataLength = SKCS_NTOH16(IpDataLength) - IpHeaderLength;
644
645 NextLevelProtocolChecksum =
646
647 /* Calculate the pseudo header checksum. */
648
649 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
650 SKCS_OFS_IP_SOURCE_ADDRESS + 0) +
651 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
652 SKCS_OFS_IP_SOURCE_ADDRESS + 2) +
653 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
654 SKCS_OFS_IP_DESTINATION_ADDRESS + 0) +
655 (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
656 SKCS_OFS_IP_DESTINATION_ADDRESS + 2) +
657 (unsigned long) SKCS_HTON16(NextLevelProtocol) +
658 (unsigned long) SKCS_HTON16(IpDataLength) +
659
660 /* Add the TCP/UDP header checksum. */
661
662 (unsigned long) IpDataChecksum;
663
664 /* Add-in any carries. */
665
666 SKCS_OC_ADD(NextLevelProtocolChecksum, NextLevelProtocolChecksum, 0);
667
668 /* Add-in any new carry. */
669
670 SKCS_OC_ADD(NextLevelProtocolChecksum, NextLevelProtocolChecksum, 0);
671
672 /* Check if the TCP/UDP checksum is ok. */
673
674 if ((unsigned) NextLevelProtocolChecksum == 0xffff) {
675
676 /* TCP/UDP checksum ok. */
677
678 NextLevelProtoStats->RxOkCts++;
679
680 return (NextLevelProtocol == SKCS_PROTO_ID_TCP ?
681 SKCS_STATUS_TCP_CSUM_OK : SKCS_STATUS_UDP_CSUM_OK);
682 }
683
684 /* TCP/UDP checksum error. */
685
686 NextLevelProtoStats->RxErrCts++;
687
688 return (NextLevelProtocol == SKCS_PROTO_ID_TCP ?
689 SKCS_STATUS_TCP_CSUM_ERROR : SKCS_STATUS_UDP_CSUM_ERROR);
690} /* SkCsGetReceiveInfo */
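Both routines rely on the SKCS_OC_ADD() and SKCS_OC_SUB() macros declared in h/skcsum.h, which this diff does not show. As a rough sketch of what such one's-complement helpers typically look like (an illustration, not the SysKonnect definitions):

	/*
	 * Illustrative one's-complement 16-bit add/subtract with end-around
	 * carry; stand-ins for SKCS_OC_ADD()/SKCS_OC_SUB() from h/skcsum.h.
	 */
	#define OC_ADD(Result, Value1, Value2) do {				\
		unsigned long _Sum = (unsigned long)(Value1) +			\
				     (unsigned long)(Value2);			\
		/* fold any carry above bit 15 back into the low word */	\
		(Result) = (_Sum & 0xffff) + (_Sum >> 16);			\
		(Result) = ((Result) & 0xffff) + ((Result) >> 16);		\
	} while (0)

	/* a - b in one's complement is a + ~b */
	#define OC_SUB(Result, Value1, Value2) \
		OC_ADD(Result, Value1, (~(unsigned)(Value2)) & 0xffff)

Seen through these helpers, the receive path's "IpHeaderChecksum = Checksum1 - Checksum2" isolates the IP header: Checksum1 covers the IP header plus data, Checksum2 covers only the IP data, and subtracting the second from the first leaves the header portion.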
691
692
693/******************************************************************************
694 *
695 * SkCsSetReceiveFlags - set checksum receive flags
696 *
697 * Description:
698 * Use this function to set the various receive flags. According to the
699 * protocol flags set by the caller, the start offsets within received
700 * packets of the two hardware checksums are returned. These offsets must
701 * be stored in all receive descriptors.
702 *
703 * Arguments:
704 * pAc - Pointer to adapter context struct.
705 *
706 * ReceiveFlags - Any combination of SK_PROTO_XXX flags of the protocols
707 * for which the caller wants checksum information on received frames.
708 *
709 * pChecksum1Offset - The start offset of the first receive descriptor
710 * hardware checksum to be calculated for received frames is returned
711 * here.
712 *
713 * pChecksum2Offset - The start offset of the second receive descriptor
714 * hardware checksum to be calculated for received frames is returned
715 * here.
716 *
717 * Returns: N/A
718 * Returns the two hardware checksum start offsets.
719 */
720void SkCsSetReceiveFlags(
721SK_AC *pAc, /* Adapter context struct. */
722unsigned ReceiveFlags, /* New receive flags. */
723unsigned *pChecksum1Offset, /* Offset for hardware checksum 1. */
724unsigned *pChecksum2Offset, /* Offset for hardware checksum 2. */
725int NetNumber)
726{
727 /* Save the receive flags. */
728
729 pAc->Csum.ReceiveFlags[NetNumber] = ReceiveFlags;
730
731 /* First checksum start offset is the IP header. */
732 *pChecksum1Offset = SKCS_MAC_HEADER_SIZE;
733
734 /*
735 * Second checksum start offset is the IP data. Note that this may vary
736 * if there are any IP header options in the actual packet.
737 */
738 *pChecksum2Offset = SKCS_MAC_HEADER_SIZE + SKCS_IP_HEADER_SIZE;
739} /* SkCsSetReceiveFlags */
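The two offsets returned here were programmed into every receive descriptor by the caller. Condensed from the open-path and descriptor-setup code this patch removes from skge.c further down, where pAC is the adapter context and pDescr a receive descriptor:

	unsigned CsOfs1, CsOfs2;
	SK_U32   CsOfs;

	SkCsSetReceiveFlags(pAC,
		SKCS_PROTO_IP | SKCS_PROTO_TCP | SKCS_PROTO_UDP,
		&CsOfs1, &CsOfs2, 0);

	/* second offset in the high half, first offset in the low half */
	CsOfs = (CsOfs2 << 16) | CsOfs1;

	/* ... later, while building the receive ring ... */
	pDescr->TcpSumStarts = CsOfs;

The replacement code drops this and stores a fixed ETH_HLEN << 16 | ETH_HLEN instead, so both hardware sums now start right after the Ethernet header and the stack performs the final verification.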
740
741#ifndef SK_CS_CALCULATE_CHECKSUM
742
743/******************************************************************************
744 *
745 * SkCsCalculateChecksum - calculate checksum for specified data
746 *
747 * Description:
748 * Calculate and return the 16-bit Internet Checksum for the specified
749 * data.
750 *
751 * Arguments:
752 * pData - Pointer to data for which the checksum shall be calculated.
753 * Note: The pointer should be aligned on a 16-bit boundary.
754 *
755 * Length - Length in bytes of data to checksum.
756 *
757 * Returns:
758 * The 16-bit Internet Checksum for the specified data.
759 *
760 * Note: The checksum is calculated in the machine's natural byte order,
761 * i.e. little vs. big endian. Thus, the resulting checksum is different
762 * for the same input data on little and big endian machines.
763 *
764 * However, when written back to the network packet, the byte order is
765 * always in correct network order.
766 */
767unsigned SkCsCalculateChecksum(
768void *pData, /* Data to checksum. */
769unsigned Length) /* Length of data. */
770{
771 SK_U16 *pU16; /* Pointer to the data as 16-bit words. */
772 unsigned long Checksum; /* Checksum; must be at least 32 bits. */
773
774 /* Sum up all 16-bit words. */
775
776 pU16 = (SK_U16 *) pData;
777 for (Checksum = 0; Length > 1; Length -= 2) {
778 Checksum += *pU16++;
779 }
780
781 /* If this is an odd number of bytes, add-in the last byte. */
782
783 if (Length > 0) {
784#ifdef SK_BIG_ENDIAN
785 /* Add the last byte as the high byte. */
786 Checksum += ((unsigned) *(SK_U8 *) pU16) << 8;
787#else /* !SK_BIG_ENDIAN */
788 /* Add the last byte as the low byte. */
789 Checksum += *(SK_U8 *) pU16;
790#endif /* !SK_BIG_ENDIAN */
791 }
792
793 /* Add-in any carries. */
794
795 SKCS_OC_ADD(Checksum, Checksum, 0);
796
797 /* Add-in any new carry. */
798
799 SKCS_OC_ADD(Checksum, Checksum, 0);
800
801 /* Note: All bits beyond the 16-bit limit are now zero. */
802
803 return ((unsigned) Checksum);
804} /* SkCsCalculateChecksum */
805
806#endif /* SK_CS_CALCULATE_CHECKSUM */
807
808/******************************************************************************
809 *
810 * SkCsEvent - the CSUM event dispatcher
811 *
812 * Description:
813 * This is the event handler for the CSUM module.
814 *
815 * Arguments:
816 * pAc - Pointer to adapter context.
817 *
818 * Ioc - I/O context.
819 *
820 * Event - Event id.
821 *
822 * Param - Event dependent parameter.
823 *
824 * Returns:
 825 * Always 0 (success).
 826 *
 827 * Note: The only event handled here is SK_CSUM_EVENT_CLEAR_PROTO_STATS,
 828 * which clears the checksum statistics either for a single protocol or,
 829 * if the protocol index is negative, for all protocols of the given net.
 830 *
 831 * All other event ids are silently ignored and also return success.
 832 *
833 */
834int SkCsEvent(
835SK_AC *pAc, /* Pointer to adapter context. */
836SK_IOC Ioc, /* I/O context. */
837SK_U32 Event, /* Event id. */
838SK_EVPARA Param) /* Event dependent parameter. */
839{
840 int ProtoIndex;
841 int NetNumber;
842
843 switch (Event) {
844 /*
845 * Clear protocol statistics.
846 *
 847	 * Param - Para32[1]: protocol index, or -1 for all protocols.
 848	 *         Para32[0]: net number.
849 */
850 case SK_CSUM_EVENT_CLEAR_PROTO_STATS:
851
852 ProtoIndex = (int)Param.Para32[1];
853 NetNumber = (int)Param.Para32[0];
854 if (ProtoIndex < 0) { /* Clear for all protocols. */
855 if (NetNumber >= 0) {
856 SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][0], 0,
857 sizeof(pAc->Csum.ProtoStats[NetNumber]));
858 }
859 }
860 else { /* Clear for individual protocol. */
861 SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][ProtoIndex], 0,
862 sizeof(pAc->Csum.ProtoStats[NetNumber][ProtoIndex]));
863 }
864 break;
865 default:
866 break;
867 }
868 return (0); /* Success. */
869} /* SkCsEvent */
870
871#endif /* SK_USE_CSUM */
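With skcsum.c gone, checksum verification moves to the networking core and, where software math is still needed, to the kernel's generic helpers. As a rough outline of what the removed receive-side verification corresponds to in those helpers (exact type names vary between kernel versions, so treat this as a sketch, not the patch's replacement code):

	#include <linux/ip.h>
	#include <net/checksum.h>

	/*
	 * Sketch: software verification of a TCP/UDP checksum with the stock
	 * helpers, i.e. roughly what SkCsGetReceiveInfo() computed by hand.
	 * 'iph' points at the IPv4 header, 'th' at the transport header.
	 */
	static int l4_csum_ok(const struct iphdr *iph, const void *th)
	{
		unsigned int len = ntohs(iph->tot_len) - iph->ihl * 4;

		/* Sum transport header plus payload, add the pseudo header
		 * and fold; a packet with a correct checksum yields 0. */
		return csum_tcpudp_magic(iph->saddr, iph->daddr, len,
					 iph->protocol,
					 csum_partial(th, len, 0)) == 0;
	}

A real caller would still special-case UDP's all-zero "no checksum" value, which the RFC 768 note in the removed code describes.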
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c
index fb639959292b..4265ed91a9c4 100644
--- a/drivers/net/sk98lin/skethtool.c
+++ b/drivers/net/sk98lin/skethtool.c
@@ -539,6 +539,48 @@ static int setPauseParams(struct net_device *dev , struct ethtool_pauseparam *ep
539 return ret ? -EIO : 0; 539 return ret ? -EIO : 0;
540} 540}
541 541
542/* Only Yukon supports checksum offload. */
543static int setScatterGather(struct net_device *dev, u32 data)
544{
545 DEV_NET *pNet = netdev_priv(dev);
546 SK_AC *pAC = pNet->pAC;
547
548 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
549 return -EOPNOTSUPP;
550 return ethtool_op_set_sg(dev, data);
551}
552
553static int setTxCsum(struct net_device *dev, u32 data)
554{
555 DEV_NET *pNet = netdev_priv(dev);
556 SK_AC *pAC = pNet->pAC;
557
558 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
559 return -EOPNOTSUPP;
560
561 return ethtool_op_set_tx_csum(dev, data);
562}
563
564static u32 getRxCsum(struct net_device *dev)
565{
566 DEV_NET *pNet = netdev_priv(dev);
567 SK_AC *pAC = pNet->pAC;
568
569 return pAC->RxPort[pNet->PortNr].RxCsum;
570}
571
572static int setRxCsum(struct net_device *dev, u32 data)
573{
574 DEV_NET *pNet = netdev_priv(dev);
575 SK_AC *pAC = pNet->pAC;
576
577 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
578 return -EOPNOTSUPP;
579
580 pAC->RxPort[pNet->PortNr].RxCsum = data != 0;
581 return 0;
582}
583
542struct ethtool_ops SkGeEthtoolOps = { 584struct ethtool_ops SkGeEthtoolOps = {
543 .get_settings = getSettings, 585 .get_settings = getSettings,
544 .set_settings = setSettings, 586 .set_settings = setSettings,
@@ -549,4 +591,12 @@ struct ethtool_ops SkGeEthtoolOps = {
549 .phys_id = locateDevice, 591 .phys_id = locateDevice,
550 .get_pauseparam = getPauseParams, 592 .get_pauseparam = getPauseParams,
551 .set_pauseparam = setPauseParams, 593 .set_pauseparam = setPauseParams,
594 .get_link = ethtool_op_get_link,
595 .get_perm_addr = ethtool_op_get_perm_addr,
596 .get_sg = ethtool_op_get_sg,
597 .set_sg = setScatterGather,
598 .get_tx_csum = ethtool_op_get_tx_csum,
599 .set_tx_csum = setTxCsum,
600 .get_rx_csum = getRxCsum,
601 .set_rx_csum = setRxCsum,
552}; 602};
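All of the new hooks follow one pattern: refuse offload toggles on Genesis silicon and defer to the stock ethtool_op_* helpers on Yukon. The matching probe-side gating is not visible in this hunk, but the SK_DRV_NET_UP hunk later prints dev->features, which suggests something along these lines (a sketch with assumed placement, not the literal probe code):

	/* Sketch: enable hardware offloads only on Yukon-class chips. */
	if (pAC->GIni.GIChipId != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		pAC->RxPort[0].RxCsum = 1;	/* receive checksum on by default */
	}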
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index b18c92cb629e..9a76ac180b11 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -101,18 +101,18 @@
101 * "h/skgeinit.h" 101 * "h/skgeinit.h"
102 * "h/skaddr.h" 102 * "h/skaddr.h"
103 * "h/skgesirq.h" 103 * "h/skgesirq.h"
104 * "h/skcsum.h"
105 * "h/skrlmt.h" 104 * "h/skrlmt.h"
106 * 105 *
107 ******************************************************************************/ 106 ******************************************************************************/
108 107
109#include "h/skversion.h" 108#include "h/skversion.h"
110 109
110#include <linux/in.h>
111#include <linux/module.h> 111#include <linux/module.h>
112#include <linux/moduleparam.h> 112#include <linux/moduleparam.h>
113#include <linux/init.h> 113#include <linux/init.h>
114#include <linux/proc_fs.h>
115#include <linux/dma-mapping.h> 114#include <linux/dma-mapping.h>
115#include <linux/ip.h>
116 116
117#include "h/skdrv1st.h" 117#include "h/skdrv1st.h"
118#include "h/skdrv2nd.h" 118#include "h/skdrv2nd.h"
@@ -206,7 +206,6 @@ static void SkGeSetRxMode(struct SK_NET_DEVICE *dev);
206static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev); 206static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev);
207static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd); 207static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd);
208static void GetConfiguration(SK_AC*); 208static void GetConfiguration(SK_AC*);
209static void ProductStr(SK_AC*);
210static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*); 209static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*);
211static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*); 210static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*);
212static void FillRxRing(SK_AC*, RX_PORT*); 211static void FillRxRing(SK_AC*, RX_PORT*);
@@ -235,28 +234,6 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
235 * Extern Function Prototypes 234 * Extern Function Prototypes
236 * 235 *
237 ******************************************************************************/ 236 ******************************************************************************/
238static const char SKRootName[] = "net/sk98lin";
239static struct proc_dir_entry *pSkRootDir;
240extern struct file_operations sk_proc_fops;
241
242static inline void SkGeProcCreate(struct net_device *dev)
243{
244 struct proc_dir_entry *pe;
245
246 if (pSkRootDir &&
247 (pe = create_proc_entry(dev->name, S_IRUGO, pSkRootDir))) {
248 pe->proc_fops = &sk_proc_fops;
249 pe->data = dev;
250 pe->owner = THIS_MODULE;
251 }
252}
253
254static inline void SkGeProcRemove(struct net_device *dev)
255{
256 if (pSkRootDir)
257 remove_proc_entry(dev->name, pSkRootDir);
258}
259
260extern void SkDimEnableModerationIfNeeded(SK_AC *pAC); 237extern void SkDimEnableModerationIfNeeded(SK_AC *pAC);
261extern void SkDimDisplayModerationSettings(SK_AC *pAC); 238extern void SkDimDisplayModerationSettings(SK_AC *pAC);
262extern void SkDimStartModerationTimer(SK_AC *pAC); 239extern void SkDimStartModerationTimer(SK_AC *pAC);
@@ -279,6 +256,27 @@ static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480};
279 256
280/***************************************************************************** 257/*****************************************************************************
281 * 258 *
259 * SkPciWriteCfgDWord - write a 32 bit value to pci config space
260 *
261 * Description:
262 * This routine writes a 32 bit value to the pci configuration
263 * space.
264 *
265 * Returns:
266 * 0 - indicate everything worked ok.
267 * != 0 - error indication
268 */
269static inline int SkPciWriteCfgDWord(
270SK_AC *pAC, /* Adapter Control structure pointer */
271int PciAddr, /* PCI register address */
 272SK_U32 Val) /* value to write */
273{
274 pci_write_config_dword(pAC->PciDev, PciAddr, Val);
275 return(0);
276} /* SkPciWriteCfgDWord */
277
278/*****************************************************************************
279 *
282 * SkGeInitPCI - Init the PCI resources 280 * SkGeInitPCI - Init the PCI resources
283 * 281 *
284 * Description: 282 * Description:
@@ -300,7 +298,7 @@ int SkGeInitPCI(SK_AC *pAC)
300 dev->mem_start = pci_resource_start (pdev, 0); 298 dev->mem_start = pci_resource_start (pdev, 0);
301 pci_set_master(pdev); 299 pci_set_master(pdev);
302 300
303 if (pci_request_regions(pdev, pAC->Name) != 0) { 301 if (pci_request_regions(pdev, "sk98lin") != 0) {
304 retval = 2; 302 retval = 2;
305 goto out_disable; 303 goto out_disable;
306 } 304 }
@@ -578,10 +576,10 @@ SK_BOOL DualNet;
578 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); 576 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
579 577
580 if (pAC->GIni.GIMacsFound == 2) { 578 if (pAC->GIni.GIMacsFound == 2) {
581 Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev); 579 Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, "sk98lin", dev);
582 } else if (pAC->GIni.GIMacsFound == 1) { 580 } else if (pAC->GIni.GIMacsFound == 1) {
583 Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ, 581 Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ,
584 pAC->Name, dev); 582 "sk98lin", dev);
585 } else { 583 } else {
586 printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n", 584 printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
587 pAC->GIni.GIMacsFound); 585 pAC->GIni.GIMacsFound);
@@ -601,11 +599,6 @@ SK_BOOL DualNet;
601 return(-EAGAIN); 599 return(-EAGAIN);
602 } 600 }
603 601
604 SkCsSetReceiveFlags(pAC,
605 SKCS_PROTO_IP | SKCS_PROTO_TCP | SKCS_PROTO_UDP,
606 &pAC->CsOfs1, &pAC->CsOfs2, 0);
607 pAC->CsOfs = (pAC->CsOfs2 << 16) | pAC->CsOfs1;
608
609 BoardInitMem(pAC); 602 BoardInitMem(pAC);
610 /* tschilling: New common function with minimum size check. */ 603 /* tschilling: New common function with minimum size check. */
611 DualNet = SK_FALSE; 604 DualNet = SK_FALSE;
@@ -823,7 +816,7 @@ uintptr_t VNextDescr; /* the virtual bus address of the next descriptor */
823 /* set the pointers right */ 816 /* set the pointers right */
824 pDescr->VNextRxd = VNextDescr & 0xffffffffULL; 817 pDescr->VNextRxd = VNextDescr & 0xffffffffULL;
825 pDescr->pNextRxd = pNextDescr; 818 pDescr->pNextRxd = pNextDescr;
826 pDescr->TcpSumStarts = pAC->CsOfs; 819 if (!IsTx) pDescr->TcpSumStarts = ETH_HLEN << 16 | ETH_HLEN;
827 820
828 /* advance one step */ 821 /* advance one step */
829 pPrevDescr = pDescr; 822 pPrevDescr = pDescr;
@@ -1270,7 +1263,6 @@ struct SK_NET_DEVICE *dev)
1270 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); 1263 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
1271 1264
1272 pAC->MaxPorts++; 1265 pAC->MaxPorts++;
1273 pNet->Up = 1;
1274 1266
1275 1267
1276 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, 1268 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
@@ -1400,7 +1392,6 @@ struct SK_NET_DEVICE *dev)
1400 sizeof(SK_PNMI_STRUCT_DATA)); 1392 sizeof(SK_PNMI_STRUCT_DATA));
1401 1393
1402 pAC->MaxPorts--; 1394 pAC->MaxPorts--;
1403 pNet->Up = 0;
1404 1395
1405 return (0); 1396 return (0);
1406} /* SkGeClose */ 1397} /* SkGeClose */
@@ -1505,8 +1496,6 @@ struct sk_buff *pMessage) /* pointer to send-message */
1505 TXD *pOldTxd; 1496 TXD *pOldTxd;
1506 unsigned long Flags; 1497 unsigned long Flags;
1507 SK_U64 PhysAddr; 1498 SK_U64 PhysAddr;
1508 int Protocol;
1509 int IpHeaderLength;
1510 int BytesSend = pMessage->len; 1499 int BytesSend = pMessage->len;
1511 1500
1512 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X")); 1501 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));
@@ -1579,8 +1568,10 @@ struct sk_buff *pMessage) /* pointer to send-message */
1579 pTxd->pMBuf = pMessage; 1568 pTxd->pMBuf = pMessage;
1580 1569
1581 if (pMessage->ip_summed == CHECKSUM_HW) { 1570 if (pMessage->ip_summed == CHECKSUM_HW) {
1582 Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff); 1571 u16 hdrlen = pMessage->h.raw - pMessage->data;
1583 if ((Protocol == C_PROTO_ID_UDP) && 1572 u16 offset = hdrlen + pMessage->csum;
1573
1574 if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) &&
1584 (pAC->GIni.GIChipRev == 0) && 1575 (pAC->GIni.GIChipRev == 0) &&
1585 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { 1576 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1586 pTxd->TBControl = BMU_TCP_CHECK; 1577 pTxd->TBControl = BMU_TCP_CHECK;
@@ -1588,14 +1579,9 @@ struct sk_buff *pMessage) /* pointer to send-message */
1588 pTxd->TBControl = BMU_UDP_CHECK; 1579 pTxd->TBControl = BMU_UDP_CHECK;
1589 } 1580 }
1590 1581
1591 IpHeaderLength = (SK_U8)pMessage->data[C_OFFSET_IPHEADER]; 1582 pTxd->TcpSumOfs = 0;
1592 IpHeaderLength = (IpHeaderLength & 0xf) * 4; 1583 pTxd->TcpSumSt = hdrlen;
1593 pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */ 1584 pTxd->TcpSumWr = offset;
1594 pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength +
1595 (Protocol == C_PROTO_ID_UDP ?
1596 C_OFFSET_UDPHEADER_UDPCS :
1597 C_OFFSET_TCPHEADER_TCPCS);
1598 pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength;
1599 1585
1600 pTxd->TBControl |= BMU_OWN | BMU_STF | 1586 pTxd->TBControl |= BMU_OWN | BMU_STF |
1601 BMU_SW | BMU_EOF | 1587 BMU_SW | BMU_EOF |
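The rewritten checksum setup reads both offsets straight from the socket buffer: for a CHECKSUM_HW skb of this kernel generation, h.raw points at the transport header and skb->csum holds the offset of the checksum field inside that header. A worked example under those assumptions (pMessage in the driver, skb here):

	/*
	 * TCP over IPv4 with a 14-byte Ethernet and a 20-byte IP header:
	 *
	 *   hdrlen = skb->h.raw - skb->data = 14 + 20 = 34   -> TcpSumSt
	 *   offset = hdrlen + skb->csum     = 34 + 16 = 50   -> TcpSumWr
	 *
	 * since the TCP checksum sits 16 bytes into the TCP header; for UDP
	 * the in-header offset is 6, giving 34 + 6 = 40.
	 */
	u16 hdrlen = skb->h.raw - skb->data;	/* start of the L4 header      */
	u16 offset = hdrlen + skb->csum;	/* where the HW stores the sum */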
@@ -1658,11 +1644,10 @@ struct sk_buff *pMessage) /* pointer to send-message */
1658 TXD *pTxdLst; 1644 TXD *pTxdLst;
1659 int CurrFrag; 1645 int CurrFrag;
1660 int BytesSend; 1646 int BytesSend;
1661 int IpHeaderLength;
1662 int Protocol;
1663 skb_frag_t *sk_frag; 1647 skb_frag_t *sk_frag;
1664 SK_U64 PhysAddr; 1648 SK_U64 PhysAddr;
1665 unsigned long Flags; 1649 unsigned long Flags;
1650 SK_U32 Control;
1666 1651
1667 spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); 1652 spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
1668#ifndef USE_TX_COMPLETE 1653#ifndef USE_TX_COMPLETE
@@ -1685,7 +1670,6 @@ struct sk_buff *pMessage) /* pointer to send-message */
1685 pTxdFst = pTxd; 1670 pTxdFst = pTxd;
1686 pTxdLst = pTxd; 1671 pTxdLst = pTxd;
1687 BytesSend = 0; 1672 BytesSend = 0;
1688 Protocol = 0;
1689 1673
1690 /* 1674 /*
1691 ** Map the first fragment (header) into the DMA-space 1675 ** Map the first fragment (header) into the DMA-space
@@ -1703,32 +1687,31 @@ struct sk_buff *pMessage) /* pointer to send-message */
1703 ** Does the HW need to evaluate checksum for TCP or UDP packets? 1687 ** Does the HW need to evaluate checksum for TCP or UDP packets?
1704 */ 1688 */
1705 if (pMessage->ip_summed == CHECKSUM_HW) { 1689 if (pMessage->ip_summed == CHECKSUM_HW) {
1706 pTxd->TBControl = BMU_STF | BMU_STFWD | skb_headlen(pMessage); 1690 u16 hdrlen = pMessage->h.raw - pMessage->data;
1691 u16 offset = hdrlen + pMessage->csum;
1692
1693 Control = BMU_STFWD;
1694
1707 /* 1695 /*
1708 ** We have to use the opcode for tcp here, because the 1696 ** We have to use the opcode for tcp here, because the
1709 ** opcode for udp is not working in the hardware yet 1697 ** opcode for udp is not working in the hardware yet
1710 ** (Revision 2.0) 1698 ** (Revision 2.0)
1711 */ 1699 */
1712 Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff); 1700 if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) &&
1713 if ((Protocol == C_PROTO_ID_UDP) &&
1714 (pAC->GIni.GIChipRev == 0) && 1701 (pAC->GIni.GIChipRev == 0) &&
1715 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { 1702 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1716 pTxd->TBControl |= BMU_TCP_CHECK; 1703 Control |= BMU_TCP_CHECK;
1717 } else { 1704 } else {
1718 pTxd->TBControl |= BMU_UDP_CHECK; 1705 Control |= BMU_UDP_CHECK;
1719 } 1706 }
1720 1707
1721 IpHeaderLength = ((SK_U8)pMessage->data[C_OFFSET_IPHEADER] & 0xf)*4; 1708 pTxd->TcpSumOfs = 0;
1722 pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */ 1709 pTxd->TcpSumSt = hdrlen;
1723 pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength + 1710 pTxd->TcpSumWr = offset;
1724 (Protocol == C_PROTO_ID_UDP ? 1711 } else
1725 C_OFFSET_UDPHEADER_UDPCS : 1712 Control = BMU_CHECK | BMU_SW;
1726 C_OFFSET_TCPHEADER_TCPCS); 1713
1727 pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength; 1714 pTxd->TBControl = BMU_STF | Control | skb_headlen(pMessage);
1728 } else {
1729 pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_STF |
1730 skb_headlen(pMessage);
1731 }
1732 1715
1733 pTxd = pTxd->pNextTxd; 1716 pTxd = pTxd->pNextTxd;
1734 pTxPort->TxdRingFree--; 1717 pTxPort->TxdRingFree--;
@@ -1752,40 +1735,18 @@ struct sk_buff *pMessage) /* pointer to send-message */
1752 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); 1735 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1753 pTxd->pMBuf = pMessage; 1736 pTxd->pMBuf = pMessage;
1754 1737
1755 /* 1738 pTxd->TBControl = Control | BMU_OWN | sk_frag->size;
1756 ** Does the HW need to evaluate checksum for TCP or UDP packets?
1757 */
1758 if (pMessage->ip_summed == CHECKSUM_HW) {
1759 pTxd->TBControl = BMU_OWN | BMU_SW | BMU_STFWD;
1760 /*
1761 ** We have to use the opcode for tcp here because the
1762 ** opcode for udp is not working in the hardware yet
1763 ** (revision 2.0)
1764 */
1765 if ((Protocol == C_PROTO_ID_UDP) &&
1766 (pAC->GIni.GIChipRev == 0) &&
1767 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1768 pTxd->TBControl |= BMU_TCP_CHECK;
1769 } else {
1770 pTxd->TBControl |= BMU_UDP_CHECK;
1771 }
1772 } else {
1773 pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_OWN;
1774 }
1775 1739
1776 /* 1740 /*
1777 ** Do we have the last fragment? 1741 ** Do we have the last fragment?
1778 */ 1742 */
1779 if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) { 1743 if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) {
1780#ifdef USE_TX_COMPLETE 1744#ifdef USE_TX_COMPLETE
1781 pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF | sk_frag->size; 1745 pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF;
1782#else 1746#else
1783 pTxd->TBControl |= BMU_EOF | sk_frag->size; 1747 pTxd->TBControl |= BMU_EOF;
1784#endif 1748#endif
1785 pTxdFst->TBControl |= BMU_OWN | BMU_SW; 1749 pTxdFst->TBControl |= BMU_OWN | BMU_SW;
1786
1787 } else {
1788 pTxd->TBControl |= sk_frag->size;
1789 } 1750 }
1790 pTxdLst = pTxd; 1751 pTxdLst = pTxd;
1791 pTxd = pTxd->pNextTxd; 1752 pTxd = pTxd->pNextTxd;
@@ -2032,7 +1993,6 @@ SK_U32 Control; /* control field of descriptor */
2032struct sk_buff *pMsg; /* pointer to message holding frame */ 1993struct sk_buff *pMsg; /* pointer to message holding frame */
2033struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */ 1994struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */
2034int FrameLength; /* total length of received frame */ 1995int FrameLength; /* total length of received frame */
2035int IpFrameLength;
2036SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */ 1996SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */
2037SK_EVPARA EvPara; /* an event parameter union */ 1997SK_EVPARA EvPara; /* an event parameter union */
2038unsigned long Flags; /* for spin lock */ 1998unsigned long Flags; /* for spin lock */
@@ -2045,10 +2005,6 @@ SK_BOOL IsMc;
2045SK_BOOL IsBadFrame; /* Bad frame */ 2005SK_BOOL IsBadFrame; /* Bad frame */
2046 2006
2047SK_U32 FrameStat; 2007SK_U32 FrameStat;
2048unsigned short Csum1;
2049unsigned short Csum2;
2050unsigned short Type;
2051int Result;
2052SK_U64 PhysAddr; 2008SK_U64 PhysAddr;
2053 2009
2054rx_start: 2010rx_start:
@@ -2177,8 +2133,8 @@ rx_start:
2177 (dma_addr_t) PhysAddr, 2133 (dma_addr_t) PhysAddr,
2178 FrameLength, 2134 FrameLength,
2179 PCI_DMA_FROMDEVICE); 2135 PCI_DMA_FROMDEVICE);
2180 eth_copy_and_sum(pNewMsg, pMsg->data, 2136 memcpy(pNewMsg->data, pMsg->data, FrameLength);
2181 FrameLength, 0); 2137
2182 pci_dma_sync_single_for_device(pAC->PciDev, 2138 pci_dma_sync_single_for_device(pAC->PciDev,
2183 (dma_addr_t) PhysAddr, 2139 (dma_addr_t) PhysAddr,
2184 FrameLength, 2140 FrameLength,
@@ -2206,69 +2162,15 @@ rx_start:
2206 2162
2207 /* set length in message */ 2163 /* set length in message */
2208 skb_put(pMsg, FrameLength); 2164 skb_put(pMsg, FrameLength);
2209 /* hardware checksum */ 2165 } /* frame > SK_COPY_TRESHOLD */
2210 Type = ntohs(*((short*)&pMsg->data[12]));
2211 2166
2212#ifdef USE_SK_RX_CHECKSUM 2167#ifdef USE_SK_RX_CHECKSUM
2213 if (Type == 0x800) { 2168 pMsg->csum = pRxd->TcpSums & 0xffff;
2214 Csum1=le16_to_cpu(pRxd->TcpSums & 0xffff); 2169 pMsg->ip_summed = CHECKSUM_HW;
2215 Csum2=le16_to_cpu((pRxd->TcpSums >> 16) & 0xffff);
2216 IpFrameLength = (int) ntohs((unsigned short)
2217 ((unsigned short *) pMsg->data)[8]);
2218
2219 /*
2220 * Test: If frame is padded, a check is not possible!
2221 * Frame not padded? Length difference must be 14 (0xe)!
2222 */
2223 if ((FrameLength - IpFrameLength) != 0xe) {
2224 /* Frame padded => TCP offload not possible! */
2225 pMsg->ip_summed = CHECKSUM_NONE;
2226 } else {
2227 /* Frame not padded => TCP offload! */
2228 if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) &&
2229 (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) ||
2230 (pAC->ChipsetType)) {
2231 Result = SkCsGetReceiveInfo(pAC,
2232 &pMsg->data[14],
2233 Csum1, Csum2, pRxPort->PortIndex);
2234 if (Result ==
2235 SKCS_STATUS_IP_FRAGMENT ||
2236 Result ==
2237 SKCS_STATUS_IP_CSUM_OK ||
2238 Result ==
2239 SKCS_STATUS_TCP_CSUM_OK ||
2240 Result ==
2241 SKCS_STATUS_UDP_CSUM_OK) {
2242 pMsg->ip_summed =
2243 CHECKSUM_UNNECESSARY;
2244 }
2245 else if (Result ==
2246 SKCS_STATUS_TCP_CSUM_ERROR ||
2247 Result ==
2248 SKCS_STATUS_UDP_CSUM_ERROR ||
2249 Result ==
2250 SKCS_STATUS_IP_CSUM_ERROR_UDP ||
2251 Result ==
2252 SKCS_STATUS_IP_CSUM_ERROR_TCP ||
2253 Result ==
2254 SKCS_STATUS_IP_CSUM_ERROR ) {
2255 /* HW Checksum error */
2256 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2257 SK_DBGCAT_DRV_RX_PROGRESS,
2258 ("skge: CRC error. Frame dropped!\n"));
2259 goto rx_failed;
2260 } else {
2261 pMsg->ip_summed =
2262 CHECKSUM_NONE;
2263 }
2264 }/* checksumControl calculation valid */
2265 } /* Frame length check */
2266 } /* IP frame */
2267#else 2170#else
2268 pMsg->ip_summed = CHECKSUM_NONE; 2171 pMsg->ip_summed = CHECKSUM_NONE;
2269#endif 2172#endif
2270 } /* frame > SK_COPY_TRESHOLD */ 2173
2271
2272 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V")); 2174 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V"));
2273 ForRlmt = SK_RLMT_RX_PROTOCOL; 2175 ForRlmt = SK_RLMT_RX_PROTOCOL;
2274#if 0 2176#if 0
@@ -2643,7 +2545,7 @@ unsigned long Flags;
2643static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu) 2545static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu)
2644{ 2546{
2645DEV_NET *pNet; 2547DEV_NET *pNet;
2646DEV_NET *pOtherNet; 2548struct net_device *pOtherDev;
2647SK_AC *pAC; 2549SK_AC *pAC;
2648unsigned long Flags; 2550unsigned long Flags;
2649int i; 2551int i;
@@ -2673,11 +2575,11 @@ SK_EVPARA EvPara;
2673 } 2575 }
2674#endif 2576#endif
2675 2577
2676 pNet->Mtu = NewMtu; 2578 pOtherDev = pAC->dev[1 - pNet->NetNr];
2677 pOtherNet = netdev_priv(pAC->dev[1 - pNet->NetNr]); 2579
2678 if ((pOtherNet->Mtu>1500) && (NewMtu<=1500) && (pOtherNet->Up==1)) { 2580 if ( netif_running(pOtherDev) && (pOtherDev->mtu > 1500)
2679 return(0); 2581 && (NewMtu <= 1500))
2680 } 2582 return 0;
2681 2583
2682 pAC->RxBufSize = NewMtu + 32; 2584 pAC->RxBufSize = NewMtu + 32;
2683 dev->mtu = NewMtu; 2585 dev->mtu = NewMtu;
@@ -2839,7 +2741,8 @@ SK_EVPARA EvPara;
2839 EvPara.Para32[1] = -1; 2741 EvPara.Para32[1] = -1;
2840 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); 2742 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2841 2743
2842 if (pOtherNet->Up) { 2744 if (netif_running(pOtherDev)) {
2745 DEV_NET *pOtherNet = netdev_priv(pOtherDev);
2843 EvPara.Para32[0] = pOtherNet->PortNr; 2746 EvPara.Para32[0] = pOtherNet->PortNr;
2844 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); 2747 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2845 } 2748 }
@@ -2913,7 +2816,7 @@ unsigned long Flags; /* for spin lock */
2913 pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts; 2816 pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts;
2914 pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts; 2817 pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts;
2915 2818
2916 if (pNet->Mtu <= 1500) { 2819 if (dev->mtu <= 1500) {
2917 pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF; 2820 pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF;
2918 } else { 2821 } else {
2919 pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts - 2822 pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts -
@@ -3864,25 +3767,21 @@ int Capabilities[3][3] =
3864 * 3767 *
3865 * Returns: N/A 3768 * Returns: N/A
3866 */ 3769 */
3867static void ProductStr( 3770static inline int ProductStr(
3868SK_AC *pAC /* pointer to adapter context */ 3771 SK_AC *pAC, /* pointer to adapter context */
3772 char *DeviceStr, /* result string */
3773 int StrLen /* length of the string */
3869) 3774)
3870{ 3775{
3871int StrLen = 80; /* length of the string, defined in SK_AC */
3872char Keyword[] = VPD_NAME; /* vpd productname identifier */ 3776char Keyword[] = VPD_NAME; /* vpd productname identifier */
3873int ReturnCode; /* return code from vpd_read */ 3777int ReturnCode; /* return code from vpd_read */
3874unsigned long Flags; 3778unsigned long Flags;
3875 3779
3876 spin_lock_irqsave(&pAC->SlowPathLock, Flags); 3780 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
3877 ReturnCode = VpdRead(pAC, pAC->IoBase, Keyword, pAC->DeviceStr, 3781 ReturnCode = VpdRead(pAC, pAC->IoBase, Keyword, DeviceStr, &StrLen);
3878 &StrLen);
3879 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); 3782 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
3880 if (ReturnCode != 0) { 3783
3881 /* there was an error reading the vpd data */ 3784 return ReturnCode;
3882 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ERROR,
3883 ("Error reading VPD data: %d\n", ReturnCode));
3884 pAC->DeviceStr[0] = '\0';
3885 }
3886} /* ProductStr */ 3785} /* ProductStr */
3887 3786
3888/***************************************************************************** 3787/*****************************************************************************
@@ -4085,28 +3984,6 @@ SK_U8 *pVal) /* pointer to store the read value */
4085 3984
4086/***************************************************************************** 3985/*****************************************************************************
4087 * 3986 *
4088 * SkPciWriteCfgDWord - write a 32 bit value to pci config space
4089 *
4090 * Description:
4091 * This routine writes a 32 bit value to the pci configuration
4092 * space.
4093 *
4094 * Returns:
4095 * 0 - indicate everything worked ok.
4096 * != 0 - error indication
4097 */
4098int SkPciWriteCfgDWord(
4099SK_AC *pAC, /* Adapter Control structure pointer */
4100int PciAddr, /* PCI register address */
4101SK_U32 Val) /* pointer to store the read value */
4102{
4103 pci_write_config_dword(pAC->PciDev, PciAddr, Val);
4104 return(0);
4105} /* SkPciWriteCfgDWord */
4106
4107
4108/*****************************************************************************
4109 *
4110 * SkPciWriteCfgWord - write a 16 bit value to pci config space 3987 * SkPciWriteCfgWord - write a 16 bit value to pci config space
4111 * 3988 *
4112 * Description: 3989 * Description:
@@ -4243,6 +4120,7 @@ SK_BOOL DualNet;
4243 Flags); 4120 Flags);
4244 break; 4121 break;
4245 case SK_DRV_NET_UP: /* SK_U32 PortIdx */ 4122 case SK_DRV_NET_UP: /* SK_U32 PortIdx */
4123 { struct net_device *dev = pAC->dev[Param.Para32[0]];
4246 /* action list 5 */ 4124 /* action list 5 */
4247 FromPort = Param.Para32[0]; 4125 FromPort = Param.Para32[0];
4248 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, 4126 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
@@ -4326,22 +4204,12 @@ SK_BOOL DualNet;
4326 printk(" irq moderation: disabled\n"); 4204 printk(" irq moderation: disabled\n");
4327 4205
4328 4206
4329#ifdef SK_ZEROCOPY 4207 printk(" scatter-gather: %s\n",
4330 if (pAC->ChipsetType) 4208 (dev->features & NETIF_F_SG) ? "enabled" : "disabled");
4331#ifdef USE_SK_TX_CHECKSUM 4209 printk(" tx-checksum: %s\n",
4332 printk(" scatter-gather: enabled\n"); 4210 (dev->features & NETIF_F_IP_CSUM) ? "enabled" : "disabled");
4333#else 4211 printk(" rx-checksum: %s\n",
4334 printk(" tx-checksum: disabled\n"); 4212 pAC->RxPort[Param.Para32[0]].RxCsum ? "enabled" : "disabled");
4335#endif
4336 else
4337 printk(" scatter-gather: disabled\n");
4338#else
4339 printk(" scatter-gather: disabled\n");
4340#endif
4341
4342#ifndef USE_SK_RX_CHECKSUM
4343 printk(" rx-checksum: disabled\n");
4344#endif
4345 4213
4346 } else { 4214 } else {
4347 DoPrintInterfaceChange = SK_TRUE; 4215 DoPrintInterfaceChange = SK_TRUE;
@@ -4356,9 +4224,9 @@ SK_BOOL DualNet;
4356 } 4224 }
4357 4225
4358 /* Inform the world that link protocol is up. */ 4226 /* Inform the world that link protocol is up. */
4359 netif_carrier_on(pAC->dev[Param.Para32[0]]); 4227 netif_carrier_on(dev);
4360
4361 break; 4228 break;
4229 }
4362 case SK_DRV_NET_DOWN: /* SK_U32 Reason */ 4230 case SK_DRV_NET_DOWN: /* SK_U32 Reason */
4363 /* action list 7 */ 4231 /* action list 7 */
4364 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, 4232 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
@@ -4572,7 +4440,7 @@ SK_AC *pAc) /* pointer to adapter context */
4572 4440
4573 pAC->DiagModeActive = DIAG_ACTIVE; 4441 pAC->DiagModeActive = DIAG_ACTIVE;
4574 if (pAC->BoardLevel > SK_INIT_DATA) { 4442 if (pAC->BoardLevel > SK_INIT_DATA) {
4575 if (pNet->Up) { 4443 if (netif_running(pAC->dev[0])) {
4576 pAC->WasIfUp[0] = SK_TRUE; 4444 pAC->WasIfUp[0] = SK_TRUE;
4577 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ 4445 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4578 DoPrintInterfaceChange = SK_FALSE; 4446 DoPrintInterfaceChange = SK_FALSE;
@@ -4582,7 +4450,7 @@ SK_AC *pAc) /* pointer to adapter context */
4582 } 4450 }
4583 if (pNet != netdev_priv(pAC->dev[1])) { 4451 if (pNet != netdev_priv(pAC->dev[1])) {
4584 pNet = netdev_priv(pAC->dev[1]); 4452 pNet = netdev_priv(pAC->dev[1]);
4585 if (pNet->Up) { 4453 if (netif_running(pAC->dev[1])) {
4586 pAC->WasIfUp[1] = SK_TRUE; 4454 pAC->WasIfUp[1] = SK_TRUE;
4587 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ 4455 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4588 DoPrintInterfaceChange = SK_FALSE; 4456 DoPrintInterfaceChange = SK_FALSE;
@@ -4908,6 +4776,7 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4908 struct net_device *dev = NULL; 4776 struct net_device *dev = NULL;
4909 static int boards_found = 0; 4777 static int boards_found = 0;
4910 int error = -ENODEV; 4778 int error = -ENODEV;
4779 char DeviceStr[80];
4911 4780
4912 if (pci_enable_device(pdev)) 4781 if (pci_enable_device(pdev))
4913 goto out; 4782 goto out;
@@ -4935,18 +4804,15 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4935 memset(pNet->pAC, 0, sizeof(SK_AC)); 4804 memset(pNet->pAC, 0, sizeof(SK_AC));
4936 pAC = pNet->pAC; 4805 pAC = pNet->pAC;
4937 pAC->PciDev = pdev; 4806 pAC->PciDev = pdev;
4938 pAC->PciDevId = pdev->device; 4807
4939 pAC->dev[0] = dev; 4808 pAC->dev[0] = dev;
4940 pAC->dev[1] = dev; 4809 pAC->dev[1] = dev;
4941 sprintf(pAC->Name, "SysKonnect SK-98xx");
4942 pAC->CheckQueue = SK_FALSE; 4810 pAC->CheckQueue = SK_FALSE;
4943 4811
4944 pNet->Mtu = 1500;
4945 pNet->Up = 0;
4946 dev->irq = pdev->irq; 4812 dev->irq = pdev->irq;
4947 error = SkGeInitPCI(pAC); 4813 error = SkGeInitPCI(pAC);
4948 if (error) { 4814 if (error) {
4949 printk("SKGE: PCI setup failed: %i\n", error); 4815 printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error);
4950 goto out_free_netdev; 4816 goto out_free_netdev;
4951 } 4817 }
4952 4818
@@ -4965,30 +4831,38 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4965 SET_NETDEV_DEV(dev, &pdev->dev); 4831 SET_NETDEV_DEV(dev, &pdev->dev);
4966 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); 4832 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
4967 4833
4968#ifdef SK_ZEROCOPY 4834 /* Use only if yukon hardware */
4969#ifdef USE_SK_TX_CHECKSUM
4970 if (pAC->ChipsetType) { 4835 if (pAC->ChipsetType) {
4971 /* Use only if yukon hardware */ 4836#ifdef USE_SK_TX_CHECKSUM
4972 /* SK and ZEROCOPY - fly baby... */ 4837 dev->features |= NETIF_F_IP_CSUM;
4973 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 4838#endif
4974 } 4839#ifdef SK_ZEROCOPY
4840 dev->features |= NETIF_F_SG;
4975#endif 4841#endif
4842#ifdef USE_SK_RX_CHECKSUM
4843 pAC->RxPort[0].RxCsum = 1;
4976#endif 4844#endif
4845 }
4977 4846
4978 pAC->Index = boards_found++; 4847 pAC->Index = boards_found++;
4979 4848
4980 if (SkGeBoardInit(dev, pAC)) 4849 if (SkGeBoardInit(dev, pAC))
4981 goto out_free_netdev; 4850 goto out_free_netdev;
4982 4851
4852 /* Read Adapter name from VPD */
4853 if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) {
4854 printk(KERN_ERR "sk98lin: Could not read VPD data.\n");
4855 goto out_free_resources;
4856 }
4857
4983 /* Register net device */ 4858 /* Register net device */
4984 if (register_netdev(dev)) { 4859 if (register_netdev(dev)) {
4985 printk(KERN_ERR "SKGE: Could not register device.\n"); 4860 printk(KERN_ERR "sk98lin: Could not register device.\n");
4986 goto out_free_resources; 4861 goto out_free_resources;
4987 } 4862 }
4988 4863
4989 /* Print adapter specific string from vpd */ 4864 /* Print adapter specific string from vpd */
4990 ProductStr(pAC); 4865 printk("%s: %s\n", dev->name, DeviceStr);
4991 printk("%s: %s\n", dev->name, pAC->DeviceStr);
4992 4866
4993 /* Print configuration settings */ 4867 /* Print configuration settings */
4994 printk(" PrefPort:%c RlmtMode:%s\n", 4868 printk(" PrefPort:%c RlmtMode:%s\n",
@@ -5001,10 +4875,8 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
5001 4875
5002 SkGeYellowLED(pAC, pAC->IoBase, 1); 4876 SkGeYellowLED(pAC, pAC->IoBase, 1);
5003 4877
5004
5005 memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6); 4878 memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6);
5006 4879 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5007 SkGeProcCreate(dev);
5008 4880
5009 pNet->PortNr = 0; 4881 pNet->PortNr = 0;
5010 pNet->NetNr = 0; 4882 pNet->NetNr = 0;
@@ -5024,8 +4896,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
5024 pNet->PortNr = 1; 4896 pNet->PortNr = 1;
5025 pNet->NetNr = 1; 4897 pNet->NetNr = 1;
5026 pNet->pAC = pAC; 4898 pNet->pAC = pAC;
5027 pNet->Mtu = 1500;
5028 pNet->Up = 0;
5029 4899
5030 dev->open = &SkGeOpen; 4900 dev->open = &SkGeOpen;
5031 dev->stop = &SkGeClose; 4901 dev->stop = &SkGeClose;
@@ -5038,25 +4908,28 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
5038 SET_NETDEV_DEV(dev, &pdev->dev); 4908 SET_NETDEV_DEV(dev, &pdev->dev);
5039 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); 4909 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
5040 4910
5041#ifdef SK_ZEROCOPY
5042#ifdef USE_SK_TX_CHECKSUM
5043 if (pAC->ChipsetType) { 4911 if (pAC->ChipsetType) {
5044 /* SG and ZEROCOPY - fly baby... */ 4912#ifdef USE_SK_TX_CHECKSUM
5045 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 4913 dev->features |= NETIF_F_IP_CSUM;
5046 } 4914#endif
4915#ifdef SK_ZEROCOPY
4916 dev->features |= NETIF_F_SG;
5047#endif 4917#endif
4918#ifdef USE_SK_RX_CHECKSUM
4919 pAC->RxPort[1].RxCsum = 1;
5048#endif 4920#endif
4921 }
5049 4922
5050 if (register_netdev(dev)) { 4923 if (register_netdev(dev)) {
5051 printk(KERN_ERR "SKGE: Could not register device.\n"); 4924 printk(KERN_ERR "sk98lin: Could not register device for seconf port.\n");
5052 free_netdev(dev); 4925 free_netdev(dev);
5053 pAC->dev[1] = pAC->dev[0]; 4926 pAC->dev[1] = pAC->dev[0];
5054 } else { 4927 } else {
5055 SkGeProcCreate(dev);
5056 memcpy(&dev->dev_addr, 4928 memcpy(&dev->dev_addr,
5057 &pAC->Addr.Net[1].CurrentMacAddress, 6); 4929 &pAC->Addr.Net[1].CurrentMacAddress, 6);
4930 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5058 4931
5059 printk("%s: %s\n", dev->name, pAC->DeviceStr); 4932 printk("%s: %s\n", dev->name, DeviceStr);
5060 printk(" PrefPort:B RlmtMode:Dual Check Link State\n"); 4933 printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
5061 } 4934 }
5062 } 4935 }
@@ -5092,10 +4965,7 @@ static void __devexit skge_remove_one(struct pci_dev *pdev)
5092 SK_AC *pAC = pNet->pAC; 4965 SK_AC *pAC = pNet->pAC;
5093 struct net_device *otherdev = pAC->dev[1]; 4966 struct net_device *otherdev = pAC->dev[1];
5094 4967
5095 SkGeProcRemove(dev);
5096 unregister_netdev(dev); 4968 unregister_netdev(dev);
5097 if (otherdev != dev)
5098 SkGeProcRemove(otherdev);
5099 4969
5100 SkGeYellowLED(pAC, pAC->IoBase, 0); 4970 SkGeYellowLED(pAC, pAC->IoBase, 0);
5101 4971
@@ -5180,9 +5050,9 @@ static int skge_resume(struct pci_dev *pdev)
5180 pci_enable_device(pdev); 5050 pci_enable_device(pdev);
5181 pci_set_master(pdev); 5051 pci_set_master(pdev);
5182 if (pAC->GIni.GIMacsFound == 2) 5052 if (pAC->GIni.GIMacsFound == 2)
5183 ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev); 5053 ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, "sk98lin", dev);
5184 else 5054 else
5185 ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ, pAC->Name, dev); 5055 ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ, "sk98lin", dev);
5186 if (ret) { 5056 if (ret) {
5187 printk(KERN_WARNING "sk98lin: unable to acquire IRQ %d\n", dev->irq); 5057 printk(KERN_WARNING "sk98lin: unable to acquire IRQ %d\n", dev->irq);
5188 pAC->AllocFlag &= ~SK_ALLOC_IRQ; 5058 pAC->AllocFlag &= ~SK_ALLOC_IRQ;
@@ -5240,23 +5110,12 @@ static struct pci_driver skge_driver = {
5240 5110
5241static int __init skge_init(void) 5111static int __init skge_init(void)
5242{ 5112{
5243 int error; 5113 return pci_module_init(&skge_driver);
5244
5245 pSkRootDir = proc_mkdir(SKRootName, NULL);
5246 if (pSkRootDir)
5247 pSkRootDir->owner = THIS_MODULE;
5248
5249 error = pci_register_driver(&skge_driver);
5250 if (error)
5251 remove_proc_entry(SKRootName, NULL);
5252 return error;
5253} 5114}
5254 5115
5255static void __exit skge_exit(void) 5116static void __exit skge_exit(void)
5256{ 5117{
5257 pci_unregister_driver(&skge_driver); 5118 pci_unregister_driver(&skge_driver);
5258 remove_proc_entry(SKRootName, NULL);
5259
5260} 5119}
5261 5120
5262module_init(skge_init); 5121module_init(skge_init);
diff --git a/drivers/net/sk98lin/skproc.c b/drivers/net/sk98lin/skproc.c
deleted file mode 100644
index 5cece25c034e..000000000000
--- a/drivers/net/sk98lin/skproc.c
+++ /dev/null
@@ -1,265 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skproc.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.11 $
6 * Date: $Date: 2003/12/11 16:03:57 $
 7 * Purpose: Functions to display statistics
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * Created 22-Nov-2000
22 * Author: Mirko Lindner (mlindner@syskonnect.de)
23 *
24 * The information in this file is provided "AS IS" without warranty.
25 *
26 ******************************************************************************/
27#include <linux/proc_fs.h>
28#include <linux/seq_file.h>
29
30#include "h/skdrv1st.h"
31#include "h/skdrv2nd.h"
32#include "h/skversion.h"
33
34static int sk_seq_show(struct seq_file *seq, void *v);
35static int sk_proc_open(struct inode *inode, struct file *file);
36
37struct file_operations sk_proc_fops = {
38 .owner = THIS_MODULE,
39 .open = sk_proc_open,
40 .read = seq_read,
41 .llseek = seq_lseek,
42 .release = single_release,
43};
44
45
46/*****************************************************************************
47 *
48 * sk_seq_show - show proc information of a particular adapter
49 *
50 * Description:
51 * This function fills the proc entry with statistic data about
52 * the ethernet device. It invokes the generic sk_gen_browse() to
 53 * print out all items one by one.
54 *
55 * Returns: 0
56 *
57 */
58static int sk_seq_show(struct seq_file *seq, void *v)
59{
60 struct net_device *dev = seq->private;
61 DEV_NET *pNet = netdev_priv(dev);
62 SK_AC *pAC = pNet->pAC;
63 SK_PNMI_STRUCT_DATA *pPnmiStruct = &pAC->PnmiStruct;
64 unsigned long Flags;
65 unsigned int Size;
66 char sens_msg[50];
67 int t;
68 int i;
69
70 /* NetIndex in GetStruct is now required, zero is only dummy */
71 for (t=pAC->GIni.GIMacsFound; t > 0; t--) {
72 if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 1)
73 t--;
74
75 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
76 Size = SK_PNMI_STRUCT_SIZE;
77#ifdef SK_DIAG_SUPPORT
78 if (pAC->BoardLevel == SK_INIT_DATA) {
79 SK_MEMCPY(&(pAC->PnmiStruct), &(pAC->PnmiBackup), sizeof(SK_PNMI_STRUCT_DATA));
80 if (pAC->DiagModeActive == DIAG_NOTACTIVE) {
81 pAC->Pnmi.DiagAttached = SK_DIAG_IDLE;
82 }
83 } else {
84 SkPnmiGetStruct(pAC, pAC->IoBase, pPnmiStruct, &Size, t-1);
85 }
86#else
87 SkPnmiGetStruct(pAC, pAC->IoBase,
88 pPnmiStruct, &Size, t-1);
89#endif
90 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
91
92 if (pAC->dev[t-1] == dev) {
93 SK_PNMI_STAT *pPnmiStat = &pPnmiStruct->Stat[0];
94
95 seq_printf(seq, "\nDetailed statistic for device %s\n",
96 pAC->dev[t-1]->name);
97 seq_printf(seq, "=======================================\n");
98
99 /* Board statistics */
100 seq_printf(seq, "\nBoard statistics\n\n");
101 seq_printf(seq, "Active Port %c\n",
102 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt.
103 Net[t-1].PrefPort]->PortNumber);
104 seq_printf(seq, "Preferred Port %c\n",
105 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt.
106 Net[t-1].PrefPort]->PortNumber);
107
108 seq_printf(seq, "Bus speed (MHz) %d\n",
109 pPnmiStruct->BusSpeed);
110
111 seq_printf(seq, "Bus width (Bit) %d\n",
112 pPnmiStruct->BusWidth);
113 seq_printf(seq, "Driver version %s\n",
114 VER_STRING);
115 seq_printf(seq, "Hardware revision v%d.%d\n",
116 (pAC->GIni.GIPciHwRev >> 4) & 0x0F,
117 pAC->GIni.GIPciHwRev & 0x0F);
118
 119 /* Print sensor information */
120 for (i=0; i < pAC->I2c.MaxSens; i ++) {
121 /* Check type */
122 switch (pAC->I2c.SenTable[i].SenType) {
123 case 1:
124 strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
125 strcat(sens_msg, " (C)");
126 seq_printf(seq, "%-25s %d.%02d\n",
127 sens_msg,
128 pAC->I2c.SenTable[i].SenValue / 10,
129 pAC->I2c.SenTable[i].SenValue % 10);
130
131 strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
132 strcat(sens_msg, " (F)");
133 seq_printf(seq, "%-25s %d.%02d\n",
134 sens_msg,
135 ((((pAC->I2c.SenTable[i].SenValue)
136 *10)*9)/5 + 3200)/100,
137 ((((pAC->I2c.SenTable[i].SenValue)
138 *10)*9)/5 + 3200) % 10);
139 break;
140 case 2:
141 strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
142 strcat(sens_msg, " (V)");
143 seq_printf(seq, "%-25s %d.%03d\n",
144 sens_msg,
145 pAC->I2c.SenTable[i].SenValue / 1000,
146 pAC->I2c.SenTable[i].SenValue % 1000);
147 break;
148 case 3:
149 strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
150 strcat(sens_msg, " (rpm)");
151 seq_printf(seq, "%-25s %d\n",
152 sens_msg,
153 pAC->I2c.SenTable[i].SenValue);
154 break;
155 default:
156 break;
157 }
158 }
159
 160 /* Receive statistics */
161 seq_printf(seq, "\nReceive statistics\n\n");
162
163 seq_printf(seq, "Received bytes %Lu\n",
164 (unsigned long long) pPnmiStat->StatRxOctetsOkCts);
165 seq_printf(seq, "Received packets %Lu\n",
166 (unsigned long long) pPnmiStat->StatRxOkCts);
167#if 0
168 if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC &&
169 pAC->HWRevision < 12) {
170 pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts -
171 pPnmiStat->StatRxShortsCts;
172 pPnmiStat->StatRxShortsCts = 0;
173 }
174#endif
175 if (dev->mtu > 1500)
176 pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts -
177 pPnmiStat->StatRxTooLongCts;
178
179 seq_printf(seq, "Receive errors %Lu\n",
180 (unsigned long long) pPnmiStruct->InErrorsCts);
181 seq_printf(seq, "Receive dropped %Lu\n",
182 (unsigned long long) pPnmiStruct->RxNoBufCts);
183 seq_printf(seq, "Received multicast %Lu\n",
184 (unsigned long long) pPnmiStat->StatRxMulticastOkCts);
185 seq_printf(seq, "Receive error types\n");
186 seq_printf(seq, " length %Lu\n",
187 (unsigned long long) pPnmiStat->StatRxRuntCts);
188 seq_printf(seq, " buffer overflow %Lu\n",
189 (unsigned long long) pPnmiStat->StatRxFifoOverflowCts);
190 seq_printf(seq, " bad crc %Lu\n",
191 (unsigned long long) pPnmiStat->StatRxFcsCts);
192 seq_printf(seq, " framing %Lu\n",
193 (unsigned long long) pPnmiStat->StatRxFramingCts);
194 seq_printf(seq, " missed frames %Lu\n",
195 (unsigned long long) pPnmiStat->StatRxMissedCts);
196
197 if (dev->mtu > 1500)
198 pPnmiStat->StatRxTooLongCts = 0;
199
200 seq_printf(seq, " too long %Lu\n",
201 (unsigned long long) pPnmiStat->StatRxTooLongCts);
202 seq_printf(seq, " carrier extension %Lu\n",
203 (unsigned long long) pPnmiStat->StatRxCextCts);
204 seq_printf(seq, " too short %Lu\n",
205 (unsigned long long) pPnmiStat->StatRxShortsCts);
206 seq_printf(seq, " symbol %Lu\n",
207 (unsigned long long) pPnmiStat->StatRxSymbolCts);
208 seq_printf(seq, " LLC MAC size %Lu\n",
209 (unsigned long long) pPnmiStat->StatRxIRLengthCts);
210 seq_printf(seq, " carrier event %Lu\n",
211 (unsigned long long) pPnmiStat->StatRxCarrierCts);
212 seq_printf(seq, " jabber %Lu\n",
213 (unsigned long long) pPnmiStat->StatRxJabberCts);
214
215
 216 /* Transmit statistics */
217 seq_printf(seq, "\nTransmit statistics\n\n");
218
219 seq_printf(seq, "Transmited bytes %Lu\n",
220 (unsigned long long) pPnmiStat->StatTxOctetsOkCts);
221 seq_printf(seq, "Transmited packets %Lu\n",
222 (unsigned long long) pPnmiStat->StatTxOkCts);
223 seq_printf(seq, "Transmit errors %Lu\n",
224 (unsigned long long) pPnmiStat->StatTxSingleCollisionCts);
225 seq_printf(seq, "Transmit dropped %Lu\n",
226 (unsigned long long) pPnmiStruct->TxNoBufCts);
227 seq_printf(seq, "Transmit collisions %Lu\n",
228 (unsigned long long) pPnmiStat->StatTxSingleCollisionCts);
229 seq_printf(seq, "Transmit error types\n");
230 seq_printf(seq, " excessive collision %ld\n",
231 pAC->stats.tx_aborted_errors);
232 seq_printf(seq, " carrier %Lu\n",
233 (unsigned long long) pPnmiStat->StatTxCarrierCts);
234 seq_printf(seq, " fifo underrun %Lu\n",
235 (unsigned long long) pPnmiStat->StatTxFifoUnderrunCts);
236 seq_printf(seq, " heartbeat %Lu\n",
237 (unsigned long long) pPnmiStat->StatTxCarrierCts);
238 seq_printf(seq, " window %ld\n",
239 pAC->stats.tx_window_errors);
240
241 }
242 }
243 return 0;
244}
245
246/*****************************************************************************
247 *
248 * sk_proc_open - register the show function when proc is open'ed
249 *
250 * Description:
251 * This function is called whenever a sk98lin proc file is queried.
252 *
253 * Returns: the return value of single_open()
254 *
255 */
256static int sk_proc_open(struct inode *inode, struct file *file)
257{
258 return single_open(file, sk_seq_show, PDE(inode)->data);
259}
260
261/*******************************************************************************
262 *
263 * End of file
264 *
265 ******************************************************************************/
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 596c93b12daa..b538e3038058 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/config.h> 27#include <linux/config.h>
28#include <linux/in.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
@@ -43,7 +44,7 @@
43#include "skge.h" 44#include "skge.h"
44 45
45#define DRV_NAME "skge" 46#define DRV_NAME "skge"
46#define DRV_VERSION "1.2" 47#define DRV_VERSION "1.3"
47#define PFX DRV_NAME " " 48#define PFX DRV_NAME " "
48 49
49#define DEFAULT_TX_RING_SIZE 128 50#define DEFAULT_TX_RING_SIZE 128
@@ -88,15 +89,14 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
88 89
89static int skge_up(struct net_device *dev); 90static int skge_up(struct net_device *dev);
90static int skge_down(struct net_device *dev); 91static int skge_down(struct net_device *dev);
92static void skge_phy_reset(struct skge_port *skge);
91static void skge_tx_clean(struct skge_port *skge); 93static void skge_tx_clean(struct skge_port *skge);
92static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 94static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
93static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 95static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
94static void genesis_get_stats(struct skge_port *skge, u64 *data); 96static void genesis_get_stats(struct skge_port *skge, u64 *data);
95static void yukon_get_stats(struct skge_port *skge, u64 *data); 97static void yukon_get_stats(struct skge_port *skge, u64 *data);
96static void yukon_init(struct skge_hw *hw, int port); 98static void yukon_init(struct skge_hw *hw, int port);
97static void yukon_reset(struct skge_hw *hw, int port);
98static void genesis_mac_init(struct skge_hw *hw, int port); 99static void genesis_mac_init(struct skge_hw *hw, int port);
99static void genesis_reset(struct skge_hw *hw, int port);
100static void genesis_link_up(struct skge_port *skge); 100static void genesis_link_up(struct skge_port *skge);
101 101
102/* Avoid conditionals by using array */ 102/* Avoid conditionals by using array */
@@ -276,10 +276,9 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
276 skge->autoneg = ecmd->autoneg; 276 skge->autoneg = ecmd->autoneg;
277 skge->advertising = ecmd->advertising; 277 skge->advertising = ecmd->advertising;
278 278
279 if (netif_running(dev)) { 279 if (netif_running(dev))
280 skge_down(dev); 280 skge_phy_reset(skge);
281 skge_up(dev); 281
282 }
283 return (0); 282 return (0);
284} 283}
285 284
@@ -399,6 +398,7 @@ static int skge_set_ring_param(struct net_device *dev,
399 struct ethtool_ringparam *p) 398 struct ethtool_ringparam *p)
400{ 399{
401 struct skge_port *skge = netdev_priv(dev); 400 struct skge_port *skge = netdev_priv(dev);
401 int err;
402 402
403 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || 403 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
404 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE) 404 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
@@ -409,7 +409,9 @@ static int skge_set_ring_param(struct net_device *dev,
409 409
410 if (netif_running(dev)) { 410 if (netif_running(dev)) {
411 skge_down(dev); 411 skge_down(dev);
412 skge_up(dev); 412 err = skge_up(dev);
413 if (err)
414 dev_close(dev);
413 } 415 }
414 416
415 return 0; 417 return 0;
@@ -430,21 +432,11 @@ static void skge_set_msglevel(struct net_device *netdev, u32 value)
430static int skge_nway_reset(struct net_device *dev) 432static int skge_nway_reset(struct net_device *dev)
431{ 433{
432 struct skge_port *skge = netdev_priv(dev); 434 struct skge_port *skge = netdev_priv(dev);
433 struct skge_hw *hw = skge->hw;
434 int port = skge->port;
435 435
436 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) 436 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
437 return -EINVAL; 437 return -EINVAL;
438 438
439 spin_lock_bh(&hw->phy_lock); 439 skge_phy_reset(skge);
440 if (hw->chip_id == CHIP_ID_GENESIS) {
441 genesis_reset(hw, port);
442 genesis_mac_init(hw, port);
443 } else {
444 yukon_reset(hw, port);
445 yukon_init(hw, port);
446 }
447 spin_unlock_bh(&hw->phy_lock);
448 return 0; 440 return 0;
449} 441}
450 442
@@ -516,10 +508,8 @@ static int skge_set_pauseparam(struct net_device *dev,
516 else 508 else
517 skge->flow_control = FLOW_MODE_NONE; 509 skge->flow_control = FLOW_MODE_NONE;
518 510
519 if (netif_running(dev)) { 511 if (netif_running(dev))
520 skge_down(dev); 512 skge_phy_reset(skge);
521 skge_up(dev);
522 }
523 return 0; 513 return 0;
524} 514}
525 515
@@ -2019,6 +2009,25 @@ static void yukon_phy_intr(struct skge_port *skge)
2019 /* XXX restart autonegotiation? */ 2009 /* XXX restart autonegotiation? */
2020} 2010}
2021 2011
2012static void skge_phy_reset(struct skge_port *skge)
2013{
2014 struct skge_hw *hw = skge->hw;
2015 int port = skge->port;
2016
2017 netif_stop_queue(skge->netdev);
2018 netif_carrier_off(skge->netdev);
2019
2020 spin_lock_bh(&hw->phy_lock);
2021 if (hw->chip_id == CHIP_ID_GENESIS) {
2022 genesis_reset(hw, port);
2023 genesis_mac_init(hw, port);
2024 } else {
2025 yukon_reset(hw, port);
2026 yukon_init(hw, port);
2027 }
2028 spin_unlock_bh(&hw->phy_lock);
2029}
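/*
 * Caller pattern for the helper above, as used by the ethtool hooks
 * earlier in this diff (skge_set_settings, skge_set_pauseparam and
 * skge_nway_reset): renegotiate the link in place rather than doing a
 * full skge_down()/skge_up() cycle.  A minimal usage sketch only:
 */
	if (netif_running(dev))
		skge_phy_reset(netdev_priv(dev));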
2030
2022/* Basic MII support */ 2031/* Basic MII support */
2023static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2032static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2024{ 2033{
@@ -2187,6 +2196,7 @@ static int skge_up(struct net_device *dev)
2187 kfree(skge->rx_ring.start); 2196 kfree(skge->rx_ring.start);
2188 free_pci_mem: 2197 free_pci_mem:
2189 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2198 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2199 skge->mem = NULL;
2190 2200
2191 return err; 2201 return err;
2192} 2202}
@@ -2197,6 +2207,9 @@ static int skge_down(struct net_device *dev)
2197 struct skge_hw *hw = skge->hw; 2207 struct skge_hw *hw = skge->hw;
2198 int port = skge->port; 2208 int port = skge->port;
2199 2209
2210 if (skge->mem == NULL)
2211 return 0;
2212
2200 if (netif_msg_ifdown(skge)) 2213 if (netif_msg_ifdown(skge))
2201 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 2214 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2202 2215
@@ -2253,6 +2266,7 @@ static int skge_down(struct net_device *dev)
2253 kfree(skge->rx_ring.start); 2266 kfree(skge->rx_ring.start);
2254 kfree(skge->tx_ring.start); 2267 kfree(skge->tx_ring.start);
2255 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); 2268 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2269 skge->mem = NULL;
2256 return 0; 2270 return 0;
2257} 2271}
2258 2272
@@ -2280,11 +2294,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2280 } 2294 }
2281 2295
2282 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2296 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2283 netif_stop_queue(dev); 2297 if (!netif_queue_stopped(dev)) {
2284 spin_unlock_irqrestore(&skge->tx_lock, flags); 2298 netif_stop_queue(dev);
2285 2299
2286 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 2300 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2287 dev->name); 2301 dev->name);
2302 }
2303 spin_unlock_irqrestore(&skge->tx_lock, flags);
2288 return NETDEV_TX_BUSY; 2304 return NETDEV_TX_BUSY;
2289 } 2305 }
2290 2306
@@ -2300,14 +2316,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2300 td->dma_hi = map >> 32; 2316 td->dma_hi = map >> 32;
2301 2317
2302 if (skb->ip_summed == CHECKSUM_HW) { 2318 if (skb->ip_summed == CHECKSUM_HW) {
2303 const struct iphdr *ip
2304 = (const struct iphdr *) (skb->data + ETH_HLEN);
2305 int offset = skb->h.raw - skb->data; 2319 int offset = skb->h.raw - skb->data;
2306 2320
2307 /* This seems backwards, but it is what the sk98lin 2321 /* This seems backwards, but it is what the sk98lin
2308 * does. Looks like hardware is wrong? 2322 * does. Looks like hardware is wrong?
2309 */ 2323 */
2310 if (ip->protocol == IPPROTO_UDP 2324 if (skb->h.ipiph->protocol == IPPROTO_UDP
2311 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2325 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2312 control = BMU_TCP_CHECK; 2326 control = BMU_TCP_CHECK;
2313 else 2327 else
@@ -2413,18 +2427,23 @@ static void skge_tx_timeout(struct net_device *dev)
2413 2427
2414static int skge_change_mtu(struct net_device *dev, int new_mtu) 2428static int skge_change_mtu(struct net_device *dev, int new_mtu)
2415{ 2429{
2416 int err = 0; 2430 int err;
2417 int running = netif_running(dev);
2418 2431
2419 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2432 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2420 return -EINVAL; 2433 return -EINVAL;
2421 2434
2435 if (!netif_running(dev)) {
2436 dev->mtu = new_mtu;
2437 return 0;
2438 }
2439
2440 skge_down(dev);
2422 2441
2423 if (running)
2424 skge_down(dev);
2425 dev->mtu = new_mtu; 2442 dev->mtu = new_mtu;
2426 if (running) 2443
2427 skge_up(dev); 2444 err = skge_up(dev);
2445 if (err)
2446 dev_close(dev);
2428 2447
2429 return err; 2448 return err;
2430} 2449}
@@ -3398,8 +3417,8 @@ static int skge_resume(struct pci_dev *pdev)
3398 struct net_device *dev = hw->dev[i]; 3417 struct net_device *dev = hw->dev[i];
3399 if (dev) { 3418 if (dev) {
3400 netif_device_attach(dev); 3419 netif_device_attach(dev);
3401 if (netif_running(dev)) 3420 if (netif_running(dev) && skge_up(dev))
3402 skge_up(dev); 3421 dev_close(dev);
3403 } 3422 }
3404 } 3423 }
3405 return 0; 3424 return 0;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index ee123c15f545..2efdacc290e5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -475,18 +475,6 @@ enum {
475 Q_T2 = 0x40, /* 32 bit Test Register 2 */ 475 Q_T2 = 0x40, /* 32 bit Test Register 2 */
476 Q_T3 = 0x44, /* 32 bit Test Register 3 */ 476 Q_T3 = 0x44, /* 32 bit Test Register 3 */
477 477
478/* Yukon-2 */
479 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
480 Q_WM = 0x40, /* 16 bit FIFO Watermark */
481 Q_AL = 0x42, /* 8 bit FIFO Alignment */
482 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
483 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
484 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
485 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
486 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
487 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
488 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
489 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
490}; 478};
491#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) 479#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
492 480
@@ -675,22 +663,16 @@ enum {
675 LED_OFF = 1<<0, /* switch LED off */ 663 LED_OFF = 1<<0, /* switch LED off */
676}; 664};
677 665
678/* Receive GMAC FIFO (YUKON and Yukon-2) */ 666/* Receive GMAC FIFO (YUKON) */
679enum { 667enum {
680 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ 668 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
681 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */ 669 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
682 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ 670 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
683 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ 671 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
684 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ 672 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
685 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
686
687 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
688 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ 673 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
689
690 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ 674 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
691
692 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ 675 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
693
694 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ 676 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
695}; 677};
696 678
@@ -855,48 +837,6 @@ enum {
855 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ 837 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
856}; 838};
857 839
858/* Status BMU Registers (Yukon-2 only)*/
859enum {
860 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
861 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
862 /* 0x0e85 - 0x0e86: reserved */
863 STAT_LIST_ADDR_LO = 0x0e88,/* 32 bit Status List Start Addr (low) */
864 STAT_LIST_ADDR_HI = 0x0e8c,/* 32 bit Status List Start Addr (high) */
865 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
866 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
867 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
868 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
869 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
870 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
871
872/* FIFO Control/Status Registers (Yukon-2 only)*/
873 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
874 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
875 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
876 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
877 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
878 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
879 STAT_FIFO_ISR_WM = 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
880
881/* Level and ISR Timer Registers (Yukon-2 only)*/
882 STAT_LEV_TIMER_INI = 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
883 STAT_LEV_TIMER_CNT = 0x0eb4,/* 32 bit Level Timer Counter Reg */
884 STAT_LEV_TIMER_CTRL = 0x0eb8,/* 8 bit Level Timer Control Reg */
885 STAT_LEV_TIMER_TEST = 0x0eb9,/* 8 bit Level Timer Test Reg */
886 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
887 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
888 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
889 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
890 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
891 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
892 STAT_ISR_TIMER_CTRL = 0x0ed8,/* 8 bit ISR Timer Control Reg */
893 STAT_ISR_TIMER_TEST = 0x0ed9,/* 8 bit ISR Timer Test Reg */
894
895 ST_LAST_IDX_MASK = 0x007f,/* Last Index Mask */
896 ST_TXRP_IDX_MASK = 0x0fff,/* Tx Report Index Mask */
897 ST_TXTH_IDX_MASK = 0x0fff,/* Tx Threshold Index Mask */
898 ST_WM_IDX_MASK = 0x3f,/* FIFO Watermark Index Mask */
899};
900 840
901enum { 841enum {
902 LINKLED_OFF = 0x01, 842 LINKLED_OFF = 0x01,
@@ -923,8 +863,6 @@ enum {
923 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ 863 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
924 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ 864 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
925 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ 865 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
926 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
927 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
928 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ 866 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
929 867
930/* WOL Pattern Length Registers (YUKON only) */ 868/* WOL Pattern Length Registers (YUKON only) */
@@ -1641,15 +1579,6 @@ enum {
1641 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ 1579 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1642}; 1580};
1643 1581
1644/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1645/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1646enum {
1647 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1648 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1649 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1650 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1651};
1652#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
1653 1582
1654/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ 1583/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1655enum { 1584enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
new file mode 100644
index 000000000000..f5d697c0c031
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,3262 @@
1/*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on earlier sk98lin, and skge driver.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26/*
27 * TOTEST
28 * - speed setting
29 * - suspend/resume
30 */
31
32#include <linux/config.h>
33#include <linux/crc32.h>
34#include <linux/kernel.h>
35#include <linux/version.h>
36#include <linux/module.h>
37#include <linux/netdevice.h>
38#include <linux/dma-mapping.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/pci.h>
42#include <linux/ip.h>
43#include <linux/tcp.h>
44#include <linux/in.h>
45#include <linux/delay.h>
46#include <linux/workqueue.h>
47#include <linux/if_vlan.h>
48#include <linux/prefetch.h>
49#include <linux/mii.h>
50
51#include <asm/irq.h>
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define SKY2_VLAN_TAG_USED 1
55#endif
56
57#include "sky2.h"
58
59#define DRV_NAME "sky2"
60#define DRV_VERSION "0.11"
61#define PFX DRV_NAME " "
62
63/*
64 * The Yukon II chipset takes 64 bit command blocks (called list elements)
65 * that are organized into three (receive, transmit, status) different rings
66 * similar to Tigon3. A transmit can require several elements;
67 * a receive requires one (or two if using 64 bit dma).
68 */
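/*
 * Rough element accounting implied by the comment above and by the
 * MAX_SKB_TX_LE definition below: up to two list elements (64 bit
 * address + data) per page fragment, plus a few for the header,
 * checksum and VLAN tag.  A minimal sketch; the helper name is
 * illustrative and not part of this driver.
 */
static inline unsigned sky2_tx_le_needed(struct sk_buff *skb)
{
	return 4 + 2 * skb_shinfo(skb)->nr_frags;
}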
69
70#define is_ec_a1(hw) \
71 unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
72 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
73
74#define RX_LE_SIZE 512
75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
77#define RX_DEF_PENDING RX_MAX_PENDING
78
79#define TX_RING_SIZE 512
80#define TX_DEF_PENDING (TX_RING_SIZE - 1)
81#define TX_MIN_PENDING 64
82#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
83
84#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
85#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
86#define ETH_JUMBO_MTU 9000
87#define TX_WATCHDOG (5 * HZ)
88#define NAPI_WEIGHT 64
89#define PHY_RETRIES 1000
90
91static const u32 default_msg =
92 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
93 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
94 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
95
96static int debug = -1; /* defaults above */
97module_param(debug, int, 0);
98MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
99
100static int copybreak __read_mostly = 256;
101module_param(copybreak, int, 0);
102MODULE_PARM_DESC(copybreak, "Receive copy threshold");
103
104static const struct pci_device_id sky2_id_table[] = {
105 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
124 { 0 }
125};
126
127MODULE_DEVICE_TABLE(pci, sky2_id_table);
128
129/* Avoid conditionals by using array */
130static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
131static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
132
133/* This driver supports yukon2 chipset only */
134static const char *yukon2_name[] = {
135 "XL", /* 0xb3 */
136 "EC Ultra", /* 0xb4 */
137 "UNKNOWN", /* 0xb5 */
138 "EC", /* 0xb6 */
139 "FE", /* 0xb7 */
140};
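/*
 * The table above is indexed by chip id relative to the first Yukon-2
 * id; a minimal sketch, assuming CHIP_ID_YUKON_XL corresponds to the
 * 0xb3 base id noted in the per-entry comments (helper name is
 * illustrative only):
 */
static const char *sky2_chip_name(const struct sky2_hw *hw)
{
	return yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL];
}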
141
142/* Access to external PHY */
143static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
144{
145 int i;
146
147 gma_write16(hw, port, GM_SMI_DATA, val);
148 gma_write16(hw, port, GM_SMI_CTRL,
149 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
150
151 for (i = 0; i < PHY_RETRIES; i++) {
152 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
153 return 0;
154 udelay(1);
155 }
156
157 printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
158 return -ETIMEDOUT;
159}
160
161static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
162{
163 int i;
164
165 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
166 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
167
168 for (i = 0; i < PHY_RETRIES; i++) {
169 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
170 *val = gma_read16(hw, port, GM_SMI_DATA);
171 return 0;
172 }
173
174 udelay(1);
175 }
176
177 return -ETIMEDOUT;
178}
179
180static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
181{
182 u16 v;
183
184 if (__gm_phy_read(hw, port, reg, &v) != 0)
185 printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
186 return v;
187}
188
189static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
190{
191 u16 power_control;
192 u32 reg1;
193 int vaux;
194 int ret = 0;
195
196 pr_debug("sky2_set_power_state %d\n", state);
197 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
198
199 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
200 vaux = (sky2_read8(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
201 (power_control & PCI_PM_CAP_PME_D3cold);
202
203 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
204
205 power_control |= PCI_PM_CTRL_PME_STATUS;
206 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
207
208 switch (state) {
209 case PCI_D0:
210 /* switch power to VCC (WA for VAUX problem) */
211 sky2_write8(hw, B0_POWER_CTRL,
212 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
213
214 /* disable Core Clock Division, */
215 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
216
217 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
218 /* enable bits are inverted */
219 sky2_write8(hw, B2_Y2_CLK_GATE,
220 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
221 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
222 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
223 else
224 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
225
226 /* Turn off phy power saving */
227 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
228 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
229
230 /* looks like this XL is back asswards .. */
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
232 reg1 |= PCI_Y2_PHY1_COMA;
233 if (hw->ports > 1)
234 reg1 |= PCI_Y2_PHY2_COMA;
235 }
236 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
237 break;
238
239 case PCI_D3hot:
240 case PCI_D3cold:
241 /* Turn on phy power saving */
242 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
243 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
244 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
245 else
246 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
247 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
248
249 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
250 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
251 else
252 /* enable bits are inverted */
253 sky2_write8(hw, B2_Y2_CLK_GATE,
254 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
255 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
256 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
257
258 /* switch power to VAUX */
259 if (vaux && state != PCI_D3cold)
260 sky2_write8(hw, B0_POWER_CTRL,
261 (PC_VAUX_ENA | PC_VCC_ENA |
262 PC_VAUX_ON | PC_VCC_OFF));
263 break;
264 default:
265 printk(KERN_ERR PFX "Unknown power state %d\n", state);
266 ret = -1;
267 }
268
269 pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
270 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
271 return ret;
272}
273
274static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
275{
276 u16 reg;
277
278 /* disable all GMAC IRQ's */
279 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
280 /* disable PHY IRQs */
281 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
282
283 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
284 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
285 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
286 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
287
288 reg = gma_read16(hw, port, GM_RX_CTRL);
289 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
290 gma_write16(hw, port, GM_RX_CTRL, reg);
291}
292
293static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
294{
295 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
296 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
297
298 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
299 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
300
301 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
302 PHY_M_EC_MAC_S_MSK);
303 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
304
305 if (hw->chip_id == CHIP_ID_YUKON_EC)
306 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
307 else
308 ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
309
310 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
311 }
312
313 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
314 if (hw->copper) {
315 if (hw->chip_id == CHIP_ID_YUKON_FE) {
316 /* enable automatic crossover */
317 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
318 } else {
319 /* disable energy detect */
320 ctrl &= ~PHY_M_PC_EN_DET_MSK;
321
322 /* enable automatic crossover */
323 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
324
325 if (sky2->autoneg == AUTONEG_ENABLE &&
326 hw->chip_id == CHIP_ID_YUKON_XL) {
327 ctrl &= ~PHY_M_PC_DSC_MSK;
328 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
329 }
330 }
331 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
332 } else {
333 /* workaround for deviation #4.88 (CRC errors) */
334 /* disable Automatic Crossover */
335
336 ctrl &= ~PHY_M_PC_MDIX_MSK;
337 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
338
339 if (hw->chip_id == CHIP_ID_YUKON_XL) {
340 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
341 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
342 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
343 ctrl &= ~PHY_M_MAC_MD_MSK;
344 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
345 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
346
347 /* select page 1 to access Fiber registers */
348 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
349 }
350 }
351
352 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
353 if (sky2->autoneg == AUTONEG_DISABLE)
354 ctrl &= ~PHY_CT_ANE;
355 else
356 ctrl |= PHY_CT_ANE;
357
358 ctrl |= PHY_CT_RESET;
359 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
360
361 ctrl = 0;
362 ct1000 = 0;
363 adv = PHY_AN_CSMA;
364
365 if (sky2->autoneg == AUTONEG_ENABLE) {
366 if (hw->copper) {
367 if (sky2->advertising & ADVERTISED_1000baseT_Full)
368 ct1000 |= PHY_M_1000C_AFD;
369 if (sky2->advertising & ADVERTISED_1000baseT_Half)
370 ct1000 |= PHY_M_1000C_AHD;
371 if (sky2->advertising & ADVERTISED_100baseT_Full)
372 adv |= PHY_M_AN_100_FD;
373 if (sky2->advertising & ADVERTISED_100baseT_Half)
374 adv |= PHY_M_AN_100_HD;
375 if (sky2->advertising & ADVERTISED_10baseT_Full)
376 adv |= PHY_M_AN_10_FD;
377 if (sky2->advertising & ADVERTISED_10baseT_Half)
378 adv |= PHY_M_AN_10_HD;
379 } else /* special defines for FIBER (88E1011S only) */
380 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
381
382 /* Set Flow-control capabilities */
383 if (sky2->tx_pause && sky2->rx_pause)
384 adv |= PHY_AN_PAUSE_CAP; /* symmetric */
385 else if (sky2->rx_pause && !sky2->tx_pause)
386 adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
387 else if (!sky2->rx_pause && sky2->tx_pause)
388 adv |= PHY_AN_PAUSE_ASYM; /* local */
389
390 /* Restart Auto-negotiation */
391 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
392 } else {
393 /* forced speed/duplex settings */
394 ct1000 = PHY_M_1000C_MSE;
395
396 if (sky2->duplex == DUPLEX_FULL)
397 ctrl |= PHY_CT_DUP_MD;
398
399 switch (sky2->speed) {
400 case SPEED_1000:
401 ctrl |= PHY_CT_SP1000;
402 break;
403 case SPEED_100:
404 ctrl |= PHY_CT_SP100;
405 break;
406 }
407
408 ctrl |= PHY_CT_RESET;
409 }
410
411 if (hw->chip_id != CHIP_ID_YUKON_FE)
412 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
413
414 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
415 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
416
417 /* Setup Phy LED's */
418 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
419 ledover = 0;
420
421 switch (hw->chip_id) {
422 case CHIP_ID_YUKON_FE:
423 /* on 88E3082 these bits are at 11..9 (shifted left) */
424 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
425
426 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
427
428 /* delete ACT LED control bits */
429 ctrl &= ~PHY_M_FELP_LED1_MSK;
430 /* change ACT LED control to blink mode */
431 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
432 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
433 break;
434
435 case CHIP_ID_YUKON_XL:
436 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
437
438 /* select page 3 to access LED control register */
439 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
440
441 /* set LED Function Control register */
442 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
443 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
444 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
445 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
446
447 /* set Polarity Control register */
448 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
449 (PHY_M_POLC_LS1_P_MIX(4) |
450 PHY_M_POLC_IS0_P_MIX(4) |
451 PHY_M_POLC_LOS_CTRL(2) |
452 PHY_M_POLC_INIT_CTRL(2) |
453 PHY_M_POLC_STA1_CTRL(2) |
454 PHY_M_POLC_STA0_CTRL(2)));
455
456 /* restore page register */
457 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
458 break;
459
460 default:
461 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
462 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
463 /* turn off the Rx LED (LED_RX) */
464 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
465 }
466
467 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
468
469 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
470 /* turn on 100 Mbps LED (LED_LINK100) */
471 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
472 }
473
474 if (ledover)
475 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
476
477 /* Enable phy interrupt on auto-negotiation complete (or link up) */
478 if (sky2->autoneg == AUTONEG_ENABLE)
479 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
480 else
481 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
482}
483
484/* Force a renegotiation */
485static void sky2_phy_reinit(struct sky2_port *sky2)
486{
487 down(&sky2->phy_sema);
488 sky2_phy_init(sky2->hw, sky2->port);
489 up(&sky2->phy_sema);
490}
491
492static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
493{
494 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
495 u16 reg;
496 int i;
497 const u8 *addr = hw->dev[port]->dev_addr;
498
499 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
500 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);
501
502 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
503
504 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
505 /* WA DEV_472 -- looks like crossed wires on port 2 */
506 /* clear GMAC 1 Control reset */
507 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
508 do {
509 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
510 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
511 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
512 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
513 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
514 }
515
516 if (sky2->autoneg == AUTONEG_DISABLE) {
517 reg = gma_read16(hw, port, GM_GP_CTRL);
518 reg |= GM_GPCR_AU_ALL_DIS;
519 gma_write16(hw, port, GM_GP_CTRL, reg);
520 gma_read16(hw, port, GM_GP_CTRL);
521
522 switch (sky2->speed) {
523 case SPEED_1000:
524 reg |= GM_GPCR_SPEED_1000;
525 /* fallthru */
526 case SPEED_100:
527 reg |= GM_GPCR_SPEED_100;
528 }
529
530 if (sky2->duplex == DUPLEX_FULL)
531 reg |= GM_GPCR_DUP_FULL;
532 } else
533 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
534
535 if (!sky2->tx_pause && !sky2->rx_pause) {
536 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
537 reg |=
538 GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
539 } else if (sky2->tx_pause && !sky2->rx_pause) {
540 /* disable Rx flow-control */
541 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
542 }
543
544 gma_write16(hw, port, GM_GP_CTRL, reg);
545
546 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
547
548 down(&sky2->phy_sema);
549 sky2_phy_init(hw, port);
550 up(&sky2->phy_sema);
551
552 /* MIB clear */
553 reg = gma_read16(hw, port, GM_PHY_ADDR);
554 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
555
556 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
557 gma_read16(hw, port, GM_MIB_CNT_BASE + 8 * i);
558 gma_write16(hw, port, GM_PHY_ADDR, reg);
559
560 /* transmit control */
561 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
562
563 /* receive control reg: unicast + multicast + no FCS */
564 gma_write16(hw, port, GM_RX_CTRL,
565 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
566
567 /* transmit flow control */
568 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
569
570 /* transmit parameter */
571 gma_write16(hw, port, GM_TX_PARAM,
572 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
573 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
574 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
575 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
576
577 /* serial mode register */
578 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
579 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
580
581 if (hw->dev[port]->mtu > ETH_DATA_LEN)
582 reg |= GM_SMOD_JUMBO_ENA;
583
584 gma_write16(hw, port, GM_SERIAL_MODE, reg);
585
586 /* virtual address for data */
587 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
588
589 /* physical address: used for pause frames */
590 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
591
592 /* ignore counter overflows */
593 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
594 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
595 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
596
597 /* Configure Rx MAC FIFO */
598 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
599 sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
600 GMF_RX_CTRL_DEF);
601
602 /* Flush Rx MAC FIFO on any flow control or error */
603 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
604
605 /* Set threshold to 0xa (64 bytes)
606 * ASF disabled so no need to do WA dev #4.30
607 */
608 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
609
610 /* Configure Tx MAC FIFO */
611 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
612 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
613
614 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
615 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
616 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
617 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
618 /* set Tx GMAC FIFO Almost Empty Threshold */
619 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
620 /* Disable Store & Forward mode for TX */
621 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
622 }
623 }
624
625}
626
627static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
628{
629 u32 end;
630
631 start /= 8;
632 len /= 8;
633 end = start + len - 1;
634
635 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
636 sky2_write32(hw, RB_ADDR(q, RB_START), start);
637 sky2_write32(hw, RB_ADDR(q, RB_END), end);
638 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
639 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
640
641 if (q == Q_R1 || q == Q_R2) {
642 u32 rxup, rxlo;
643
644 rxlo = len/2;
645 rxup = rxlo + len/4;
646
 647 /* Set thresholds on receive queues */
648 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
649 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
650 } else {
 651 /* Enable store & forward on Tx queues because
652 * Tx FIFO is only 1K on Yukon
653 */
654 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
655 }
656
657 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
658 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
659}
660
661/* Setup Bus Memory Interface */
662static void sky2_qset(struct sky2_hw *hw, u16 q)
663{
664 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
665 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
666 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
667 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
668}
669
670 /* Set up the prefetch unit registers. This is the interface between
671  * the hardware and the driver's list elements.
672 */
673static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
674 u64 addr, u32 last)
675{
676 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
677 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
678 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
679 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
680 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
681 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
682
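	/* read back to flush posted PCI writes */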
683 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
684}
685
686static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
687{
688 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
689
690 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
691 return le;
692}
693
694/*
695  * This is workaround code taken from the SysKonnect sk98lin driver
696  * to deal with a chip bug on Yukon EC rev 0 in the wraparound case.
697 */
698static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
699 u16 idx, u16 *last, u16 size)
700{
701 if (is_ec_a1(hw) && idx < *last) {
702 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
703
704 if (hwget == 0) {
705 /* Start prefetching again */
706 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
707 goto setnew;
708 }
709
710 if (hwget == size - 1) {
711 /* set watermark to one list element */
712 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
713
714 /* set put index to first list element */
715 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
716 } else /* have hardware go to end of list */
717 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
718 size - 1);
719 } else {
720setnew:
721 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
722 }
723 *last = idx;
724}
725
726
727static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
728{
729 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
730 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
731 return le;
732}
733
734/* Return high part of DMA address (could be 32 or 64 bit) */
735static inline u32 high32(dma_addr_t a)
736{
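	/* two 16-bit shifts keep this well-defined when dma_addr_t is only 32 bits wide */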
737 return (a >> 16) >> 16;
738}
739
740 /* Build a list element describing the receive buffer to the hardware */
741static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
742{
743 struct sky2_rx_le *le;
744 u32 hi = high32(map);
745 u16 len = sky2->rx_bufsize;
746
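	/* an ADDR64 list element latches the upper 32 address bits for the
	 * elements that follow, so only emit one when those bits change
	 */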
747 if (sky2->rx_addr64 != hi) {
748 le = sky2_next_rx(sky2);
749 le->addr = cpu_to_le32(hi);
750 le->ctrl = 0;
751 le->opcode = OP_ADDR64 | HW_OWNER;
752 sky2->rx_addr64 = high32(map + len);
753 }
754
755 le = sky2_next_rx(sky2);
756 le->addr = cpu_to_le32((u32) map);
757 le->length = cpu_to_le16(len);
758 le->ctrl = 0;
759 le->opcode = OP_PACKET | HW_OWNER;
760}
761
762
763 /* Tell the chip where to start the receive checksum.
764  * There are actually two checksum start positions, but we set both the
765  * same to avoid possible byte-order problems.
766 */
767static void rx_set_checksum(struct sky2_port *sky2)
768{
769 struct sky2_rx_le *le;
770
771 le = sky2_next_rx(sky2);
772 le->addr = (ETH_HLEN << 16) | ETH_HLEN;
773 le->ctrl = 0;
774 le->opcode = OP_TCPSTART | HW_OWNER;
775
776 sky2_write32(sky2->hw,
777 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
778 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
779
780}
781
782/*
783 * The RX Stop command will not work for Yukon-2 if the BMU does not
784  * reach the end of packet, and since we can't guarantee that incoming
785  * data will arrive, we must reset the BMU while it is not doing a DMA
786  * transfer. Since it is possible that the RX path is still active,
787  * the RX RAM buffer will be stopped first, so any possible incoming
788  * data will not trigger a DMA. After the RAM buffer is stopped, the
789  * BMU is polled until any DMA in progress has ended, and only then
790  * is it reset.
791 */
792static void sky2_rx_stop(struct sky2_port *sky2)
793{
794 struct sky2_hw *hw = sky2->hw;
795 unsigned rxq = rxqaddr[sky2->port];
796 int i;
797
798 /* disable the RAM Buffer receive queue */
799 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
800
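	/* poll until both level registers read back the same value,
	 * which indicates the receive queue has drained
	 */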
801 for (i = 0; i < 0xffff; i++)
802 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
803 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
804 goto stopped;
805
806 printk(KERN_WARNING PFX "%s: receiver stop failed\n",
807 sky2->netdev->name);
808stopped:
809 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
810
811 /* reset the Rx prefetch unit */
812 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
813}
814
815 /* Clean out the receive buffer area; assumes the receiver hardware is stopped */
816static void sky2_rx_clean(struct sky2_port *sky2)
817{
818 unsigned i;
819
820 memset(sky2->rx_le, 0, RX_LE_BYTES);
821 for (i = 0; i < sky2->rx_pending; i++) {
822 struct ring_info *re = sky2->rx_ring + i;
823
824 if (re->skb) {
825 pci_unmap_single(sky2->hw->pdev,
826 re->mapaddr, sky2->rx_bufsize,
827 PCI_DMA_FROMDEVICE);
828 kfree_skb(re->skb);
829 re->skb = NULL;
830 }
831 }
832}
833
834/* Basic MII support */
835static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
836{
837 struct mii_ioctl_data *data = if_mii(ifr);
838 struct sky2_port *sky2 = netdev_priv(dev);
839 struct sky2_hw *hw = sky2->hw;
840 int err = -EOPNOTSUPP;
841
842 if (!netif_running(dev))
843 		return -ENODEV;	/* PHY still in reset */
844
845 switch(cmd) {
846 case SIOCGMIIPHY:
847 data->phy_id = PHY_ADDR_MARV;
848
849 /* fallthru */
850 case SIOCGMIIREG: {
851 u16 val = 0;
852
853 down(&sky2->phy_sema);
854 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
855 up(&sky2->phy_sema);
856
857 data->val_out = val;
858 break;
859 }
860
861 case SIOCSMIIREG:
862 if (!capable(CAP_NET_ADMIN))
863 return -EPERM;
864
865 down(&sky2->phy_sema);
866 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
867 data->val_in);
868 up(&sky2->phy_sema);
869 break;
870 }
871 return err;
872}
873
874#ifdef SKY2_VLAN_TAG_USED
875static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
876{
877 struct sky2_port *sky2 = netdev_priv(dev);
878 struct sky2_hw *hw = sky2->hw;
879 u16 port = sky2->port;
880
881 spin_lock(&sky2->tx_lock);
882
883 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
884 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
885 sky2->vlgrp = grp;
886
887 spin_unlock(&sky2->tx_lock);
888}
889
890static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
891{
892 struct sky2_port *sky2 = netdev_priv(dev);
893 struct sky2_hw *hw = sky2->hw;
894 u16 port = sky2->port;
895
896 spin_lock(&sky2->tx_lock);
897
898 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
899 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
900 if (sky2->vlgrp)
901 sky2->vlgrp->vlan_devices[vid] = NULL;
902
903 spin_unlock(&sky2->tx_lock);
904}
905#endif
906
907/*
908  * Allocate and set up the receiver buffer pool.
909  * With 64-bit DMA there are twice as many list elements available
910  * as ring entries, and one list element must be reserved so we
911  * don't wrap around.
912  *
913  * It appears the hardware has a bug in the FIFO logic that
914  * causes it to hang if the FIFO gets overrun and the receive buffer
915  * is not aligned. This means we can't use skb_reserve to align
916 * the IP header.
917 */
918static int sky2_rx_start(struct sky2_port *sky2)
919{
920 struct sky2_hw *hw = sky2->hw;
921 unsigned rxq = rxqaddr[sky2->port];
922 int i;
923
924 sky2->rx_put = sky2->rx_next = 0;
925 sky2_qset(hw, rxq);
926 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
927
928 rx_set_checksum(sky2);
929 for (i = 0; i < sky2->rx_pending; i++) {
930 struct ring_info *re = sky2->rx_ring + i;
931
932 re->skb = dev_alloc_skb(sky2->rx_bufsize);
933 if (!re->skb)
934 goto nomem;
935
936 re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
937 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
938 sky2_rx_add(sky2, re->mapaddr);
939 }
940
941 /* Tell chip about available buffers */
942 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
943 sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
944 return 0;
945nomem:
946 sky2_rx_clean(sky2);
947 return -ENOMEM;
948}
949
950/* Bring up network interface. */
951static int sky2_up(struct net_device *dev)
952{
953 struct sky2_port *sky2 = netdev_priv(dev);
954 struct sky2_hw *hw = sky2->hw;
955 unsigned port = sky2->port;
956 u32 ramsize, rxspace;
957 int err = -ENOMEM;
958
959 if (netif_msg_ifup(sky2))
960 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
961
962 	/* ring size must be a power of 2 */
963 sky2->tx_le = pci_alloc_consistent(hw->pdev,
964 TX_RING_SIZE *
965 sizeof(struct sky2_tx_le),
966 &sky2->tx_le_map);
967 if (!sky2->tx_le)
968 goto err_out;
969
970 sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
971 GFP_KERNEL);
972 if (!sky2->tx_ring)
973 goto err_out;
974 sky2->tx_prod = sky2->tx_cons = 0;
975
976 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
977 &sky2->rx_le_map);
978 if (!sky2->rx_le)
979 goto err_out;
980 memset(sky2->rx_le, 0, RX_LE_BYTES);
981
982 sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct ring_info),
983 GFP_KERNEL);
984 if (!sky2->rx_ring)
985 goto err_out;
986
987 sky2_mac_init(hw, port);
988
989 /* Configure RAM buffers */
990 if (hw->chip_id == CHIP_ID_YUKON_FE ||
991 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
992 ramsize = 4096;
993 else {
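		/* B2_E_0 reports the RAM size in 4K blocks; 0 means the full 128K */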
994 u8 e0 = sky2_read8(hw, B2_E_0);
995 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
996 }
997
998 /* 2/3 for Rx */
999 rxspace = (2 * ramsize) / 3;
1000 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1001 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1002
1003 /* Make sure SyncQ is disabled */
1004 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1005 RB_RST_SET);
1006
1007 sky2_qset(hw, txqaddr[port]);
1008 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
1009 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1010
1011
1012 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1013 TX_RING_SIZE - 1);
1014
1015 err = sky2_rx_start(sky2);
1016 if (err)
1017 goto err_out;
1018
1019 /* Enable interrupts from phy/mac for port */
1020 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1021 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1022 return 0;
1023
1024err_out:
1025 if (sky2->rx_le) {
1026 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1027 sky2->rx_le, sky2->rx_le_map);
1028 sky2->rx_le = NULL;
1029 }
1030 if (sky2->tx_le) {
1031 pci_free_consistent(hw->pdev,
1032 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1033 sky2->tx_le, sky2->tx_le_map);
1034 sky2->tx_le = NULL;
1035 }
1036 kfree(sky2->tx_ring);
1037 kfree(sky2->rx_ring);
1038
1039 sky2->tx_ring = NULL;
1040 sky2->rx_ring = NULL;
1041 return err;
1042}
1043
1044/* Modular subtraction in ring */
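/* (relies on TX_RING_SIZE being a power of two so that the unsigned
 * wraparound of head - tail still yields the correct distance)
 */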
1045static inline int tx_dist(unsigned tail, unsigned head)
1046{
1047 return (head - tail) % TX_RING_SIZE;
1048}
1049
1050/* Number of list elements available for next tx */
1051static inline int tx_avail(const struct sky2_port *sky2)
1052{
1053 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1054}
1055
1056/* Estimate of number of transmit list elements required */
1057static inline unsigned tx_le_req(const struct sk_buff *skb)
1058{
1059 unsigned count;
1060
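	/* one element per DMA buffer, or two when dma_addr_t is 64 bits and
	 * an extra ADDR64 element may be needed
	 */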
1061 count = sizeof(dma_addr_t) / sizeof(u32);
1062 count += skb_shinfo(skb)->nr_frags * count;
1063
1064 if (skb_shinfo(skb)->tso_size)
1065 ++count;
1066
1067 if (skb->ip_summed == CHECKSUM_HW)
1068 ++count;
1069
1070 return count;
1071}
1072
1073/*
1074 * Put one packet in ring for transmit.
1075 * A single packet can generate multiple list elements, and
1076 * the number of ring elements will probably be less than the number
1077 * of list elements used.
1078 *
1079 * No BH disabling for tx_lock here (like tg3)
1080 */
1081static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1082{
1083 struct sky2_port *sky2 = netdev_priv(dev);
1084 struct sky2_hw *hw = sky2->hw;
1085 struct sky2_tx_le *le = NULL;
1086 struct tx_ring_info *re;
1087 unsigned i, len;
1088 dma_addr_t mapping;
1089 u32 addr64;
1090 u16 mss;
1091 u8 ctrl;
1092
1093 if (!spin_trylock(&sky2->tx_lock))
1094 return NETDEV_TX_LOCKED;
1095
1096 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
1097 /* There is a known but harmless race with lockless tx
1098 * and netif_stop_queue.
1099 */
1100 if (!netif_queue_stopped(dev)) {
1101 netif_stop_queue(dev);
1102 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1103 dev->name);
1104 }
1105 spin_unlock(&sky2->tx_lock);
1106
1107 return NETDEV_TX_BUSY;
1108 }
1109
1110 if (unlikely(netif_msg_tx_queued(sky2)))
1111 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1112 dev->name, sky2->tx_prod, skb->len);
1113
1114 len = skb_headlen(skb);
1115 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1116 addr64 = high32(mapping);
1117
1118 re = sky2->tx_ring + sky2->tx_prod;
1119
1120 /* Send high bits if changed or crosses boundary */
1121 if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
1122 le = get_tx_le(sky2);
1123 le->tx.addr = cpu_to_le32(addr64);
1124 le->ctrl = 0;
1125 le->opcode = OP_ADDR64 | HW_OWNER;
1126 sky2->tx_addr64 = high32(mapping + len);
1127 }
1128
1129 /* Check for TCP Segmentation Offload */
1130 mss = skb_shinfo(skb)->tso_size;
1131 if (mss != 0) {
1132 /* just drop the packet if non-linear expansion fails */
1133 if (skb_header_cloned(skb) &&
1134 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1135 dev_kfree_skb_any(skb);
1136 goto out_unlock;
1137 }
1138
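		/* the large-send length programmed into the chip is the MSS plus
		 * the TCP, IP and Ethernet header lengths
		 */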
1139 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
1140 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
1141 mss += ETH_HLEN;
1142 }
1143
1144 if (mss != sky2->tx_last_mss) {
1145 le = get_tx_le(sky2);
1146 le->tx.tso.size = cpu_to_le16(mss);
1147 le->tx.tso.rsvd = 0;
1148 le->opcode = OP_LRGLEN | HW_OWNER;
1149 le->ctrl = 0;
1150 sky2->tx_last_mss = mss;
1151 }
1152
1153 ctrl = 0;
1154#ifdef SKY2_VLAN_TAG_USED
1155 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1156 if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1157 if (!le) {
1158 le = get_tx_le(sky2);
1159 le->tx.addr = 0;
1160 le->opcode = OP_VLAN|HW_OWNER;
1161 le->ctrl = 0;
1162 } else
1163 le->opcode |= OP_VLAN;
1164 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1165 ctrl |= INS_VLAN;
1166 }
1167#endif
1168
1169 /* Handle TCP checksum offload */
1170 if (skb->ip_summed == CHECKSUM_HW) {
1171 u16 hdr = skb->h.raw - skb->data;
1172 u16 offset = hdr + skb->csum;
1173
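		/* the hardware starts summing at 'hdr' (the transport header) and
		 * writes the result back into the packet at 'offset'
		 */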
1174 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1175 if (skb->nh.iph->protocol == IPPROTO_UDP)
1176 ctrl |= UDPTCP;
1177
1178 le = get_tx_le(sky2);
1179 le->tx.csum.start = cpu_to_le16(hdr);
1180 le->tx.csum.offset = cpu_to_le16(offset);
1181 le->length = 0; /* initial checksum value */
1182 le->ctrl = 1; /* one packet */
1183 le->opcode = OP_TCPLISW | HW_OWNER;
1184 }
1185
1186 le = get_tx_le(sky2);
1187 le->tx.addr = cpu_to_le32((u32) mapping);
1188 le->length = cpu_to_le16(len);
1189 le->ctrl = ctrl;
1190 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1191
1192 /* Record the transmit mapping info */
1193 re->skb = skb;
1194 pci_unmap_addr_set(re, mapaddr, mapping);
1195
1196 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1197 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1198 struct tx_ring_info *fre;
1199
1200 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1201 frag->size, PCI_DMA_TODEVICE);
1202 addr64 = (mapping >> 16) >> 16;
1203 if (addr64 != sky2->tx_addr64) {
1204 le = get_tx_le(sky2);
1205 le->tx.addr = cpu_to_le32(addr64);
1206 le->ctrl = 0;
1207 le->opcode = OP_ADDR64 | HW_OWNER;
1208 sky2->tx_addr64 = addr64;
1209 }
1210
1211 le = get_tx_le(sky2);
1212 le->tx.addr = cpu_to_le32((u32) mapping);
1213 le->length = cpu_to_le16(frag->size);
1214 le->ctrl = ctrl;
1215 le->opcode = OP_BUFFER | HW_OWNER;
1216
1217 fre = sky2->tx_ring
1218 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
1219 pci_unmap_addr_set(fre, mapaddr, mapping);
1220 }
1221
1222 re->idx = sky2->tx_prod;
1223 le->ctrl |= EOP;
1224
1225 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
1226 &sky2->tx_last_put, TX_RING_SIZE);
1227
1228 if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1229 netif_stop_queue(dev);
1230
1231out_unlock:
1232 mmiowb();
1233 spin_unlock(&sky2->tx_lock);
1234
1235 dev->trans_start = jiffies;
1236 return NETDEV_TX_OK;
1237}
1238
1239/*
1240  * Free ring elements starting at tx_cons until "done"
1241  *
1242  * NB: the hardware may report partial completion of multi-part
1243  * buffers; freeing those is deferred until the whole packet completes.
1244 */
1245static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1246{
1247 struct net_device *dev = sky2->netdev;
1248 struct pci_dev *pdev = sky2->hw->pdev;
1249 u16 nxt, put;
1250 unsigned i;
1251
1252 BUG_ON(done >= TX_RING_SIZE);
1253
1254 if (unlikely(netif_msg_tx_done(sky2)))
1255 printk(KERN_DEBUG "%s: tx done, up to %u\n",
1256 dev->name, done);
1257
1258 for (put = sky2->tx_cons; put != done; put = nxt) {
1259 struct tx_ring_info *re = sky2->tx_ring + put;
1260 struct sk_buff *skb = re->skb;
1261
1262 nxt = re->idx;
1263 BUG_ON(nxt >= TX_RING_SIZE);
1264 prefetch(sky2->tx_ring + nxt);
1265
1266 /* Check for partial status */
1267 if (tx_dist(put, done) < tx_dist(put, nxt))
1268 break;
1269
1270 skb = re->skb;
1271 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
1272 skb_headlen(skb), PCI_DMA_TODEVICE);
1273
1274 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1275 struct tx_ring_info *fre;
1276 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
1277 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1278 skb_shinfo(skb)->frags[i].size,
1279 PCI_DMA_TODEVICE);
1280 }
1281
1282 dev_kfree_skb_any(skb);
1283 }
1284
1285 spin_lock(&sky2->tx_lock);
1286 sky2->tx_cons = put;
1287 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1288 netif_wake_queue(dev);
1289 spin_unlock(&sky2->tx_lock);
1290}
1291
1292/* Cleanup all untransmitted buffers, assume transmitter not running */
1293static void sky2_tx_clean(struct sky2_port *sky2)
1294{
1295 sky2_tx_complete(sky2, sky2->tx_prod);
1296}
1297
1298/* Network shutdown */
1299static int sky2_down(struct net_device *dev)
1300{
1301 struct sky2_port *sky2 = netdev_priv(dev);
1302 struct sky2_hw *hw = sky2->hw;
1303 unsigned port = sky2->port;
1304 u16 ctrl;
1305
1306 /* Never really got started! */
1307 if (!sky2->tx_le)
1308 return 0;
1309
1310 if (netif_msg_ifdown(sky2))
1311 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1312
1313 /* Stop more packets from being queued */
1314 netif_stop_queue(dev);
1315
1316 /* Disable port IRQ */
1317 local_irq_disable();
1318 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1319 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1320 local_irq_enable();
1321
1322 flush_scheduled_work();
1323
1324 sky2_phy_reset(hw, port);
1325
1326 /* Stop transmitter */
1327 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1328 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1329
1330 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1331 RB_RST_SET | RB_DIS_OP_MD);
1332
1333 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1334 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1335 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1336
1337 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1338
1339 /* Workaround shared GMAC reset */
1340 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1341 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1342 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1343
1344 /* Disable Force Sync bit and Enable Alloc bit */
1345 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1346 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1347
1348 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1349 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1350 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1351
1352 /* Reset the PCI FIFO of the async Tx queue */
1353 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1354 BMU_RST_SET | BMU_FIFO_RST);
1355
1356 /* Reset the Tx prefetch units */
1357 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1358 PREF_UNIT_RST_SET);
1359
1360 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1361
1362 sky2_rx_stop(sky2);
1363
1364 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1365 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1366
1367 	/* turn off LEDs */
1368 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1369
1370 synchronize_irq(hw->pdev->irq);
1371
1372 sky2_tx_clean(sky2);
1373 sky2_rx_clean(sky2);
1374
1375 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1376 sky2->rx_le, sky2->rx_le_map);
1377 kfree(sky2->rx_ring);
1378
1379 pci_free_consistent(hw->pdev,
1380 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1381 sky2->tx_le, sky2->tx_le_map);
1382 kfree(sky2->tx_ring);
1383
1384 sky2->tx_le = NULL;
1385 sky2->rx_le = NULL;
1386
1387 sky2->rx_ring = NULL;
1388 sky2->tx_ring = NULL;
1389
1390 return 0;
1391}
1392
1393static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1394{
1395 if (!hw->copper)
1396 return SPEED_1000;
1397
1398 if (hw->chip_id == CHIP_ID_YUKON_FE)
1399 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1400
1401 switch (aux & PHY_M_PS_SPEED_MSK) {
1402 case PHY_M_PS_SPEED_1000:
1403 return SPEED_1000;
1404 case PHY_M_PS_SPEED_100:
1405 return SPEED_100;
1406 default:
1407 return SPEED_10;
1408 }
1409}
1410
1411static void sky2_link_up(struct sky2_port *sky2)
1412{
1413 struct sky2_hw *hw = sky2->hw;
1414 unsigned port = sky2->port;
1415 u16 reg;
1416
1417 /* Enable Transmit FIFO Underrun */
1418 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1419
1420 reg = gma_read16(hw, port, GM_GP_CTRL);
1421 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1422 reg |= GM_GPCR_DUP_FULL;
1423
1424 /* enable Rx/Tx */
1425 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1426 gma_write16(hw, port, GM_GP_CTRL, reg);
1427 gma_read16(hw, port, GM_GP_CTRL);
1428
1429 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1430
1431 netif_carrier_on(sky2->netdev);
1432 netif_wake_queue(sky2->netdev);
1433
1434 /* Turn on link LED */
1435 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1436 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1437
1438 if (hw->chip_id == CHIP_ID_YUKON_XL) {
1439 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1440
1441 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1442 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
1443 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1444 SPEED_10 ? 7 : 0) |
1445 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1446 SPEED_100 ? 7 : 0) |
1447 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1448 SPEED_1000 ? 7 : 0));
1449 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1450 }
1451
1452 if (netif_msg_link(sky2))
1453 printk(KERN_INFO PFX
1454 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1455 sky2->netdev->name, sky2->speed,
1456 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1457 (sky2->tx_pause && sky2->rx_pause) ? "both" :
1458 sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
1459}
1460
1461static void sky2_link_down(struct sky2_port *sky2)
1462{
1463 struct sky2_hw *hw = sky2->hw;
1464 unsigned port = sky2->port;
1465 u16 reg;
1466
1467 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1468
1469 reg = gma_read16(hw, port, GM_GP_CTRL);
1470 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1471 gma_write16(hw, port, GM_GP_CTRL, reg);
1472 gma_read16(hw, port, GM_GP_CTRL); /* PCI post */
1473
1474 if (sky2->rx_pause && !sky2->tx_pause) {
1475 /* restore Asymmetric Pause bit */
1476 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1477 gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
1478 | PHY_M_AN_ASP);
1479 }
1480
1481 netif_carrier_off(sky2->netdev);
1482 netif_stop_queue(sky2->netdev);
1483
1484 	/* Turn off link LED */
1485 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1486
1487 if (netif_msg_link(sky2))
1488 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1489 sky2_phy_init(hw, port);
1490}
1491
1492static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1493{
1494 struct sky2_hw *hw = sky2->hw;
1495 unsigned port = sky2->port;
1496 u16 lpa;
1497
1498 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1499
1500 if (lpa & PHY_M_AN_RF) {
1501 		printk(KERN_ERR PFX "%s: remote fault\n", sky2->netdev->name);
1502 return -1;
1503 }
1504
1505 if (hw->chip_id != CHIP_ID_YUKON_FE &&
1506 gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1507 		printk(KERN_ERR PFX "%s: master/slave fault\n",
1508 sky2->netdev->name);
1509 return -1;
1510 }
1511
1512 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1513 		printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
1514 sky2->netdev->name);
1515 return -1;
1516 }
1517
1518 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1519
1520 sky2->speed = sky2_phy_speed(hw, aux);
1521
1522 /* Pause bits are offset (9..8) */
1523 if (hw->chip_id == CHIP_ID_YUKON_XL)
1524 aux >>= 6;
1525
1526 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1527 sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1528
1529 if ((sky2->tx_pause || sky2->rx_pause)
1530 && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1531 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1532 else
1533 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1534
1535 return 0;
1536}
1537
1538/*
1539  * Interrupts from the PHY are handled outside of interrupt context
1540  * because accessing PHY registers requires a spin wait, which might
1541  * cause excess interrupt latency.
1542 */
1543static void sky2_phy_task(void *arg)
1544{
1545 struct sky2_port *sky2 = arg;
1546 struct sky2_hw *hw = sky2->hw;
1547 u16 istatus, phystat;
1548
1549 down(&sky2->phy_sema);
1550 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
1551 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
1552
1553 if (netif_msg_intr(sky2))
1554 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1555 sky2->netdev->name, istatus, phystat);
1556
1557 if (istatus & PHY_M_IS_AN_COMPL) {
1558 if (sky2_autoneg_done(sky2, phystat) == 0)
1559 sky2_link_up(sky2);
1560 goto out;
1561 }
1562
1563 if (istatus & PHY_M_IS_LSP_CHANGE)
1564 sky2->speed = sky2_phy_speed(hw, phystat);
1565
1566 if (istatus & PHY_M_IS_DUP_CHANGE)
1567 sky2->duplex =
1568 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1569
1570 if (istatus & PHY_M_IS_LST_CHANGE) {
1571 if (phystat & PHY_M_PS_LINK_UP)
1572 sky2_link_up(sky2);
1573 else
1574 sky2_link_down(sky2);
1575 }
1576out:
1577 up(&sky2->phy_sema);
1578
1579 local_irq_disable();
1580 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1581 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1582 local_irq_enable();
1583}
1584
1585static void sky2_tx_timeout(struct net_device *dev)
1586{
1587 struct sky2_port *sky2 = netdev_priv(dev);
1588 struct sky2_hw *hw = sky2->hw;
1589 unsigned txq = txqaddr[sky2->port];
1590
1591 if (netif_msg_timer(sky2))
1592 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1593
1594 netif_stop_queue(dev);
1595
1596 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1597 sky2_read32(hw, Q_ADDR(txq, Q_CSR));
1598
1599 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1600
1601 sky2_tx_clean(sky2);
1602
1603 sky2_qset(hw, txq);
1604 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1605
1606 netif_wake_queue(dev);
1607}
1608
1609
1610#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1611 /* Receive buffer size must be a multiple of 64 bits and include room for a VLAN tag */
1612static inline unsigned sky2_buf_size(int mtu)
1613{
1614 return roundup(mtu + ETH_HLEN + 4, 8);
1615}
1616
1617static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1618{
1619 struct sky2_port *sky2 = netdev_priv(dev);
1620 struct sky2_hw *hw = sky2->hw;
1621 int err;
1622 u16 ctl, mode;
1623
1624 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1625 return -EINVAL;
1626
1627 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1628 return -EINVAL;
1629
1630 if (!netif_running(dev)) {
1631 dev->mtu = new_mtu;
1632 return 0;
1633 }
1634
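	/* mask all chip interrupts while the receive path is torn down and rebuilt */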
1635 sky2_write32(hw, B0_IMSK, 0);
1636
1637 dev->trans_start = jiffies; /* prevent tx timeout */
1638 netif_stop_queue(dev);
1639 netif_poll_disable(hw->dev[0]);
1640
1641 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1642 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1643 sky2_rx_stop(sky2);
1644 sky2_rx_clean(sky2);
1645
1646 dev->mtu = new_mtu;
1647 sky2->rx_bufsize = sky2_buf_size(new_mtu);
1648 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
1649 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1650
1651 if (dev->mtu > ETH_DATA_LEN)
1652 mode |= GM_SMOD_JUMBO_ENA;
1653
1654 gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
1655
1656 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1657
1658 err = sky2_rx_start(sky2);
1659 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1660
1661 if (err)
1662 dev_close(dev);
1663 else {
1664 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
1665
1666 netif_poll_enable(hw->dev[0]);
1667 netif_wake_queue(dev);
1668 }
1669
1670 return err;
1671}
1672
1673/*
1674 * Receive one packet.
1675 * For small packets or errors, just reuse existing skb.
1676 * For larger packets, get new buffer.
1677 */
1678static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1679 u16 length, u32 status)
1680{
1681 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1682 struct sk_buff *skb = NULL;
1683
1684 if (unlikely(netif_msg_rx_status(sky2)))
1685 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1686 sky2->netdev->name, sky2->rx_next, status, length);
1687
1688 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1689 prefetch(sky2->rx_ring + sky2->rx_next);
1690
1691 if (status & GMR_FS_ANY_ERR)
1692 goto error;
1693
1694 if (!(status & GMR_FS_RX_OK))
1695 goto resubmit;
1696
1697 if ((status >> 16) != length || length > sky2->rx_bufsize)
1698 goto oversize;
1699
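	/* below 'copybreak' bytes it is cheaper to copy into a small skb and
	 * recycle the mapped receive buffer than to allocate and map a new one
	 */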
1700 if (length < copybreak) {
1701 skb = alloc_skb(length + 2, GFP_ATOMIC);
1702 if (!skb)
1703 goto resubmit;
1704
1705 skb_reserve(skb, 2);
1706 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
1707 length, PCI_DMA_FROMDEVICE);
1708 memcpy(skb->data, re->skb->data, length);
1709 skb->ip_summed = re->skb->ip_summed;
1710 skb->csum = re->skb->csum;
1711 pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
1712 length, PCI_DMA_FROMDEVICE);
1713 } else {
1714 struct sk_buff *nskb;
1715
1716 nskb = dev_alloc_skb(sky2->rx_bufsize);
1717 if (!nskb)
1718 goto resubmit;
1719
1720 skb = re->skb;
1721 re->skb = nskb;
1722 pci_unmap_single(sky2->hw->pdev, re->mapaddr,
1723 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
1724 prefetch(skb->data);
1725
1726 re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
1727 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
1728 }
1729
1730 skb_put(skb, length);
1731resubmit:
1732 re->skb->ip_summed = CHECKSUM_NONE;
1733 sky2_rx_add(sky2, re->mapaddr);
1734
1735 /* Tell receiver about new buffers. */
1736 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
1737 &sky2->rx_last_put, RX_LE_SIZE);
1738
1739 return skb;
1740
1741oversize:
1742 ++sky2->net_stats.rx_over_errors;
1743 goto resubmit;
1744
1745error:
1746 ++sky2->net_stats.rx_errors;
1747
1748 if (netif_msg_rx_err(sky2))
1749 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1750 sky2->netdev->name, status, length);
1751
1752 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1753 sky2->net_stats.rx_length_errors++;
1754 if (status & GMR_FS_FRAGMENT)
1755 sky2->net_stats.rx_frame_errors++;
1756 if (status & GMR_FS_CRC_ERR)
1757 sky2->net_stats.rx_crc_errors++;
1758 if (status & GMR_FS_RX_FF_OV)
1759 sky2->net_stats.rx_fifo_errors++;
1760
1761 goto resubmit;
1762}
1763
1764/*
1765 * Check for transmit complete
1766 */
1767#define TX_NO_STATUS 0xffff
1768
1769static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1770{
1771 if (last != TX_NO_STATUS) {
1772 struct net_device *dev = hw->dev[port];
1773 if (dev && netif_running(dev)) {
1774 struct sky2_port *sky2 = netdev_priv(dev);
1775 sky2_tx_complete(sky2, last);
1776 }
1777 }
1778}
1779
1780/*
1781  * Both ports share the same status interrupt; therefore there is only
1782  * one poll routine.
1783 */
1784static int sky2_poll(struct net_device *dev0, int *budget)
1785{
1786 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
1787 unsigned int to_do = min(dev0->quota, *budget);
1788 unsigned int work_done = 0;
1789 u16 hwidx;
1790 u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
1791
1792 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1793 BUG_ON(hwidx >= STATUS_RING_SIZE);
1794 rmb();
1795
1796 while (hwidx != hw->st_idx) {
1797 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1798 struct net_device *dev;
1799 struct sky2_port *sky2;
1800 struct sk_buff *skb;
1801 u32 status;
1802 u16 length;
1803 u8 op;
1804
1805 le = hw->st_le + hw->st_idx;
1806 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1807 prefetch(hw->st_le + hw->st_idx);
1808
1809 BUG_ON(le->link >= 2);
1810 dev = hw->dev[le->link];
1811 if (dev == NULL || !netif_running(dev))
1812 continue;
1813
1814 sky2 = netdev_priv(dev);
1815 status = le32_to_cpu(le->status);
1816 length = le16_to_cpu(le->length);
1817 op = le->opcode & ~HW_OWNER;
1818 le->opcode = 0;
1819
1820 switch (op) {
1821 case OP_RXSTAT:
1822 skb = sky2_receive(sky2, length, status);
1823 if (!skb)
1824 break;
1825
1826 skb->dev = dev;
1827 skb->protocol = eth_type_trans(skb, dev);
1828 dev->last_rx = jiffies;
1829
1830#ifdef SKY2_VLAN_TAG_USED
1831 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
1832 vlan_hwaccel_receive_skb(skb,
1833 sky2->vlgrp,
1834 be16_to_cpu(sky2->rx_tag));
1835 } else
1836#endif
1837 netif_receive_skb(skb);
1838
1839 if (++work_done >= to_do)
1840 goto exit_loop;
1841 break;
1842
1843#ifdef SKY2_VLAN_TAG_USED
1844 case OP_RXVLAN:
1845 sky2->rx_tag = length;
1846 break;
1847
1848 case OP_RXCHKSVLAN:
1849 sky2->rx_tag = length;
1850 /* fall through */
1851#endif
1852 case OP_RXCHKS:
1853 skb = sky2->rx_ring[sky2->rx_next].skb;
1854 skb->ip_summed = CHECKSUM_HW;
1855 skb->csum = le16_to_cpu(status);
1856 break;
1857
1858 case OP_TXINDEXLE:
1859 /* TX index reports status for both ports */
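			/* port 0's index is in the low 16 status bits; port 1's is
			 * split between status bits 31:24 and the low nibble of length
			 */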
1860 tx_done[0] = status & 0xffff;
1861 tx_done[1] = ((status >> 24) & 0xff)
1862 | (u16)(length & 0xf) << 8;
1863 break;
1864
1865 default:
1866 if (net_ratelimit())
1867 printk(KERN_WARNING PFX
1868 "unknown status opcode 0x%x\n", op);
1869 break;
1870 }
1871 }
1872
1873exit_loop:
1874 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1875 mmiowb();
1876
1877 sky2_tx_check(hw, 0, tx_done[0]);
1878 sky2_tx_check(hw, 1, tx_done[1]);
1879
1880 if (sky2_read16(hw, STAT_PUT_IDX) == hw->st_idx) {
1881 /* need to restart TX timer */
1882 if (is_ec_a1(hw)) {
1883 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1884 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1885 }
1886
1887 netif_rx_complete(dev0);
1888 hw->intr_mask |= Y2_IS_STAT_BMU;
1889 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1890 mmiowb();
1891 return 0;
1892 } else {
1893 *budget -= work_done;
1894 dev0->quota -= work_done;
1895 return 1;
1896 }
1897}
1898
1899static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1900{
1901 struct net_device *dev = hw->dev[port];
1902
1903 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1904 dev->name, status);
1905
1906 if (status & Y2_IS_PAR_RD1) {
1907 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1908 dev->name);
1909 /* Clear IRQ */
1910 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1911 }
1912
1913 if (status & Y2_IS_PAR_WR1) {
1914 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1915 dev->name);
1916
1917 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1918 }
1919
1920 if (status & Y2_IS_PAR_MAC1) {
1921 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1922 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1923 }
1924
1925 if (status & Y2_IS_PAR_RX1) {
1926 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1927 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1928 }
1929
1930 if (status & Y2_IS_TCP_TXA1) {
1931 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
1932 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1933 }
1934}
1935
1936static void sky2_hw_intr(struct sky2_hw *hw)
1937{
1938 u32 status = sky2_read32(hw, B0_HWE_ISRC);
1939
1940 if (status & Y2_IS_TIST_OV)
1941 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1942
1943 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1944 u16 pci_err;
1945
1946 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1947 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1948 pci_name(hw->pdev), pci_err);
1949
1950 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1951 pci_write_config_word(hw->pdev, PCI_STATUS,
1952 pci_err | PCI_STATUS_ERROR_BITS);
1953 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1954 }
1955
1956 if (status & Y2_IS_PCI_EXP) {
1957 /* PCI-Express uncorrectable Error occurred */
1958 u32 pex_err;
1959
1960 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1961
1962 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
1963 pci_name(hw->pdev), pex_err);
1964
1965 /* clear the interrupt */
1966 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1967 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1968 0xffffffffUL);
1969 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1970
1971 if (pex_err & PEX_FATAL_ERRORS) {
1972 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
1973 hwmsk &= ~Y2_IS_PCI_EXP;
1974 sky2_write32(hw, B0_HWE_IMSK, hwmsk);
1975 }
1976 }
1977
1978 if (status & Y2_HWE_L1_MASK)
1979 sky2_hw_error(hw, 0, status);
1980 status >>= 8;
1981 if (status & Y2_HWE_L1_MASK)
1982 sky2_hw_error(hw, 1, status);
1983}
1984
1985static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
1986{
1987 struct net_device *dev = hw->dev[port];
1988 struct sky2_port *sky2 = netdev_priv(dev);
1989 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1990
1991 if (netif_msg_intr(sky2))
1992 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
1993 dev->name, status);
1994
1995 if (status & GM_IS_RX_FF_OR) {
1996 ++sky2->net_stats.rx_fifo_errors;
1997 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
1998 }
1999
2000 if (status & GM_IS_TX_FF_UR) {
2001 ++sky2->net_stats.tx_fifo_errors;
2002 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2003 }
2004}
2005
2006static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2007{
2008 struct net_device *dev = hw->dev[port];
2009 struct sky2_port *sky2 = netdev_priv(dev);
2010
2011 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
2012 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2013 schedule_work(&sky2->phy_task);
2014}
2015
2016static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2017{
2018 struct sky2_hw *hw = dev_id;
2019 struct net_device *dev0 = hw->dev[0];
2020 u32 status;
2021
2022 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2023 if (status == 0 || status == ~0)
2024 return IRQ_NONE;
2025
2026 if (status & Y2_IS_HW_ERR)
2027 sky2_hw_intr(hw);
2028
2029 /* Do NAPI for Rx and Tx status */
2030 if (status & Y2_IS_STAT_BMU) {
2031 hw->intr_mask &= ~Y2_IS_STAT_BMU;
2032 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2033
2034 if (likely(__netif_rx_schedule_prep(dev0))) {
2035 prefetch(&hw->st_le[hw->st_idx]);
2036 __netif_rx_schedule(dev0);
2037 }
2038 }
2039
2040 if (status & Y2_IS_IRQ_PHY1)
2041 sky2_phy_intr(hw, 0);
2042
2043 if (status & Y2_IS_IRQ_PHY2)
2044 sky2_phy_intr(hw, 1);
2045
2046 if (status & Y2_IS_IRQ_MAC1)
2047 sky2_mac_intr(hw, 0);
2048
2049 if (status & Y2_IS_IRQ_MAC2)
2050 sky2_mac_intr(hw, 1);
2051
2052 sky2_write32(hw, B0_Y2_SP_ICR, 2);
2053
2054 sky2_read32(hw, B0_IMSK);
2055
2056 return IRQ_HANDLED;
2057}
2058
2059#ifdef CONFIG_NET_POLL_CONTROLLER
2060static void sky2_netpoll(struct net_device *dev)
2061{
2062 struct sky2_port *sky2 = netdev_priv(dev);
2063
2064 sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
2065}
2066#endif
2067
2068/* Chip internal frequency for clock calculations */
2069static inline u32 sky2_mhz(const struct sky2_hw *hw)
2070{
2071 switch (hw->chip_id) {
2072 case CHIP_ID_YUKON_EC:
2073 case CHIP_ID_YUKON_EC_U:
2074 		return 125;	/* 125 MHz */
2075 	case CHIP_ID_YUKON_FE:
2076 		return 100;	/* 100 MHz */
2077 	default:		/* YUKON_XL */
2078 		return 156;	/* 156 MHz */
2079 }
2080}
2081
2082static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2083{
2084 return sky2_mhz(hw) * us;
2085}
2086
2087static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2088{
2089 return clk / sky2_mhz(hw);
2090}
2091
2092
2093static int sky2_reset(struct sky2_hw *hw)
2094{
2095 u32 ctst;
2096 u16 status;
2097 u8 t8, pmd_type;
2098 int i;
2099
2100 ctst = sky2_read32(hw, B0_CTST);
2101
2102 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2103 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2104 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2105 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
2106 pci_name(hw->pdev), hw->chip_id);
2107 return -EOPNOTSUPP;
2108 }
2109
2110 /* ring for status responses */
2111 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
2112 &hw->st_dma);
2113 if (!hw->st_le)
2114 return -ENOMEM;
2115
2116 /* disable ASF */
2117 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2118 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2119 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2120 }
2121
2122 /* do a SW reset */
2123 sky2_write8(hw, B0_CTST, CS_RST_SET);
2124 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2125
2126 /* clear PCI errors, if any */
2127 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2128 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2129 pci_write_config_word(hw->pdev, PCI_STATUS,
2130 status | PCI_STATUS_ERROR_BITS);
2131
2132 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2133
2134 /* clear any PEX errors */
2135 if (is_pciex(hw)) {
2136 u16 lstat;
2137 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
2138 0xffffffffUL);
2139 pci_read_config_word(hw->pdev, PEX_LNK_STAT, &lstat);
2140 }
2141
2142 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2143 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
2144
2145 hw->ports = 1;
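	/* a second port exists only if the dual-MAC strapping is present and
	 * link 2 is not inactive (clock gated off)
	 */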
2146 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2147 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2148 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2149 ++hw->ports;
2150 }
2151 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2152
2153 sky2_set_power_state(hw, PCI_D0);
2154
2155 for (i = 0; i < hw->ports; i++) {
2156 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2157 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2158 }
2159
2160 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2161
2162 /* Clear I2C IRQ noise */
2163 sky2_write32(hw, B2_I2C_IRQ, 1);
2164
2165 /* turn off hardware timer (unused) */
2166 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2167 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2168
2169 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2170
2171 /* Turn off descriptor polling */
2172 sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2173
2174 /* Turn off receive timestamp */
2175 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2176 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2177
2178 /* enable the Tx Arbiters */
2179 for (i = 0; i < hw->ports; i++)
2180 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2181
2182 /* Initialize ram interface */
2183 for (i = 0; i < hw->ports; i++) {
2184 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2185
2186 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2187 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2188 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2189 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2190 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2191 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2192 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2193 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2194 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2195 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2196 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2197 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2198 }
2199
2200 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2201
2202 for (i = 0; i < hw->ports; i++)
2203 sky2_phy_reset(hw, i);
2204
2205 memset(hw->st_le, 0, STATUS_LE_BYTES);
2206 hw->st_idx = 0;
2207
2208 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2209 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2210
2211 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2212 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2213
2214 /* Set the list last index */
2215 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2216
2217 /* These status setup values are copied from SysKonnect's driver */
2218 if (is_ec_a1(hw)) {
2219 /* WA for dev. #4.3 */
2220 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
2221
2222 /* set Status-FIFO watermark */
2223 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
2224
2225 /* set Status-FIFO ISR watermark */
2226 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
2227 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
2228 } else {
2229 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2230 sky2_write8(hw, STAT_FIFO_WM, 16);
2231
2232 /* set Status-FIFO ISR watermark */
2233 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2234 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2235 else
2236 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2237
2238 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2239 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2240 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2241 }
2242
2243 /* enable status unit */
2244 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2245
2246 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2247 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2248 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2249
2250 return 0;
2251}
2252
2253static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
2254{
2255 u32 modes;
2256 if (hw->copper) {
2257 modes = SUPPORTED_10baseT_Half
2258 | SUPPORTED_10baseT_Full
2259 | SUPPORTED_100baseT_Half
2260 | SUPPORTED_100baseT_Full
2261 | SUPPORTED_Autoneg | SUPPORTED_TP;
2262
2263 if (hw->chip_id != CHIP_ID_YUKON_FE)
2264 modes |= SUPPORTED_1000baseT_Half
2265 | SUPPORTED_1000baseT_Full;
2266 } else
2267 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
2268 | SUPPORTED_Autoneg;
2269 return modes;
2270}
2271
2272static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2273{
2274 struct sky2_port *sky2 = netdev_priv(dev);
2275 struct sky2_hw *hw = sky2->hw;
2276
2277 ecmd->transceiver = XCVR_INTERNAL;
2278 ecmd->supported = sky2_supported_modes(hw);
2279 ecmd->phy_address = PHY_ADDR_MARV;
2280 if (hw->copper) {
2281 ecmd->supported = SUPPORTED_10baseT_Half
2282 | SUPPORTED_10baseT_Full
2283 | SUPPORTED_100baseT_Half
2284 | SUPPORTED_100baseT_Full
2285 | SUPPORTED_1000baseT_Half
2286 | SUPPORTED_1000baseT_Full
2287 | SUPPORTED_Autoneg | SUPPORTED_TP;
2288 ecmd->port = PORT_TP;
2289 } else
2290 ecmd->port = PORT_FIBRE;
2291
2292 ecmd->advertising = sky2->advertising;
2293 ecmd->autoneg = sky2->autoneg;
2294 ecmd->speed = sky2->speed;
2295 ecmd->duplex = sky2->duplex;
2296 return 0;
2297}
2298
2299static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2300{
2301 struct sky2_port *sky2 = netdev_priv(dev);
2302 const struct sky2_hw *hw = sky2->hw;
2303 u32 supported = sky2_supported_modes(hw);
2304
2305 if (ecmd->autoneg == AUTONEG_ENABLE) {
2306 ecmd->advertising = supported;
2307 sky2->duplex = -1;
2308 sky2->speed = -1;
2309 } else {
2310 u32 setting;
2311
2312 switch (ecmd->speed) {
2313 case SPEED_1000:
2314 if (ecmd->duplex == DUPLEX_FULL)
2315 setting = SUPPORTED_1000baseT_Full;
2316 else if (ecmd->duplex == DUPLEX_HALF)
2317 setting = SUPPORTED_1000baseT_Half;
2318 else
2319 return -EINVAL;
2320 break;
2321 case SPEED_100:
2322 if (ecmd->duplex == DUPLEX_FULL)
2323 setting = SUPPORTED_100baseT_Full;
2324 else if (ecmd->duplex == DUPLEX_HALF)
2325 setting = SUPPORTED_100baseT_Half;
2326 else
2327 return -EINVAL;
2328 break;
2329
2330 case SPEED_10:
2331 if (ecmd->duplex == DUPLEX_FULL)
2332 setting = SUPPORTED_10baseT_Full;
2333 else if (ecmd->duplex == DUPLEX_HALF)
2334 setting = SUPPORTED_10baseT_Half;
2335 else
2336 return -EINVAL;
2337 break;
2338 default:
2339 return -EINVAL;
2340 }
2341
2342 if ((setting & supported) == 0)
2343 return -EINVAL;
2344
2345 sky2->speed = ecmd->speed;
2346 sky2->duplex = ecmd->duplex;
2347 }
2348
2349 sky2->autoneg = ecmd->autoneg;
2350 sky2->advertising = ecmd->advertising;
2351
2352 if (netif_running(dev))
2353 sky2_phy_reinit(sky2);
2354
2355 return 0;
2356}
2357
2358static void sky2_get_drvinfo(struct net_device *dev,
2359 struct ethtool_drvinfo *info)
2360{
2361 struct sky2_port *sky2 = netdev_priv(dev);
2362
2363 strcpy(info->driver, DRV_NAME);
2364 strcpy(info->version, DRV_VERSION);
2365 strcpy(info->fw_version, "N/A");
2366 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
2367}
2368
2369static const struct sky2_stat {
2370 char name[ETH_GSTRING_LEN];
2371 u16 offset;
2372} sky2_stats[] = {
2373 { "tx_bytes", GM_TXO_OK_HI },
2374 { "rx_bytes", GM_RXO_OK_HI },
2375 { "tx_broadcast", GM_TXF_BC_OK },
2376 { "rx_broadcast", GM_RXF_BC_OK },
2377 { "tx_multicast", GM_TXF_MC_OK },
2378 { "rx_multicast", GM_RXF_MC_OK },
2379 { "tx_unicast", GM_TXF_UC_OK },
2380 { "rx_unicast", GM_RXF_UC_OK },
2381 { "tx_mac_pause", GM_TXF_MPAUSE },
2382 { "rx_mac_pause", GM_RXF_MPAUSE },
2383 { "collisions", GM_TXF_SNG_COL },
2384 { "late_collision",GM_TXF_LAT_COL },
2385 { "aborted", GM_TXF_ABO_COL },
2386 { "multi_collisions", GM_TXF_MUL_COL },
2387 { "fifo_underrun", GM_TXE_FIFO_UR },
2388 { "fifo_overflow", GM_RXE_FIFO_OV },
2389 { "rx_toolong", GM_RXF_LNG_ERR },
2390 { "rx_jabber", GM_RXF_JAB_PKT },
2391 { "rx_runt", GM_RXE_FRAG },
2392 { "rx_too_long", GM_RXF_LNG_ERR },
2393 { "rx_fcs_error", GM_RXF_FCS_ERR },
2394};
2395
2396static u32 sky2_get_rx_csum(struct net_device *dev)
2397{
2398 struct sky2_port *sky2 = netdev_priv(dev);
2399
2400 return sky2->rx_csum;
2401}
2402
2403static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2404{
2405 struct sky2_port *sky2 = netdev_priv(dev);
2406
2407 sky2->rx_csum = data;
2408
2409 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2410 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2411
2412 return 0;
2413}
2414
2415static u32 sky2_get_msglevel(struct net_device *netdev)
2416{
2417 struct sky2_port *sky2 = netdev_priv(netdev);
2418 return sky2->msg_enable;
2419}
2420
2421static int sky2_nway_reset(struct net_device *dev)
2422{
2423 struct sky2_port *sky2 = netdev_priv(dev);
2424
2425 if (sky2->autoneg != AUTONEG_ENABLE)
2426 return -EINVAL;
2427
2428 sky2_phy_reinit(sky2);
2429
2430 return 0;
2431}
2432
2433static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2434{
2435 struct sky2_hw *hw = sky2->hw;
2436 unsigned port = sky2->port;
2437 int i;
2438
2439 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2440 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2441 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2442 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2443
2444 for (i = 2; i < count; i++)
2445 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
2446}
2447
2448static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2449{
2450 struct sky2_port *sky2 = netdev_priv(netdev);
2451 sky2->msg_enable = value;
2452}
2453
2454static int sky2_get_stats_count(struct net_device *dev)
2455{
2456 return ARRAY_SIZE(sky2_stats);
2457}
2458
2459static void sky2_get_ethtool_stats(struct net_device *dev,
2460 struct ethtool_stats *stats, u64 * data)
2461{
2462 struct sky2_port *sky2 = netdev_priv(dev);
2463
2464 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2465}
2466
2467static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2468{
2469 int i;
2470
2471 switch (stringset) {
2472 case ETH_SS_STATS:
2473 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
2474 memcpy(data + i * ETH_GSTRING_LEN,
2475 sky2_stats[i].name, ETH_GSTRING_LEN);
2476 break;
2477 }
2478}
2479
2480 /* Use the hardware MIB counters for critical-path statistics and for
2481  * transmit feedback that is not reported at interrupt time.
2482  * Other errors are accounted for in the interrupt handler.
2483 */
2484static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2485{
2486 struct sky2_port *sky2 = netdev_priv(dev);
2487 u64 data[13];
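	/* data[] is filled in the same order as the sky2_stats[] table above */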
2488
2489 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2490
2491 sky2->net_stats.tx_bytes = data[0];
2492 sky2->net_stats.rx_bytes = data[1];
2493 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2494 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2495 sky2->net_stats.multicast = data[5] + data[7];
2496 sky2->net_stats.collisions = data[10];
2497 sky2->net_stats.tx_aborted_errors = data[12];
2498
2499 return &sky2->net_stats;
2500}
2501
2502static int sky2_set_mac_address(struct net_device *dev, void *p)
2503{
2504 struct sky2_port *sky2 = netdev_priv(dev);
2505 struct sockaddr *addr = p;
2506
2507 if (!is_valid_ether_addr(addr->sa_data))
2508 return -EADDRNOTAVAIL;
2509
2510 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2511 memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
2512 dev->dev_addr, ETH_ALEN);
2513 memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
2514 dev->dev_addr, ETH_ALEN);
2515
2516 if (netif_running(dev))
2517 sky2_phy_reinit(sky2);
2518
2519 return 0;
2520}
2521
2522static void sky2_set_multicast(struct net_device *dev)
2523{
2524 struct sky2_port *sky2 = netdev_priv(dev);
2525 struct sky2_hw *hw = sky2->hw;
2526 unsigned port = sky2->port;
2527 struct dev_mc_list *list = dev->mc_list;
2528 u16 reg;
2529 u8 filter[8];
2530
2531 memset(filter, 0, sizeof(filter));
2532
2533 reg = gma_read16(hw, port, GM_RX_CTRL);
2534 reg |= GM_RXCR_UCF_ENA;
2535
2536 if (dev->flags & IFF_PROMISC) /* promiscuous */
2537 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2538 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
2539 memset(filter, 0xff, sizeof(filter));
2540 else if (dev->mc_count == 0) /* no multicast */
2541 reg &= ~GM_RXCR_MCF_ENA;
2542 else {
2543 int i;
2544 reg |= GM_RXCR_MCF_ENA;
2545
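		/* the low 6 bits of the CRC select one of the 64 hash-filter bits */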
2546 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2547 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2548 filter[bit / 8] |= 1 << (bit % 8);
2549 }
2550 }
2551
2552 gma_write16(hw, port, GM_MC_ADDR_H1,
2553 (u16) filter[0] | ((u16) filter[1] << 8));
2554 gma_write16(hw, port, GM_MC_ADDR_H2,
2555 (u16) filter[2] | ((u16) filter[3] << 8));
2556 gma_write16(hw, port, GM_MC_ADDR_H3,
2557 (u16) filter[4] | ((u16) filter[5] << 8));
2558 gma_write16(hw, port, GM_MC_ADDR_H4,
2559 (u16) filter[6] | ((u16) filter[7] << 8));
2560
2561 gma_write16(hw, port, GM_RX_CTRL, reg);
2562}
2563
2564 /* Can keep one global state here because LED blinking is controlled by
2565  * ethtool, and that always runs under the RTNL mutex
2566 */
2567static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2568{
2569 u16 pg;
2570
2571 switch (hw->chip_id) {
2572 case CHIP_ID_YUKON_XL:
2573 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2574 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2575 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
2576 on ? (PHY_M_LEDC_LOS_CTRL(1) |
2577 PHY_M_LEDC_INIT_CTRL(7) |
2578 PHY_M_LEDC_STA1_CTRL(7) |
2579 PHY_M_LEDC_STA0_CTRL(7))
2580 : 0);
2581
2582 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2583 break;
2584
2585 default:
2586 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2587 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2588 on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
2589 PHY_M_LED_MO_10(MO_LED_ON) |
2590 PHY_M_LED_MO_100(MO_LED_ON) |
2591 PHY_M_LED_MO_1000(MO_LED_ON) |
2592 PHY_M_LED_MO_RX(MO_LED_ON)
2593 : PHY_M_LED_MO_DUP(MO_LED_OFF) |
2594 PHY_M_LED_MO_10(MO_LED_OFF) |
2595 PHY_M_LED_MO_100(MO_LED_OFF) |
2596 PHY_M_LED_MO_1000(MO_LED_OFF) |
2597 PHY_M_LED_MO_RX(MO_LED_OFF));
2598
2599 }
2600}
2601
2602/* blink LED's for finding board */
2603static int sky2_phys_id(struct net_device *dev, u32 data)
2604{
2605 struct sky2_port *sky2 = netdev_priv(dev);
2606 struct sky2_hw *hw = sky2->hw;
2607 unsigned port = sky2->port;
2608 u16 ledctrl, ledover = 0;
2609 long ms;
2610 int interrupted;
2611 int onoff = 1;
2612
2613 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
2614 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2615 else
2616 ms = data * 1000;
2617
2618 /* save initial values */
2619 down(&sky2->phy_sema);
2620 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2621 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2622 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2623 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2624 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2625 } else {
2626 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2627 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2628 }
2629
2630 interrupted = 0;
2631 while (!interrupted && ms > 0) {
2632 sky2_led(hw, port, onoff);
2633 onoff = !onoff;
2634
2635 up(&sky2->phy_sema);
2636 interrupted = msleep_interruptible(250);
2637 down(&sky2->phy_sema);
2638
2639 ms -= 250;
2640 }
2641
2642 /* resume regularly scheduled programming */
2643 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2644 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2645 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2646 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
2647 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2648 } else {
2649 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2650 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2651 }
2652 up(&sky2->phy_sema);
2653
2654 return 0;
2655}
2656
2657static void sky2_get_pauseparam(struct net_device *dev,
2658 struct ethtool_pauseparam *ecmd)
2659{
2660 struct sky2_port *sky2 = netdev_priv(dev);
2661
2662 ecmd->tx_pause = sky2->tx_pause;
2663 ecmd->rx_pause = sky2->rx_pause;
2664 ecmd->autoneg = sky2->autoneg;
2665}
2666
2667static int sky2_set_pauseparam(struct net_device *dev,
2668 struct ethtool_pauseparam *ecmd)
2669{
2670 struct sky2_port *sky2 = netdev_priv(dev);
2671 int err = 0;
2672
2673 sky2->autoneg = ecmd->autoneg;
2674 sky2->tx_pause = ecmd->tx_pause != 0;
2675 sky2->rx_pause = ecmd->rx_pause != 0;
2676
2677 sky2_phy_reinit(sky2);
2678
2679 return err;
2680}
2681
2682#ifdef CONFIG_PM
2683static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2684{
2685 struct sky2_port *sky2 = netdev_priv(dev);
2686
2687 wol->supported = WAKE_MAGIC;
2688 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2689}
2690
2691static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2692{
2693 struct sky2_port *sky2 = netdev_priv(dev);
2694 struct sky2_hw *hw = sky2->hw;
2695
2696 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2697 return -EOPNOTSUPP;
2698
2699 sky2->wol = wol->wolopts == WAKE_MAGIC;
2700
2701 if (sky2->wol) {
2702 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2703
2704 sky2_write16(hw, WOL_CTRL_STAT,
2705 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2706 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2707 } else
2708 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2709
2710 return 0;
2711}
2712#endif
2713
2714static int sky2_get_coalesce(struct net_device *dev,
2715 struct ethtool_coalesce *ecmd)
2716{
2717 struct sky2_port *sky2 = netdev_priv(dev);
2718 struct sky2_hw *hw = sky2->hw;
2719
2720 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
2721 ecmd->tx_coalesce_usecs = 0;
2722 else {
2723 u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
2724 ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
2725 }
2726 ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
2727
2728 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
2729 ecmd->rx_coalesce_usecs = 0;
2730 else {
2731 u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
2732 ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
2733 }
2734 ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
2735
2736 if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
2737 ecmd->rx_coalesce_usecs_irq = 0;
2738 else {
2739 u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
2740 ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
2741 }
2742
2743 ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
2744
2745 return 0;
2746}
2747
2748/* Note: this affects both ports */
2749static int sky2_set_coalesce(struct net_device *dev,
2750 struct ethtool_coalesce *ecmd)
2751{
2752 struct sky2_port *sky2 = netdev_priv(dev);
2753 struct sky2_hw *hw = sky2->hw;
2754 const u32 tmin = sky2_clk2us(hw, 1);
2755 const u32 tmax = 5000;
2756
2757 if (ecmd->tx_coalesce_usecs != 0 &&
2758 (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax))
2759 return -EINVAL;
2760
2761 if (ecmd->rx_coalesce_usecs != 0 &&
2762 (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax))
2763 return -EINVAL;
2764
2765 if (ecmd->rx_coalesce_usecs_irq != 0 &&
2766 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
2767 return -EINVAL;
2768
2769 if (ecmd->tx_max_coalesced_frames > 0xffff)
2770 return -EINVAL;
2771 if (ecmd->rx_max_coalesced_frames > 0xff)
2772 return -EINVAL;
2773 if (ecmd->rx_max_coalesced_frames_irq > 0xff)
2774 return -EINVAL;
2775
2776 if (ecmd->tx_coalesce_usecs == 0)
2777 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2778 else {
2779 sky2_write32(hw, STAT_TX_TIMER_INI,
2780 sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
2781 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2782 }
2783 sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
2784
2785 if (ecmd->rx_coalesce_usecs == 0)
2786 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
2787 else {
2788 sky2_write32(hw, STAT_LEV_TIMER_INI,
2789 sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
2790 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2791 }
2792 sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
2793
2794 if (ecmd->rx_coalesce_usecs_irq == 0)
2795 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
2796 else {
2797  sky2_write32(hw, STAT_ISR_TIMER_INI,
2798 sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
2799 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2800 }
2801 sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
2802 return 0;
2803}
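/* Illustrative sketch, not part of the driver: all three timers above are
 * programmed with the same stop-or-start pattern, which could be factored
 * into a helper like the one below (the register writes and the
 * sky2_us2clk() conversion are exactly those used in sky2_set_coalesce()).
 */
static inline void sky2_coal_timer(struct sky2_hw *hw, unsigned tmr_ini,
                                   unsigned tmr_ctrl, u32 usecs)
{
        if (usecs == 0)
                sky2_write8(hw, tmr_ctrl, TIM_STOP);
        else {
                sky2_write32(hw, tmr_ini, sky2_us2clk(hw, usecs));
                sky2_write8(hw, tmr_ctrl, TIM_START);
        }
}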
2804
2805static void sky2_get_ringparam(struct net_device *dev,
2806 struct ethtool_ringparam *ering)
2807{
2808 struct sky2_port *sky2 = netdev_priv(dev);
2809
2810 ering->rx_max_pending = RX_MAX_PENDING;
2811 ering->rx_mini_max_pending = 0;
2812 ering->rx_jumbo_max_pending = 0;
2813 ering->tx_max_pending = TX_RING_SIZE - 1;
2814
2815 ering->rx_pending = sky2->rx_pending;
2816 ering->rx_mini_pending = 0;
2817 ering->rx_jumbo_pending = 0;
2818 ering->tx_pending = sky2->tx_pending;
2819}
2820
2821static int sky2_set_ringparam(struct net_device *dev,
2822 struct ethtool_ringparam *ering)
2823{
2824 struct sky2_port *sky2 = netdev_priv(dev);
2825 int err = 0;
2826
2827 if (ering->rx_pending > RX_MAX_PENDING ||
2828 ering->rx_pending < 8 ||
2829 ering->tx_pending < MAX_SKB_TX_LE ||
2830 ering->tx_pending > TX_RING_SIZE - 1)
2831 return -EINVAL;
2832
2833 if (netif_running(dev))
2834 sky2_down(dev);
2835
2836 sky2->rx_pending = ering->rx_pending;
2837 sky2->tx_pending = ering->tx_pending;
2838
2839 if (netif_running(dev)) {
2840 err = sky2_up(dev);
2841 if (err)
2842 dev_close(dev);
2843 else
2844 sky2_set_multicast(dev);
2845 }
2846
2847 return err;
2848}
2849
2850static int sky2_get_regs_len(struct net_device *dev)
2851{
2852 return 0x4000;
2853}
2854
2855/*
2856 * Returns copy of control register region
2857 * Note: access to the RAM address register set will cause timeouts.
2858 */
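/* As a result the returned buffer has two populated blocks: bytes
 * [0, B3_RAM_ADDR) and bytes [B3_RI_WTO_R1, len); the RAM address/data
 * window in between is left zero-filled by the memset() below.
 */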
2859static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2860 void *p)
2861{
2862 const struct sky2_port *sky2 = netdev_priv(dev);
2863 const void __iomem *io = sky2->hw->regs;
2864
2865 BUG_ON(regs->len < B3_RI_WTO_R1);
2866 regs->version = 1;
2867 memset(p, 0, regs->len);
2868
2869 memcpy_fromio(p, io, B3_RAM_ADDR);
2870
2871 memcpy_fromio(p + B3_RI_WTO_R1,
2872 io + B3_RI_WTO_R1,
2873 regs->len - B3_RI_WTO_R1);
2874}
2875
2876static struct ethtool_ops sky2_ethtool_ops = {
2877 .get_settings = sky2_get_settings,
2878 .set_settings = sky2_set_settings,
2879 .get_drvinfo = sky2_get_drvinfo,
2880 .get_msglevel = sky2_get_msglevel,
2881 .set_msglevel = sky2_set_msglevel,
2882 .nway_reset = sky2_nway_reset,
2883 .get_regs_len = sky2_get_regs_len,
2884 .get_regs = sky2_get_regs,
2885 .get_link = ethtool_op_get_link,
2886 .get_sg = ethtool_op_get_sg,
2887 .set_sg = ethtool_op_set_sg,
2888 .get_tx_csum = ethtool_op_get_tx_csum,
2889 .set_tx_csum = ethtool_op_set_tx_csum,
2890 .get_tso = ethtool_op_get_tso,
2891 .set_tso = ethtool_op_set_tso,
2892 .get_rx_csum = sky2_get_rx_csum,
2893 .set_rx_csum = sky2_set_rx_csum,
2894 .get_strings = sky2_get_strings,
2895 .get_coalesce = sky2_get_coalesce,
2896 .set_coalesce = sky2_set_coalesce,
2897 .get_ringparam = sky2_get_ringparam,
2898 .set_ringparam = sky2_set_ringparam,
2899 .get_pauseparam = sky2_get_pauseparam,
2900 .set_pauseparam = sky2_set_pauseparam,
2901#ifdef CONFIG_PM
2902 .get_wol = sky2_get_wol,
2903 .set_wol = sky2_set_wol,
2904#endif
2905 .phys_id = sky2_phys_id,
2906 .get_stats_count = sky2_get_stats_count,
2907 .get_ethtool_stats = sky2_get_ethtool_stats,
2908 .get_perm_addr = ethtool_op_get_perm_addr,
2909};
2910
2911/* Initialize network device */
2912static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2913 unsigned port, int highmem)
2914{
2915 struct sky2_port *sky2;
2916 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
2917
2918 if (!dev) {
2919  printk(KERN_ERR "sky2 etherdev alloc failed\n");
2920 return NULL;
2921 }
2922
2923 SET_MODULE_OWNER(dev);
2924 SET_NETDEV_DEV(dev, &hw->pdev->dev);
2925 dev->irq = hw->pdev->irq;
2926 dev->open = sky2_up;
2927 dev->stop = sky2_down;
2928 dev->do_ioctl = sky2_ioctl;
2929 dev->hard_start_xmit = sky2_xmit_frame;
2930 dev->get_stats = sky2_get_stats;
2931 dev->set_multicast_list = sky2_set_multicast;
2932 dev->set_mac_address = sky2_set_mac_address;
2933 dev->change_mtu = sky2_change_mtu;
2934 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
2935 dev->tx_timeout = sky2_tx_timeout;
2936 dev->watchdog_timeo = TX_WATCHDOG;
2937 if (port == 0)
2938 dev->poll = sky2_poll;
2939 dev->weight = NAPI_WEIGHT;
2940#ifdef CONFIG_NET_POLL_CONTROLLER
2941 dev->poll_controller = sky2_netpoll;
2942#endif
2943
2944 sky2 = netdev_priv(dev);
2945 sky2->netdev = dev;
2946 sky2->hw = hw;
2947 sky2->msg_enable = netif_msg_init(debug, default_msg);
2948
2949 spin_lock_init(&sky2->tx_lock);
2950 /* Auto speed and flow control */
2951 sky2->autoneg = AUTONEG_ENABLE;
2952 sky2->tx_pause = 1;
2953 sky2->rx_pause = 1;
2954 sky2->duplex = -1;
2955 sky2->speed = -1;
2956 sky2->advertising = sky2_supported_modes(hw);
2957
2958 /* Receive checksum disabled for Yukon XL
2959 * because of observed problems with incorrect
2960 * values when multiple packets are received in one interrupt
2961 */
2962 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
2963
2964 INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2);
2965 init_MUTEX(&sky2->phy_sema);
2966 sky2->tx_pending = TX_DEF_PENDING;
2967 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
2968 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
2969
2970 hw->dev[port] = dev;
2971
2972 sky2->port = port;
2973
2974 dev->features |= NETIF_F_LLTX;
2975 if (hw->chip_id != CHIP_ID_YUKON_EC_U)
2976 dev->features |= NETIF_F_TSO;
2977 if (highmem)
2978 dev->features |= NETIF_F_HIGHDMA;
2979 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2980
2981#ifdef SKY2_VLAN_TAG_USED
2982 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2983 dev->vlan_rx_register = sky2_vlan_rx_register;
2984 dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
2985#endif
2986
2987 /* read the mac address */
2988 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
2989 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2990
2991 /* device is off until link detection */
2992 netif_carrier_off(dev);
2993 netif_stop_queue(dev);
2994
2995 return dev;
2996}
2997
2998static inline void sky2_show_addr(struct net_device *dev)
2999{
3000 const struct sky2_port *sky2 = netdev_priv(dev);
3001
3002 if (netif_msg_probe(sky2))
3003 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
3004 dev->name,
3005 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3006 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3007}
3008
3009static int __devinit sky2_probe(struct pci_dev *pdev,
3010 const struct pci_device_id *ent)
3011{
3012 struct net_device *dev, *dev1 = NULL;
3013 struct sky2_hw *hw;
3014 int err, pm_cap, using_dac = 0;
3015
3016 err = pci_enable_device(pdev);
3017 if (err) {
3018 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3019 pci_name(pdev));
3020 goto err_out;
3021 }
3022
3023 err = pci_request_regions(pdev, DRV_NAME);
3024 if (err) {
3025 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3026 pci_name(pdev));
3027 goto err_out;
3028 }
3029
3030 pci_set_master(pdev);
3031
3032 /* Find power-management capability. */
3033 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3034 if (pm_cap == 0) {
3035 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
3036 "aborting.\n");
3037 err = -EIO;
3038 goto err_out_free_regions;
3039 }
3040
3041 if (sizeof(dma_addr_t) > sizeof(u32)) {
3042 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
3043 if (!err)
3044 using_dac = 1;
3045 }
3046
3047 if (!using_dac) {
3048 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3049 if (err) {
3050 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3051 pci_name(pdev));
3052 goto err_out_free_regions;
3053 }
3054 }
3055#ifdef __BIG_ENDIAN
3056 /* byte swap descriptors in hardware */
3057 {
3058 u32 reg;
3059
3060 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3061 reg |= PCI_REV_DESC;
3062 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3063 }
3064#endif
3065
3066 err = -ENOMEM;
3067 hw = kmalloc(sizeof(*hw), GFP_KERNEL);
3068 if (!hw) {
3069 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3070 pci_name(pdev));
3071 goto err_out_free_regions;
3072 }
3073
3074 memset(hw, 0, sizeof(*hw));
3075 hw->pdev = pdev;
3076
3077 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3078 if (!hw->regs) {
3079 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3080 pci_name(pdev));
3081 goto err_out_free_hw;
3082 }
3083 hw->pm_cap = pm_cap;
3084
3085 err = sky2_reset(hw);
3086 if (err)
3087 goto err_out_iounmap;
3088
3089 printk(KERN_INFO PFX "v%s addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
3090 DRV_VERSION, pci_resource_start(pdev, 0), pdev->irq,
3091 yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
3092 hw->chip_id, hw->chip_rev);
3093
3094 dev = sky2_init_netdev(hw, 0, using_dac);
3095 if (!dev)
3096 goto err_out_free_pci;
3097
3098 err = register_netdev(dev);
3099 if (err) {
3100 printk(KERN_ERR PFX "%s: cannot register net device\n",
3101 pci_name(pdev));
3102 goto err_out_free_netdev;
3103 }
3104
3105 sky2_show_addr(dev);
3106
3107 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
3108 if (register_netdev(dev1) == 0)
3109 sky2_show_addr(dev1);
3110 else {
3111 /* Failure to register second port need not be fatal */
3112 printk(KERN_WARNING PFX
3113 "register of second port failed\n");
3114 hw->dev[1] = NULL;
3115 free_netdev(dev1);
3116 }
3117 }
3118
3119 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
3120 if (err) {
3121 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3122 pci_name(pdev), pdev->irq);
3123 goto err_out_unregister;
3124 }
3125
3126 hw->intr_mask = Y2_IS_BASE;
3127 sky2_write32(hw, B0_IMSK, hw->intr_mask);
3128
3129 pci_set_drvdata(pdev, hw);
3130
3131 return 0;
3132
3133err_out_unregister:
3134 if (dev1) {
3135 unregister_netdev(dev1);
3136 free_netdev(dev1);
3137 }
3138 unregister_netdev(dev);
3139err_out_free_netdev:
3140 free_netdev(dev);
3141err_out_free_pci:
3142 sky2_write8(hw, B0_CTST, CS_RST_SET);
3143 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3144err_out_iounmap:
3145 iounmap(hw->regs);
3146err_out_free_hw:
3147 kfree(hw);
3148err_out_free_regions:
3149 pci_release_regions(pdev);
3150 pci_disable_device(pdev);
3151err_out:
3152 return err;
3153}
3154
3155static void __devexit sky2_remove(struct pci_dev *pdev)
3156{
3157 struct sky2_hw *hw = pci_get_drvdata(pdev);
3158 struct net_device *dev0, *dev1;
3159
3160 if (!hw)
3161 return;
3162
3163 dev0 = hw->dev[0];
3164 dev1 = hw->dev[1];
3165 if (dev1)
3166 unregister_netdev(dev1);
3167 unregister_netdev(dev0);
3168
3169 sky2_write32(hw, B0_IMSK, 0);
3170 sky2_set_power_state(hw, PCI_D3hot);
3171 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3172 sky2_write8(hw, B0_CTST, CS_RST_SET);
3173 sky2_read8(hw, B0_CTST);
3174
3175 free_irq(pdev->irq, hw);
3176 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3177 pci_release_regions(pdev);
3178 pci_disable_device(pdev);
3179
3180 if (dev1)
3181 free_netdev(dev1);
3182 free_netdev(dev0);
3183 iounmap(hw->regs);
3184 kfree(hw);
3185
3186 pci_set_drvdata(pdev, NULL);
3187}
3188
3189#ifdef CONFIG_PM
3190static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3191{
3192 struct sky2_hw *hw = pci_get_drvdata(pdev);
3193 int i;
3194
3195 for (i = 0; i < 2; i++) {
3196 struct net_device *dev = hw->dev[i];
3197
3198 if (dev) {
3199 if (!netif_running(dev))
3200 continue;
3201
3202 sky2_down(dev);
3203 netif_device_detach(dev);
3204 }
3205 }
3206
3207 return sky2_set_power_state(hw, pci_choose_state(pdev, state));
3208}
3209
3210static int sky2_resume(struct pci_dev *pdev)
3211{
3212 struct sky2_hw *hw = pci_get_drvdata(pdev);
3213 int i;
3214
3215 pci_restore_state(pdev);
3216 pci_enable_wake(pdev, PCI_D0, 0);
3217 sky2_set_power_state(hw, PCI_D0);
3218
3219 sky2_reset(hw);
3220
3221 for (i = 0; i < 2; i++) {
3222 struct net_device *dev = hw->dev[i];
3223 if (dev) {
3224 if (netif_running(dev)) {
3225 netif_device_attach(dev);
3226 if (sky2_up(dev))
3227 dev_close(dev);
3228 }
3229 }
3230 }
3231 return 0;
3232}
3233#endif
3234
3235static struct pci_driver sky2_driver = {
3236 .name = DRV_NAME,
3237 .id_table = sky2_id_table,
3238 .probe = sky2_probe,
3239 .remove = __devexit_p(sky2_remove),
3240#ifdef CONFIG_PM
3241 .suspend = sky2_suspend,
3242 .resume = sky2_resume,
3243#endif
3244};
3245
3246static int __init sky2_init_module(void)
3247{
3248 return pci_register_driver(&sky2_driver);
3249}
3250
3251static void __exit sky2_cleanup_module(void)
3252{
3253 pci_unregister_driver(&sky2_driver);
3254}
3255
3256module_init(sky2_init_module);
3257module_exit(sky2_cleanup_module);
3258
3259MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
3260MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
3261MODULE_LICENSE("GPL");
3262MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
new file mode 100644
index 000000000000..95518921001c
--- /dev/null
+++ b/drivers/net/sky2.h
@@ -0,0 +1,1922 @@
1/*
2 * Definitions for the new Marvell Yukon 2 driver.
3 */
4#ifndef _SKY2_H
5#define _SKY2_H
6
7/* PCI config registers */
8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44
10#define PCI_DEV_STATUS 0x7c
11#define PCI_OS_PCI_X (1<<26)
12
13#define PEX_LNK_STAT 0xf2
14#define PEX_UNC_ERR_STAT 0x104
15#define PEX_DEV_CTRL 0xe8
16
17/* Yukon-2 */
18enum pci_dev_reg_1 {
19 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
20 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
21 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
22 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
23 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
24 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
25};
26
27enum pci_dev_reg_2 {
28 PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
29 PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
30 PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
31
32 PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
33 PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
34 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
35 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
36
37 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
38};
39
40
41#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
42 PCI_STATUS_SIG_SYSTEM_ERROR | \
43 PCI_STATUS_REC_MASTER_ABORT | \
44 PCI_STATUS_REC_TARGET_ABORT | \
45 PCI_STATUS_PARITY)
46
47enum pex_dev_ctrl {
48 PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */
49 PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */
50 PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */
51 PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */
52 PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */
53 PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */
54 PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */
55 PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */
56 PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */
57 PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */
58 PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */
59};
60#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
61
62/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */
63enum pex_err {
64 PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */
65
66 PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */
67
68 PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */
69
70 PEX_COMP_TO = 1<<14, /* Completion Timeout */
71 PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
72 PEX_POIS_TLP = 1<<12, /* Poisoned TLP */
73
74 PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
75 PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
76};
77
78
79enum csr_regs {
80 B0_RAP = 0x0000,
81 B0_CTST = 0x0004,
82 B0_Y2LED = 0x0005,
83 B0_POWER_CTRL = 0x0007,
84 B0_ISRC = 0x0008,
85 B0_IMSK = 0x000c,
86 B0_HWE_ISRC = 0x0010,
87 B0_HWE_IMSK = 0x0014,
88
89 /* Special ISR registers (Yukon-2 only) */
90 B0_Y2_SP_ISRC2 = 0x001c,
91 B0_Y2_SP_ISRC3 = 0x0020,
92 B0_Y2_SP_EISR = 0x0024,
93 B0_Y2_SP_LISR = 0x0028,
94 B0_Y2_SP_ICR = 0x002c,
95
96 B2_MAC_1 = 0x0100,
97 B2_MAC_2 = 0x0108,
98 B2_MAC_3 = 0x0110,
99 B2_CONN_TYP = 0x0118,
100 B2_PMD_TYP = 0x0119,
101 B2_MAC_CFG = 0x011a,
102 B2_CHIP_ID = 0x011b,
103 B2_E_0 = 0x011c,
104
105 B2_Y2_CLK_GATE = 0x011d,
106 B2_Y2_HW_RES = 0x011e,
107 B2_E_3 = 0x011f,
108 B2_Y2_CLK_CTRL = 0x0120,
109
110 B2_TI_INI = 0x0130,
111 B2_TI_VAL = 0x0134,
112 B2_TI_CTRL = 0x0138,
113 B2_TI_TEST = 0x0139,
114
115 B2_TST_CTRL1 = 0x0158,
116 B2_TST_CTRL2 = 0x0159,
117 B2_GP_IO = 0x015c,
118
119 B2_I2C_CTRL = 0x0160,
120 B2_I2C_DATA = 0x0164,
121 B2_I2C_IRQ = 0x0168,
122 B2_I2C_SW = 0x016c,
123
124 B3_RAM_ADDR = 0x0180,
125 B3_RAM_DATA_LO = 0x0184,
126 B3_RAM_DATA_HI = 0x0188,
127
128/* RAM Interface Registers */
129/* Yukon-2: use RAM_BUFFER() to access the RAM buffer (see the sketch after this enum) */
130/*
131 * The hardware spec calls these registers Timeout Value 0..11, but those
132 * names are not usable in software.  Note that these are NOT real timeouts;
133 * they are the number of qWords transferred continuously.
134 */
135#define RAM_BUFFER(port, reg) (reg | (port <<6))
136
137 B3_RI_WTO_R1 = 0x0190,
138 B3_RI_WTO_XA1 = 0x0191,
139 B3_RI_WTO_XS1 = 0x0192,
140 B3_RI_RTO_R1 = 0x0193,
141 B3_RI_RTO_XA1 = 0x0194,
142 B3_RI_RTO_XS1 = 0x0195,
143 B3_RI_WTO_R2 = 0x0196,
144 B3_RI_WTO_XA2 = 0x0197,
145 B3_RI_WTO_XS2 = 0x0198,
146 B3_RI_RTO_R2 = 0x0199,
147 B3_RI_RTO_XA2 = 0x019a,
148 B3_RI_RTO_XS2 = 0x019b,
149 B3_RI_TO_VAL = 0x019c,
150 B3_RI_CTRL = 0x01a0,
151 B3_RI_TEST = 0x01a2,
152 B3_MA_TOINI_RX1 = 0x01b0,
153 B3_MA_TOINI_RX2 = 0x01b1,
154 B3_MA_TOINI_TX1 = 0x01b2,
155 B3_MA_TOINI_TX2 = 0x01b3,
156 B3_MA_TOVAL_RX1 = 0x01b4,
157 B3_MA_TOVAL_RX2 = 0x01b5,
158 B3_MA_TOVAL_TX1 = 0x01b6,
159 B3_MA_TOVAL_TX2 = 0x01b7,
160 B3_MA_TO_CTRL = 0x01b8,
161 B3_MA_TO_TEST = 0x01ba,
162 B3_MA_RCINI_RX1 = 0x01c0,
163 B3_MA_RCINI_RX2 = 0x01c1,
164 B3_MA_RCINI_TX1 = 0x01c2,
165 B3_MA_RCINI_TX2 = 0x01c3,
166 B3_MA_RCVAL_RX1 = 0x01c4,
167 B3_MA_RCVAL_RX2 = 0x01c5,
168 B3_MA_RCVAL_TX1 = 0x01c6,
169 B3_MA_RCVAL_TX2 = 0x01c7,
170 B3_MA_RC_CTRL = 0x01c8,
171 B3_MA_RC_TEST = 0x01ca,
172 B3_PA_TOINI_RX1 = 0x01d0,
173 B3_PA_TOINI_RX2 = 0x01d4,
174 B3_PA_TOINI_TX1 = 0x01d8,
175 B3_PA_TOINI_TX2 = 0x01dc,
176 B3_PA_TOVAL_RX1 = 0x01e0,
177 B3_PA_TOVAL_RX2 = 0x01e4,
178 B3_PA_TOVAL_TX1 = 0x01e8,
179 B3_PA_TOVAL_TX2 = 0x01ec,
180 B3_PA_CTRL = 0x01f0,
181 B3_PA_TEST = 0x01f2,
182
183 Y2_CFG_SPC = 0x1c00,
184};
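/* Sketch of the RAM_BUFFER() macro defined inside the enum above: it just
 * ORs in port<<6, so the port-1 instance of a RAM interface register sits
 * 0x40 above the port-0 instance; e.g. RAM_BUFFER(1, B3_RI_WTO_R1)
 * evaluates to 0x0190 | 0x40 == 0x01d0.  This only restates the macro's
 * arithmetic; what the resulting address means is defined by the Yukon-2
 * hardware spec.
 */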
185
186/* B0_CTST 16 bit Control/Status register */
187enum {
188 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
189 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
190 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
191 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
192 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
193 Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
194 Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
195 Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
196
197 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
198 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
199 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
200 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
201 CS_MRST_CLR = 1<<3, /* Clear Master reset */
202 CS_MRST_SET = 1<<2, /* Set Master reset */
203 CS_RST_CLR = 1<<1, /* Clear Software reset */
204 CS_RST_SET = 1, /* Set Software reset */
205};
206
207/* B0_LED 8 Bit LED register */
208enum {
209/* Bit 7.. 2: reserved */
210 LED_STAT_ON = 1<<1, /* Status LED on */
211 LED_STAT_OFF = 1, /* Status LED off */
212};
213
214/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
215enum {
216 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
217 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
218 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
219 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
220 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
221 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
222 PC_VCC_ON = 1<<1, /* Switch VCC On */
223 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
224};
225
226/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
227
228/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
229/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
230/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
231/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
232enum {
233 Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
234 Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
235 Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
236
237 Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
238 Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
239 Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
240 Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
241
242 Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
243 Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
244 Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
245 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
246 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
247
248 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
249 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
250 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
251 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
252 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
253
254 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
255 Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
256 Y2_IS_IRQ_SW | Y2_IS_TIMINT,
257 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
258 Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
259 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
260 Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
261};
262
263/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
264enum {
265 IS_ERR_MSK = 0x00003fff,/* All Error bits */
266
267 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
268 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
269 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
270 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
271 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
272 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
273 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
274 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
275 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
276 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
277 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
278 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
279 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
280 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
281};
282
283/* Hardware error interrupt mask for Yukon 2 */
284enum {
285 Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
286 Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
287 Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
288 Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
289 Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
290 Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
291 /* Link 2 */
292 Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
293 Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
294 Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
295 Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
296 Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
297 Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
298 /* Link 1 */
299 Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
300 Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
301 Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
302 Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
303 Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
304 Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
305
306 Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
307 Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
308 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
309 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
310
311 Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
312 Y2_IS_PCI_EXP |
313 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
314};
315
316/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
317enum {
318 DPT_START = 1<<1,
319 DPT_STOP = 1<<0,
320};
321
322/* B2_TST_CTRL1 8 bit Test Control Register 1 */
323enum {
324 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
325 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
326 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
327 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
328 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
329 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
330 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
331 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
332};
333
334/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
335enum {
336 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
337 /* Bit 3.. 2: reserved */
338 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
339 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
340};
341
342/* B2_CHIP_ID 8 bit Chip Identification Number */
343enum {
344 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
345 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
346 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
347 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
348 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
349 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
350 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
351 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
352
353 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
354 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
355 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
356};
357
358/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
359enum {
360 Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
361 Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
362 Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
363 Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
364 Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
365 Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
366 Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
367 Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
368};
369
370/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
371enum {
372 CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
373 CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
374 CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
375};
376#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
377#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
378
379
380/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
381enum {
382 Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
383#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
384 Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
385 Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
386#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
387#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
388 Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
389 Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
390};
391
392/* B2_TI_CTRL 8 bit Timer control */
393/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
394enum {
395 TIM_START = 1<<2, /* Start Timer */
396 TIM_STOP = 1<<1, /* Stop Timer */
397 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
398};
399
400/* B2_TI_TEST 8 Bit Timer Test */
401/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
402/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
403enum {
404 TIM_T_ON = 1<<2, /* Test mode on */
405 TIM_T_OFF = 1<<1, /* Test mode off */
406 TIM_T_STEP = 1<<0, /* Test step */
407};
408
409/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
410 /* Bit 31..19: reserved */
411#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
412/* RAM Interface Registers */
413
414/* B3_RI_CTRL 16 bit RAM Interface Control Register */
415enum {
416 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
417 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
418
419 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
420 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
421};
422
423#define SK_RI_TO_53 36 /* RAM interface timeout */
424
425
426/* Port-related registers: FIFO and Arbiter */
427#define SK_REG(port,reg) (((port)<<7)+(reg))
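/* SK_REG() simply adds port<<7, so the port-1 block of these per-port
 * registers starts 0x80 above the port-0 block; for example
 * SK_REG(1, RX_GMF_CTRL_T) == 0x0c48 + 0x80 == 0x0cc8 (RX_GMF_CTRL_T is
 * defined in the Rx GMAC FIFO block further below).
 */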
428
429/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
430/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
431/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
432/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
433/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
434
435#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
436
437/* TXA_CTRL 8 bit Tx Arbiter Control Register */
438enum {
439 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
440 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
441 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
442 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
443 TXA_START_RC = 1<<3, /* Start sync Rate Control */
444 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
445 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
446 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
447};
448
449/*
450 * Bank 4 - 5
451 */
452/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
453enum {
454 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
455 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
456 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
457 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
458 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
459 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
460 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
461};
462
463
464enum {
465 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
466 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
467 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
468 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
469 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
470 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
471 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
472 B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
473 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
474};
475
476/* Queue Register Offsets, use Q_ADDR() to access */
477enum {
478 B8_Q_REGS = 0x0400, /* base of Queue registers */
479 Q_D = 0x00, /* 8*32 bit Current Descriptor */
480 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
481 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
482 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
483 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
484 Q_BC = 0x30, /* 32 bit Current Byte Counter */
485 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
486 Q_F = 0x38, /* 32 bit Flag Register */
487 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
488 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
489 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
490 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
491 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
492 Q_T2 = 0x40, /* 32 bit Test Register 2 */
493 Q_T3 = 0x44, /* 32 bit Test Register 3 */
494
495/* Yukon-2 */
496 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
497 Q_WM = 0x40, /* 16 bit FIFO Watermark */
498 Q_AL = 0x42, /* 8 bit FIFO Alignment */
499 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
500 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
501 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
502 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
503 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
504 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
505 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
506 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
507};
508#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
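/* Worked example for Q_ADDR(): with the queue bases defined further below
 * (Q_R1 = 0x0000, Q_XA1 = 0x0280, ...), Q_ADDR(Q_R1, Q_CSR) expands to
 * B8_Q_REGS + 0x0000 + 0x34 == 0x0434, the BMU Control/Status register of
 * receive queue 1.  Consistency check: B8_Q_REGS + Q_XA1 == 0x0680, which
 * matches B8_TA1_REGS above.
 */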
509
510
511/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
512enum {
513 Y2_B8_PREF_REGS = 0x0450,
514
515 PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
516 PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
517 PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
518 PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
519 PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
520 PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
521 PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
522 PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
523 PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
524 PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
525
526 PREF_UNIT_MASK_IDX = 0x0fff,
527};
528#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
529
530/* RAM Buffer Register Offsets */
531enum {
532
533 RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
534 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
535 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
536 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
537 RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
538 RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
539 RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
540 RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
541 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
542 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
543 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
544 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
545 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
546 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
547};
548
549/* Receive and Transmit Queues */
550enum {
551 Q_R1 = 0x0000, /* Receive Queue 1 */
552 Q_R2 = 0x0080, /* Receive Queue 2 */
553 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
554 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
555 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
556 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
557};
558
559/* Different PHY Types */
560enum {
561 PHY_ADDR_MARV = 0,
562};
563
564#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
565
566
567enum {
568 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
569 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
570 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
571 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
572
573 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
574
575/* Receive GMAC FIFO (YUKON and Yukon-2) */
576
577 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
578 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
579 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
580 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
581 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
582 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
583 RX_GMF_UP_THR = 0x0c58,/* 8 bit Rx Upper Pause Thr (Yukon-EC_U) */
584 RX_GMF_LP_THR = 0x0c5a,/* 8 bit Rx Lower Pause Thr (Yukon-EC_U) */
585 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
586 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
587
588 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
589
590 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
591
592 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
593};
594
595
596/* Q_BC 32 bit Current Byte Counter */
597
598/* BMU Control Status Registers */
599/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
600/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
601/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
602/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
603/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
604/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
605/* Q_CSR 32 bit BMU Control/Status Register */
606
607/* Rx BMU Control / Status Registers (Yukon-2) */
608enum {
609 BMU_IDLE = 1<<31, /* BMU Idle State */
610 BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
611 BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
612
613 BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
614 BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
615 BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
616 BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
617 BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
618 BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segmentation error (Tx) */
619 BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
620 BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
621 BMU_START = 1<<8, /* Start Rx/Tx Queue */
622 BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
623 BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
624 BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
625 BMU_FIFO_RST = 1<<4, /* Reset FIFO */
626 BMU_OP_ON = 1<<3, /* BMU Operational On */
627 BMU_OP_OFF = 1<<2, /* BMU Operational Off */
628 BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
629 BMU_RST_SET = 1<<0, /* Set BMU Reset */
630
631 BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
632 BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
633 BMU_FIFO_ENA | BMU_OP_ON,
634
635 BMU_WM_DEFAULT = 0x600,
636};
637
638/* Tx BMU Control / Status Registers (Yukon-2) */
639 /* Bit 31: same as for Rx */
640enum {
641 BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
642 BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
643 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
644};
645
646/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
647/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
648enum {
649 PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
650 PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
651 PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
652 PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
653};
654
655/* RAM Buffer Register Offsets, use RB_ADDR(offs, queue) to access */
656/* RB_START 32 bit RAM Buffer Start Address */
657/* RB_END 32 bit RAM Buffer End Address */
658/* RB_WP 32 bit RAM Buffer Write Pointer */
659/* RB_RP 32 bit RAM Buffer Read Pointer */
660/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
661/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
662/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
663/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
664/* RB_PC 32 bit RAM Buffer Packet Counter */
665/* RB_LEV 32 bit RAM Buffer Level Register */
666
667#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
668/* RB_TST2 8 bit RAM Buffer Test Register 2 */
669/* RB_TST1 8 bit RAM Buffer Test Register 1 */
670
671/* RB_CTRL 8 bit RAM Buffer Control Register */
672enum {
673 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
674 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
675 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
676 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
677 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
678 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
679};
680
681
682/* Transmit GMAC FIFO (YUKON only) */
683enum {
684 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
685 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
686 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
687
688 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
689 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
690 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
691
692 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
693 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
694 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
695};
696
697/* Descriptor Poll Timer Registers */
698enum {
699 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
700 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
701 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
702
703 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
704};
705
706/* Time Stamp Timer Registers (YUKON only) */
707enum {
708 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
709 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
710 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
711};
712
713/* Polling Unit Registers (Yukon-2 only) */
714enum {
715 POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
716 POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
717
718 POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
719 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
720};
721
722/* ASF Subsystem Registers (Yukon-2 only) */
723enum {
724 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
725 B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
726 B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
727
728 B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
729 B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
730 B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
731 B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
732 B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
733 B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
734};
735
736/* Status BMU Registers (Yukon-2 only)*/
737enum {
738 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
739 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
740
741 STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
742 STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
743 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
744 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
745 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
746 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
747 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
748 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
749
750/* FIFO Control/Status Registers (Yukon-2 only)*/
751 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
752 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
753 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
754 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
755 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
756 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
757 STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
758
759/* Level and ISR Timer Registers (Yukon-2 only)*/
760 STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
761 STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
762 STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
763 STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
764 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
765 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
766 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
767 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
768 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
769 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
770 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
771 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
772};
773
774enum {
775 LINKLED_OFF = 0x01,
776 LINKLED_ON = 0x02,
777 LINKLED_LINKSYNC_OFF = 0x04,
778 LINKLED_LINKSYNC_ON = 0x08,
779 LINKLED_BLINK_OFF = 0x10,
780 LINKLED_BLINK_ON = 0x20,
781};
782
783/* GMAC and GPHY Control Registers (YUKON only) */
784enum {
785 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
786 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
787 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
788 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
789 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
790
791/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
792
793 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
794
795 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
796 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
797 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
798 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
799 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
800 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
801 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
802
803/* WOL Pattern Length Registers (YUKON only) */
804
805 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
806 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
807
808/* WOL Pattern Counter Registers (YUKON only) */
809
810
811 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
812 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
813};
814
815enum {
816 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
817 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
818};
819
820enum {
821 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
822 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
823};
824
825/*
826 * Marvell PHY registers, indirectly addressed over GMAC
827 */
828enum {
829 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
830 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
831 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
832 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
833 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
834 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
835 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
836 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
837 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
838 /* Marvell-specific registers */
839 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
840 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
841 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
842 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
843 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
844 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
845 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
846 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
847 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
848 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
849 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
850 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
851 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
852 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
853 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
854 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
855 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
856 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
857
858/* for 10/100 Fast Ethernet PHY (88E3082 only) */
859 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
860 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
861 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
862 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
863 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
864};
865
866enum {
867 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
868 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
869 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
870 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
871 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
872 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
873 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
874 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
875 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
876 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
877};
878
879enum {
880 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
881 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
882 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
883};
884
885enum {
886 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
887
888 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
889 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
890 PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
891 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
892 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
893 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
894 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
895};
896
897enum {
898 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
899 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
900 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
901};
902
903/* different Marvell PHY Ids */
904enum {
905 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
906
907 PHY_BCOM_ID1_A1 = 0x6041,
908 PHY_BCOM_ID1_B2 = 0x6043,
909 PHY_BCOM_ID1_C0 = 0x6044,
910 PHY_BCOM_ID1_C5 = 0x6047,
911
912 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
913 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
914 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
915 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
916};
917
918/* Advertisement register bits */
919enum {
920 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
921 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
922 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
923
924 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
925 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
926 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
927 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
928 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
929 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
930 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
931 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
932 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet */
933 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
934 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
935 PHY_AN_100HALF | PHY_AN_100FULL,
936};
937
938/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
939/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
940enum {
941 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
942 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
943 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
944 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
945 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
946 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
947 /* Bit 9..8: reserved */
948 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
949};
950
951/** Marvell-Specific */
952enum {
953 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
954 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
955 PHY_M_AN_RF = 1<<13, /* Remote Fault */
956
957 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
958 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
959 PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
960 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
961 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
962 PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
963 PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
964 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
965};
966
967/* special defines for FIBER (88E1011S only) */
968enum {
969 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
970 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
971 PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
972 PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
973};
974
975/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
976enum {
977 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
978 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
979 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
980 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
981};
982
983/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
984enum {
985 PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
986 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
987 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
988 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
989 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
990 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
991};
992
993/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
994enum {
995 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
996 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
997 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
998 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
999 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1000 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1001 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1002 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1003 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1004 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1005 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1006 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1007};
1008
1009enum {
1010 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1011 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1012};
1013
1014#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1015
1016enum {
1017 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1018 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1019 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1020};
1021
1022/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1023enum {
1024 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1025 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1026 PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1027 PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1028 PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
1029
1030 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1031 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1032
1033 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1034 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1035};
1036
1037/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1038enum {
1039 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1040 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1041 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1042 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1043 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1044 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1045 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1046 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1047 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1048 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1049 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1050 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1051 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1052 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1053 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1054 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1055};
1056
1057#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1058
1059/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1060enum {
1061 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1062 PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1063};
1064
1065enum {
1066 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1067 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1068 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1069 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1070 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1071 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1072 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1073 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1074 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1075 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1076 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1077 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1078
1079 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1080 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1081 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1082
1083 PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
1084 | PHY_M_IS_FIFO_ERROR,
1085 PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1086};
1087
1088
1089/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1090enum {
1091 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1092 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1093
1094 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1095 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1096 /* (88E1011 only) */
1097 PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1098 /* (88E1011 only) */
1099 PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1100 /* (88E1111 only) */
1101 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1102 /* !!! Errata in spec. (1 = disable) */
1103 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1104 PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1105 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1106 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1107 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1108 PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
};
1109
1110#define PHY_M_EC_M_DSC(x) ((x)<<10 & PHY_M_EC_M_DSC_MSK)
1111 /* 00=1x; 01=2x; 10=3x; 11=4x */
1112#define PHY_M_EC_S_DSC(x) ((x)<<8 & PHY_M_EC_S_DSC_MSK)
1113 /* 00=dis; 01=1x; 10=2x; 11=3x */
1114#define PHY_M_EC_DSC_2(x) ((x)<<9 & PHY_M_EC_M_DSC_MSK2)
1115 /* 000=1x; 001=2x; 010=3x; 011=4x */
1116#define PHY_M_EC_MAC_S(x) ((x)<<4 & PHY_M_EC_MAC_S_MSK)
1117 /* 01X=0; 110=2.5; 111=25 (MHz) */
1118
1119/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1120enum {
1121 PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
1122 PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
1123 PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
1124};
1125/* !!! Errata in spec. (1 = disable) */
1126
1127#define PHY_M_PC_DSC(x) (((x)<<12) & PHY_M_PC_DSC_MSK)
1128 /* 100=5x; 101=6x; 110=7x; 111=8x */
1129enum {
1130 MAC_TX_CLK_0_MHZ = 2,
1131 MAC_TX_CLK_2_5_MHZ = 6,
1132 MAC_TX_CLK_25_MHZ = 7,
1133};
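/* Illustrative sketch, not part of the original header: one plausible way to
 * build a PHY_MARV_EXT_CTRL value from the macros above.  The helper name and
 * the chosen downshift settings are hypothetical, not the driver's defaults.
 */
static inline u16 sky2_example_ext_ctrl(void)
{
	return PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ)	/* MAC interface clock 25 MHz */
		| PHY_M_EC_M_DSC(1)			/* master downshift counter: 2x */
		| PHY_M_EC_S_DSC(1);			/* slave downshift counter: 1x */
}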
1134
1135/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1136enum {
1137 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1138 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1139 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1140 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1141 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1142 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1143 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1144 /* (88E1111 only) */
1145};
1146
1147enum {
1148 PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1149 /* (88E1011 only) */
1150 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1151 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1152 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1153 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1154 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1155};
1156
1157#define PHY_M_LED_PULS_DUR(x) (((x)<<12) & PHY_M_LEDC_PULS_MSK)
1158
1159/***** PHY_MARV_PHY_STAT (page 3) 16 bit r/w Polarity Control Reg. *****/
1160enum {
1161 PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
1162 PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
1163 PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
1164 PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
1165 PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
1166 PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
1167};
1168
1169#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
1170#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
1171#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
1172#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
1173#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
1174#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
1175
1176enum {
1177 PULS_NO_STR = 0,/* no pulse stretching */
1178 PULS_21MS = 1,/* 21 ms to 42 ms */
1179 PULS_42MS = 2,/* 42 ms to 84 ms */
1180 PULS_84MS = 3,/* 84 ms to 170 ms */
1181 PULS_170MS = 4,/* 170 ms to 340 ms */
1182 PULS_340MS = 5,/* 340 ms to 670 ms */
1183 PULS_670MS = 6,/* 670 ms to 1.3 s */
1184 PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1185};
1186
1187#define PHY_M_LED_BLINK_RT(x) (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
1188
1189enum {
1190 BLINK_42MS = 0,/* 42 ms */
1191 BLINK_84MS = 1,/* 84 ms */
1192 BLINK_170MS = 2,/* 170 ms */
1193 BLINK_340MS = 3,/* 340 ms */
1194 BLINK_670MS = 4,/* 670 ms */
1195};
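/* Illustrative sketch, not part of the original header: combining a pulse
 * stretch and a blink rate into one PHY_MARV_LED_CTRL value.  The helper name
 * and the chosen timings are hypothetical.
 */
static inline u16 sky2_example_led_ctrl(void)
{
	return PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS);
}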
1196
1197/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1198#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1199 /* Bit 13..12: reserved */
1200#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1201#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1202#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1203#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1204#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1205#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1206
1207enum {
1208 MO_LED_NORM = 0,
1209 MO_LED_BLINK = 1,
1210 MO_LED_OFF = 2,
1211 MO_LED_ON = 3,
1212};
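/* Illustrative sketch, not part of the original header: forcing every LED off
 * through the manual override register.  The helper name is hypothetical.
 */
static inline u16 sky2_example_leds_off(void)
{
	return PHY_M_LED_MO_DUP(MO_LED_OFF)  | PHY_M_LED_MO_10(MO_LED_OFF)
	     | PHY_M_LED_MO_100(MO_LED_OFF)  | PHY_M_LED_MO_1000(MO_LED_OFF)
	     | PHY_M_LED_MO_RX(MO_LED_OFF)   | PHY_M_LED_MO_TX(MO_LED_OFF);
}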
1213
1214/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1215enum {
1216 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1217 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1218 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1219 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1220 PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1221};
1222
1223/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1224enum {
1225 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1226 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1227 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1228 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1229 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1230 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1231 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1232 /* (88E1111 only) */
1233
1234 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1235 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1236 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1237};
1238
1239/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1240/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1241 /* Bit 15..12: reserved (used internally) */
1242enum {
1243 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1244 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1245 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1246};
1247
1248#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
1249#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
1250#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
1251
1252enum {
1253 LED_PAR_CTRL_COLX = 0x00,
1254 LED_PAR_CTRL_ERROR = 0x01,
1255 LED_PAR_CTRL_DUPLEX = 0x02,
1256 LED_PAR_CTRL_DP_COL = 0x03,
1257 LED_PAR_CTRL_SPEED = 0x04,
1258 LED_PAR_CTRL_LINK = 0x05,
1259 LED_PAR_CTRL_TX = 0x06,
1260 LED_PAR_CTRL_RX = 0x07,
1261 LED_PAR_CTRL_ACT = 0x08,
1262 LED_PAR_CTRL_LNK_RX = 0x09,
1263 LED_PAR_CTRL_LNK_AC = 0x0a,
1264 LED_PAR_CTRL_ACT_BL = 0x0b,
1265 LED_PAR_CTRL_TX_BL = 0x0c,
1266 LED_PAR_CTRL_RX_BL = 0x0d,
1267 LED_PAR_CTRL_COL_BL = 0x0e,
1268 LED_PAR_CTRL_INACT = 0x0f
1269};
1270
1271/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1272enum {
1273 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1274 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1275 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1276};
1277
1278/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1279/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1280enum {
1281 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1282 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1283 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1284 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1285};
1286#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
1287
1288/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1289enum {
1290 PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1291 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1292 PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1293 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1294};
1295
1296#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1297#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1298#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1299#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1300
1301/* GMAC registers */
1302/* Port Registers */
1303enum {
1304 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1305 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1306 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1307 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1308 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1309 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1310 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1311/* Source Address Registers */
1312 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1313 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1314 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1315 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1316 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1317 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1318
1319/* Multicast Address Hash Registers */
1320 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1321 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1322 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1323 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1324
1325/* Interrupt Source Registers */
1326 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1327 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1328 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1329
1330/* Interrupt Mask Registers */
1331 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1332 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1333 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1334
1335/* Serial Management Interface (SMI) Registers */
1336 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1337 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1338 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1339};
1340
1341/* MIB Counters */
1342#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1343#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1344
1345/*
1346 * MIB Counters base address definitions (low word) -
1347 * use offset 4 for access to high word (32 bit r/o)
1348 */
1349enum {
1350 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1351 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1352 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1353 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1354 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1355 /* GM_MIB_CNT_BASE + 40: reserved */
1356 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1357 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1358 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1359 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1360 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1361 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1362 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1363 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
1364 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
1365 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
1366 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
1367 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
1368 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
1369 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
1370 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
1371 /* GM_MIB_CNT_BASE + 168: reserved */
1372 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
1373 /* GM_MIB_CNT_BASE + 184: reserved */
1374 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
1375 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
1376 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
1377 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
1378 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
1379 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
1380 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
1381 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
1382 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
1383 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
1384 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
1385 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
1386 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
1387
1388 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
1389 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
1390 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
1391 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
1392 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
1393 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1394};
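/* Illustrative note, not part of the original header: each MIB counter above
 * is a 32-bit read-only value spread over two 16-bit GMAC registers, so it is
 * read as the low word at the listed offset and the high word at offset + 4.
 * The gma_read32() accessor near the end of this header does exactly that,
 * e.g. (with hypothetical 'hw' and 'port'):
 *
 *	u32 rx_unicast = gma_read32(hw, port, GM_RXF_UC_OK);
 */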
1395
1396/* GMAC Bit Definitions */
1397/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1398enum {
1399 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1400 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1401 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1402 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1403 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1404 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1405 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1406 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1407
1408 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1409 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1410 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1411 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1412 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1413};
1414
1415/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1416enum {
1417 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1418 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1419 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1420 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1421 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1422 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1423 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1424 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1425 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1426 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1427 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1428 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1429 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1430 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1431 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1432};
1433
1434#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1435#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
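/* Illustrative sketch, not part of the original header: one way the control
 * bits above compose into a forced 1000 Mbps full-duplex setting with the
 * auto-update bits disabled.  The helper name is hypothetical and this is not
 * necessarily the value the driver programs.
 */
static inline u16 sky2_example_gpcr_forced_1000fd(void)
{
	return GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL | GM_GPCR_AU_ALL_DIS
		| GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
}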
1436
1437/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1438enum {
1439 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1440 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1441 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1442 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold Mask */
1443};
1444
1445#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1446#define TX_COL_DEF 0x04
1447
1448/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1449enum {
1450 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1451 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1452 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1453 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1454};
1455
1456/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1457enum {
1458 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1459 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1460 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1461 GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
1462
1463 TX_JAM_LEN_DEF = 0x03,
1464 TX_JAM_IPG_DEF = 0x0b,
1465 TX_IPG_JAM_DEF = 0x1c,
1466 TX_BOF_LIM_DEF = 0x04,
1467};
1468
1469#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1470#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1471#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
1472#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
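/* Illustrative sketch, not part of the original header: composing GM_TX_PARAM
 * from the *_DEF constants via the macros above.  The helper name is
 * hypothetical.
 */
static inline u16 sky2_example_tx_param_default(void)
{
	return TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF)
		| TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF);
}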
1473
1474
1475/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1476enum {
1477 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1478 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
1479 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
1480 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
1481 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1482};
1483
1484#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1485#define DATA_BLIND_DEF 0x04
1486
1487#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
1488#define IPG_DATA_DEF 0x1e
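/* Illustrative sketch, not part of the original header: a GM_SERIAL_MODE value
 * built from the defaults above, with VLAN-sized frames enabled.  The helper
 * name is hypothetical.
 */
static inline u16 sky2_example_serial_mode(void)
{
	return DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA
		| IPG_DATA_VAL(IPG_DATA_DEF);
}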
1489
1490/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1491enum {
1492 GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
1493 GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
1494 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1495 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1496 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1497};
1498
1499#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
1500#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
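/* Illustrative sketch, not part of the original header: the SMI control word
 * that starts a read of PHY register 'reg' on PHY address 'phy'.  Software
 * would then poll GM_SMI_CT_RD_VAL before fetching GM_SMI_DATA.  The helper
 * name is hypothetical.
 */
static inline u16 sky2_example_smi_read_cmd(unsigned phy, unsigned reg)
{
	return GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD;
}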
1501
1502/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1503enum {
1504 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1505 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1506};
1507
1508/* Receive Frame Status Encoding */
1509enum {
1510 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1511 GMR_FS_VLAN = 1<<13, /* VLAN Packet */
1512 GMR_FS_JABBER = 1<<12, /* Jabber Packet */
1513 GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
1514 GMR_FS_MC = 1<<10, /* Multicast Packet */
1515 GMR_FS_BC = 1<<9, /* Broadcast Packet */
1516 GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
1517 GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
1518 GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
1519 GMR_FS_MII_ERR = 1<<5, /* MII Error */
1520 GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
1521 GMR_FS_FRAGMENT = 1<<3, /* Fragment */
1522
1523 GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
1524 GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
1525
1526 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1527 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1528 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1529 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1530};
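/* Illustrative sketch, not part of the original header: decoding a receive
 * status word.  The frame length sits in the upper 16 bits and a frame is
 * normally only accepted when RX_OK is set and none of the error bits are.
 * The helper name is hypothetical.
 */
static inline int sky2_example_rx_frame_good(u32 status, u16 *len)
{
	*len = status >> 16;		/* GMR_FS_LEN field */
	return (status & GMR_FS_RX_OK) && !(status & GMR_FS_ANY_ERR);
}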
1531
1532/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1533enum {
1534 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1535 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1536 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1537 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1538
1539 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1540 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1541 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1542
1543 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1544 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1545 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1546 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1547 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1548 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1549 GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
1550
1551 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1552 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1553 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1554 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1555
1556 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1557
1558 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1559};
1560
1561
1562/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1563enum {
1564 TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
1565 TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
1566
1567 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1568 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1569
1570 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1571 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1572 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
1573
1574 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1575 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1576 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1577};
1578
1579/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1580enum {
1581 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1582 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1583 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1584};
1585
1586/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
1587enum {
1588 Y2_ASF_OS_PRES = 1<<4, /* ASF operating system present */
1589 Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
1590 Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
1591 Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
1592 Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
1593
1594 Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
1595 Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
1596};
1597
1598/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
1599enum {
1600 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1601 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1602};
1603
1604/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1605enum {
1606 SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
1607 SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
1608 SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
1609 SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
1610 SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
1611};
1612
1613/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1614enum {
1615 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1616 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1617 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
1618 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
1619 GMC_PAUSE_ON = 1<<3, /* Pause On */
1620 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
1621 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
1622 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
1623};
1624
1625/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1626enum {
1627 GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
1628 GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
1629 GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
1630 GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
1631 GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
1632 GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
1633 GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
1634 GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
1635 GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
1636 GPC_ANEG_0 = 1<<19, /* ANEG[0] */
1637 GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
1638 GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
1639 GPC_ANEG_3 = 1<<16, /* ANEG[3] */
1640 GPC_ANEG_2 = 1<<15, /* ANEG[2] */
1641 GPC_ANEG_1 = 1<<14, /* ANEG[1] */
1642 GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
1643 GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
1644 GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
1645 GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
1646 GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
1647 GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
1648 /* Bits 7..2: reserved */
1649 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
1650 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
1651};
1652
1653/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1654/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1655enum {
1656 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
1657 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
1658 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
1659 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
1660 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1661 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1662
1663#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1664
1665/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1666 /* Bits 15.. 2: reserved */
1667 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1668 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1669
1670
1671/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1672 WOL_CTL_LINK_CHG_OCC = 1<<15,
1673 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1674 WOL_CTL_PATTERN_OCC = 1<<13,
1675 WOL_CTL_CLEAR_RESULT = 1<<12,
1676 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
1677 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
1678 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
1679 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
1680 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
1681 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
1682 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
1683 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
1684 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
1685 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
1686 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
1687 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1688};
1689
1690#define WOL_CTL_DEFAULT \
1691 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1692 WOL_CTL_DIS_PME_ON_PATTERN | \
1693 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1694 WOL_CTL_DIS_LINK_CHG_UNIT | \
1695 WOL_CTL_DIS_PATTERN_UNIT | \
1696 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1697
1698/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1699#define WOL_CTL_PATT_ENA(x) (1 << (x))
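/* Illustrative sketch, not part of the original header: a WOL_CTRL_STAT value
 * that arms only the magic-packet unit and its PME, leaving the link-change
 * and pattern units disabled.  The helper name is hypothetical.
 */
static inline u16 sky2_example_wol_magic_only(void)
{
	return WOL_CTL_ENA_MAGIC_PKT_UNIT | WOL_CTL_ENA_PME_ON_MAGIC_PKT
		| WOL_CTL_DIS_LINK_CHG_UNIT | WOL_CTL_DIS_PME_ON_LINK_CHG
		| WOL_CTL_DIS_PATTERN_UNIT  | WOL_CTL_DIS_PME_ON_PATTERN;
}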
1700
1701
1702/* Control flags */
1703enum {
1704 UDPTCP = 1<<0,
1705 CALSUM = 1<<1,
1706 WR_SUM = 1<<2,
1707 INIT_SUM= 1<<3,
1708 LOCK_SUM= 1<<4,
1709 INS_VLAN= 1<<5,
1710 FRC_STAT= 1<<6,
1711 EOP = 1<<7,
1712};
1713
1714enum {
1715 HW_OWNER = 1<<7,
1716 OP_TCPWRITE = 0x11,
1717 OP_TCPSTART = 0x12,
1718 OP_TCPINIT = 0x14,
1719 OP_TCPLCK = 0x18,
1720 OP_TCPCHKSUM = OP_TCPSTART,
1721 OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
1722 OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
1723 OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
1724 OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
1725
1726 OP_ADDR64 = 0x21,
1727 OP_VLAN = 0x22,
1728 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
1729 OP_LRGLEN = 0x24,
1730 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
1731 OP_BUFFER = 0x40,
1732 OP_PACKET = 0x41,
1733 OP_LARGESEND = 0x43,
1734
1735/* YUKON-2 STATUS opcodes defines */
1736 OP_RXSTAT = 0x60,
1737 OP_RXTIMESTAMP = 0x61,
1738 OP_RXVLAN = 0x62,
1739 OP_RXCHKS = 0x64,
1740 OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
1741 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
1742 OP_RSS_HASH = 0x65,
1743 OP_TXINDEXLE = 0x68,
1744};
1745
1746/* Yukon 2 hardware interface
1747 * Not tested on big endian
1748 */
1749struct sky2_tx_le {
1750 union {
1751 __le32 addr;
1752 struct {
1753 __le16 offset;
1754 __le16 start;
1755 } csum __attribute((packed));
1756 struct {
1757 __le16 size;
1758 __le16 rsvd;
1759 } tso __attribute((packed));
1760 } tx;
1761 __le16 length; /* also vlan tag or checksum start */
1762 u8 ctrl;
1763 u8 opcode;
1764} __attribute((packed));
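/* Illustrative sketch, not part of the original header or driver: roughly how
 * a transmit checksum list element could be filled in with the flags and
 * opcodes defined above.  The helper name is hypothetical and length/address
 * handling is omitted.
 */
static inline void sky2_example_fill_csum_le(struct sky2_tx_le *le,
					     u16 start, u16 offset)
{
	le->tx.csum.start  = cpu_to_le16(start);	/* where checksumming begins */
	le->tx.csum.offset = cpu_to_le16(offset);	/* where the result is written */
	le->ctrl   = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
	le->opcode = OP_TCPLISW | HW_OWNER;
}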
1765
1766struct sky2_rx_le {
1767 __le32 addr;
1768 __le16 length;
1769 u8 ctrl;
1770 u8 opcode;
1771} __attribute((packed));
1772
1773struct sky2_status_le {
1774 __le32 status; /* also checksum */
1775 __le16 length; /* also vlan tag */
1776 u8 link;
1777 u8 opcode;
1778} __attribute((packed));
1779
1780struct tx_ring_info {
1781 struct sk_buff *skb;
1782 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1783 u16 idx;
1784};
1785
1786struct ring_info {
1787 struct sk_buff *skb;
1788 dma_addr_t mapaddr;
1789};
1790
1791struct sky2_port {
1792 struct sky2_hw *hw;
1793 struct net_device *netdev;
1794 unsigned port;
1795 u32 msg_enable;
1796
1797 spinlock_t tx_lock ____cacheline_aligned_in_smp;
1798 struct tx_ring_info *tx_ring;
1799 struct sky2_tx_le *tx_le;
1800 u16 tx_cons; /* next le to check */
1801 u16 tx_prod; /* next le to use */
1802 u32 tx_addr64;
1803 u16 tx_pending;
1804 u16 tx_last_put;
1805 u16 tx_last_mss;
1806
1807 struct ring_info *rx_ring ____cacheline_aligned_in_smp;
1808 struct sky2_rx_le *rx_le;
1809 u32 rx_addr64;
1810 u16 rx_next; /* next re to check */
1811 u16 rx_put; /* next le index to use */
1812 u16 rx_pending;
1813 u16 rx_last_put;
1814 u16 rx_bufsize;
1815#ifdef SKY2_VLAN_TAG_USED
1816 u16 rx_tag;
1817 struct vlan_group *vlgrp;
1818#endif
1819
1820 dma_addr_t rx_le_map;
1821 dma_addr_t tx_le_map;
1822 u32 advertising; /* ADVERTISED_ bits */
1823 u16 speed; /* SPEED_1000, SPEED_100, ... */
1824 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1825 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1826 u8 rx_pause;
1827 u8 tx_pause;
1828 u8 rx_csum;
1829 u8 wol;
1830
1831 struct net_device_stats net_stats;
1832
1833 struct work_struct phy_task;
1834 struct semaphore phy_sema;
1835};
1836
1837struct sky2_hw {
1838 void __iomem *regs;
1839 struct pci_dev *pdev;
1840 u32 intr_mask;
1841 struct net_device *dev[2];
1842
1843 int pm_cap;
1844 u8 chip_id;
1845 u8 chip_rev;
1846 u8 copper;
1847 u8 ports;
1848
1849 struct sky2_status_le *st_le;
1850 u32 st_idx;
1851 dma_addr_t st_dma;
1852};
1853
1854/* Register accessor for memory mapped device */
1855static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
1856{
1857 return readl(hw->regs + reg);
1858}
1859
1860static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
1861{
1862 return readw(hw->regs + reg);
1863}
1864
1865static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
1866{
1867 return readb(hw->regs + reg);
1868}
1869
1870/* This should probably go away, bus-based tweaks suck */
1871static inline int is_pciex(const struct sky2_hw *hw)
1872{
1873 u32 status;
1874 pci_read_config_dword(hw->pdev, PCI_DEV_STATUS, &status);
1875 return (status & PCI_OS_PCI_X) == 0;
1876}
1877
1878static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
1879{
1880 writel(val, hw->regs + reg);
1881}
1882
1883static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
1884{
1885 writew(val, hw->regs + reg);
1886}
1887
1888static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
1889{
1890 writeb(val, hw->regs + reg);
1891}
1892
1893/* Yukon PHY related registers */
1894#define SK_GMAC_REG(port,reg) \
1895 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
1896#define GM_PHY_RETRIES 100
1897
1898static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
1899{
1900 return sky2_read16(hw, SK_GMAC_REG(port,reg));
1901}
1902
1903static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
1904{
1905 unsigned base = SK_GMAC_REG(port, reg);
1906 return (u32) sky2_read16(hw, base)
1907 | (u32) sky2_read16(hw, base+4) << 16;
1908}
1909
1910static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
1911{
1912 sky2_write16(hw, SK_GMAC_REG(port,r), v);
1913}
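/* Illustrative sketch, not part of the original header: a read-modify-write of
 * the General Purpose Control register through the accessors above, enabling
 * the receiver and transmitter.  The helper name is hypothetical.
 */
static inline void sky2_example_enable_rx_tx(const struct sky2_hw *hw, unsigned port)
{
	u16 ctrl = gma_read16(hw, port, GM_GP_CTRL);

	gma_write16(hw, port, GM_GP_CTRL, ctrl | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
}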
1914
1915static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
1916 const u8 *addr)
1917{
1918 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
1919 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
1920 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
1921}
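/* Illustrative sketch, not part of the original header: programming the first
 * station address group from a 6-byte MAC address with the helper above (the
 * second group, GM_SRC_ADDR_2L, is written the same way).  The helper name is
 * hypothetical.
 */
static inline void sky2_example_set_station_addr(struct sky2_hw *hw, unsigned port,
						 const u8 *mac)
{
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, mac);
}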
1922#endif
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 081717d01374..28ce47a02408 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2907,7 +2907,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
2907 return 0; 2907 return 0;
2908} 2908}
2909 2909
2910static void __devexit gem_remove_one(struct pci_dev *pdev) 2910static void gem_remove_one(struct pci_dev *pdev)
2911{ 2911{
2912 struct net_device *dev = pci_get_drvdata(pdev); 2912 struct net_device *dev = pci_get_drvdata(pdev);
2913 2913
@@ -3181,7 +3181,7 @@ static struct pci_driver gem_driver = {
3181 .name = GEM_MODULE_NAME, 3181 .name = GEM_MODULE_NAME,
3182 .id_table = gem_pci_tbl, 3182 .id_table = gem_pci_tbl,
3183 .probe = gem_init_one, 3183 .probe = gem_init_one,
3184 .remove = __devexit_p(gem_remove_one), 3184 .remove = gem_remove_one,
3185#ifdef CONFIG_PM 3185#ifdef CONFIG_PM
3186 .suspend = gem_suspend, 3186 .suspend = gem_suspend,
3187 .resume = gem_resume, 3187 .resume = gem_resume,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1828a6bf8458..eb86b059809b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/in.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/ioport.h> 29#include <linux/ioport.h>
29#include <linux/pci.h> 30#include <linux/pci.h>
@@ -68,8 +69,8 @@
68 69
69#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.43" 72#define DRV_MODULE_VERSION "3.47"
72#define DRV_MODULE_RELDATE "Oct 24, 2005" 73#define DRV_MODULE_RELDATE "Dec 28, 2005"
73 74
74#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -341,6 +342,16 @@ static struct {
341 { "interrupt test (offline)" }, 342 { "interrupt test (offline)" },
342}; 343};
343 344
345static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
346{
347 writel(val, tp->regs + off);
348}
349
350static u32 tg3_read32(struct tg3 *tp, u32 off)
351{
352 return (readl(tp->regs + off));
353}
354
344static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 355static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
345{ 356{
346 unsigned long flags; 357 unsigned long flags;
@@ -411,13 +422,29 @@ static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
411 return val; 422 return val;
412} 423}
413 424
414static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) 425/* usec_wait specifies the wait time in usec when writing to certain registers
426 * where it is unsafe to read back the register without some delay.
427 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
428 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
429 */
430static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
415{ 431{
416 tp->write32(tp, off, val); 432 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
417 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) && 433 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
418 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) && 434 /* Non-posted methods */
419 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) 435 tp->write32(tp, off, val);
420 tp->read32(tp, off); /* flush */ 436 else {
437 /* Posted method */
438 tg3_write32(tp, off, val);
439 if (usec_wait)
440 udelay(usec_wait);
441 tp->read32(tp, off);
442 }
443 /* Wait again after the read for the posted method to guarantee that
444 * the wait time is met.
445 */
446 if (usec_wait)
447 udelay(usec_wait);
421} 448}
422 449
423static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) 450static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
@@ -438,16 +465,6 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
438 readl(mbox); 465 readl(mbox);
439} 466}
440 467
441static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
442{
443 writel(val, tp->regs + off);
444}
445
446static u32 tg3_read32(struct tg3 *tp, u32 off)
447{
448 return (readl(tp->regs + off));
449}
450
451#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) 468#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
452#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) 469#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
453#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) 470#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
@@ -455,7 +472,8 @@ static u32 tg3_read32(struct tg3 *tp, u32 off)
455#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) 472#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
456 473
457#define tw32(reg,val) tp->write32(tp, reg, val) 474#define tw32(reg,val) tp->write32(tp, reg, val)
458#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val)) 475#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
476#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
459#define tr32(reg) tp->read32(tp, reg) 477#define tr32(reg) tp->read32(tp, reg)
460 478
461static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) 479static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
@@ -595,21 +613,19 @@ static void tg3_switch_clocks(struct tg3 *tp)
595 613
596 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 614 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
597 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { 615 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
598 tw32_f(TG3PCI_CLOCK_CTRL, 616 tw32_wait_f(TG3PCI_CLOCK_CTRL,
599 clock_ctrl | CLOCK_CTRL_625_CORE); 617 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
600 udelay(40);
601 } 618 }
602 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { 619 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
603 tw32_f(TG3PCI_CLOCK_CTRL, 620 tw32_wait_f(TG3PCI_CLOCK_CTRL,
604 clock_ctrl | 621 clock_ctrl |
605 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK)); 622 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
606 udelay(40); 623 40);
607 tw32_f(TG3PCI_CLOCK_CTRL, 624 tw32_wait_f(TG3PCI_CLOCK_CTRL,
608 clock_ctrl | (CLOCK_CTRL_ALTCLK)); 625 clock_ctrl | (CLOCK_CTRL_ALTCLK),
609 udelay(40); 626 40);
610 } 627 }
611 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl); 628 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
612 udelay(40);
613} 629}
614 630
615#define PHY_BUSY_LOOPS 5000 631#define PHY_BUSY_LOOPS 5000
@@ -1017,37 +1033,50 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1017 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0) 1033 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1018 return; 1034 return;
1019 1035
1020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 1036 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1021 tp_peer = pci_get_drvdata(tp->pdev_peer); 1037 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1022 if (!tp_peer) 1038 struct net_device *dev_peer;
1039
1040 dev_peer = pci_get_drvdata(tp->pdev_peer);
1041 if (!dev_peer)
1023 BUG(); 1042 BUG();
1043 tp_peer = netdev_priv(dev_peer);
1024 } 1044 }
1025 1045
1026
1027 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || 1046 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1028 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) { 1047 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1048 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1049 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 1050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 1051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1031 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1052 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1032 (GRC_LCLCTRL_GPIO_OE0 | 1053 (GRC_LCLCTRL_GPIO_OE0 |
1033 GRC_LCLCTRL_GPIO_OE1 | 1054 GRC_LCLCTRL_GPIO_OE1 |
1034 GRC_LCLCTRL_GPIO_OE2 | 1055 GRC_LCLCTRL_GPIO_OE2 |
1035 GRC_LCLCTRL_GPIO_OUTPUT0 | 1056 GRC_LCLCTRL_GPIO_OUTPUT0 |
1036 GRC_LCLCTRL_GPIO_OUTPUT1)); 1057 GRC_LCLCTRL_GPIO_OUTPUT1),
1037 udelay(100); 1058 100);
1038 } else { 1059 } else {
1039 u32 no_gpio2; 1060 u32 no_gpio2;
1040 u32 grc_local_ctrl; 1061 u32 grc_local_ctrl = 0;
1041 1062
1042 if (tp_peer != tp && 1063 if (tp_peer != tp &&
1043 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) 1064 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1044 return; 1065 return;
1045 1066
1067 /* Workaround to prevent overdrawing Amps. */
1068 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1069 ASIC_REV_5714) {
1070 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1071 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072 grc_local_ctrl, 100);
1073 }
1074
1046 /* On 5753 and variants, GPIO2 cannot be used. */ 1075 /* On 5753 and variants, GPIO2 cannot be used. */
1047 no_gpio2 = tp->nic_sram_data_cfg & 1076 no_gpio2 = tp->nic_sram_data_cfg &
1048 NIC_SRAM_DATA_CFG_NO_GPIO2; 1077 NIC_SRAM_DATA_CFG_NO_GPIO2;
1049 1078
1050 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 1079 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1051 GRC_LCLCTRL_GPIO_OE1 | 1080 GRC_LCLCTRL_GPIO_OE1 |
1052 GRC_LCLCTRL_GPIO_OE2 | 1081 GRC_LCLCTRL_GPIO_OE2 |
1053 GRC_LCLCTRL_GPIO_OUTPUT1 | 1082 GRC_LCLCTRL_GPIO_OUTPUT1 |
@@ -1056,21 +1085,18 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1056 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 1085 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1057 GRC_LCLCTRL_GPIO_OUTPUT2); 1086 GRC_LCLCTRL_GPIO_OUTPUT2);
1058 } 1087 }
1059 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1088 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1060 grc_local_ctrl); 1089 grc_local_ctrl, 100);
1061 udelay(100);
1062 1090
1063 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 1091 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1064 1092
1065 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1093 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1066 grc_local_ctrl); 1094 grc_local_ctrl, 100);
1067 udelay(100);
1068 1095
1069 if (!no_gpio2) { 1096 if (!no_gpio2) {
1070 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 1097 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1071 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1098 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072 grc_local_ctrl); 1099 grc_local_ctrl, 100);
1073 udelay(100);
1074 } 1100 }
1075 } 1101 }
1076 } else { 1102 } else {
@@ -1080,19 +1106,16 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1080 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) 1106 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1081 return; 1107 return;
1082 1108
1083 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1109 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1084 (GRC_LCLCTRL_GPIO_OE1 | 1110 (GRC_LCLCTRL_GPIO_OE1 |
1085 GRC_LCLCTRL_GPIO_OUTPUT1)); 1111 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1086 udelay(100);
1087 1112
1088 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1113 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089 (GRC_LCLCTRL_GPIO_OE1)); 1114 GRC_LCLCTRL_GPIO_OE1, 100);
1090 udelay(100);
1091 1115
1092 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 1116 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1093 (GRC_LCLCTRL_GPIO_OE1 | 1117 (GRC_LCLCTRL_GPIO_OE1 |
1094 GRC_LCLCTRL_GPIO_OUTPUT1)); 1118 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1095 udelay(100);
1096 } 1119 }
1097 } 1120 }
1098} 1121}
@@ -1105,6 +1128,8 @@ static int tg3_setup_phy(struct tg3 *, int);
1105 1128
1106static void tg3_write_sig_post_reset(struct tg3 *, int); 1129static void tg3_write_sig_post_reset(struct tg3 *, int);
1107static int tg3_halt_cpu(struct tg3 *, u32); 1130static int tg3_halt_cpu(struct tg3 *, u32);
1131static int tg3_nvram_lock(struct tg3 *);
1132static void tg3_nvram_unlock(struct tg3 *);
1108 1133
1109static int tg3_set_power_state(struct tg3 *tp, int state) 1134static int tg3_set_power_state(struct tg3 *tp, int state)
1110{ 1135{
@@ -1133,10 +1158,8 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1133 udelay(100); /* Delay after power state change */ 1158 udelay(100); /* Delay after power state change */
1134 1159
1135 /* Switch out of Vaux if it is not a LOM */ 1160 /* Switch out of Vaux if it is not a LOM */
1136 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) { 1161 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1137 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 1162 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1138 udelay(100);
1139 }
1140 1163
1141 return 0; 1164 return 0;
1142 1165
@@ -1179,6 +1202,21 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1179 tg3_setup_phy(tp, 0); 1202 tg3_setup_phy(tp, 0);
1180 } 1203 }
1181 1204
1205 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1206 int i;
1207 u32 val;
1208
1209 for (i = 0; i < 200; i++) {
1210 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1211 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1212 break;
1213 msleep(1);
1214 }
1215 }
1216 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1217 WOL_DRV_STATE_SHUTDOWN |
1218 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1219
1182 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps); 1220 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1183 1221
1184 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { 1222 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
@@ -1220,10 +1258,8 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1220 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 1258 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1221 CLOCK_CTRL_TXCLK_DISABLE); 1259 CLOCK_CTRL_TXCLK_DISABLE);
1222 1260
1223 tw32_f(TG3PCI_CLOCK_CTRL, base_val | 1261 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1224 CLOCK_CTRL_ALTCLK | 1262 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1225 CLOCK_CTRL_PWRDOWN_PLL133);
1226 udelay(40);
1227 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 1263 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1228 /* do nothing */ 1264 /* do nothing */
1229 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 1265 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
@@ -1244,11 +1280,11 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1244 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 1280 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1245 } 1281 }
1246 1282
1247 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1); 1283 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1248 udelay(40); 1284 40);
1249 1285
1250 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2); 1286 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1251 udelay(40); 1287 40);
1252 1288
1253 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 1289 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1254 u32 newbits3; 1290 u32 newbits3;
@@ -1262,9 +1298,20 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1262 newbits3 = CLOCK_CTRL_44MHZ_CORE; 1298 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1263 } 1299 }
1264 1300
1265 tw32_f(TG3PCI_CLOCK_CTRL, 1301 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1266 tp->pci_clock_ctrl | newbits3); 1302 tp->pci_clock_ctrl | newbits3, 40);
1267 udelay(40); 1303 }
1304 }
1305
1306 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1307 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1308 /* Turn off the PHY */
1309 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1310 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1311 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1312 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1313 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1314 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1268 } 1315 }
1269 } 1316 }
1270 1317
@@ -1277,8 +1324,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1277 1324
1278 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1279 tw32(0x7d00, val); 1326 tw32(0x7d00, val);
1280 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 1327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328 tg3_nvram_lock(tp);
1281 tg3_halt_cpu(tp, RX_CPU_BASE); 1329 tg3_halt_cpu(tp, RX_CPU_BASE);
1330 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
1331 tg3_nvram_unlock(tp);
1332 }
1282 } 1333 }
1283 1334
1284 /* Finally, set the new power state. */ 1335 /* Finally, set the new power state. */
@@ -1812,7 +1863,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1812 } 1863 }
1813 } 1864 }
1814relink: 1865relink:
1815 if (current_link_up == 0) { 1866 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1816 u32 tmp; 1867 u32 tmp;
1817 1868
1818 tg3_phy_copper_begin(tp); 1869 tg3_phy_copper_begin(tp);
@@ -3565,12 +3616,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3565 if (!spin_trylock(&tp->tx_lock)) 3616 if (!spin_trylock(&tp->tx_lock))
3566 return NETDEV_TX_LOCKED; 3617 return NETDEV_TX_LOCKED;
3567 3618
3568 /* This is a hard error, log it. */
3569 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 3619 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3570 netif_stop_queue(dev); 3620 if (!netif_queue_stopped(dev)) {
3621 netif_stop_queue(dev);
3622
3623 /* This is a hard error, log it. */
3624 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3625 "queue awake!\n", dev->name);
3626 }
3571 spin_unlock(&tp->tx_lock); 3627 spin_unlock(&tp->tx_lock);
3572 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3573 dev->name);
3574 return NETDEV_TX_BUSY; 3628 return NETDEV_TX_BUSY;
3575 } 3629 }
3576 3630
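[Editor's note] The hunk above moves the "Tx ring full when queue awake" message inside a netif_queue_stopped() check, so the error is logged only when the queue transitions from awake to stopped; a full ring on an already-stopped queue now just returns NETDEV_TX_BUSY quietly. A rough standalone sketch of that guard, with the queue state, counters and return codes as stand-ins:

#include <stdio.h>
#include <stdbool.h>

static bool queue_stopped;
static int  bufs_avail = 0;              /* pretend the ring is already full */

static int hypothetical_xmit(int frags_needed)
{
        if (bufs_avail <= frags_needed) {
                if (!queue_stopped) {
                        queue_stopped = true;   /* netif_stop_queue() */
                        /* Only a ring-full hit on an awake queue is a bug
                         * worth shouting about; the stopped case is just the
                         * race the return value below already handles. */
                        fprintf(stderr, "BUG! Tx ring full when queue awake!\n");
                }
                return 1;                       /* NETDEV_TX_BUSY */
        }
        bufs_avail -= frags_needed;
        return 0;                               /* NETDEV_TX_OK */
}

int main(void)
{
        /* Two back-to-back failures: only the first one logs. */
        hypothetical_xmit(2);
        hypothetical_xmit(2);
        return 0;
}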
@@ -3597,7 +3651,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3597 TXD_FLAG_CPU_POST_DMA); 3651 TXD_FLAG_CPU_POST_DMA);
3598 3652
3599 skb->nh.iph->check = 0; 3653 skb->nh.iph->check = 0;
3600 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len); 3654 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3601 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 3655 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3602 skb->h.th->check = 0; 3656 skb->h.th->check = 0;
3603 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 3657 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
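[Editor's note] The one-character change above (ntohs to htons) is about byte-order direction rather than generated code: tot_len is a network-order field being filled from a host-order value, so htons is the semantically correct conversion and keeps endianness checking honest. A small self-contained illustration of what "network order" means for such a field; the struct is only a stand-in for the kernel's struct iphdr:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct fake_iphdr {
        uint16_t tot_len;       /* stored in network (big-endian) order */
};

int main(void)
{
        struct fake_iphdr ip;
        uint16_t host_len = 1500;
        unsigned char bytes[2];

        ip.tot_len = htons(host_len);   /* host -> network: the correct macro */
        memcpy(bytes, &ip.tot_len, 2);

        /* Regardless of host endianness the wire layout is 0x05 0xdc (1500). */
        printf("wire bytes: %02x %02x, back to host: %u\n",
               bytes[0], bytes[1], ntohs(ip.tot_len));
        return 0;
}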
@@ -7098,8 +7152,13 @@ do { p = (u32 *)(orig_p + (reg)); \
7098 GET_REG32_LOOP(BUFMGR_MODE, 0x58); 7152 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7099 GET_REG32_LOOP(RDMAC_MODE, 0x08); 7153 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7100 GET_REG32_LOOP(WDMAC_MODE, 0x08); 7154 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7101 GET_REG32_LOOP(RX_CPU_BASE, 0x280); 7155 GET_REG32_1(RX_CPU_MODE);
7102 GET_REG32_LOOP(TX_CPU_BASE, 0x280); 7156 GET_REG32_1(RX_CPU_STATE);
7157 GET_REG32_1(RX_CPU_PGMCTR);
7158 GET_REG32_1(RX_CPU_HWBKPT);
7159 GET_REG32_1(TX_CPU_MODE);
7160 GET_REG32_1(TX_CPU_STATE);
7161 GET_REG32_1(TX_CPU_PGMCTR);
7103 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); 7162 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7104 GET_REG32_LOOP(FTQ_RESET, 0x120); 7163 GET_REG32_LOOP(FTQ_RESET, 0x120);
7105 GET_REG32_LOOP(MSGINT_MODE, 0x0c); 7164 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
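[Editor's note] The hunk above stops dumping the RX/TX CPU register windows as contiguous loops and instead captures a handful of individual registers (their offsets are added in the tg3.h hunk further down). A simplified standalone model of the two dump styles; these macro bodies are stand-ins, not the driver's exact definitions, and the offsets and register contents are fabricated:

#include <stdio.h>
#include <stdint.h>

#define REGS_LEN 0x40                      /* fake register space, in bytes */

static uint32_t fake_regs[REGS_LEN / 4];   /* pretend MMIO window */
static uint32_t dump[REGS_LEN / 4];        /* ethtool-style dump buffer */

static uint32_t rd32(uint32_t off) { return fake_regs[off / 4]; }

/* Store one register at its own offset in the dump, in the spirit of the
 * visible fragment p = (u32 *)(orig_p + (reg)). */
#define GET_ONE(reg)  (dump[(reg) / 4] = rd32(reg))

/* Copy a contiguous run of registers, loop-style. */
#define GET_LOOP(base, len)                                        \
        do {                                                       \
                uint32_t off_;                                     \
                for (off_ = (base); off_ < (base) + (len); off_ += 4) \
                        dump[off_ / 4] = rd32(off_);               \
        } while (0)

int main(void)
{
        fake_regs[0x10 / 4] = 0xdeadbeef;  /* stand-in for a CPU MODE register */
        fake_regs[0x14 / 4] = 0x00000002;  /* stand-in for a CPU STATE register */

        GET_LOOP(0x00, 0x10);              /* bulk window */
        GET_ONE(0x10);                     /* individual registers only */
        GET_ONE(0x14);

        printf("dump[0x10] = 0x%08x\n", (unsigned)dump[0x10 / 4]);
        return 0;
}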
@@ -7922,13 +7981,12 @@ static int tg3_test_memory(struct tg3 *tp)
7922 u32 offset; 7981 u32 offset;
7923 u32 len; 7982 u32 len;
7924 } mem_tbl_570x[] = { 7983 } mem_tbl_570x[] = {
7925 { 0x00000000, 0x01000}, 7984 { 0x00000000, 0x00b50},
7926 { 0x00002000, 0x1c000}, 7985 { 0x00002000, 0x1c000},
7927 { 0xffffffff, 0x00000} 7986 { 0xffffffff, 0x00000}
7928 }, mem_tbl_5705[] = { 7987 }, mem_tbl_5705[] = {
7929 { 0x00000100, 0x0000c}, 7988 { 0x00000100, 0x0000c},
7930 { 0x00000200, 0x00008}, 7989 { 0x00000200, 0x00008},
7931 { 0x00000b50, 0x00400},
7932 { 0x00004000, 0x00800}, 7990 { 0x00004000, 0x00800},
7933 { 0x00006000, 0x01000}, 7991 { 0x00006000, 0x01000},
7934 { 0x00008000, 0x02000}, 7992 { 0x00008000, 0x02000},
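[Editor's note] Both memory-test tables above are arrays of {offset, len} pairs ended by a 0xffffffff sentinel; the change shrinks the first 570x range to 0xb50 bytes and drops the 0x00000b50 entry from the 5705 table, without touching the walker. A minimal standalone walker over such a sentinel-terminated table, with the per-range pattern test reduced to a stub:

#include <stdio.h>
#include <stdint.h>

struct mem_entry {
        uint32_t offset;
        uint32_t len;
};

/* Same shape as mem_tbl_570x above; 0xffffffff marks the end of the table. */
static const struct mem_entry mem_tbl[] = {
        { 0x00000000, 0x00b50 },
        { 0x00002000, 0x1c000 },
        { 0xffffffff, 0x00000 }
};

static int test_range(uint32_t off, uint32_t len)
{
        printf("testing 0x%x bytes at 0x%08x\n", (unsigned)len, (unsigned)off);
        return 0;       /* pretend the pattern test passed */
}

int main(void)
{
        int i, err = 0;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = test_range(mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }
        return err;
}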
@@ -8530,6 +8588,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
8530 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 8588 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8531 tp->tg3_flags |= TG3_FLAG_NVRAM; 8589 tp->tg3_flags |= TG3_FLAG_NVRAM;
8532 8590
8591 tg3_nvram_lock(tp);
8533 tg3_enable_nvram_access(tp); 8592 tg3_enable_nvram_access(tp);
8534 8593
8535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 8594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8540,6 +8599,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
8540 tg3_get_nvram_size(tp); 8599 tg3_get_nvram_size(tp);
8541 8600
8542 tg3_disable_nvram_access(tp); 8601 tg3_disable_nvram_access(tp);
8602 tg3_nvram_unlock(tp);
8543 8603
8544 } else { 8604 } else {
8545 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); 8605 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
@@ -8637,10 +8697,10 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8637 if (ret == 0) 8697 if (ret == 0)
8638 *val = swab32(tr32(NVRAM_RDDATA)); 8698 *val = swab32(tr32(NVRAM_RDDATA));
8639 8699
8640 tg3_nvram_unlock(tp);
8641
8642 tg3_disable_nvram_access(tp); 8700 tg3_disable_nvram_access(tp);
8643 8701
8702 tg3_nvram_unlock(tp);
8703
8644 return ret; 8704 return ret;
8645} 8705}
8646 8706
@@ -8725,6 +8785,10 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8725 8785
8726 offset = offset + (pagesize - page_off); 8786 offset = offset + (pagesize - page_off);
8727 8787
8788 /* Nvram lock released by tg3_nvram_read() above,
8789 * so need to get it again.
8790 */
8791 tg3_nvram_lock(tp);
8728 tg3_enable_nvram_access(tp); 8792 tg3_enable_nvram_access(tp);
8729 8793
8730 /* 8794 /*
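[Editor's note] The NVRAM hunks above tighten the pairing of tg3_nvram_lock()/tg3_nvram_unlock() around NVRAM access: the arbitration lock is taken before access is enabled and dropped only after access is disabled, and a caller whose helper released the lock has to re-take it. A toy pthread sketch of that acquire/enable then disable/release discipline; the function names and the "device" are invented for illustration:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t nvram_arb = PTHREAD_MUTEX_INITIALIZER;
static int access_enabled;

static void nvram_lock(void)     { pthread_mutex_lock(&nvram_arb); }
static void nvram_unlock(void)   { pthread_mutex_unlock(&nvram_arb); }
static void enable_access(void)  { access_enabled = 1; }
static void disable_access(void) { access_enabled = 0; }

static unsigned int nvram_read(unsigned int offset)
{
        unsigned int val;

        nvram_lock();                   /* take the arbitration lock first */
        enable_access();
        val = offset ^ 0xa5a5a5a5;      /* stand-in for the real NVRAM read */
        disable_access();               /* tear down in reverse order of setup */
        nvram_unlock();                 /* so the lock always outlives access */
        return val;
}

int main(void)
{
        printf("0x%08x\n", nvram_read(0x100));
        return 0;
}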
@@ -10423,7 +10487,7 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10423 return str; 10487 return str;
10424} 10488}
10425 10489
10426static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp) 10490static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10427{ 10491{
10428 struct pci_dev *peer; 10492 struct pci_dev *peer;
10429 unsigned int func, devnr = tp->pdev->devfn & ~7; 10493 unsigned int func, devnr = tp->pdev->devfn & ~7;
@@ -10434,8 +10498,13 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10434 break; 10498 break;
10435 pci_dev_put(peer); 10499 pci_dev_put(peer);
10436 } 10500 }
10437 if (!peer || peer == tp->pdev) 10501 /* 5704 can be configured in single-port mode, set peer to
10438 BUG(); 10502 * tp->pdev in that case.
10503 */
10504 if (!peer) {
10505 peer = tp->pdev;
10506 return peer;
10507 }
10439 10508
10440 /* 10509 /*
10441 * We don't need to keep the refcount elevated; there's no way 10510 * We don't need to keep the refcount elevated; there's no way
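[Editor's note] tg3_find_peer() above masks the low three bits of devfn to get function 0 of the slot and walks functions 0-7 looking for the other port, now falling back to the device itself when no peer is found (a single-port 5704 configuration). The devfn packing it relies on is slot*8+func, as in PCI_DEVFN(); a standalone sketch of that arithmetic with a fabricated "functions present" table:

#include <stdio.h>

#define DEVFN(slot, func)  (((slot) << 3) | (func))   /* same packing as PCI_DEVFN */
#define SLOT_OF(devfn)     ((devfn) >> 3)
#define FUNC_OF(devfn)     ((devfn) & 0x7)

int main(void)
{
        /* Functions present in this slot; a single-port card would have only
         * function 0, and the peer search would fall back to the device itself. */
        int present[8] = { 1, 1, 0, 0, 0, 0, 0, 0 };
        int self = DEVFN(4, 0);          /* our device: slot 4, function 0 */
        int base = self & ~7;            /* devfn of function 0 in the slot */
        int func, peer = -1;

        for (func = 0; func < 8; func++) {
                int devfn = base + func;
                if (devfn != self && present[FUNC_OF(devfn)]) {
                        peer = devfn;
                        break;
                }
        }

        if (peer < 0)
                peer = self;             /* single-port: peer is ourselves */

        printf("self %02x.%d  peer %02x.%d\n",
               SLOT_OF(self), FUNC_OF(self), SLOT_OF(peer), FUNC_OF(peer));
        return 0;
}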
@@ -10671,8 +10740,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10671 tp->rx_pending = 63; 10740 tp->rx_pending = 63;
10672 } 10741 }
10673 10742
10674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 10743 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10675 tp->pdev_peer = tg3_find_5704_peer(tp); 10744 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10745 tp->pdev_peer = tg3_find_peer(tp);
10676 10746
10677 err = tg3_get_device_address(tp); 10747 err = tg3_get_device_address(tp);
10678 if (err) { 10748 if (err) {
@@ -10817,12 +10887,14 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10817 10887
10818 tg3_full_lock(tp, 0); 10888 tg3_full_lock(tp, 0);
10819 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10889 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10890 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
10820 tg3_full_unlock(tp); 10891 tg3_full_unlock(tp);
10821 10892
10822 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 10893 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10823 if (err) { 10894 if (err) {
10824 tg3_full_lock(tp, 0); 10895 tg3_full_lock(tp, 0);
10825 10896
10897 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10826 tg3_init_hw(tp); 10898 tg3_init_hw(tp);
10827 10899
10828 tp->timer.expires = jiffies + tp->timer_offset; 10900 tp->timer.expires = jiffies + tp->timer_offset;
@@ -10856,6 +10928,7 @@ static int tg3_resume(struct pci_dev *pdev)
10856 10928
10857 tg3_full_lock(tp, 0); 10929 tg3_full_lock(tp, 0);
10858 10930
10931 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10859 tg3_init_hw(tp); 10932 tg3_init_hw(tp);
10860 10933
10861 tp->timer.expires = jiffies + tp->timer_offset; 10934 tp->timer.expires = jiffies + tp->timer_offset;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index fb7e2a5f4a08..890e1635996b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1124,7 +1124,14 @@
1124/* 0x280 --> 0x400 unused */ 1124/* 0x280 --> 0x400 unused */
1125 1125
1126#define RX_CPU_BASE 0x00005000 1126#define RX_CPU_BASE 0x00005000
1127#define RX_CPU_MODE 0x00005000
1128#define RX_CPU_STATE 0x00005004
1129#define RX_CPU_PGMCTR 0x0000501c
1130#define RX_CPU_HWBKPT 0x00005034
1127#define TX_CPU_BASE 0x00005400 1131#define TX_CPU_BASE 0x00005400
1132#define TX_CPU_MODE 0x00005400
1133#define TX_CPU_STATE 0x00005404
1134#define TX_CPU_PGMCTR 0x0000541c
1128 1135
1129/* Mailboxes */ 1136/* Mailboxes */
1130#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */ 1137#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */
@@ -1529,6 +1536,12 @@
1529#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14 1536#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14
1530#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18 1537#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18
1531 1538
1539#define NIC_SRAM_WOL_MBOX 0x00000d30
1540#define WOL_SIGNATURE 0x474c0000
1541#define WOL_DRV_STATE_SHUTDOWN 0x00000001
1542#define WOL_DRV_WOL 0x00000002
1543#define WOL_SET_MAGIC_PKT 0x00000004
1544
1532#define NIC_SRAM_DATA_CFG_2 0x00000d38 1545#define NIC_SRAM_DATA_CFG_2 0x00000d38
1533 1546
1534#define SHASTA_EXT_LED_MODE_MASK 0x00018000 1547#define SHASTA_EXT_LED_MODE_MASK 0x00018000
@@ -1565,6 +1578,7 @@
1565#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ 1578#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */
1566#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 1579#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
1567#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 1580#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002
1581#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008
1568#define MII_TG3_EXT_CTRL_TBI 0x8000 1582#define MII_TG3_EXT_CTRL_TBI 0x8000
1569 1583
1570#define MII_TG3_EXT_STAT 0x11 /* Extended status register */ 1584#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 942fae0f2130..c2506b56a186 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -2865,11 +2865,11 @@ void TLan_PhyMonitor( struct net_device *dev )
2865 * for this device. 2865 * for this device.
2866 * phy The address of the PHY to be queried. 2866 * phy The address of the PHY to be queried.
2867 * reg The register whose contents are to be 2867 * reg The register whose contents are to be
2868 * retreived. 2868 * retrieved.
2869 * val A pointer to a variable to store the 2869 * val A pointer to a variable to store the
2870 * retrieved value. 2870 * retrieved value.
2871 * 2871 *
2872 * This function uses the TLAN's MII bus to retreive the contents 2872 * This function uses the TLAN's MII bus to retrieve the contents
2873 * of a given register on a PHY. It sends the appropriate info 2873 * of a given register on a PHY. It sends the appropriate info
2874 * and then reads the 16-bit register value from the MII bus via 2874 * and then reads the 16-bit register value from the MII bus via
2875 * the TLAN SIO register. 2875 * the TLAN SIO register.
diff --git a/drivers/net/wan/lmc/lmc_prot.h b/drivers/net/wan/lmc/lmc_prot.h
deleted file mode 100644
index f3b1df9e2cdb..000000000000
--- a/drivers/net/wan/lmc/lmc_prot.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_
3
4void lmc_proto_init(lmc_softc_t * const)
5void lmc_proto_attach(lmc_softc_t *sc const)
6void lmc_proto_detach(lmc_softc *sc const)
7void lmc_proto_reopen(lmc_softc_t *sc const)
8int lmc_proto_ioctl(lmc_softc_t *sc const, struct ifreq *ifr, int cmd)
9void lmc_proto_open(lmc_softc_t *sc const)
10void lmc_proto_close(lmc_softc_t *sc const)
11unsigned short lmc_proto_type(lmc_softc_t *sc const, struct skbuff *skb)
12
13
14#endif
15
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 00e55165b760..24f7967aab67 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -173,7 +173,7 @@ config IPW2100_MONITOR
173 promiscuous mode via the Wireless Tool's Monitor mode. While in this 173 promiscuous mode via the Wireless Tool's Monitor mode. While in this
174 mode, no packets can be sent. 174 mode, no packets can be sent.
175 175
176config IPW_DEBUG 176config IPW2100_DEBUG
177 bool "Enable full debugging output in IPW2100 module." 177 bool "Enable full debugging output in IPW2100 module."
178 depends on IPW2100 178 depends on IPW2100
179 ---help--- 179 ---help---
@@ -192,7 +192,7 @@ config IPW_DEBUG
192 192
193config IPW2200 193config IPW2200
194 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 194 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
195 depends on IEEE80211 && PCI 195 depends on NET_RADIO && IEEE80211 && PCI
196 select FW_LOADER 196 select FW_LOADER
197 ---help--- 197 ---help---
198 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network 198 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
@@ -217,7 +217,7 @@ config IPW2200
217 say M here and read <file:Documentation/modules.txt>. The module 217 say M here and read <file:Documentation/modules.txt>. The module
218 will be called ipw2200.ko. 218 will be called ipw2200.ko.
219 219
220config IPW_DEBUG 220config IPW2200_DEBUG
221 bool "Enable full debugging output in IPW2200 module." 221 bool "Enable full debugging output in IPW2200 module."
222 depends on IPW2200 222 depends on IPW2200
223 ---help--- 223 ---help---
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 340ab4ee4b67..ee866fd6957d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2755,8 +2755,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2755 SET_NETDEV_DEV(dev, dmdev); 2755 SET_NETDEV_DEV(dev, dmdev);
2756 2756
2757 2757
2758 if (test_bit(FLAG_MPI,&ai->flags)) 2758 reset_card (dev, 1);
2759 reset_card (dev, 1); 2759 msleep(400);
2760 2760
2761 rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev ); 2761 rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev );
2762 if (rc) { 2762 if (rc) {
@@ -4037,7 +4037,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4037 Cmd cmd; 4037 Cmd cmd;
4038 Resp rsp; 4038 Resp rsp;
4039 4039
4040 if (test_bit(FLAG_ENABLED, &ai->flags)) 4040 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
4041 printk(KERN_ERR 4041 printk(KERN_ERR
4042 "%s: MAC should be disabled (rid=%04x)\n", 4042 "%s: MAC should be disabled (rid=%04x)\n",
4043 __FUNCTION__, rid); 4043 __FUNCTION__, rid);
@@ -5093,9 +5093,9 @@ static int set_wep_key(struct airo_info *ai, u16 index,
5093 printk(KERN_INFO "Setting key %d\n", index); 5093 printk(KERN_INFO "Setting key %d\n", index);
5094 } 5094 }
5095 5095
5096 disable_MAC(ai, lock); 5096 if (perm) disable_MAC(ai, lock);
5097 writeWepKeyRid(ai, &wkr, perm, lock); 5097 writeWepKeyRid(ai, &wkr, perm, lock);
5098 enable_MAC(ai, &rsp, lock); 5098 if (perm) enable_MAC(ai, &rsp, lock);
5099 return 0; 5099 return 0;
5100} 5100}
5101 5101
@@ -6170,6 +6170,8 @@ static int airo_set_encode(struct net_device *dev,
6170{ 6170{
6171 struct airo_info *local = dev->priv; 6171 struct airo_info *local = dev->priv;
6172 CapabilityRid cap_rid; /* Card capability info */ 6172 CapabilityRid cap_rid; /* Card capability info */
6173 int perm = ( dwrq->flags & IW_ENCODE_TEMP ? 0 : 1 );
6174 u16 currentAuthType = local->config.authType;
6173 6175
6174 /* Is WEP supported ? */ 6176 /* Is WEP supported ? */
6175 readCapabilityRid(local, &cap_rid, 1); 6177 readCapabilityRid(local, &cap_rid, 1);
@@ -6212,7 +6214,7 @@ static int airo_set_encode(struct net_device *dev,
6212 /* Copy the key in the driver */ 6214 /* Copy the key in the driver */
6213 memcpy(key.key, extra, dwrq->length); 6215 memcpy(key.key, extra, dwrq->length);
6214 /* Send the key to the card */ 6216 /* Send the key to the card */
6215 set_wep_key(local, index, key.key, key.len, 1, 1); 6217 set_wep_key(local, index, key.key, key.len, perm, 1);
6216 } 6218 }
6217 /* WE specify that if a valid key is set, encryption 6219 /* WE specify that if a valid key is set, encryption
6218 * should be enabled (user may turn it off later) 6220 * should be enabled (user may turn it off later)
@@ -6220,13 +6222,12 @@ static int airo_set_encode(struct net_device *dev,
6220 if((index == current_index) && (key.len > 0) && 6222 if((index == current_index) && (key.len > 0) &&
6221 (local->config.authType == AUTH_OPEN)) { 6223 (local->config.authType == AUTH_OPEN)) {
6222 local->config.authType = AUTH_ENCRYPT; 6224 local->config.authType = AUTH_ENCRYPT;
6223 set_bit (FLAG_COMMIT, &local->flags);
6224 } 6225 }
6225 } else { 6226 } else {
6226 /* Do we want to just set the transmit key index ? */ 6227 /* Do we want to just set the transmit key index ? */
6227 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 6228 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
6228 if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) { 6229 if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) {
6229 set_wep_key(local, index, NULL, 0, 1, 1); 6230 set_wep_key(local, index, NULL, 0, perm, 1);
6230 } else 6231 } else
6231 /* Don't complain if only change the mode */ 6232 /* Don't complain if only change the mode */
6232 if(!dwrq->flags & IW_ENCODE_MODE) { 6233 if(!dwrq->flags & IW_ENCODE_MODE) {
@@ -6241,7 +6242,7 @@ static int airo_set_encode(struct net_device *dev,
6241 if(dwrq->flags & IW_ENCODE_OPEN) 6242 if(dwrq->flags & IW_ENCODE_OPEN)
6242 local->config.authType = AUTH_ENCRYPT; // Only Wep 6243 local->config.authType = AUTH_ENCRYPT; // Only Wep
6243 /* Commit the changes to flags if needed */ 6244 /* Commit the changes to flags if needed */
6244 if(dwrq->flags & IW_ENCODE_MODE) 6245 if (local->config.authType != currentAuthType)
6245 set_bit (FLAG_COMMIT, &local->flags); 6246 set_bit (FLAG_COMMIT, &local->flags);
6246 return -EINPROGRESS; /* Call commit handler */ 6247 return -EINPROGRESS; /* Call commit handler */
6247} 6248}
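[Editor's note] The airo.c hunks above derive a perm flag from the absence of IW_ENCODE_TEMP and use it so that temporary key writes skip the disable_MAC()/enable_MAC() cycle, while the commit flag is set only when the auth type actually changes. A small sketch of gating expensive side effects on such a derived flag; the bit value here is a placeholder, not necessarily the real IW_ENCODE_TEMP:

#include <stdio.h>

#define ENCODE_TEMP 0x0400      /* illustrative stand-in for IW_ENCODE_TEMP */

static void disable_mac(void) { puts("MAC disabled"); }
static void enable_mac(void)  { puts("MAC enabled");  }

static void hypothetical_set_wep_key(unsigned flags)
{
        int perm = (flags & ENCODE_TEMP) ? 0 : 1;

        if (perm)               /* only persistent writes justify the MAC cycle */
                disable_mac();
        puts("key written");
        if (perm)
                enable_mac();
}

int main(void)
{
        hypothetical_set_wep_key(0);            /* persistent: cycles the MAC */
        hypothetical_set_wep_key(ENCODE_TEMP);  /* temporary: key only */
        return 0;
}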
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 5e53c5258a33..e4729ddf29fd 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -5,9 +5,9 @@
5 Copyright 2000-2001 ATMEL Corporation. 5 Copyright 2000-2001 ATMEL Corporation.
6 Copyright 2003-2004 Simon Kelley. 6 Copyright 2003-2004 Simon Kelley.
7 7
8 This code was developed from version 2.1.1 of the Atmel drivers, 8 This code was developed from version 2.1.1 of the Atmel drivers,
9 released by Atmel corp. under the GPL in December 2002. It also 9 released by Atmel corp. under the GPL in December 2002. It also
10 includes code from the Linux aironet drivers (C) Benjamin Reed, 10 includes code from the Linux aironet drivers (C) Benjamin Reed,
11 and the Linux PCMCIA package, (C) David Hinds and the Linux wireless 11 and the Linux PCMCIA package, (C) David Hinds and the Linux wireless
12 extensions, (C) Jean Tourrilhes. 12 extensions, (C) Jean Tourrilhes.
13 13
@@ -31,7 +31,7 @@
31 along with Atmel wireless lan drivers; if not, write to the Free Software 31 along with Atmel wireless lan drivers; if not, write to the Free Software
32 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 32 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 33
34 For all queries about this code, please contact the current author, 34 For all queries about this code, please contact the current author,
35 Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. 35 Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
36 36
37 Credit is due to HP UK and Cambridge Online Systems Ltd for supplying 37 Credit is due to HP UK and Cambridge Online Systems Ltd for supplying
@@ -79,13 +79,13 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards"); 80MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards");
81 81
82/* The name of the firmware file to be loaded 82/* The name of the firmware file to be loaded
83 over-rides any automatic selection */ 83 over-rides any automatic selection */
84static char *firmware = NULL; 84static char *firmware = NULL;
85module_param(firmware, charp, 0); 85module_param(firmware, charp, 0);
86 86
87/* table of firmware file names */ 87/* table of firmware file names */
88static struct { 88static struct {
89 AtmelFWType fw_type; 89 AtmelFWType fw_type;
90 const char *fw_file; 90 const char *fw_file;
91 const char *fw_file_ext; 91 const char *fw_file_ext;
@@ -104,17 +104,17 @@ static struct {
104#define MAX_SSID_LENGTH 32 104#define MAX_SSID_LENGTH 32
105#define MGMT_JIFFIES (256 * HZ / 100) 105#define MGMT_JIFFIES (256 * HZ / 100)
106 106
107#define MAX_BSS_ENTRIES 64 107#define MAX_BSS_ENTRIES 64
108 108
109/* registers */ 109/* registers */
110#define GCR 0x00 // (SIR0) General Configuration Register 110#define GCR 0x00 // (SIR0) General Configuration Register
111#define BSR 0x02 // (SIR1) Bank Switching Select Register 111#define BSR 0x02 // (SIR1) Bank Switching Select Register
112#define AR 0x04 112#define AR 0x04
113#define DR 0x08 113#define DR 0x08
114#define MR1 0x12 // Mirror Register 1 114#define MR1 0x12 // Mirror Register 1
115#define MR2 0x14 // Mirror Register 2 115#define MR2 0x14 // Mirror Register 2
116#define MR3 0x16 // Mirror Register 3 116#define MR3 0x16 // Mirror Register 3
117#define MR4 0x18 // Mirror Register 4 117#define MR4 0x18 // Mirror Register 4
118 118
119#define GPR1 0x0c 119#define GPR1 0x0c
120#define GPR2 0x0e 120#define GPR2 0x0e
@@ -123,9 +123,9 @@ static struct {
123// Constants for the GCR register. 123// Constants for the GCR register.
124// 124//
125#define GCR_REMAP 0x0400 // Remap internal SRAM to 0 125#define GCR_REMAP 0x0400 // Remap internal SRAM to 0
126#define GCR_SWRES 0x0080 // BIU reset (ARM and PAI are NOT reset) 126#define GCR_SWRES 0x0080 // BIU reset (ARM and PAI are NOT reset)
127#define GCR_CORES 0x0060 // Core Reset (ARM and PAI are reset) 127#define GCR_CORES 0x0060 // Core Reset (ARM and PAI are reset)
128#define GCR_ENINT 0x0002 // Enable Interrupts 128#define GCR_ENINT 0x0002 // Enable Interrupts
129#define GCR_ACKINT 0x0008 // Acknowledge Interrupts 129#define GCR_ACKINT 0x0008 // Acknowledge Interrupts
130 130
131#define BSS_SRAM 0x0200 // AMBA module selection --> SRAM 131#define BSS_SRAM 0x0200 // AMBA module selection --> SRAM
@@ -190,7 +190,7 @@ struct rx_desc {
190 u32 Next; 190 u32 Next;
191 u16 MsduPos; 191 u16 MsduPos;
192 u16 MsduSize; 192 u16 MsduSize;
193 193
194 u8 State; 194 u8 State;
195 u8 Status; 195 u8 Status;
196 u8 Rate; 196 u8 Rate;
@@ -199,7 +199,6 @@ struct rx_desc {
199 u8 PreambleType; 199 u8 PreambleType;
200 u16 Duration; 200 u16 Duration;
201 u32 RxTime; 201 u32 RxTime;
202
203}; 202};
204 203
205#define RX_DESC_FLAG_VALID 0x80 204#define RX_DESC_FLAG_VALID 0x80
@@ -218,16 +217,15 @@ struct rx_desc {
218#define RX_DESC_DURATION_OFFSET 14 217#define RX_DESC_DURATION_OFFSET 14
219#define RX_DESC_RX_TIME_OFFSET 16 218#define RX_DESC_RX_TIME_OFFSET 16
220 219
221
222struct tx_desc { 220struct tx_desc {
223 u32 NextDescriptor; 221 u32 NextDescriptor;
224 u16 TxStartOfFrame; 222 u16 TxStartOfFrame;
225 u16 TxLength; 223 u16 TxLength;
226 224
227 u8 TxState; 225 u8 TxState;
228 u8 TxStatus; 226 u8 TxStatus;
229 u8 RetryCount; 227 u8 RetryCount;
230 228
231 u8 TxRate; 229 u8 TxRate;
232 230
233 u8 KeyIndex; 231 u8 KeyIndex;
@@ -238,10 +236,8 @@ struct tx_desc {
238 u8 Reserved; 236 u8 Reserved;
239 u8 PacketType; 237 u8 PacketType;
240 u16 HostTxLength; 238 u16 HostTxLength;
241
242}; 239};
243 240
244
245#define TX_DESC_NEXT_OFFSET 0 241#define TX_DESC_NEXT_OFFSET 0
246#define TX_DESC_POS_OFFSET 4 242#define TX_DESC_POS_OFFSET 4
247#define TX_DESC_SIZE_OFFSET 6 243#define TX_DESC_SIZE_OFFSET 6
@@ -255,8 +251,6 @@ struct tx_desc {
255#define TX_DESC_PACKET_TYPE_OFFSET 17 251#define TX_DESC_PACKET_TYPE_OFFSET 17
256#define TX_DESC_HOST_LENGTH_OFFSET 18 252#define TX_DESC_HOST_LENGTH_OFFSET 18
257 253
258
259
260/////////////////////////////////////////////////////// 254///////////////////////////////////////////////////////
261// Host-MAC interface 255// Host-MAC interface
262/////////////////////////////////////////////////////// 256///////////////////////////////////////////////////////
@@ -266,7 +260,6 @@ struct tx_desc {
266#define TX_FIRM_OWN 0x80 260#define TX_FIRM_OWN 0x80
267#define TX_DONE 0x40 261#define TX_DONE 0x40
268 262
269
270#define TX_ERROR 0x01 263#define TX_ERROR 0x01
271 264
272#define TX_PACKET_TYPE_DATA 0x01 265#define TX_PACKET_TYPE_DATA 0x01
@@ -280,8 +273,7 @@ struct tx_desc {
280#define ISR_COMMAND_COMPLETE 0x10 // command completed 273#define ISR_COMMAND_COMPLETE 0x10 // command completed
281#define ISR_OUT_OF_RANGE 0x20 // command completed 274#define ISR_OUT_OF_RANGE 0x20 // command completed
282#define ISR_IBSS_MERGE 0x40 // (4.1.2.30): IBSS merge 275#define ISR_IBSS_MERGE 0x40 // (4.1.2.30): IBSS merge
283#define ISR_GENERIC_IRQ 0x80 276#define ISR_GENERIC_IRQ 0x80
284
285 277
286#define Local_Mib_Type 0x01 278#define Local_Mib_Type 0x01
287#define Mac_Address_Mib_Type 0x02 279#define Mac_Address_Mib_Type 0x02
@@ -317,7 +309,6 @@ struct tx_desc {
317#define LOCAL_MIB_PREAMBLE_TYPE 9 309#define LOCAL_MIB_PREAMBLE_TYPE 9
318#define MAC_ADDR_MIB_MAC_ADDR_POS 0 310#define MAC_ADDR_MIB_MAC_ADDR_POS 0
319 311
320
321#define CMD_Set_MIB_Vars 0x01 312#define CMD_Set_MIB_Vars 0x01
322#define CMD_Get_MIB_Vars 0x02 313#define CMD_Get_MIB_Vars 0x02
323#define CMD_Scan 0x03 314#define CMD_Scan 0x03
@@ -338,7 +329,6 @@ struct tx_desc {
338#define CMD_STATUS_HOST_ERROR 0xFF 329#define CMD_STATUS_HOST_ERROR 0xFF
339#define CMD_STATUS_BUSY 0xFE 330#define CMD_STATUS_BUSY 0xFE
340 331
341
342#define CMD_BLOCK_COMMAND_OFFSET 0 332#define CMD_BLOCK_COMMAND_OFFSET 0
343#define CMD_BLOCK_STATUS_OFFSET 1 333#define CMD_BLOCK_STATUS_OFFSET 1
344#define CMD_BLOCK_PARAMETERS_OFFSET 4 334#define CMD_BLOCK_PARAMETERS_OFFSET 4
@@ -347,15 +337,15 @@ struct tx_desc {
347 337
348#define MGMT_FRAME_BODY_OFFSET 24 338#define MGMT_FRAME_BODY_OFFSET 24
349#define MAX_AUTHENTICATION_RETRIES 3 339#define MAX_AUTHENTICATION_RETRIES 3
350#define MAX_ASSOCIATION_RETRIES 3 340#define MAX_ASSOCIATION_RETRIES 3
351 341
352#define AUTHENTICATION_RESPONSE_TIME_OUT 1000 342#define AUTHENTICATION_RESPONSE_TIME_OUT 1000
353 343
354#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */ 344#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */
355#define LOOP_RETRY_LIMIT 500000 345#define LOOP_RETRY_LIMIT 500000
356 346
357#define ACTIVE_MODE 1 347#define ACTIVE_MODE 1
358#define PS_MODE 2 348#define PS_MODE 2
359 349
360#define MAX_ENCRYPTION_KEYS 4 350#define MAX_ENCRYPTION_KEYS 4
361#define MAX_ENCRYPTION_KEY_SIZE 40 351#define MAX_ENCRYPTION_KEY_SIZE 40
@@ -377,7 +367,7 @@ struct tx_desc {
377#define REG_DOMAIN_MKK1 0x41 //Channel 1-14 Japan(MKK1) 367#define REG_DOMAIN_MKK1 0x41 //Channel 1-14 Japan(MKK1)
378#define REG_DOMAIN_ISRAEL 0x50 //Channel 3-9 ISRAEL 368#define REG_DOMAIN_ISRAEL 0x50 //Channel 3-9 ISRAEL
379 369
380#define BSS_TYPE_AD_HOC 1 370#define BSS_TYPE_AD_HOC 1
381#define BSS_TYPE_INFRASTRUCTURE 2 371#define BSS_TYPE_INFRASTRUCTURE 2
382 372
383#define SCAN_TYPE_ACTIVE 0 373#define SCAN_TYPE_ACTIVE 0
@@ -389,7 +379,7 @@ struct tx_desc {
389 379
390#define DATA_FRAME_WS_HEADER_SIZE 30 380#define DATA_FRAME_WS_HEADER_SIZE 30
391 381
392/* promiscuous mode control */ 382/* promiscuous mode control */
393#define PROM_MODE_OFF 0x0 383#define PROM_MODE_OFF 0x0
394#define PROM_MODE_UNKNOWN 0x1 384#define PROM_MODE_UNKNOWN 0x1
395#define PROM_MODE_CRC_FAILED 0x2 385#define PROM_MODE_CRC_FAILED 0x2
@@ -398,8 +388,7 @@ struct tx_desc {
398#define PROM_MODE_CTRL 0x10 388#define PROM_MODE_CTRL 0x10
399#define PROM_MODE_BAD_PROTOCOL 0x20 389#define PROM_MODE_BAD_PROTOCOL 0x20
400 390
401 391#define IFACE_INT_STATUS_OFFSET 0
402#define IFACE_INT_STATUS_OFFSET 0
403#define IFACE_INT_MASK_OFFSET 1 392#define IFACE_INT_MASK_OFFSET 1
404#define IFACE_LOCKOUT_HOST_OFFSET 2 393#define IFACE_LOCKOUT_HOST_OFFSET 2
405#define IFACE_LOCKOUT_MAC_OFFSET 3 394#define IFACE_LOCKOUT_MAC_OFFSET 3
@@ -407,7 +396,7 @@ struct tx_desc {
407#define IFACE_MAC_STAT_OFFSET 30 396#define IFACE_MAC_STAT_OFFSET 30
408#define IFACE_GENERIC_INT_TYPE_OFFSET 32 397#define IFACE_GENERIC_INT_TYPE_OFFSET 32
409 398
410#define CIPHER_SUITE_NONE 0 399#define CIPHER_SUITE_NONE 0
411#define CIPHER_SUITE_WEP_64 1 400#define CIPHER_SUITE_WEP_64 1
412#define CIPHER_SUITE_TKIP 2 401#define CIPHER_SUITE_TKIP 2
413#define CIPHER_SUITE_AES 3 402#define CIPHER_SUITE_AES 3
@@ -419,11 +408,11 @@ struct tx_desc {
419// 408//
420// 409//
421 410
422// FuncCtrl field: 411// FuncCtrl field:
423// 412//
424#define FUNC_CTRL_TxENABLE 0x10 413#define FUNC_CTRL_TxENABLE 0x10
425#define FUNC_CTRL_RxENABLE 0x20 414#define FUNC_CTRL_RxENABLE 0x20
426#define FUNC_CTRL_INIT_COMPLETE 0x01 415#define FUNC_CTRL_INIT_COMPLETE 0x01
427 416
428/* A stub firmware image which reads the MAC address from NVRAM on the card. 417/* A stub firmware image which reads the MAC address from NVRAM on the card.
429 For copyright information and source see the end of this file. */ 418 For copyright information and source see the end of this file. */
@@ -486,10 +475,10 @@ struct atmel_private {
486 struct net_device_stats stats; // device stats 475 struct net_device_stats stats; // device stats
487 spinlock_t irqlock, timerlock; // spinlocks 476 spinlock_t irqlock, timerlock; // spinlocks
488 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type; 477 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
489 enum { 478 enum {
490 CARD_TYPE_PARALLEL_FLASH, 479 CARD_TYPE_PARALLEL_FLASH,
491 CARD_TYPE_SPI_FLASH, 480 CARD_TYPE_SPI_FLASH,
492 CARD_TYPE_EEPROM 481 CARD_TYPE_EEPROM
493 } card_type; 482 } card_type;
494 int do_rx_crc; /* If we need to CRC incoming packets */ 483 int do_rx_crc; /* If we need to CRC incoming packets */
495 int probe_crc; /* set if we don't yet know */ 484 int probe_crc; /* set if we don't yet know */
@@ -497,18 +486,18 @@ struct atmel_private {
497 u16 rx_desc_head; 486 u16 rx_desc_head;
498 u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous; 487 u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous;
499 u16 tx_free_mem, tx_buff_head, tx_buff_tail; 488 u16 tx_free_mem, tx_buff_head, tx_buff_tail;
500 489
501 u16 frag_seq, frag_len, frag_no; 490 u16 frag_seq, frag_len, frag_no;
502 u8 frag_source[6]; 491 u8 frag_source[6];
503 492
504 u8 wep_is_on, default_key, exclude_unencrypted, encryption_level; 493 u8 wep_is_on, default_key, exclude_unencrypted, encryption_level;
505 u8 group_cipher_suite, pairwise_cipher_suite; 494 u8 group_cipher_suite, pairwise_cipher_suite;
506 u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE]; 495 u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
507 int wep_key_len[MAX_ENCRYPTION_KEYS]; 496 int wep_key_len[MAX_ENCRYPTION_KEYS];
508 int use_wpa, radio_on_broken; /* firmware dependent stuff. */ 497 int use_wpa, radio_on_broken; /* firmware dependent stuff. */
509 498
510 u16 host_info_base; 499 u16 host_info_base;
511 struct host_info_struct { 500 struct host_info_struct {
512 /* NB this is matched to the hardware, don't change. */ 501 /* NB this is matched to the hardware, don't change. */
513 u8 volatile int_status; 502 u8 volatile int_status;
514 u8 volatile int_mask; 503 u8 volatile int_mask;
@@ -524,20 +513,20 @@ struct atmel_private {
524 u16 rx_buff_size; 513 u16 rx_buff_size;
525 u16 rx_desc_pos; 514 u16 rx_desc_pos;
526 u16 rx_desc_count; 515 u16 rx_desc_count;
527 516
528 u16 build_version; 517 u16 build_version;
529 u16 command_pos; 518 u16 command_pos;
530 519
531 u16 major_version; 520 u16 major_version;
532 u16 minor_version; 521 u16 minor_version;
533 522
534 u16 func_ctrl; 523 u16 func_ctrl;
535 u16 mac_status; 524 u16 mac_status;
536 u16 generic_IRQ_type; 525 u16 generic_IRQ_type;
537 u8 reserved[2]; 526 u8 reserved[2];
538 } host_info; 527 } host_info;
539 528
540 enum { 529 enum {
541 STATION_STATE_SCANNING, 530 STATION_STATE_SCANNING,
542 STATION_STATE_JOINNING, 531 STATION_STATE_JOINNING,
543 STATION_STATE_AUTHENTICATING, 532 STATION_STATE_AUTHENTICATING,
@@ -547,7 +536,7 @@ struct atmel_private {
547 STATION_STATE_DOWN, 536 STATION_STATE_DOWN,
548 STATION_STATE_MGMT_ERROR 537 STATION_STATE_MGMT_ERROR
549 } station_state; 538 } station_state;
550 539
551 int operating_mode, power_mode; 540 int operating_mode, power_mode;
552 time_t last_qual; 541 time_t last_qual;
553 int beacons_this_sec; 542 int beacons_this_sec;
@@ -560,18 +549,18 @@ struct atmel_private {
560 int long_retry, short_retry; 549 int long_retry, short_retry;
561 int preamble; 550 int preamble;
562 int default_beacon_period, beacon_period, listen_interval; 551 int default_beacon_period, beacon_period, listen_interval;
563 int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum; 552 int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum;
564 int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt; 553 int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt;
565 enum { 554 enum {
566 SITE_SURVEY_IDLE, 555 SITE_SURVEY_IDLE,
567 SITE_SURVEY_IN_PROGRESS, 556 SITE_SURVEY_IN_PROGRESS,
568 SITE_SURVEY_COMPLETED 557 SITE_SURVEY_COMPLETED
569 } site_survey_state; 558 } site_survey_state;
570 time_t last_survey; 559 time_t last_survey;
571 560
572 int station_was_associated, station_is_associated; 561 int station_was_associated, station_is_associated;
573 int fast_scan; 562 int fast_scan;
574 563
575 struct bss_info { 564 struct bss_info {
576 int channel; 565 int channel;
577 int SSIDsize; 566 int SSIDsize;
@@ -584,13 +573,12 @@ struct atmel_private {
584 u8 SSID[MAX_SSID_LENGTH]; 573 u8 SSID[MAX_SSID_LENGTH];
585 } BSSinfo[MAX_BSS_ENTRIES]; 574 } BSSinfo[MAX_BSS_ENTRIES];
586 int BSS_list_entries, current_BSS; 575 int BSS_list_entries, current_BSS;
587 int connect_to_any_BSS; 576 int connect_to_any_BSS;
588 int SSID_size, new_SSID_size; 577 int SSID_size, new_SSID_size;
589 u8 CurrentBSSID[6], BSSID[6]; 578 u8 CurrentBSSID[6], BSSID[6];
590 u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH]; 579 u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH];
591 u64 last_beacon_timestamp; 580 u64 last_beacon_timestamp;
592 u8 rx_buf[MAX_WIRELESS_BODY]; 581 u8 rx_buf[MAX_WIRELESS_BODY];
593
594}; 582};
595 583
596static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16}; 584static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16};
@@ -598,39 +586,49 @@ static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16};
598static const struct { 586static const struct {
599 int reg_domain; 587 int reg_domain;
600 int min, max; 588 int min, max;
601 char *name; 589 char *name;
602} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" }, 590} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" },
603 { REG_DOMAIN_DOC, 1, 11, "Canada" }, 591 { REG_DOMAIN_DOC, 1, 11, "Canada" },
604 { REG_DOMAIN_ETSI, 1, 13, "Europe" }, 592 { REG_DOMAIN_ETSI, 1, 13, "Europe" },
605 { REG_DOMAIN_SPAIN, 10, 11, "Spain" }, 593 { REG_DOMAIN_SPAIN, 10, 11, "Spain" },
606 { REG_DOMAIN_FRANCE, 10, 13, "France" }, 594 { REG_DOMAIN_FRANCE, 10, 13, "France" },
607 { REG_DOMAIN_MKK, 14, 14, "MKK" }, 595 { REG_DOMAIN_MKK, 14, 14, "MKK" },
608 { REG_DOMAIN_MKK1, 1, 14, "MKK1" }, 596 { REG_DOMAIN_MKK1, 1, 14, "MKK1" },
609 { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} }; 597 { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} };
610 598
611static void build_wpa_mib(struct atmel_private *priv); 599static void build_wpa_mib(struct atmel_private *priv);
612static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 600static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
613static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len); 601static void atmel_copy_to_card(struct net_device *dev, u16 dest,
614static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len); 602 unsigned char *src, u16 len);
603static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
604 u16 src, u16 len);
615static void atmel_set_gcr(struct net_device *dev, u16 mask); 605static void atmel_set_gcr(struct net_device *dev, u16 mask);
616static void atmel_clear_gcr(struct net_device *dev, u16 mask); 606static void atmel_clear_gcr(struct net_device *dev, u16 mask);
617static int atmel_lock_mac(struct atmel_private *priv); 607static int atmel_lock_mac(struct atmel_private *priv);
618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); 608static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
619static void atmel_command_irq(struct atmel_private *priv); 609static void atmel_command_irq(struct atmel_private *priv);
620static int atmel_validate_channel(struct atmel_private *priv, int channel); 610static int atmel_validate_channel(struct atmel_private *priv, int channel);
621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 611static void atmel_management_frame(struct atmel_private *priv,
612 struct ieee80211_hdr_4addr *header,
622 u16 frame_len, u8 rssi); 613 u16 frame_len, u8 rssi);
623static void atmel_management_timer(u_long a); 614static void atmel_management_timer(u_long a);
624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); 615static void atmel_send_command(struct atmel_private *priv, int command,
625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); 616 void *cmd, int cmd_size);
626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 617static int atmel_send_command_wait(struct atmel_private *priv, int command,
618 void *cmd, int cmd_size);
619static void atmel_transmit_management_frame(struct atmel_private *priv,
620 struct ieee80211_hdr_4addr *header,
627 u8 *body, int body_len); 621 u8 *body, int body_len);
628 622
629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 623static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
630static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data); 624static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index,
631static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data); 625 u8 data);
632static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len); 626static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
633static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len); 627 u16 data);
628static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
629 u8 *data, int data_len);
630static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
631 u8 *data, int data_len);
634static void atmel_scan(struct atmel_private *priv, int specific_ssid); 632static void atmel_scan(struct atmel_private *priv, int specific_ssid);
635static void atmel_join_bss(struct atmel_private *priv, int bss_index); 633static void atmel_join_bss(struct atmel_private *priv, int bss_index);
636static void atmel_smooth_qual(struct atmel_private *priv); 634static void atmel_smooth_qual(struct atmel_private *priv);
@@ -650,12 +648,12 @@ static inline u16 atmel_co(struct atmel_private *priv, u16 offset)
650 return priv->host_info.command_pos + offset; 648 return priv->host_info.command_pos + offset;
651} 649}
652 650
653static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc) 651static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc)
654{ 652{
655 return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset; 653 return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset;
656} 654}
657 655
658static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc) 656static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc)
659{ 657{
660 return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset; 658 return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset;
661} 659}
@@ -682,25 +680,25 @@ static inline void atmel_write16(struct net_device *dev, u16 offset, u16 data)
682 680
683static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos) 681static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos)
684{ 682{
685 atmel_writeAR(priv->dev, pos); 683 atmel_writeAR(priv->dev, pos);
686 return atmel_read8(priv->dev, DR); 684 return atmel_read8(priv->dev, DR);
687} 685}
688 686
689static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data) 687static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data)
690{ 688{
691 atmel_writeAR(priv->dev, pos); 689 atmel_writeAR(priv->dev, pos);
692 atmel_write8(priv->dev, DR, data); 690 atmel_write8(priv->dev, DR, data);
693} 691}
694 692
695static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos) 693static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos)
696{ 694{
697 atmel_writeAR(priv->dev, pos); 695 atmel_writeAR(priv->dev, pos);
698 return atmel_read16(priv->dev, DR); 696 return atmel_read16(priv->dev, DR);
699} 697}
700 698
701static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data) 699static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data)
702{ 700{
703 atmel_writeAR(priv->dev, pos); 701 atmel_writeAR(priv->dev, pos);
704 atmel_write16(priv->dev, DR, data); 702 atmel_write16(priv->dev, DR, data);
705} 703}
706 704
@@ -710,11 +708,10 @@ static void tx_done_irq(struct atmel_private *priv)
710{ 708{
711 int i; 709 int i;
712 710
713 for (i = 0; 711 for (i = 0;
714 atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE && 712 atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE &&
715 i < priv->host_info.tx_desc_count; 713 i < priv->host_info.tx_desc_count;
716 i++) { 714 i++) {
717
718 u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head)); 715 u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head));
719 u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head)); 716 u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head));
720 u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head)); 717 u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head));
@@ -728,16 +725,16 @@ static void tx_done_irq(struct atmel_private *priv)
728 priv->tx_buff_head = 0; 725 priv->tx_buff_head = 0;
729 else 726 else
730 priv->tx_buff_head += msdu_size; 727 priv->tx_buff_head += msdu_size;
731 728
732 if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1)) 729 if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1))
733 priv->tx_desc_head++ ; 730 priv->tx_desc_head++ ;
734 else 731 else
735 priv->tx_desc_head = 0; 732 priv->tx_desc_head = 0;
736 733
737 if (type == TX_PACKET_TYPE_DATA) { 734 if (type == TX_PACKET_TYPE_DATA) {
738 if (status == TX_STATUS_SUCCESS) 735 if (status == TX_STATUS_SUCCESS)
739 priv->stats.tx_packets++; 736 priv->stats.tx_packets++;
740 else 737 else
741 priv->stats.tx_errors++; 738 priv->stats.tx_errors++;
742 netif_wake_queue(priv->dev); 739 netif_wake_queue(priv->dev);
743 } 740 }
@@ -748,21 +745,22 @@ static u16 find_tx_buff(struct atmel_private *priv, u16 len)
748{ 745{
749 u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail; 746 u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail;
750 747
751 if (priv->tx_desc_free == 3 || priv->tx_free_mem < len) 748 if (priv->tx_desc_free == 3 || priv->tx_free_mem < len)
752 return 0; 749 return 0;
753 750
754 if (bottom_free >= len) 751 if (bottom_free >= len)
755 return priv->host_info.tx_buff_pos + priv->tx_buff_tail; 752 return priv->host_info.tx_buff_pos + priv->tx_buff_tail;
756 753
757 if (priv->tx_free_mem - bottom_free >= len) { 754 if (priv->tx_free_mem - bottom_free >= len) {
758 priv->tx_buff_tail = 0; 755 priv->tx_buff_tail = 0;
759 return priv->host_info.tx_buff_pos; 756 return priv->host_info.tx_buff_pos;
760 } 757 }
761 758
762 return 0; 759 return 0;
763} 760}
764 761
765static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 len, u16 buff, u8 type) 762static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
763 u16 len, u16 buff, u8 type)
766{ 764{
767 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff); 765 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff);
768 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len); 766 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len);
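[Editor's note] The hunk above only reflows find_tx_buff(), but its allocation policy is worth spelling out: space comes from a single circular transmit buffer, first from the region after the tail, and if the request does not fit before the end it wraps to offset 0 provided enough total memory is free. A self-contained sketch of that policy; buffer size and requests are arbitrary, and the descriptor/head accounting the driver does elsewhere is folded into the allocator here for brevity:

#include <stdio.h>
#include <stdint.h>

#define TX_BUFF_SIZE 4096u

static uint16_t tx_buff_tail;                /* next free offset */
static uint16_t tx_free_mem = TX_BUFF_SIZE;  /* total bytes still free */

/* Returns the offset for a frame of 'len' bytes, or -1 if there is no room.
 * Mirrors the shape of the driver's logic, not its return conventions. */
static int find_tx_buff(uint16_t len)
{
        uint16_t bottom_free = TX_BUFF_SIZE - tx_buff_tail;

        if (tx_free_mem < len)
                return -1;                   /* not enough space at all */

        if (bottom_free >= len) {            /* fits before the end */
                int off = tx_buff_tail;
                tx_buff_tail += len;
                tx_free_mem -= len;
                return off;
        }

        if (tx_free_mem - bottom_free >= len) {  /* fits after wrapping, once
                                                  * completed frames have freed
                                                  * the front of the buffer */
                tx_buff_tail = len;
                tx_free_mem -= len;
                return 0;
        }

        return -1;
}

int main(void)
{
        printf("%d %d %d\n", find_tx_buff(3000), find_tx_buff(2000), find_tx_buff(900));
        return 0;
}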
@@ -775,8 +773,8 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
775 int cipher_type, cipher_length; 773 int cipher_type, cipher_length;
776 if (is_bcast) { 774 if (is_bcast) {
777 cipher_type = priv->group_cipher_suite; 775 cipher_type = priv->group_cipher_suite;
778 if (cipher_type == CIPHER_SUITE_WEP_64 || 776 if (cipher_type == CIPHER_SUITE_WEP_64 ||
779 cipher_type == CIPHER_SUITE_WEP_128 ) 777 cipher_type == CIPHER_SUITE_WEP_128)
780 cipher_length = 8; 778 cipher_length = 8;
781 else if (cipher_type == CIPHER_SUITE_TKIP) 779 else if (cipher_type == CIPHER_SUITE_TKIP)
782 cipher_length = 12; 780 cipher_length = 12;
@@ -790,8 +788,8 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
790 } 788 }
791 } else { 789 } else {
792 cipher_type = priv->pairwise_cipher_suite; 790 cipher_type = priv->pairwise_cipher_suite;
793 if (cipher_type == CIPHER_SUITE_WEP_64 || 791 if (cipher_type == CIPHER_SUITE_WEP_64 ||
794 cipher_type == CIPHER_SUITE_WEP_128 ) 792 cipher_type == CIPHER_SUITE_WEP_128)
795 cipher_length = 8; 793 cipher_length = 8;
796 else if (cipher_type == CIPHER_SUITE_TKIP) 794 else if (cipher_type == CIPHER_SUITE_TKIP)
797 cipher_length = 12; 795 cipher_length = 12;
@@ -804,9 +802,9 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
804 cipher_length = 0; 802 cipher_length = 0;
805 } 803 }
806 } 804 }
807 805
808 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail), 806 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail),
809 cipher_type); 807 cipher_type);
810 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail), 808 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail),
811 cipher_length); 809 cipher_length);
812 } 810 }
@@ -815,46 +813,46 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
815 if (priv->tx_desc_previous != priv->tx_desc_tail) 813 if (priv->tx_desc_previous != priv->tx_desc_tail)
816 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0); 814 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0);
817 priv->tx_desc_previous = priv->tx_desc_tail; 815 priv->tx_desc_previous = priv->tx_desc_tail;
818 if (priv->tx_desc_tail < (priv->host_info.tx_desc_count -1 )) 816 if (priv->tx_desc_tail < (priv->host_info.tx_desc_count - 1))
819 priv->tx_desc_tail++; 817 priv->tx_desc_tail++;
820 else 818 else
821 priv->tx_desc_tail = 0; 819 priv->tx_desc_tail = 0;
822 priv->tx_desc_free--; 820 priv->tx_desc_free--;
823 priv->tx_free_mem -= len; 821 priv->tx_free_mem -= len;
824
825} 822}
826 823
827static int start_tx (struct sk_buff *skb, struct net_device *dev) 824static int start_tx(struct sk_buff *skb, struct net_device *dev)
828{ 825{
829 struct atmel_private *priv = netdev_priv(dev); 826 struct atmel_private *priv = netdev_priv(dev);
830 struct ieee80211_hdr_4addr header; 827 struct ieee80211_hdr_4addr header;
831 unsigned long flags; 828 unsigned long flags;
832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 829 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; 830 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
834 831
835 if (priv->card && priv->present_callback && 832 if (priv->card && priv->present_callback &&
836 !(*priv->present_callback)(priv->card)) { 833 !(*priv->present_callback)(priv->card)) {
837 priv->stats.tx_errors++; 834 priv->stats.tx_errors++;
838 dev_kfree_skb(skb); 835 dev_kfree_skb(skb);
839 return 0; 836 return 0;
840 } 837 }
841 838
842 if (priv->station_state != STATION_STATE_READY) { 839 if (priv->station_state != STATION_STATE_READY) {
843 priv->stats.tx_errors++; 840 priv->stats.tx_errors++;
844 dev_kfree_skb(skb); 841 dev_kfree_skb(skb);
845 return 0; 842 return 0;
846 } 843 }
847 844
848 /* first ensure the timer func cannot run */ 845 /* first ensure the timer func cannot run */
849 spin_lock_bh(&priv->timerlock); 846 spin_lock_bh(&priv->timerlock);
850 /* then stop the hardware ISR */ 847 /* then stop the hardware ISR */
851 spin_lock_irqsave(&priv->irqlock, flags); 848 spin_lock_irqsave(&priv->irqlock, flags);
852 /* nb doing the above in the opposite order will deadlock */ 849 /* nb doing the above in the opposite order will deadlock */
853 850
854 /* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the 851 /* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the
855 12 first bytes (containing DA/SA) and put them in the appropriate fields of 852 12 first bytes (containing DA/SA) and put them in the appropriate
856 the Wireless Header. Thus the packet length is then the initial + 18 (+30-12) */ 853 fields of the Wireless Header. Thus the packet length is then the
857 854 initial + 18 (+30-12) */
855
858 if (!(buff = find_tx_buff(priv, len + 18))) { 856 if (!(buff = find_tx_buff(priv, len + 18))) {
859 priv->stats.tx_dropped++; 857 priv->stats.tx_dropped++;
860 spin_unlock_irqrestore(&priv->irqlock, flags); 858 spin_unlock_irqrestore(&priv->irqlock, flags);
@@ -862,7 +860,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
862 netif_stop_queue(dev); 860 netif_stop_queue(dev);
863 return 1; 861 return 1;
864 } 862 }
865 863
866 frame_ctl = IEEE80211_FTYPE_DATA; 864 frame_ctl = IEEE80211_FTYPE_DATA;
867 header.duration_id = 0; 865 header.duration_id = 0;
868 header.seq_ctl = 0; 866 header.seq_ctl = 0;
@@ -878,7 +876,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
878 memcpy(&header.addr2, dev->dev_addr, 6); 876 memcpy(&header.addr2, dev->dev_addr, 6);
879 memcpy(&header.addr3, skb->data, 6); 877 memcpy(&header.addr3, skb->data, 6);
880 } 878 }
881 879
882 if (priv->use_wpa) 880 if (priv->use_wpa)
883 memcpy(&header.addr4, SNAP_RFC1024, 6); 881 memcpy(&header.addr4, SNAP_RFC1024, 6);
884 882
@@ -888,27 +886,27 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
888 /* Copy the packet sans its 802.3 header addresses which have been replaced */ 886 /* Copy the packet sans its 802.3 header addresses which have been replaced */
889 atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12); 887 atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
890 priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE; 888 priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE;
891 889
892 /* low bit of first byte of destination tells us if broadcast */ 890 /* low bit of first byte of destination tells us if broadcast */
893 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA); 891 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
894 dev->trans_start = jiffies; 892 dev->trans_start = jiffies;
895 priv->stats.tx_bytes += len; 893 priv->stats.tx_bytes += len;
896 894
897 spin_unlock_irqrestore(&priv->irqlock, flags); 895 spin_unlock_irqrestore(&priv->irqlock, flags);
898 spin_unlock_bh(&priv->timerlock); 896 spin_unlock_bh(&priv->timerlock);
899 dev_kfree_skb(skb); 897 dev_kfree_skb(skb);
900 898
901 return 0; 899 return 0;
902} 900}
903 901
904static void atmel_transmit_management_frame(struct atmel_private *priv, 902static void atmel_transmit_management_frame(struct atmel_private *priv,
905 struct ieee80211_hdr_4addr *header, 903 struct ieee80211_hdr_4addr *header,
906 u8 *body, int body_len) 904 u8 *body, int body_len)
907{ 905{
908 u16 buff; 906 u16 buff;
909 int len = MGMT_FRAME_BODY_OFFSET + body_len; 907 int len = MGMT_FRAME_BODY_OFFSET + body_len;
910 908
911 if (!(buff = find_tx_buff(priv, len))) 909 if (!(buff = find_tx_buff(priv, len)))
912 return; 910 return;
913 911
914 atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET); 912 atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET);
@@ -916,24 +914,25 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
916 priv->tx_buff_tail += len; 914 priv->tx_buff_tail += len;
917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT); 915 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
918} 916}
919 917
920static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 918static void fast_rx_path(struct atmel_private *priv,
919 struct ieee80211_hdr_4addr *header,
921 u16 msdu_size, u16 rx_packet_loc, u32 crc) 920 u16 msdu_size, u16 rx_packet_loc, u32 crc)
922{ 921{
923 /* fast path: unfragmented packet copy directly into skbuf */ 922 /* fast path: unfragmented packet copy directly into skbuf */
924 u8 mac4[6]; 923 u8 mac4[6];
925 struct sk_buff *skb; 924 struct sk_buff *skb;
926 unsigned char *skbp; 925 unsigned char *skbp;
927 926
928 /* get the final, mac 4 header field, this tells us encapsulation */ 927 /* get the final, mac 4 header field, this tells us encapsulation */
929 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6); 928 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6);
930 msdu_size -= 6; 929 msdu_size -= 6;
931 930
932 if (priv->do_rx_crc) { 931 if (priv->do_rx_crc) {
933 crc = crc32_le(crc, mac4, 6); 932 crc = crc32_le(crc, mac4, 6);
934 msdu_size -= 4; 933 msdu_size -= 4;
935 } 934 }
936 935
937 if (!(skb = dev_alloc_skb(msdu_size + 14))) { 936 if (!(skb = dev_alloc_skb(msdu_size + 14))) {
938 priv->stats.rx_dropped++; 937 priv->stats.rx_dropped++;
939 return; 938 return;
@@ -942,7 +941,7 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
942 skb_reserve(skb, 2); 941 skb_reserve(skb, 2);
943 skbp = skb_put(skb, msdu_size + 12); 942 skbp = skb_put(skb, msdu_size + 12);
944 atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size); 943 atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size);
945 944
946 if (priv->do_rx_crc) { 945 if (priv->do_rx_crc) {
947 u32 netcrc; 946 u32 netcrc;
948 crc = crc32_le(crc, skbp + 12, msdu_size); 947 crc = crc32_le(crc, skbp + 12, msdu_size);
@@ -953,24 +952,25 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
953 return; 952 return;
954 } 953 }
955 } 954 }
956 955
957 memcpy(skbp, header->addr1, 6); /* destination address */ 956 memcpy(skbp, header->addr1, 6); /* destination address */
958 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 957 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
959 memcpy(&skbp[6], header->addr3, 6); 958 memcpy(&skbp[6], header->addr3, 6);
960 else 959 else
961 memcpy(&skbp[6], header->addr2, 6); /* source address */ 960 memcpy(&skbp[6], header->addr2, 6); /* source address */
962 961
963 priv->dev->last_rx=jiffies; 962 priv->dev->last_rx = jiffies;
964 skb->dev = priv->dev; 963 skb->dev = priv->dev;
965 skb->protocol = eth_type_trans(skb, priv->dev); 964 skb->protocol = eth_type_trans(skb, priv->dev);
966 skb->ip_summed = CHECKSUM_NONE; 965 skb->ip_summed = CHECKSUM_NONE;
967 netif_rx(skb); 966 netif_rx(skb);
968 priv->stats.rx_bytes += 12 + msdu_size; 967 priv->stats.rx_bytes += 12 + msdu_size;
969 priv->stats.rx_packets++; 968 priv->stats.rx_packets++;
970} 969}
971 970
972/* Test to see if the packet in card memory at packet_loc has a valid CRC 971/* Test to see if the packet in card memory at packet_loc has a valid CRC
973 It doesn't matter that this is slow: it is only used to proble the first few packets. */ 972 It doesn't matter that this is slow: it is only used to proble the first few
973 packets. */
974static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size) 974static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
975{ 975{
976 int i = msdu_size - 4; 976 int i = msdu_size - 4;
@@ -980,7 +980,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
980 return 0; 980 return 0;
981 981
982 atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4); 982 atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4);
983 983
984 atmel_writeAR(priv->dev, packet_loc); 984 atmel_writeAR(priv->dev, packet_loc);
985 while (i--) { 985 while (i--) {
986 u8 octet = atmel_read8(priv->dev, DR); 986 u8 octet = atmel_read8(priv->dev, DR);
@@ -990,20 +990,22 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
990 return (crc ^ 0xffffffff) == netcrc; 990 return (crc ^ 0xffffffff) == netcrc;
991} 991}
992 992
993static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 993static void frag_rx_path(struct atmel_private *priv,
994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags) 994 struct ieee80211_hdr_4addr *header,
995 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
996 u8 frag_no, int more_frags)
995{ 997{
996 u8 mac4[6]; 998 u8 mac4[6];
997 u8 source[6]; 999 u8 source[6];
998 struct sk_buff *skb; 1000 struct sk_buff *skb;
999 1001
1000 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 1002 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
1001 memcpy(source, header->addr3, 6); 1003 memcpy(source, header->addr3, 6);
1002 else 1004 else
1003 memcpy(source, header->addr2, 6); 1005 memcpy(source, header->addr2, 6);
1004 1006
1005 rx_packet_loc += 24; /* skip header */ 1007 rx_packet_loc += 24; /* skip header */
1006 1008
1007 if (priv->do_rx_crc) 1009 if (priv->do_rx_crc)
1008 msdu_size -= 4; 1010 msdu_size -= 4;
1009 1011
@@ -1012,16 +1014,16 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1012 msdu_size -= 6; 1014 msdu_size -= 6;
1013 rx_packet_loc += 6; 1015 rx_packet_loc += 6;
1014 1016
1015 if (priv->do_rx_crc) 1017 if (priv->do_rx_crc)
1016 crc = crc32_le(crc, mac4, 6); 1018 crc = crc32_le(crc, mac4, 6);
1017 1019
1018 priv->frag_seq = seq_no; 1020 priv->frag_seq = seq_no;
1019 priv->frag_no = 1; 1021 priv->frag_no = 1;
1020 priv->frag_len = msdu_size; 1022 priv->frag_len = msdu_size;
1021 memcpy(priv->frag_source, source, 6); 1023 memcpy(priv->frag_source, source, 6);
1022 memcpy(&priv->rx_buf[6], source, 6); 1024 memcpy(&priv->rx_buf[6], source, 6);
1023 memcpy(priv->rx_buf, header->addr1, 6); 1025 memcpy(priv->rx_buf, header->addr1, 6);
1024 1026
1025 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size); 1027 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
1026 1028
1027 if (priv->do_rx_crc) { 1029 if (priv->do_rx_crc) {
@@ -1033,17 +1035,17 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1033 memset(priv->frag_source, 0xff, 6); 1035 memset(priv->frag_source, 0xff, 6);
1034 } 1036 }
1035 } 1037 }
1036 1038
1037 } else if (priv->frag_no == frag_no && 1039 } else if (priv->frag_no == frag_no &&
1038 priv->frag_seq == seq_no && 1040 priv->frag_seq == seq_no &&
1039 memcmp(priv->frag_source, source, 6) == 0) { 1041 memcmp(priv->frag_source, source, 6) == 0) {
1040 1042
1041 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len], 1043 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
1042 rx_packet_loc, msdu_size); 1044 rx_packet_loc, msdu_size);
1043 if (priv->do_rx_crc) { 1045 if (priv->do_rx_crc) {
1044 u32 netcrc; 1046 u32 netcrc;
1045 crc = crc32_le(crc, 1047 crc = crc32_le(crc,
1046 &priv->rx_buf[12 + priv->frag_len], 1048 &priv->rx_buf[12 + priv->frag_len],
1047 msdu_size); 1049 msdu_size);
1048 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 1050 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
1049 if ((crc ^ 0xffffffff) != netcrc) { 1051 if ((crc ^ 0xffffffff) != netcrc) {
@@ -1052,7 +1054,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1052 more_frags = 1; /* don't send broken assembly */ 1054 more_frags = 1; /* don't send broken assembly */
1053 } 1055 }
1054 } 1056 }
1055 1057
1056 priv->frag_len += msdu_size; 1058 priv->frag_len += msdu_size;
1057 priv->frag_no++; 1059 priv->frag_no++;
1058 1060
@@ -1062,60 +1064,60 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1062 priv->stats.rx_dropped++; 1064 priv->stats.rx_dropped++;
1063 } else { 1065 } else {
1064 skb_reserve(skb, 2); 1066 skb_reserve(skb, 2);
1065 memcpy(skb_put(skb, priv->frag_len + 12), 1067 memcpy(skb_put(skb, priv->frag_len + 12),
1066 priv->rx_buf, 1068 priv->rx_buf,
1067 priv->frag_len + 12); 1069 priv->frag_len + 12);
1068 priv->dev->last_rx = jiffies; 1070 priv->dev->last_rx = jiffies;
1069 skb->dev = priv->dev; 1071 skb->dev = priv->dev;
1070 skb->protocol = eth_type_trans(skb, priv->dev); 1072 skb->protocol = eth_type_trans(skb, priv->dev);
1071 skb->ip_summed = CHECKSUM_NONE; 1073 skb->ip_summed = CHECKSUM_NONE;
1072 netif_rx(skb); 1074 netif_rx(skb);
1073 priv->stats.rx_bytes += priv->frag_len + 12; 1075 priv->stats.rx_bytes += priv->frag_len + 12;
1074 priv->stats.rx_packets++; 1076 priv->stats.rx_packets++;
1075 } 1077 }
1076 } 1078 }
1077
1078 } else 1079 } else
1079 priv->wstats.discard.fragment++; 1080 priv->wstats.discard.fragment++;
1080} 1081}
1081 1082
1082static void rx_done_irq(struct atmel_private *priv) 1083static void rx_done_irq(struct atmel_private *priv)
1083{ 1084{
1084 int i; 1085 int i;
1085 struct ieee80211_hdr_4addr header; 1086 struct ieee80211_hdr_4addr header;
1086 1087
1087 for (i = 0; 1088 for (i = 0;
1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && 1089 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
1089 i < priv->host_info.rx_desc_count; 1090 i < priv->host_info.rx_desc_count;
1090 i++) { 1091 i++) {
1091 1092
1092 u16 msdu_size, rx_packet_loc, frame_ctl, seq_control; 1093 u16 msdu_size, rx_packet_loc, frame_ctl, seq_control;
1093 u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head)); 1094 u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head));
1094 u32 crc = 0xffffffff; 1095 u32 crc = 0xffffffff;
1095 1096
1096 if (status != RX_STATUS_SUCCESS) { 1097 if (status != RX_STATUS_SUCCESS) {
1097 if (status == 0xc1) /* determined by experiment */ 1098 if (status == 0xc1) /* determined by experiment */
1098 priv->wstats.discard.nwid++; 1099 priv->wstats.discard.nwid++;
1099 else 1100 else
1100 priv->stats.rx_errors++; 1101 priv->stats.rx_errors++;
1101 goto next; 1102 goto next;
1102 } 1103 }
1103 1104
1104 msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head)); 1105 msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head));
1105 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head)); 1106 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
1106 1107
1107 if (msdu_size < 30) { 1108 if (msdu_size < 30) {
1108 priv->stats.rx_errors++; 1109 priv->stats.rx_errors++;
1109 goto next; 1110 goto next;
1110 } 1111 }
1111 1112
1112 /* Get header as far as end of seq_ctl */ 1113 /* Get header as far as end of seq_ctl */
1113 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24); 1114 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
1114 frame_ctl = le16_to_cpu(header.frame_ctl); 1115 frame_ctl = le16_to_cpu(header.frame_ctl);
1115 seq_control = le16_to_cpu(header.seq_ctl); 1116 seq_control = le16_to_cpu(header.seq_ctl);
1116 1117
1117 /* probe for CRC use here if needed once five packets have arrived with 1118 /* probe for CRC use here if needed once five packets have
1118 the same crc status, we assume we know what's happening and stop probing */ 1119 arrived with the same crc status, we assume we know what's
1120 happening and stop probing */
1119 if (priv->probe_crc) { 1121 if (priv->probe_crc) {
1120 if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) { 1122 if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) {
1121 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size); 1123 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
@@ -1130,34 +1132,33 @@ static void rx_done_irq(struct atmel_private *priv)
1130 priv->probe_crc = 0; 1132 priv->probe_crc = 0;
1131 } 1133 }
1132 } 1134 }
1133 1135
1134 /* don't CRC header when WEP in use */ 1136 /* don't CRC header when WEP in use */
1135 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) { 1137 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) {
1136 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24); 1138 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
1137 } 1139 }
1138 msdu_size -= 24; /* header */ 1140 msdu_size -= 24; /* header */
1139 1141
1140 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { 1142 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
1141
1142 int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS; 1143 int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
1143 u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG; 1144 u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
1144 u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4; 1145 u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
1145 1146
1146 if (!more_fragments && packet_fragment_no == 0 ) { 1147 if (!more_fragments && packet_fragment_no == 0) {
1147 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc); 1148 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
1148 } else { 1149 } else {
1149 frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc, 1150 frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc,
1150 packet_sequence_no, packet_fragment_no, more_fragments); 1151 packet_sequence_no, packet_fragment_no, more_fragments);
1151 } 1152 }
1152 } 1153 }
1153 1154
1154 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 1155 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
1155 /* copy rest of packet into buffer */ 1156 /* copy rest of packet into buffer */
1156 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); 1157 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
1157 1158
1158 /* we use the same buffer for frag reassembly and control packets */ 1159 /* we use the same buffer for frag reassembly and control packets */
1159 memset(priv->frag_source, 0xff, 6); 1160 memset(priv->frag_source, 0xff, 6);
1160 1161
1161 if (priv->do_rx_crc) { 1162 if (priv->do_rx_crc) {
1162 /* last 4 octets is crc */ 1163 /* last 4 octets is crc */
1163 msdu_size -= 4; 1164 msdu_size -= 4;
@@ -1170,18 +1171,18 @@ static void rx_done_irq(struct atmel_private *priv)
1170 1171
1171 atmel_management_frame(priv, &header, msdu_size, 1172 atmel_management_frame(priv, &header, msdu_size,
1172 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head))); 1173 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head)));
1173 } 1174 }
1174 1175
1175 next: 1176next:
1176 /* release descriptor */ 1177 /* release descriptor */
1177 atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED); 1178 atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED);
1178 1179
1179 if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1)) 1180 if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1))
1180 priv->rx_desc_head++; 1181 priv->rx_desc_head++;
1181 else 1182 else
1182 priv->rx_desc_head = 0; 1183 priv->rx_desc_head = 0;
1183 } 1184 }
1184} 1185}
1185 1186
1186static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs) 1187static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1187{ 1188{
@@ -1189,7 +1190,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1189 struct atmel_private *priv = netdev_priv(dev); 1190 struct atmel_private *priv = netdev_priv(dev);
1190 u8 isr; 1191 u8 isr;
1191 int i = -1; 1192 int i = -1;
1192 static u8 irq_order[] = { 1193 static u8 irq_order[] = {
1193 ISR_OUT_OF_RANGE, 1194 ISR_OUT_OF_RANGE,
1194 ISR_RxCOMPLETE, 1195 ISR_RxCOMPLETE,
1195 ISR_TxCOMPLETE, 1196 ISR_TxCOMPLETE,
@@ -1199,20 +1200,19 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1199 ISR_IBSS_MERGE, 1200 ISR_IBSS_MERGE,
1200 ISR_GENERIC_IRQ 1201 ISR_GENERIC_IRQ
1201 }; 1202 };
1202
1203 1203
1204 if (priv->card && priv->present_callback && 1204 if (priv->card && priv->present_callback &&
1205 !(*priv->present_callback)(priv->card)) 1205 !(*priv->present_callback)(priv->card))
1206 return IRQ_HANDLED; 1206 return IRQ_HANDLED;
1207 1207
1208 /* In this state upper-level code assumes it can mess with 1208 /* In this state upper-level code assumes it can mess with
1209 the card unhampered by interrupts which may change register state. 1209 the card unhampered by interrupts which may change register state.
1210 Note that even though the card shouldn't generate interrupts 1210 Note that even though the card shouldn't generate interrupts
1211 the interrupt line may be shared. This allows card setup 1211 the interrupt line may be shared. This allows card setup
1212 to go on without disabling interrupts for a long time. */ 1212 to go on without disabling interrupts for a long time. */
1213 if (priv->station_state == STATION_STATE_DOWN) 1213 if (priv->station_state == STATION_STATE_DOWN)
1214 return IRQ_NONE; 1214 return IRQ_NONE;
1215 1215
1216 atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */ 1216 atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */
1217 1217
1218 while (1) { 1218 while (1) {
@@ -1221,36 +1221,36 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1221 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name); 1221 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
1222 return IRQ_HANDLED; 1222 return IRQ_HANDLED;
1223 } 1223 }
1224 1224
1225 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET)); 1225 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
1226 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0); 1226 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
1227 1227
1228 if (!isr) { 1228 if (!isr) {
1229 atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */ 1229 atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */
1230 return i == -1 ? IRQ_NONE : IRQ_HANDLED; 1230 return i == -1 ? IRQ_NONE : IRQ_HANDLED;
1231 } 1231 }
1232 1232
1233 atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */ 1233 atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
1234 1234
1235 for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++) 1235 for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++)
1236 if (isr & irq_order[i]) 1236 if (isr & irq_order[i])
1237 break; 1237 break;
1238 1238
1239 if (!atmel_lock_mac(priv)) { 1239 if (!atmel_lock_mac(priv)) {
1240 /* failed to contact card */ 1240 /* failed to contact card */
1241 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name); 1241 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
1242 return IRQ_HANDLED; 1242 return IRQ_HANDLED;
1243 } 1243 }
1244 1244
1245 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET)); 1245 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
1246 isr ^= irq_order[i]; 1246 isr ^= irq_order[i];
1247 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr); 1247 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr);
1248 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0); 1248 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
1249 1249
1250 switch (irq_order[i]) { 1250 switch (irq_order[i]) {
1251 1251
1252 case ISR_OUT_OF_RANGE: 1252 case ISR_OUT_OF_RANGE:
1253 if (priv->operating_mode == IW_MODE_INFRA && 1253 if (priv->operating_mode == IW_MODE_INFRA &&
1254 priv->station_state == STATION_STATE_READY) { 1254 priv->station_state == STATION_STATE_READY) {
1255 priv->station_is_associated = 0; 1255 priv->station_is_associated = 0;
1256 atmel_scan(priv, 1); 1256 atmel_scan(priv, 1);
@@ -1261,24 +1261,24 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1261 priv->wstats.discard.misc++; 1261 priv->wstats.discard.misc++;
1262 /* fall through */ 1262 /* fall through */
1263 case ISR_RxCOMPLETE: 1263 case ISR_RxCOMPLETE:
1264 rx_done_irq(priv); 1264 rx_done_irq(priv);
1265 break; 1265 break;
1266 1266
1267 case ISR_TxCOMPLETE: 1267 case ISR_TxCOMPLETE:
1268 tx_done_irq(priv); 1268 tx_done_irq(priv);
1269 break; 1269 break;
1270 1270
1271 case ISR_FATAL_ERROR: 1271 case ISR_FATAL_ERROR:
1272 printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name); 1272 printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name);
1273 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 1273 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
1274 break; 1274 break;
1275 1275
1276 case ISR_COMMAND_COMPLETE: 1276 case ISR_COMMAND_COMPLETE:
1277 atmel_command_irq(priv); 1277 atmel_command_irq(priv);
1278 break; 1278 break;
1279 1279
1280 case ISR_IBSS_MERGE: 1280 case ISR_IBSS_MERGE:
1281 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS, 1281 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
1282 priv->CurrentBSSID, 6); 1282 priv->CurrentBSSID, 6);
1283 /* The WPA stuff cares about the current AP address */ 1283 /* The WPA stuff cares about the current AP address */
1284 if (priv->use_wpa) 1284 if (priv->use_wpa)
@@ -1288,24 +1288,23 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1288 printk(KERN_INFO "%s: Generic_irq received.\n", dev->name); 1288 printk(KERN_INFO "%s: Generic_irq received.\n", dev->name);
1289 break; 1289 break;
1290 } 1290 }
1291 } 1291 }
1292} 1292}
1293 1293
1294 1294static struct net_device_stats *atmel_get_stats(struct net_device *dev)
1295static struct net_device_stats *atmel_get_stats (struct net_device *dev)
1296{ 1295{
1297 struct atmel_private *priv = netdev_priv(dev); 1296 struct atmel_private *priv = netdev_priv(dev);
1298 return &priv->stats; 1297 return &priv->stats;
1299} 1298}
1300 1299
1301static struct iw_statistics *atmel_get_wireless_stats (struct net_device *dev) 1300static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
1302{ 1301{
1303 struct atmel_private *priv = netdev_priv(dev); 1302 struct atmel_private *priv = netdev_priv(dev);
1304 1303
1305 /* update the link quality here in case we are seeing no beacons 1304 /* update the link quality here in case we are seeing no beacons
1306 at all to drive the process */ 1305 at all to drive the process */
1307 atmel_smooth_qual(priv); 1306 atmel_smooth_qual(priv);
1308 1307
1309 priv->wstats.status = priv->station_state; 1308 priv->wstats.status = priv->station_state;
1310 1309
1311 if (priv->operating_mode == IW_MODE_INFRA) { 1310 if (priv->operating_mode == IW_MODE_INFRA) {
@@ -1328,8 +1327,8 @@ static struct iw_statistics *atmel_get_wireless_stats (struct net_device *dev)
1328 | IW_QUAL_NOISE_INVALID; 1327 | IW_QUAL_NOISE_INVALID;
1329 priv->wstats.miss.beacon = 0; 1328 priv->wstats.miss.beacon = 0;
1330 } 1329 }
1331 1330
1332 return (&priv->wstats); 1331 return &priv->wstats;
1333} 1332}
1334 1333
1335static int atmel_change_mtu(struct net_device *dev, int new_mtu) 1334static int atmel_change_mtu(struct net_device *dev, int new_mtu)
@@ -1343,21 +1342,21 @@ static int atmel_change_mtu(struct net_device *dev, int new_mtu)
1343static int atmel_set_mac_address(struct net_device *dev, void *p) 1342static int atmel_set_mac_address(struct net_device *dev, void *p)
1344{ 1343{
1345 struct sockaddr *addr = p; 1344 struct sockaddr *addr = p;
1346 1345
1347 memcpy (dev->dev_addr, addr->sa_data, dev->addr_len); 1346 memcpy (dev->dev_addr, addr->sa_data, dev->addr_len);
1348 return atmel_open(dev); 1347 return atmel_open(dev);
1349} 1348}
1350 1349
1351EXPORT_SYMBOL(atmel_open); 1350EXPORT_SYMBOL(atmel_open);
1352 1351
1353int atmel_open (struct net_device *dev) 1352int atmel_open(struct net_device *dev)
1354{ 1353{
1355 struct atmel_private *priv = netdev_priv(dev); 1354 struct atmel_private *priv = netdev_priv(dev);
1356 int i, channel; 1355 int i, channel;
1357 1356
1358 /* any scheduled timer is no longer needed and might screw things up.. */ 1357 /* any scheduled timer is no longer needed and might screw things up.. */
1359 del_timer_sync(&priv->management_timer); 1358 del_timer_sync(&priv->management_timer);
1360 1359
1361 /* Interrupts will not touch the card once in this state... */ 1360 /* Interrupts will not touch the card once in this state... */
1362 priv->station_state = STATION_STATE_DOWN; 1361 priv->station_state = STATION_STATE_DOWN;
1363 1362
@@ -1377,7 +1376,7 @@ int atmel_open (struct net_device *dev)
1377 priv->site_survey_state = SITE_SURVEY_IDLE; 1376 priv->site_survey_state = SITE_SURVEY_IDLE;
1378 priv->station_is_associated = 0; 1377 priv->station_is_associated = 0;
1379 1378
1380 if (!reset_atmel_card(dev)) 1379 if (!reset_atmel_card(dev))
1381 return -EAGAIN; 1380 return -EAGAIN;
1382 1381
1383 if (priv->config_reg_domain) { 1382 if (priv->config_reg_domain) {
@@ -1391,26 +1390,26 @@ int atmel_open (struct net_device *dev)
1391 if (i == sizeof(channel_table)/sizeof(channel_table[0])) { 1390 if (i == sizeof(channel_table)/sizeof(channel_table[0])) {
1392 priv->reg_domain = REG_DOMAIN_MKK1; 1391 priv->reg_domain = REG_DOMAIN_MKK1;
1393 printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name); 1392 printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
1394 } 1393 }
1395 } 1394 }
1396 1395
1397 if ((channel = atmel_validate_channel(priv, priv->channel))) 1396 if ((channel = atmel_validate_channel(priv, priv->channel)))
1398 priv->channel = channel; 1397 priv->channel = channel;
1399 1398
1400 /* this moves station_state on.... */ 1399 /* this moves station_state on.... */
1401 atmel_scan(priv, 1); 1400 atmel_scan(priv, 1);
1402 1401
1403 atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */ 1402 atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */
1404 return 0; 1403 return 0;
1405} 1404}
1406 1405
1407static int atmel_close (struct net_device *dev) 1406static int atmel_close(struct net_device *dev)
1408{ 1407{
1409 struct atmel_private *priv = netdev_priv(dev); 1408 struct atmel_private *priv = netdev_priv(dev);
1410 1409
1411 atmel_enter_state(priv, STATION_STATE_DOWN); 1410 atmel_enter_state(priv, STATION_STATE_DOWN);
1412 1411
1413 if (priv->bus_type == BUS_TYPE_PCCARD) 1412 if (priv->bus_type == BUS_TYPE_PCCARD)
1414 atmel_write16(dev, GCR, 0x0060); 1413 atmel_write16(dev, GCR, 0x0060);
1415 atmel_write16(dev, GCR, 0x0040); 1414 atmel_write16(dev, GCR, 0x0040);
1416 return 0; 1415 return 0;
@@ -1438,43 +1437,46 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
1438 int i; 1437 int i;
1439 char *p = buf; 1438 char *p = buf;
1440 char *s, *r, *c; 1439 char *s, *r, *c;
1441 1440
1442 p += sprintf(p, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR); 1441 p += sprintf(p, "Driver version:\t\t%d.%d\n",
1443 1442 DRIVER_MAJOR, DRIVER_MINOR);
1443
1444 if (priv->station_state != STATION_STATE_DOWN) { 1444 if (priv->station_state != STATION_STATE_DOWN) {
1445 p += sprintf(p, "Firmware version:\t%d.%d build %d\nFirmware location:\t", 1445 p += sprintf(p, "Firmware version:\t%d.%d build %d\n"
1446 "Firmware location:\t",
1446 priv->host_info.major_version, 1447 priv->host_info.major_version,
1447 priv->host_info.minor_version, 1448 priv->host_info.minor_version,
1448 priv->host_info.build_version); 1449 priv->host_info.build_version);
1449 1450
1450 if (priv->card_type != CARD_TYPE_EEPROM) 1451 if (priv->card_type != CARD_TYPE_EEPROM)
1451 p += sprintf(p, "on card\n"); 1452 p += sprintf(p, "on card\n");
1452 else if (priv->firmware) 1453 else if (priv->firmware)
1453 p += sprintf(p, "%s loaded by host\n", priv->firmware_id); 1454 p += sprintf(p, "%s loaded by host\n",
1455 priv->firmware_id);
1454 else 1456 else
1455 p += sprintf(p, "%s loaded by hotplug\n", priv->firmware_id); 1457 p += sprintf(p, "%s loaded by hotplug\n",
1456 1458 priv->firmware_id);
1457 switch(priv->card_type) { 1459
1460 switch (priv->card_type) {
1458 case CARD_TYPE_PARALLEL_FLASH: c = "Parallel flash"; break; 1461 case CARD_TYPE_PARALLEL_FLASH: c = "Parallel flash"; break;
1459 case CARD_TYPE_SPI_FLASH: c = "SPI flash\n"; break; 1462 case CARD_TYPE_SPI_FLASH: c = "SPI flash\n"; break;
1460 case CARD_TYPE_EEPROM: c = "EEPROM"; break; 1463 case CARD_TYPE_EEPROM: c = "EEPROM"; break;
1461 default: c = "<unknown>"; 1464 default: c = "<unknown>";
1462 } 1465 }
1463 1466
1464
1465 r = "<unknown>"; 1467 r = "<unknown>";
1466 for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) 1468 for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
1467 if (priv->reg_domain == channel_table[i].reg_domain) 1469 if (priv->reg_domain == channel_table[i].reg_domain)
1468 r = channel_table[i].name; 1470 r = channel_table[i].name;
1469 1471
1470 p += sprintf(p, "MAC memory type:\t%s\n", c); 1472 p += sprintf(p, "MAC memory type:\t%s\n", c);
1471 p += sprintf(p, "Regulatory domain:\t%s\n", r); 1473 p += sprintf(p, "Regulatory domain:\t%s\n", r);
1472 p += sprintf(p, "Host CRC checking:\t%s\n", 1474 p += sprintf(p, "Host CRC checking:\t%s\n",
1473 priv->do_rx_crc ? "On" : "Off"); 1475 priv->do_rx_crc ? "On" : "Off");
1474 p += sprintf(p, "WPA-capable firmware:\t%s\n", 1476 p += sprintf(p, "WPA-capable firmware:\t%s\n",
1475 priv->use_wpa ? "Yes" : "No"); 1477 priv->use_wpa ? "Yes" : "No");
1476 } 1478 }
1477 1479
1478 switch(priv->station_state) { 1480 switch(priv->station_state) {
1479 case STATION_STATE_SCANNING: s = "Scanning"; break; 1481 case STATION_STATE_SCANNING: s = "Scanning"; break;
1480 case STATION_STATE_JOINNING: s = "Joining"; break; 1482 case STATION_STATE_JOINNING: s = "Joining"; break;
@@ -1486,9 +1488,9 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
1486 case STATION_STATE_DOWN: s = "Down"; break; 1488 case STATION_STATE_DOWN: s = "Down"; break;
1487 default: s = "<unknown>"; 1489 default: s = "<unknown>";
1488 } 1490 }
1489 1491
1490 p += sprintf(p, "Current state:\t\t%s\n", s); 1492 p += sprintf(p, "Current state:\t\t%s\n", s);
1491 return p - buf; 1493 return p - buf;
1492} 1494}
1493 1495
1494static int atmel_read_proc(char *page, char **start, off_t off, 1496static int atmel_read_proc(char *page, char **start, off_t off,
@@ -1504,9 +1506,12 @@ static int atmel_read_proc(char *page, char **start, off_t off,
1504 return len; 1506 return len;
1505} 1507}
1506 1508
1507struct net_device *init_atmel_card( unsigned short irq, unsigned long port, const AtmelFWType fw_type, 1509struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1508 struct device *sys_dev, int (*card_present)(void *), void *card) 1510 const AtmelFWType fw_type,
1511 struct device *sys_dev,
1512 int (*card_present)(void *), void *card)
1509{ 1513{
1514 struct proc_dir_entry *ent;
1510 struct net_device *dev; 1515 struct net_device *dev;
1511 struct atmel_private *priv; 1516 struct atmel_private *priv;
1512 int rc; 1517 int rc;
@@ -1514,11 +1519,11 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1514 /* Create the network device object. */ 1519 /* Create the network device object. */
1515 dev = alloc_etherdev(sizeof(*priv)); 1520 dev = alloc_etherdev(sizeof(*priv));
1516 if (!dev) { 1521 if (!dev) {
1517 printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n"); 1522 printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n");
1518 return NULL; 1523 return NULL;
1519 } 1524 }
1520 if (dev_alloc_name(dev, dev->name) < 0) { 1525 if (dev_alloc_name(dev, dev->name) < 0) {
1521 printk(KERN_ERR "atmel: Couldn't get name!\n"); 1526 printk(KERN_ERR "atmel: Couldn't get name!\n");
1522 goto err_out_free; 1527 goto err_out_free;
1523 } 1528 }
1524 1529
@@ -1550,7 +1555,7 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1550 memset(priv->BSSID, 0, 6); 1555 memset(priv->BSSID, 0, 6);
1551 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */ 1556 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
1552 priv->station_was_associated = 0; 1557 priv->station_was_associated = 0;
1553 1558
1554 priv->last_survey = jiffies; 1559 priv->last_survey = jiffies;
1555 priv->preamble = LONG_PREAMBLE; 1560 priv->preamble = LONG_PREAMBLE;
1556 priv->operating_mode = IW_MODE_INFRA; 1561 priv->operating_mode = IW_MODE_INFRA;
@@ -1586,7 +1591,7 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1586 spin_lock_init(&priv->timerlock); 1591 spin_lock_init(&priv->timerlock);
1587 priv->management_timer.function = atmel_management_timer; 1592 priv->management_timer.function = atmel_management_timer;
1588 priv->management_timer.data = (unsigned long) dev; 1593 priv->management_timer.data = (unsigned long) dev;
1589 1594
1590 dev->open = atmel_open; 1595 dev->open = atmel_open;
1591 dev->stop = atmel_close; 1596 dev->stop = atmel_close;
1592 dev->change_mtu = atmel_change_mtu; 1597 dev->change_mtu = atmel_change_mtu;
@@ -1597,44 +1602,46 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1597 dev->do_ioctl = atmel_ioctl; 1602 dev->do_ioctl = atmel_ioctl;
1598 dev->irq = irq; 1603 dev->irq = irq;
1599 dev->base_addr = port; 1604 dev->base_addr = port;
1600 1605
1601 SET_NETDEV_DEV(dev, sys_dev); 1606 SET_NETDEV_DEV(dev, sys_dev);
1602 1607
1603 if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) { 1608 if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) {
1604 printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc ); 1609 printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc);
1605 goto err_out_free; 1610 goto err_out_free;
1606 } 1611 }
1607 1612
1608 if (!request_region(dev->base_addr, 32, 1613 if (!request_region(dev->base_addr, 32,
1609 priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) { 1614 priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) {
1610 goto err_out_irq; 1615 goto err_out_irq;
1611 } 1616 }
1612 1617
1613 if (register_netdev(dev)) 1618 if (register_netdev(dev))
1614 goto err_out_res; 1619 goto err_out_res;
1615 1620
1616 if (!probe_atmel_card(dev)){ 1621 if (!probe_atmel_card(dev)){
1617 unregister_netdev(dev); 1622 unregister_netdev(dev);
1618 goto err_out_res; 1623 goto err_out_res;
1619 } 1624 }
1620 1625
1621 netif_carrier_off(dev); 1626 netif_carrier_off(dev);
1622 1627
1623 create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv); 1628 ent = create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv);
1624 1629 if (!ent)
1630 printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
1631
1625 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", 1632 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1626 dev->name, DRIVER_MAJOR, DRIVER_MINOR, 1633 dev->name, DRIVER_MAJOR, DRIVER_MINOR,
1627 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 1634 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1628 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); 1635 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] );
1629 1636
1630 SET_MODULE_OWNER(dev); 1637 SET_MODULE_OWNER(dev);
1631 return dev; 1638 return dev;
1632 1639
1633 err_out_res: 1640err_out_res:
1634 release_region( dev->base_addr, 32); 1641 release_region( dev->base_addr, 32);
1635 err_out_irq: 1642err_out_irq:
1636 free_irq(dev->irq, dev); 1643 free_irq(dev->irq, dev);
1637 err_out_free: 1644err_out_free:
1638 free_netdev(dev); 1645 free_netdev(dev);
1639 return NULL; 1646 return NULL;
1640} 1647}
@@ -1644,12 +1651,12 @@ EXPORT_SYMBOL(init_atmel_card);
1644void stop_atmel_card(struct net_device *dev) 1651void stop_atmel_card(struct net_device *dev)
1645{ 1652{
1646 struct atmel_private *priv = netdev_priv(dev); 1653 struct atmel_private *priv = netdev_priv(dev);
1647 1654
1648 /* put a brick on it... */ 1655 /* put a brick on it... */
1649 if (priv->bus_type == BUS_TYPE_PCCARD) 1656 if (priv->bus_type == BUS_TYPE_PCCARD)
1650 atmel_write16(dev, GCR, 0x0060); 1657 atmel_write16(dev, GCR, 0x0060);
1651 atmel_write16(dev, GCR, 0x0040); 1658 atmel_write16(dev, GCR, 0x0040);
1652 1659
1653 del_timer_sync(&priv->management_timer); 1660 del_timer_sync(&priv->management_timer);
1654 unregister_netdev(dev); 1661 unregister_netdev(dev);
1655 remove_proc_entry("driver/atmel", NULL); 1662 remove_proc_entry("driver/atmel", NULL);
@@ -1675,13 +1682,13 @@ static int atmel_set_essid(struct net_device *dev,
1675 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1682 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1676 1683
1677 priv->connect_to_any_BSS = 0; 1684 priv->connect_to_any_BSS = 0;
1678 1685
1679 /* Check the size of the string */ 1686 /* Check the size of the string */
1680 if (dwrq->length > MAX_SSID_LENGTH + 1) 1687 if (dwrq->length > MAX_SSID_LENGTH + 1)
1681 return -E2BIG ; 1688 return -E2BIG;
1682 if (index != 0) 1689 if (index != 0)
1683 return -EINVAL; 1690 return -EINVAL;
1684 1691
1685 memcpy(priv->new_SSID, extra, dwrq->length - 1); 1692 memcpy(priv->new_SSID, extra, dwrq->length - 1);
1686 priv->new_SSID_size = dwrq->length - 1; 1693 priv->new_SSID_size = dwrq->length - 1;
1687 } 1694 }
@@ -1706,7 +1713,7 @@ static int atmel_get_essid(struct net_device *dev,
1706 extra[priv->SSID_size] = '\0'; 1713 extra[priv->SSID_size] = '\0';
1707 dwrq->length = priv->SSID_size + 1; 1714 dwrq->length = priv->SSID_size + 1;
1708 } 1715 }
1709 1716
1710 dwrq->flags = !priv->connect_to_any_BSS; /* active */ 1717 dwrq->flags = !priv->connect_to_any_BSS; /* active */
1711 1718
1712 return 0; 1719 return 0;
@@ -1768,7 +1775,7 @@ static int atmel_set_encode(struct net_device *dev,
1768 /* WE specify that if a valid key is set, encryption 1775 /* WE specify that if a valid key is set, encryption
1769 * should be enabled (user may turn it off later) 1776 * should be enabled (user may turn it off later)
1770 * This is also how "iwconfig ethX key on" works */ 1777 * This is also how "iwconfig ethX key on" works */
1771 if (index == current_index && 1778 if (index == current_index &&
1772 priv->wep_key_len[index] > 0) { 1779 priv->wep_key_len[index] > 0) {
1773 priv->wep_is_on = 1; 1780 priv->wep_is_on = 1;
1774 priv->exclude_unencrypted = 1; 1781 priv->exclude_unencrypted = 1;
@@ -1783,18 +1790,18 @@ static int atmel_set_encode(struct net_device *dev,
1783 } else { 1790 } else {
1784 /* Do we want to just set the transmit key index ? */ 1791 /* Do we want to just set the transmit key index ? */
1785 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1792 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1786 if ( index>=0 && index < 4 ) { 1793 if (index >= 0 && index < 4) {
1787 priv->default_key = index; 1794 priv->default_key = index;
1788 } else 1795 } else
1789 /* Don't complain if only change the mode */ 1796 /* Don't complain if only change the mode */
1790 if(!dwrq->flags & IW_ENCODE_MODE) { 1797 if (!dwrq->flags & IW_ENCODE_MODE) {
1791 return -EINVAL; 1798 return -EINVAL;
1792 } 1799 }
1793 } 1800 }
1794 /* Read the flags */ 1801 /* Read the flags */
1795 if(dwrq->flags & IW_ENCODE_DISABLED) { 1802 if (dwrq->flags & IW_ENCODE_DISABLED) {
1796 priv->wep_is_on = 0; 1803 priv->wep_is_on = 0;
1797 priv->encryption_level = 0; 1804 priv->encryption_level = 0;
1798 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; 1805 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
1799 } else { 1806 } else {
1800 priv->wep_is_on = 1; 1807 priv->wep_is_on = 1;
@@ -1806,15 +1813,14 @@ static int atmel_set_encode(struct net_device *dev,
1806 priv->encryption_level = 1; 1813 priv->encryption_level = 1;
1807 } 1814 }
1808 } 1815 }
1809 if(dwrq->flags & IW_ENCODE_RESTRICTED) 1816 if (dwrq->flags & IW_ENCODE_RESTRICTED)
1810 priv->exclude_unencrypted = 1; 1817 priv->exclude_unencrypted = 1;
1811 if(dwrq->flags & IW_ENCODE_OPEN) 1818 if(dwrq->flags & IW_ENCODE_OPEN)
1812 priv->exclude_unencrypted = 0; 1819 priv->exclude_unencrypted = 0;
1813 1820
1814 return -EINPROGRESS; /* Call commit handler */ 1821 return -EINPROGRESS; /* Call commit handler */
1815} 1822}
1816 1823
1817
1818static int atmel_get_encode(struct net_device *dev, 1824static int atmel_get_encode(struct net_device *dev,
1819 struct iw_request_info *info, 1825 struct iw_request_info *info,
1820 struct iw_point *dwrq, 1826 struct iw_point *dwrq,
@@ -1822,7 +1828,7 @@ static int atmel_get_encode(struct net_device *dev,
1822{ 1828{
1823 struct atmel_private *priv = netdev_priv(dev); 1829 struct atmel_private *priv = netdev_priv(dev);
1824 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1830 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1825 1831
1826 if (!priv->wep_is_on) 1832 if (!priv->wep_is_on)
1827 dwrq->flags = IW_ENCODE_DISABLED; 1833 dwrq->flags = IW_ENCODE_DISABLED;
1828 else { 1834 else {
@@ -1843,7 +1849,7 @@ static int atmel_get_encode(struct net_device *dev,
1843 memset(extra, 0, 16); 1849 memset(extra, 0, 16);
1844 memcpy(extra, priv->wep_keys[index], dwrq->length); 1850 memcpy(extra, priv->wep_keys[index], dwrq->length);
1845 } 1851 }
1846 1852
1847 return 0; 1853 return 0;
1848} 1854}
1849 1855
@@ -1862,17 +1868,17 @@ static int atmel_set_rate(struct net_device *dev,
1862 char *extra) 1868 char *extra)
1863{ 1869{
1864 struct atmel_private *priv = netdev_priv(dev); 1870 struct atmel_private *priv = netdev_priv(dev);
1865 1871
1866 if (vwrq->fixed == 0) { 1872 if (vwrq->fixed == 0) {
1867 priv->tx_rate = 3; 1873 priv->tx_rate = 3;
1868 priv->auto_tx_rate = 1; 1874 priv->auto_tx_rate = 1;
1869 } else { 1875 } else {
1870 priv->auto_tx_rate = 0; 1876 priv->auto_tx_rate = 0;
1871 1877
1872 /* Which type of value ? */ 1878 /* Which type of value ? */
1873 if((vwrq->value < 4) && (vwrq->value >= 0)) { 1879 if ((vwrq->value < 4) && (vwrq->value >= 0)) {
1874 /* Setting by rate index */ 1880 /* Setting by rate index */
1875 priv->tx_rate = vwrq->value; 1881 priv->tx_rate = vwrq->value;
1876 } else { 1882 } else {
1877 /* Setting by frequency value */ 1883 /* Setting by frequency value */
1878 switch (vwrq->value) { 1884 switch (vwrq->value) {
@@ -1899,7 +1905,7 @@ static int atmel_set_mode(struct net_device *dev,
1899 return -EINVAL; 1905 return -EINVAL;
1900 1906
1901 priv->operating_mode = *uwrq; 1907 priv->operating_mode = *uwrq;
1902 return -EINPROGRESS; 1908 return -EINPROGRESS;
1903} 1909}
1904 1910
1905static int atmel_get_mode(struct net_device *dev, 1911static int atmel_get_mode(struct net_device *dev,
@@ -1908,7 +1914,7 @@ static int atmel_get_mode(struct net_device *dev,
1908 char *extra) 1914 char *extra)
1909{ 1915{
1910 struct atmel_private *priv = netdev_priv(dev); 1916 struct atmel_private *priv = netdev_priv(dev);
1911 1917
1912 *uwrq = priv->operating_mode; 1918 *uwrq = priv->operating_mode;
1913 return 0; 1919 return 0;
1914} 1920}
@@ -1962,9 +1968,9 @@ static int atmel_set_retry(struct net_device *dev,
1962 char *extra) 1968 char *extra)
1963{ 1969{
1964 struct atmel_private *priv = netdev_priv(dev); 1970 struct atmel_private *priv = netdev_priv(dev);
1965 1971
1966 if(!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) { 1972 if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
1967 if(vwrq->flags & IW_RETRY_MAX) 1973 if (vwrq->flags & IW_RETRY_MAX)
1968 priv->long_retry = vwrq->value; 1974 priv->long_retry = vwrq->value;
1969 else if (vwrq->flags & IW_RETRY_MIN) 1975 else if (vwrq->flags & IW_RETRY_MIN)
1970 priv->short_retry = vwrq->value; 1976 priv->short_retry = vwrq->value;
@@ -1973,9 +1979,9 @@ static int atmel_set_retry(struct net_device *dev,
1973 priv->long_retry = vwrq->value; 1979 priv->long_retry = vwrq->value;
1974 priv->short_retry = vwrq->value; 1980 priv->short_retry = vwrq->value;
1975 } 1981 }
1976 return -EINPROGRESS; 1982 return -EINPROGRESS;
1977 } 1983 }
1978 1984
1979 return -EINVAL; 1985 return -EINVAL;
1980} 1986}
1981 1987
@@ -1989,13 +1995,13 @@ static int atmel_get_retry(struct net_device *dev,
1989 vwrq->disabled = 0; /* Can't be disabled */ 1995 vwrq->disabled = 0; /* Can't be disabled */
1990 1996
1991 /* Note : by default, display the min retry number */ 1997 /* Note : by default, display the min retry number */
1992 if((vwrq->flags & IW_RETRY_MAX)) { 1998 if (vwrq->flags & IW_RETRY_MAX) {
1993 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; 1999 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
1994 vwrq->value = priv->long_retry; 2000 vwrq->value = priv->long_retry;
1995 } else { 2001 } else {
1996 vwrq->flags = IW_RETRY_LIMIT; 2002 vwrq->flags = IW_RETRY_LIMIT;
1997 vwrq->value = priv->short_retry; 2003 vwrq->value = priv->short_retry;
1998 if(priv->long_retry != priv->short_retry) 2004 if (priv->long_retry != priv->short_retry)
1999 vwrq->flags |= IW_RETRY_MIN; 2005 vwrq->flags |= IW_RETRY_MIN;
2000 } 2006 }
2001 2007
@@ -2010,13 +2016,13 @@ static int atmel_set_rts(struct net_device *dev,
2010 struct atmel_private *priv = netdev_priv(dev); 2016 struct atmel_private *priv = netdev_priv(dev);
2011 int rthr = vwrq->value; 2017 int rthr = vwrq->value;
2012 2018
2013 if(vwrq->disabled) 2019 if (vwrq->disabled)
2014 rthr = 2347; 2020 rthr = 2347;
2015 if((rthr < 0) || (rthr > 2347)) { 2021 if ((rthr < 0) || (rthr > 2347)) {
2016 return -EINVAL; 2022 return -EINVAL;
2017 } 2023 }
2018 priv->rts_threshold = rthr; 2024 priv->rts_threshold = rthr;
2019 2025
2020 return -EINPROGRESS; /* Call commit handler */ 2026 return -EINPROGRESS; /* Call commit handler */
2021} 2027}
2022 2028
@@ -2026,7 +2032,7 @@ static int atmel_get_rts(struct net_device *dev,
2026 char *extra) 2032 char *extra)
2027{ 2033{
2028 struct atmel_private *priv = netdev_priv(dev); 2034 struct atmel_private *priv = netdev_priv(dev);
2029 2035
2030 vwrq->value = priv->rts_threshold; 2036 vwrq->value = priv->rts_threshold;
2031 vwrq->disabled = (vwrq->value >= 2347); 2037 vwrq->disabled = (vwrq->value >= 2347);
2032 vwrq->fixed = 1; 2038 vwrq->fixed = 1;
@@ -2042,14 +2048,14 @@ static int atmel_set_frag(struct net_device *dev,
2042 struct atmel_private *priv = netdev_priv(dev); 2048 struct atmel_private *priv = netdev_priv(dev);
2043 int fthr = vwrq->value; 2049 int fthr = vwrq->value;
2044 2050
2045 if(vwrq->disabled) 2051 if (vwrq->disabled)
2046 fthr = 2346; 2052 fthr = 2346;
2047 if((fthr < 256) || (fthr > 2346)) { 2053 if ((fthr < 256) || (fthr > 2346)) {
2048 return -EINVAL; 2054 return -EINVAL;
2049 } 2055 }
2050 fthr &= ~0x1; /* Get an even value - is it really needed ??? */ 2056 fthr &= ~0x1; /* Get an even value - is it really needed ??? */
2051 priv->frag_threshold = fthr; 2057 priv->frag_threshold = fthr;
2052 2058
2053 return -EINPROGRESS; /* Call commit handler */ 2059 return -EINPROGRESS; /* Call commit handler */
2054} 2060}
2055 2061
@@ -2077,21 +2083,21 @@ static int atmel_set_freq(struct net_device *dev,
2077{ 2083{
2078 struct atmel_private *priv = netdev_priv(dev); 2084 struct atmel_private *priv = netdev_priv(dev);
2079 int rc = -EINPROGRESS; /* Call commit handler */ 2085 int rc = -EINPROGRESS; /* Call commit handler */
2080 2086
2081 /* If setting by frequency, convert to a channel */ 2087 /* If setting by frequency, convert to a channel */
2082 if((fwrq->e == 1) && 2088 if ((fwrq->e == 1) &&
2083 (fwrq->m >= (int) 241200000) && 2089 (fwrq->m >= (int) 241200000) &&
2084 (fwrq->m <= (int) 248700000)) { 2090 (fwrq->m <= (int) 248700000)) {
2085 int f = fwrq->m / 100000; 2091 int f = fwrq->m / 100000;
2086 int c = 0; 2092 int c = 0;
2087 while((c < 14) && (f != frequency_list[c])) 2093 while ((c < 14) && (f != frequency_list[c]))
2088 c++; 2094 c++;
2089 /* Hack to fall through... */ 2095 /* Hack to fall through... */
2090 fwrq->e = 0; 2096 fwrq->e = 0;
2091 fwrq->m = c + 1; 2097 fwrq->m = c + 1;
2092 } 2098 }
2093 /* Setting by channel number */ 2099 /* Setting by channel number */
2094 if((fwrq->m > 1000) || (fwrq->e > 0)) 2100 if ((fwrq->m > 1000) || (fwrq->e > 0))
2095 rc = -EOPNOTSUPP; 2101 rc = -EOPNOTSUPP;
2096 else { 2102 else {
2097 int channel = fwrq->m; 2103 int channel = fwrq->m;
@@ -2099,7 +2105,7 @@ static int atmel_set_freq(struct net_device *dev,
2099 priv->channel = channel; 2105 priv->channel = channel;
2100 } else { 2106 } else {
2101 rc = -EINVAL; 2107 rc = -EINVAL;
2102 } 2108 }
2103 } 2109 }
2104 return rc; 2110 return rc;
2105} 2111}
@@ -2130,7 +2136,7 @@ static int atmel_set_scan(struct net_device *dev,
2130 * This is not an error, while the device performs scanning, 2136 * This is not an error, while the device performs scanning,
2131 * traffic doesn't flow, so it's a perfect DoS... 2137 * traffic doesn't flow, so it's a perfect DoS...
2132 * Jean II */ 2138 * Jean II */
2133 2139
2134 if (priv->station_state == STATION_STATE_DOWN) 2140 if (priv->station_state == STATION_STATE_DOWN)
2135 return -EAGAIN; 2141 return -EAGAIN;
2136 2142
@@ -2142,15 +2148,15 @@ static int atmel_set_scan(struct net_device *dev,
2142 /* Initiate a scan command */ 2148 /* Initiate a scan command */
2143 if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS) 2149 if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS)
2144 return -EBUSY; 2150 return -EBUSY;
2145 2151
2146 del_timer_sync(&priv->management_timer); 2152 del_timer_sync(&priv->management_timer);
2147 spin_lock_irqsave(&priv->irqlock, flags); 2153 spin_lock_irqsave(&priv->irqlock, flags);
2148 2154
2149 priv->site_survey_state = SITE_SURVEY_IN_PROGRESS; 2155 priv->site_survey_state = SITE_SURVEY_IN_PROGRESS;
2150 priv->fast_scan = 0; 2156 priv->fast_scan = 0;
2151 atmel_scan(priv, 0); 2157 atmel_scan(priv, 0);
2152 spin_unlock_irqrestore(&priv->irqlock, flags); 2158 spin_unlock_irqrestore(&priv->irqlock, flags);
2153 2159
2154 return 0; 2160 return 0;
2155} 2161}
2156 2162
@@ -2163,11 +2169,11 @@ static int atmel_get_scan(struct net_device *dev,
2163 int i; 2169 int i;
2164 char *current_ev = extra; 2170 char *current_ev = extra;
2165 struct iw_event iwe; 2171 struct iw_event iwe;
2166 2172
2167 if (priv->site_survey_state != SITE_SURVEY_COMPLETED) 2173 if (priv->site_survey_state != SITE_SURVEY_COMPLETED)
2168 return -EAGAIN; 2174 return -EAGAIN;
2169 2175
2170 for(i=0; i<priv->BSS_list_entries; i++) { 2176 for (i = 0; i < priv->BSS_list_entries; i++) {
2171 iwe.cmd = SIOCGIWAP; 2177 iwe.cmd = SIOCGIWAP;
2172 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 2178 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
2173 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6); 2179 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
@@ -2179,16 +2185,16 @@ static int atmel_get_scan(struct net_device *dev,
2179 iwe.cmd = SIOCGIWESSID; 2185 iwe.cmd = SIOCGIWESSID;
2180 iwe.u.data.flags = 1; 2186 iwe.u.data.flags = 1;
2181 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, priv->BSSinfo[i].SSID); 2187 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, priv->BSSinfo[i].SSID);
2182 2188
2183 iwe.cmd = SIOCGIWMODE; 2189 iwe.cmd = SIOCGIWMODE;
2184 iwe.u.mode = priv->BSSinfo[i].BSStype; 2190 iwe.u.mode = priv->BSSinfo[i].BSStype;
2185 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN); 2191 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN);
2186 2192
2187 iwe.cmd = SIOCGIWFREQ; 2193 iwe.cmd = SIOCGIWFREQ;
2188 iwe.u.freq.m = priv->BSSinfo[i].channel; 2194 iwe.u.freq.m = priv->BSSinfo[i].channel;
2189 iwe.u.freq.e = 0; 2195 iwe.u.freq.e = 0;
2190 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); 2196 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN);
2191 2197
2192 iwe.cmd = SIOCGIWENCODE; 2198 iwe.cmd = SIOCGIWENCODE;
2193 if (priv->BSSinfo[i].UsingWEP) 2199 if (priv->BSSinfo[i].UsingWEP)
2194 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 2200 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -2196,13 +2202,12 @@ static int atmel_get_scan(struct net_device *dev,
2196 iwe.u.data.flags = IW_ENCODE_DISABLED; 2202 iwe.u.data.flags = IW_ENCODE_DISABLED;
2197 iwe.u.data.length = 0; 2203 iwe.u.data.length = 0;
2198 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL); 2204 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL);
2199
2200 } 2205 }
2201 2206
2202 /* Length of data */ 2207 /* Length of data */
2203 dwrq->length = (current_ev - extra); 2208 dwrq->length = (current_ev - extra);
2204 dwrq->flags = 0; 2209 dwrq->flags = 0;
2205 2210
2206 return 0; 2211 return 0;
2207} 2212}
2208 2213
@@ -2213,7 +2218,7 @@ static int atmel_get_range(struct net_device *dev,
2213{ 2218{
2214 struct atmel_private *priv = netdev_priv(dev); 2219 struct atmel_private *priv = netdev_priv(dev);
2215 struct iw_range *range = (struct iw_range *) extra; 2220 struct iw_range *range = (struct iw_range *) extra;
2216 int k,i,j; 2221 int k, i, j;
2217 2222
2218 dwrq->length = sizeof(struct iw_range); 2223 dwrq->length = sizeof(struct iw_range);
2219 memset(range, 0, sizeof(struct iw_range)); 2224 memset(range, 0, sizeof(struct iw_range));
@@ -2226,14 +2231,14 @@ static int atmel_get_range(struct net_device *dev,
2226 break; 2231 break;
2227 } 2232 }
2228 if (range->num_channels != 0) { 2233 if (range->num_channels != 0) {
2229 for(k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) { 2234 for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
2230 range->freq[k].i = i; /* List index */ 2235 range->freq[k].i = i; /* List index */
2231 range->freq[k].m = frequency_list[i-1] * 100000; 2236 range->freq[k].m = frequency_list[i - 1] * 100000;
2232 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ 2237 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
2233 } 2238 }
2234 range->num_frequency = k; 2239 range->num_frequency = k;
2235 } 2240 }
2236 2241
2237 range->max_qual.qual = 100; 2242 range->max_qual.qual = 100;
2238 range->max_qual.level = 100; 2243 range->max_qual.level = 100;
2239 range->max_qual.noise = 0; 2244 range->max_qual.noise = 0;
@@ -2261,11 +2266,11 @@ static int atmel_get_range(struct net_device *dev,
2261 range->encoding_size[1] = 13; 2266 range->encoding_size[1] = 13;
2262 range->num_encoding_sizes = 2; 2267 range->num_encoding_sizes = 2;
2263 range->max_encoding_tokens = 4; 2268 range->max_encoding_tokens = 4;
2264 2269
2265 range->pmp_flags = IW_POWER_ON; 2270 range->pmp_flags = IW_POWER_ON;
2266 range->pmt_flags = IW_POWER_ON; 2271 range->pmt_flags = IW_POWER_ON;
2267 range->pm_capa = 0; 2272 range->pm_capa = 0;
2268 2273
2269 range->we_version_source = WIRELESS_EXT; 2274 range->we_version_source = WIRELESS_EXT;
2270 range->we_version_compiled = WIRELESS_EXT; 2275 range->we_version_compiled = WIRELESS_EXT;
2271 range->retry_capa = IW_RETRY_LIMIT ; 2276 range->retry_capa = IW_RETRY_LIMIT ;
@@ -2289,7 +2294,7 @@ static int atmel_set_wap(struct net_device *dev,
2289 2294
2290 if (awrq->sa_family != ARPHRD_ETHER) 2295 if (awrq->sa_family != ARPHRD_ETHER)
2291 return -EINVAL; 2296 return -EINVAL;
2292 2297
2293 if (memcmp(bcast, awrq->sa_data, 6) == 0) { 2298 if (memcmp(bcast, awrq->sa_data, 6) == 0) {
2294 del_timer_sync(&priv->management_timer); 2299 del_timer_sync(&priv->management_timer);
2295 spin_lock_irqsave(&priv->irqlock, flags); 2300 spin_lock_irqsave(&priv->irqlock, flags);
@@ -2297,8 +2302,8 @@ static int atmel_set_wap(struct net_device *dev,
2297 spin_unlock_irqrestore(&priv->irqlock, flags); 2302 spin_unlock_irqrestore(&priv->irqlock, flags);
2298 return 0; 2303 return 0;
2299 } 2304 }
2300 2305
2301 for(i=0; i<priv->BSS_list_entries; i++) { 2306 for (i = 0; i < priv->BSS_list_entries; i++) {
2302 if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) { 2307 if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) {
2303 if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) { 2308 if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) {
2304 return -EINVAL; 2309 return -EINVAL;
@@ -2313,10 +2318,10 @@ static int atmel_set_wap(struct net_device *dev,
2313 } 2318 }
2314 } 2319 }
2315 } 2320 }
2316 2321
2317 return -EINVAL; 2322 return -EINVAL;
2318} 2323}
2319 2324
2320static int atmel_config_commit(struct net_device *dev, 2325static int atmel_config_commit(struct net_device *dev,
2321 struct iw_request_info *info, /* NULL */ 2326 struct iw_request_info *info, /* NULL */
2322 void *zwrq, /* NULL */ 2327 void *zwrq, /* NULL */
@@ -2325,18 +2330,18 @@ static int atmel_config_commit(struct net_device *dev,
2325 return atmel_open(dev); 2330 return atmel_open(dev);
2326} 2331}
2327 2332
2328static const iw_handler atmel_handler[] = 2333static const iw_handler atmel_handler[] =
2329{ 2334{
2330 (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */ 2335 (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */
2331 (iw_handler) atmel_get_name, /* SIOCGIWNAME */ 2336 (iw_handler) atmel_get_name, /* SIOCGIWNAME */
2332 (iw_handler) NULL, /* SIOCSIWNWID */ 2337 (iw_handler) NULL, /* SIOCSIWNWID */
2333 (iw_handler) NULL, /* SIOCGIWNWID */ 2338 (iw_handler) NULL, /* SIOCGIWNWID */
2334 (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */ 2339 (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */
2335 (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */ 2340 (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */
2336 (iw_handler) atmel_set_mode, /* SIOCSIWMODE */ 2341 (iw_handler) atmel_set_mode, /* SIOCSIWMODE */
2337 (iw_handler) atmel_get_mode, /* SIOCGIWMODE */ 2342 (iw_handler) atmel_get_mode, /* SIOCGIWMODE */
2338 (iw_handler) NULL, /* SIOCSIWSENS */ 2343 (iw_handler) NULL, /* SIOCSIWSENS */
2339 (iw_handler) NULL, /* SIOCGIWSENS */ 2344 (iw_handler) NULL, /* SIOCGIWSENS */
2340 (iw_handler) NULL, /* SIOCSIWRANGE */ 2345 (iw_handler) NULL, /* SIOCSIWRANGE */
2341 (iw_handler) atmel_get_range, /* SIOCGIWRANGE */ 2346 (iw_handler) atmel_get_range, /* SIOCGIWRANGE */
2342 (iw_handler) NULL, /* SIOCSIWPRIV */ 2347 (iw_handler) NULL, /* SIOCSIWPRIV */
@@ -2350,13 +2355,13 @@ static const iw_handler atmel_handler[] =
2350 (iw_handler) atmel_set_wap, /* SIOCSIWAP */ 2355 (iw_handler) atmel_set_wap, /* SIOCSIWAP */
2351 (iw_handler) atmel_get_wap, /* SIOCGIWAP */ 2356 (iw_handler) atmel_get_wap, /* SIOCGIWAP */
2352 (iw_handler) NULL, /* -- hole -- */ 2357 (iw_handler) NULL, /* -- hole -- */
2353 (iw_handler) NULL, /* SIOCGIWAPLIST */ 2358 (iw_handler) NULL, /* SIOCGIWAPLIST */
2354 (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */ 2359 (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */
2355 (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */ 2360 (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */
2356 (iw_handler) atmel_set_essid, /* SIOCSIWESSID */ 2361 (iw_handler) atmel_set_essid, /* SIOCSIWESSID */
2357 (iw_handler) atmel_get_essid, /* SIOCGIWESSID */ 2362 (iw_handler) atmel_get_essid, /* SIOCGIWESSID */
2358 (iw_handler) NULL, /* SIOCSIWNICKN */ 2363 (iw_handler) NULL, /* SIOCSIWNICKN */
2359 (iw_handler) NULL, /* SIOCGIWNICKN */ 2364 (iw_handler) NULL, /* SIOCGIWNICKN */
2360 (iw_handler) NULL, /* -- hole -- */ 2365 (iw_handler) NULL, /* -- hole -- */
2361 (iw_handler) NULL, /* -- hole -- */ 2366 (iw_handler) NULL, /* -- hole -- */
2362 (iw_handler) atmel_set_rate, /* SIOCSIWRATE */ 2367 (iw_handler) atmel_set_rate, /* SIOCSIWRATE */
@@ -2365,8 +2370,8 @@ static const iw_handler atmel_handler[] =
2365 (iw_handler) atmel_get_rts, /* SIOCGIWRTS */ 2370 (iw_handler) atmel_get_rts, /* SIOCGIWRTS */
2366 (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */ 2371 (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */
2367 (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */ 2372 (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */
2368 (iw_handler) NULL, /* SIOCSIWTXPOW */ 2373 (iw_handler) NULL, /* SIOCSIWTXPOW */
2369 (iw_handler) NULL, /* SIOCGIWTXPOW */ 2374 (iw_handler) NULL, /* SIOCGIWTXPOW */
2370 (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */ 2375 (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */
2371 (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */ 2376 (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */
2372 (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */ 2377 (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */
@@ -2375,39 +2380,51 @@ static const iw_handler atmel_handler[] =
2375 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */ 2380 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */
2376}; 2381};
2377 2382
-
-static const iw_handler atmel_private_handler[] =
+static const iw_handler atmel_private_handler[] =
 {
 	NULL,				/* SIOCIWFIRSTPRIV */
 };
 
 typedef struct atmel_priv_ioctl {
 	char id[32];
 	unsigned char __user *data;
 	unsigned short len;
 } atmel_priv_ioctl;
 
-
-#define ATMELFWL	SIOCIWFIRSTPRIV
-#define ATMELIDIFC	ATMELFWL + 1
-#define ATMELRD		ATMELFWL + 2
-#define ATMELMAGIC 0x51807
+#define ATMELFWL	SIOCIWFIRSTPRIV
+#define ATMELIDIFC	ATMELFWL + 1
+#define ATMELRD		ATMELFWL + 2
+#define ATMELMAGIC 0x51807
 #define REGDOMAINSZ 20
 
 static const struct iw_priv_args atmel_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
-	{ ATMELFWL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (atmel_priv_ioctl), IW_PRIV_TYPE_NONE, "atmelfwl" },
-	{ ATMELIDIFC, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "atmelidifc" },
-	{ ATMELRD, IW_PRIV_TYPE_CHAR | REGDOMAINSZ, IW_PRIV_TYPE_NONE, "regdomain" },
+	{
+		.cmd = ATMELFWL,
+		.set_args = IW_PRIV_TYPE_BYTE
+				| IW_PRIV_SIZE_FIXED
+				| sizeof (atmel_priv_ioctl),
+		.get_args = IW_PRIV_TYPE_NONE,
+		.name = "atmelfwl"
+	}, {
+		.cmd = ATMELIDIFC,
+		.set_args = IW_PRIV_TYPE_NONE,
+		.get_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		.name = "atmelidifc"
+	}, {
+		.cmd = ATMELRD,
+		.set_args = IW_PRIV_TYPE_CHAR | REGDOMAINSZ,
+		.get_args = IW_PRIV_TYPE_NONE,
+		.name = "regdomain"
+	},
 };
2403 2420
2404static const struct iw_handler_def atmel_handler_def = 2421static const struct iw_handler_def atmel_handler_def =
2405{ 2422{
2406 .num_standard = sizeof(atmel_handler)/sizeof(iw_handler), 2423 .num_standard = sizeof(atmel_handler)/sizeof(iw_handler),
2407 .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler), 2424 .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler),
2408 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args), 2425 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
2409 .standard = (iw_handler *) atmel_handler, 2426 .standard = (iw_handler *) atmel_handler,
2410 .private = (iw_handler *) atmel_private_handler, 2427 .private = (iw_handler *) atmel_private_handler,
2411 .private_args = (struct iw_priv_args *) atmel_private_args, 2428 .private_args = (struct iw_priv_args *) atmel_private_args,
2412 .get_wireless_stats = atmel_get_wireless_stats 2429 .get_wireless_stats = atmel_get_wireless_stats
2413}; 2430};
@@ -2419,13 +2436,13 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2419 atmel_priv_ioctl com; 2436 atmel_priv_ioctl com;
2420 struct iwreq *wrq = (struct iwreq *) rq; 2437 struct iwreq *wrq = (struct iwreq *) rq;
2421 unsigned char *new_firmware; 2438 unsigned char *new_firmware;
-	char domain[REGDOMAINSZ+1];
+	char domain[REGDOMAINSZ + 1];
2423 2440
2424 switch (cmd) { 2441 switch (cmd) {
2425 case ATMELIDIFC: 2442 case ATMELIDIFC:
2426 wrq->u.param.value = ATMELMAGIC; 2443 wrq->u.param.value = ATMELMAGIC;
2427 break; 2444 break;
2428 2445
2429 case ATMELFWL: 2446 case ATMELFWL:
2430 if (copy_from_user(&com, rq->ifr_data, sizeof(com))) { 2447 if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
2431 rc = -EFAULT; 2448 rc = -EFAULT;
@@ -2449,7 +2466,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2449 } 2466 }
2450 2467
2451 kfree(priv->firmware); 2468 kfree(priv->firmware);
2452 2469
2453 priv->firmware = new_firmware; 2470 priv->firmware = new_firmware;
2454 priv->firmware_length = com.len; 2471 priv->firmware_length = com.len;
2455 strncpy(priv->firmware_id, com.id, 31); 2472 strncpy(priv->firmware_id, com.id, 31);
@@ -2461,7 +2478,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2461 rc = -EFAULT; 2478 rc = -EFAULT;
2462 break; 2479 break;
2463 } 2480 }
2464 2481
2465 if (!capable(CAP_NET_ADMIN)) { 2482 if (!capable(CAP_NET_ADMIN)) {
2466 rc = -EPERM; 2483 rc = -EPERM;
2467 break; 2484 break;
@@ -2484,15 +2501,15 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2484 rc = 0; 2501 rc = 0;
2485 } 2502 }
2486 } 2503 }
2487 2504
2488 if (rc == 0 && priv->station_state != STATION_STATE_DOWN) 2505 if (rc == 0 && priv->station_state != STATION_STATE_DOWN)
2489 rc = atmel_open(dev); 2506 rc = atmel_open(dev);
2490 break; 2507 break;
2491 2508
2492 default: 2509 default:
2493 rc = -EOPNOTSUPP; 2510 rc = -EOPNOTSUPP;
2494 } 2511 }
2495 2512
2496 return rc; 2513 return rc;
2497} 2514}
2498 2515
@@ -2503,17 +2520,17 @@ struct auth_body {
2503 u8 el_id; 2520 u8 el_id;
2504 u8 chall_text_len; 2521 u8 chall_text_len;
2505 u8 chall_text[253]; 2522 u8 chall_text[253];
2506}; 2523};
2507 2524
2508static void atmel_enter_state(struct atmel_private *priv, int new_state) 2525static void atmel_enter_state(struct atmel_private *priv, int new_state)
2509{ 2526{
2510 int old_state = priv->station_state; 2527 int old_state = priv->station_state;
2511 2528
2512 if (new_state == old_state) 2529 if (new_state == old_state)
2513 return; 2530 return;
2514 2531
2515 priv->station_state = new_state; 2532 priv->station_state = new_state;
2516 2533
2517 if (new_state == STATION_STATE_READY) { 2534 if (new_state == STATION_STATE_READY) {
2518 netif_start_queue(priv->dev); 2535 netif_start_queue(priv->dev);
2519 netif_carrier_on(priv->dev); 2536 netif_carrier_on(priv->dev);
@@ -2540,7 +2557,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2540 u8 options; 2557 u8 options;
2541 u8 SSID_size; 2558 u8 SSID_size;
2542 } cmd; 2559 } cmd;
2543 2560
2544 memset(cmd.BSSID, 0xff, 6); 2561 memset(cmd.BSSID, 0xff, 6);
2545 2562
2546 if (priv->fast_scan) { 2563 if (priv->fast_scan) {
@@ -2554,17 +2571,17 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2554 cmd.min_channel_time = cpu_to_le16(10); 2571 cmd.min_channel_time = cpu_to_le16(10);
2555 cmd.max_channel_time = cpu_to_le16(120); 2572 cmd.max_channel_time = cpu_to_le16(120);
2556 } 2573 }
2557 2574
2558 cmd.options = 0; 2575 cmd.options = 0;
2559 2576
2560 if (!specific_ssid) 2577 if (!specific_ssid)
2561 cmd.options |= SCAN_OPTIONS_SITE_SURVEY; 2578 cmd.options |= SCAN_OPTIONS_SITE_SURVEY;
2562 2579
2563 cmd.channel = (priv->channel & 0x7f); 2580 cmd.channel = (priv->channel & 0x7f);
2564 cmd.scan_type = SCAN_TYPE_ACTIVE; 2581 cmd.scan_type = SCAN_TYPE_ACTIVE;
2565 cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ? 2582 cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ?
2566 BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE); 2583 BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE);
2567 2584
2568 atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd)); 2585 atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd));
2569 2586
2570 /* This must come after all hardware access to avoid being messed up 2587 /* This must come after all hardware access to avoid being messed up
@@ -2591,16 +2608,15 @@ static void join(struct atmel_private *priv, int type)
2591 cmd.BSS_type = type; 2608 cmd.BSS_type = type;
2592 cmd.timeout = cpu_to_le16(2000); 2609 cmd.timeout = cpu_to_le16(2000);
2593 2610
2594 atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd)); 2611 atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd));
2595} 2612}
2596 2613
-
2598static void start(struct atmel_private *priv, int type) 2614static void start(struct atmel_private *priv, int type)
2599{ 2615{
2600 struct { 2616 struct {
2601 u8 BSSID[6]; 2617 u8 BSSID[6];
2602 u8 SSID[MAX_SSID_LENGTH]; 2618 u8 SSID[MAX_SSID_LENGTH];
2603 u8 BSS_type; 2619 u8 BSS_type;
2604 u8 channel; 2620 u8 channel;
2605 u8 SSID_size; 2621 u8 SSID_size;
2606 u8 reserved[3]; 2622 u8 reserved[3];
@@ -2612,13 +2628,14 @@ static void start(struct atmel_private *priv, int type)
2612 cmd.BSS_type = type; 2628 cmd.BSS_type = type;
2613 cmd.channel = (priv->channel & 0x7f); 2629 cmd.channel = (priv->channel & 0x7f);
2614 2630
2615 atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd)); 2631 atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd));
2616} 2632}
2617 2633
-static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 channel)
+static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
+				u8 channel)
2619{ 2636{
2620 int rejoin = 0; 2637 int rejoin = 0;
2621 int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 2638 int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
2622 SHORT_PREAMBLE : LONG_PREAMBLE; 2639 SHORT_PREAMBLE : LONG_PREAMBLE;
2623 2640
2624 if (priv->preamble != new) { 2641 if (priv->preamble != new) {
@@ -2626,48 +2643,48 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
2626 rejoin = 1; 2643 rejoin = 1;
2627 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new); 2644 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new);
2628 } 2645 }
2629 2646
2630 if (priv->channel != channel) { 2647 if (priv->channel != channel) {
2631 priv->channel = channel; 2648 priv->channel = channel;
2632 rejoin = 1; 2649 rejoin = 1;
2633 atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel); 2650 atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel);
2634 } 2651 }
2635 2652
2636 if (rejoin) { 2653 if (rejoin) {
2637 priv->station_is_associated = 0; 2654 priv->station_is_associated = 0;
2638 atmel_enter_state(priv, STATION_STATE_JOINNING); 2655 atmel_enter_state(priv, STATION_STATE_JOINNING);
2639 2656
2640 if (priv->operating_mode == IW_MODE_INFRA) 2657 if (priv->operating_mode == IW_MODE_INFRA)
2641 join(priv, BSS_TYPE_INFRASTRUCTURE); 2658 join(priv, BSS_TYPE_INFRASTRUCTURE);
2642 else 2659 else
2643 join(priv, BSS_TYPE_AD_HOC); 2660 join(priv, BSS_TYPE_AD_HOC);
2644 } 2661 }
2645} 2662}
2646 2663
-
-static void send_authentication_request(struct atmel_private *priv, u16 system, u8 *challenge, int challenge_len)
+static void send_authentication_request(struct atmel_private *priv, u16 system,
+					u8 *challenge, int challenge_len)
2649{ 2666{
2650 struct ieee80211_hdr_4addr header; 2667 struct ieee80211_hdr_4addr header;
2651 struct auth_body auth; 2668 struct auth_body auth;
2652 2669
2653 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 2670 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
2654 header.duration_id = cpu_to_le16(0x8000); 2671 header.duration_id = cpu_to_le16(0x8000);
2655 header.seq_ctl = 0; 2672 header.seq_ctl = 0;
2656 memcpy(header.addr1, priv->CurrentBSSID, 6); 2673 memcpy(header.addr1, priv->CurrentBSSID, 6);
2657 memcpy(header.addr2, priv->dev->dev_addr, 6); 2674 memcpy(header.addr2, priv->dev->dev_addr, 6);
2658 memcpy(header.addr3, priv->CurrentBSSID, 6); 2675 memcpy(header.addr3, priv->CurrentBSSID, 6);
2659 2676
2660 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1) 2677 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
2661 /* no WEP for authentication frames with TrSeqNo 1 */ 2678 /* no WEP for authentication frames with TrSeqNo 1 */
2662 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2679 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2663 2680
2664 auth.alg = cpu_to_le16(system); 2681 auth.alg = cpu_to_le16(system);
2665 2682
2666 auth.status = 0; 2683 auth.status = 0;
2667 auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum); 2684 auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum);
2668 priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1; 2685 priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1;
2669 priv->CurrentAuthentTransactionSeqNum += 2; 2686 priv->CurrentAuthentTransactionSeqNum += 2;
2670 2687
2671 if (challenge_len != 0) { 2688 if (challenge_len != 0) {
2672 auth.el_id = 16; /* challenge_text */ 2689 auth.el_id = 16; /* challenge_text */
2673 auth.chall_text_len = challenge_len; 2690 auth.chall_text_len = challenge_len;
@@ -2685,7 +2702,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2685 struct ieee80211_hdr_4addr header; 2702 struct ieee80211_hdr_4addr header;
2686 struct ass_req_format { 2703 struct ass_req_format {
2687 u16 capability; 2704 u16 capability;
2688 u16 listen_interval; 2705 u16 listen_interval;
2689 u8 ap[6]; /* nothing after here directly accessible */ 2706 u8 ap[6]; /* nothing after here directly accessible */
2690 u8 ssid_el_id; 2707 u8 ssid_el_id;
2691 u8 ssid_len; 2708 u8 ssid_len;
@@ -2694,15 +2711,15 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2694 u8 sup_rates_len; 2711 u8 sup_rates_len;
2695 u8 rates[4]; 2712 u8 rates[4];
2696 } body; 2713 } body;
2697 2714
2698 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2715 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2699 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ)); 2716 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
2700 header.duration_id = cpu_to_le16(0x8000); 2717 header.duration_id = cpu_to_le16(0x8000);
2701 header.seq_ctl = 0; 2718 header.seq_ctl = 0;
2702 2719
2703 memcpy(header.addr1, priv->CurrentBSSID, 6); 2720 memcpy(header.addr1, priv->CurrentBSSID, 6);
2704 memcpy(header.addr2, priv->dev->dev_addr, 6); 2721 memcpy(header.addr2, priv->dev->dev_addr, 6);
2705 memcpy(header.addr3, priv->CurrentBSSID, 6); 2722 memcpy(header.addr3, priv->CurrentBSSID, 6);
2706 2723
2707 body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS); 2724 body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS);
2708 if (priv->wep_is_on) 2725 if (priv->wep_is_on)
@@ -2711,18 +2728,18 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2711 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble); 2728 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble);
2712 2729
2713 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); 2730 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
2714 2731
2715 /* current AP address - only in reassoc frame */ 2732 /* current AP address - only in reassoc frame */
2716 if (is_reassoc) { 2733 if (is_reassoc) {
2717 memcpy(body.ap, priv->CurrentBSSID, 6); 2734 memcpy(body.ap, priv->CurrentBSSID, 6);
2718 ssid_el_p = (u8 *)&body.ssid_el_id; 2735 ssid_el_p = (u8 *)&body.ssid_el_id;
2719 bodysize = 18 + priv->SSID_size; 2736 bodysize = 18 + priv->SSID_size;
2720 } else { 2737 } else {
2721 ssid_el_p = (u8 *)&body.ap[0]; 2738 ssid_el_p = (u8 *)&body.ap[0];
2722 bodysize = 12 + priv->SSID_size; 2739 bodysize = 12 + priv->SSID_size;
2723 } 2740 }
2724 2741
-	ssid_el_p[0]= C80211_MGMT_ElementID_SSID;
+	ssid_el_p[0] = C80211_MGMT_ElementID_SSID;
2726 ssid_el_p[1] = priv->SSID_size; 2743 ssid_el_p[1] = priv->SSID_size;
2727 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); 2744 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
2728 ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates; 2745 ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates;
@@ -2732,7 +2749,8 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2732 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize); 2749 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
2733} 2750}
2734 2751
-static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header)
+static int is_frame_from_current_bss(struct atmel_private *priv,
+				     struct ieee80211_hdr_4addr *header)
2736{ 2754{
2737 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 2755 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
2738 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; 2756 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
@@ -2745,29 +2763,29 @@ static int retrieve_bss(struct atmel_private *priv)
2745 int i; 2763 int i;
2746 int max_rssi = -128; 2764 int max_rssi = -128;
2747 int max_index = -1; 2765 int max_index = -1;
2748 2766
2749 if (priv->BSS_list_entries == 0) 2767 if (priv->BSS_list_entries == 0)
2750 return -1; 2768 return -1;
2751 2769
2752 if (priv->connect_to_any_BSS) { 2770 if (priv->connect_to_any_BSS) {
-		/* Select a BSS with the max-RSSI but of the same type and of the same WEP mode
-		   and that it is not marked as 'bad' (i.e. we had previously failed to connect to
-		   this BSS with the settings that we currently use) */
+		/* Select a BSS with the max-RSSI but of the same type and of
+		   the same WEP mode and that it is not marked as 'bad' (i.e.
+		   we had previously failed to connect to this BSS with the
+		   settings that we currently use) */
 		priv->current_BSS = 0;
-		for(i=0; i<priv->BSS_list_entries; i++) {
+		for (i = 0; i < priv->BSS_list_entries; i++) {
2758 if (priv->operating_mode == priv->BSSinfo[i].BSStype && 2777 if (priv->operating_mode == priv->BSSinfo[i].BSStype &&
2759 ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) || 2778 ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) ||
2760 (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) && 2779 (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) &&
2761 !(priv->BSSinfo[i].channel & 0x80)) { 2780 !(priv->BSSinfo[i].channel & 0x80)) {
2762 max_rssi = priv->BSSinfo[i].RSSI; 2781 max_rssi = priv->BSSinfo[i].RSSI;
2763 priv->current_BSS = max_index = i; 2782 priv->current_BSS = max_index = i;
2764 } 2783 }
-
2766 } 2784 }
2767 return max_index; 2785 return max_index;
2768 } 2786 }
2769 2787
-	for(i=0; i<priv->BSS_list_entries; i++) {
+	for (i = 0; i < priv->BSS_list_entries; i++) {
2771 if (priv->SSID_size == priv->BSSinfo[i].SSIDsize && 2789 if (priv->SSID_size == priv->BSSinfo[i].SSIDsize &&
2772 memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 && 2790 memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 &&
2773 priv->operating_mode == priv->BSSinfo[i].BSStype && 2791 priv->operating_mode == priv->BSSinfo[i].BSStype &&
@@ -2781,19 +2799,19 @@ static int retrieve_bss(struct atmel_private *priv)
2781 return max_index; 2799 return max_index;
2782} 2800}
2783 2801
-
-static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
-			   u16 capability, u16 beacon_period, u8 channel, u8 rssi,
-			   u8 ssid_len, u8 *ssid, int is_beacon)
+static void store_bss_info(struct atmel_private *priv,
+			   struct ieee80211_hdr_4addr *header, u16 capability,
+			   u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
+			   u8 *ssid, int is_beacon)
2788{ 2806{
2789 u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3; 2807 u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3;
2790 int i, index; 2808 int i, index;
2791 2809
2792 for (index = -1, i = 0; i < priv->BSS_list_entries; i++) 2810 for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
2793 if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0) 2811 if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
2794 index = i; 2812 index = i;
2795 2813
2796 /* If we process a probe and an entry from this BSS exists 2814 /* If we process a probe and an entry from this BSS exists
2797 we will update the BSS entry with the info from this BSS. 2815 we will update the BSS entry with the info from this BSS.
2798 If we process a beacon we will only update RSSI */ 2816 If we process a beacon we will only update RSSI */
2799 2817
@@ -2820,8 +2838,8 @@ static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4add
2820 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; 2838 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
2821 else if (capability & C80211_MGMT_CAPABILITY_ESS) 2839 else if (capability & C80211_MGMT_CAPABILITY_ESS)
2822 priv->BSSinfo[index].BSStype =IW_MODE_INFRA; 2840 priv->BSSinfo[index].BSStype =IW_MODE_INFRA;
2823 2841
2824 priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 2842 priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
2825 SHORT_PREAMBLE : LONG_PREAMBLE; 2843 SHORT_PREAMBLE : LONG_PREAMBLE;
2826} 2844}
2827 2845
@@ -2831,8 +2849,8 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2831 u16 status = le16_to_cpu(auth->status); 2849 u16 status = le16_to_cpu(auth->status);
2832 u16 trans_seq_no = le16_to_cpu(auth->trans_seq); 2850 u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
2833 u16 system = le16_to_cpu(auth->alg); 2851 u16 system = le16_to_cpu(auth->alg);
2834 2852
2835 if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) { 2853 if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) {
2836 /* no WEP */ 2854 /* no WEP */
2837 if (priv->station_was_associated) { 2855 if (priv->station_was_associated) {
2838 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 2856 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
@@ -2842,20 +2860,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2842 atmel_enter_state(priv, STATION_STATE_ASSOCIATING); 2860 atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
2843 send_association_request(priv, 0); 2861 send_association_request(priv, 0);
2844 return; 2862 return;
2845 } 2863 }
2846 } 2864 }
2847 2865
2848 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { 2866 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) {
2849 /* WEP */ 2867 /* WEP */
2850 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) 2868 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
2851 return; 2869 return;
2852 2870
2853 if (trans_seq_no == 0x0002 && 2871 if (trans_seq_no == 0x0002 &&
2854 auth->el_id == C80211_MGMT_ElementID_ChallengeText) { 2872 auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
2855 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); 2873 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
2856 return; 2874 return;
2857 } 2875 }
2858 2876
2859 if (trans_seq_no == 0x0004) { 2877 if (trans_seq_no == 0x0004) {
2860 if(priv->station_was_associated) { 2878 if(priv->station_was_associated) {
2861 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 2879 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
@@ -2865,10 +2883,10 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2865 atmel_enter_state(priv, STATION_STATE_ASSOCIATING); 2883 atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
2866 send_association_request(priv, 0); 2884 send_association_request(priv, 0);
2867 return; 2885 return;
2868 } 2886 }
2869 } 2887 }
2870 } 2888 }
2871 2889
2872 if (status == C80211_MGMT_SC_AuthAlgNotSupported) { 2890 if (status == C80211_MGMT_SC_AuthAlgNotSupported) {
2873 /* Do opensystem first, then try sharedkey */ 2891 /* Do opensystem first, then try sharedkey */
2874 if (system == C80211_MGMT_AAN_OPENSYSTEM) { 2892 if (system == C80211_MGMT_AAN_OPENSYSTEM) {
@@ -2876,17 +2894,16 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2876 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 2894 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
2877 } else if (priv->connect_to_any_BSS) { 2895 } else if (priv->connect_to_any_BSS) {
2878 int bss_index; 2896 int bss_index;
2879 2897
2880 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 2898 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
2881 2899
2882 if ((bss_index = retrieve_bss(priv)) != -1) { 2900 if ((bss_index = retrieve_bss(priv)) != -1) {
2883 atmel_join_bss(priv, bss_index); 2901 atmel_join_bss(priv, bss_index);
2884 return; 2902 return;
2885 } 2903 }
2886 } 2904 }
2887 } 2905 }
2888 2906
-
2890 priv->AuthenticationRequestRetryCnt = 0; 2907 priv->AuthenticationRequestRetryCnt = 0;
2891 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 2908 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
2892 priv->station_is_associated = 0; 2909 priv->station_is_associated = 0;
@@ -2902,38 +2919,44 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2902 u8 length; 2919 u8 length;
2903 u8 rates[4]; 2920 u8 rates[4];
2904 } *ass_resp = (struct ass_resp_format *)priv->rx_buf; 2921 } *ass_resp = (struct ass_resp_format *)priv->rx_buf;
2905 2922
2906 u16 status = le16_to_cpu(ass_resp->status); 2923 u16 status = le16_to_cpu(ass_resp->status);
2907 u16 ass_id = le16_to_cpu(ass_resp->ass_id); 2924 u16 ass_id = le16_to_cpu(ass_resp->ass_id);
2908 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length; 2925 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
2909 2926
2910 if (frame_len < 8 + rates_len) 2927 if (frame_len < 8 + rates_len)
2911 return; 2928 return;
2912 2929
2913 if (status == C80211_MGMT_SC_Success) { 2930 if (status == C80211_MGMT_SC_Success) {
2914 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE) 2931 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE)
2915 priv->AssociationRequestRetryCnt = 0; 2932 priv->AssociationRequestRetryCnt = 0;
2916 else 2933 else
2917 priv->ReAssociationRequestRetryCnt = 0; 2934 priv->ReAssociationRequestRetryCnt = 0;
2918 2935
-		atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
-		atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
+		atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
+				MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
+		atmel_set_mib(priv, Phy_Mib_Type,
+			      PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
 		if (priv->power_mode == 0) {
 			priv->listen_interval = 1;
-			atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
-			atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
+			atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
+				       MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
+			atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
+					MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
 		} else {
 			priv->listen_interval = 2;
-			atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
-			atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
+			atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
+				       MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
+			atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
+					MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
2929 } 2952 }
2930 2953
2931 priv->station_is_associated = 1; 2954 priv->station_is_associated = 1;
2932 priv->station_was_associated = 1; 2955 priv->station_was_associated = 1;
2933 atmel_enter_state(priv, STATION_STATE_READY); 2956 atmel_enter_state(priv, STATION_STATE_READY);
2934 return; 2957 return;
2935 } 2958 }
2936 2959
2937 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE && 2960 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE &&
2938 status != C80211_MGMT_SC_AssDeniedBSSRate && 2961 status != C80211_MGMT_SC_AssDeniedBSSRate &&
2939 status != C80211_MGMT_SC_SupportCapabilities && 2962 status != C80211_MGMT_SC_SupportCapabilities &&
@@ -2943,7 +2966,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2943 send_association_request(priv, 0); 2966 send_association_request(priv, 0);
2944 return; 2967 return;
2945 } 2968 }
2946 2969
2947 if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE && 2970 if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE &&
2948 status != C80211_MGMT_SC_AssDeniedBSSRate && 2971 status != C80211_MGMT_SC_AssDeniedBSSRate &&
2949 status != C80211_MGMT_SC_SupportCapabilities && 2972 status != C80211_MGMT_SC_SupportCapabilities &&
@@ -2953,17 +2976,16 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2953 send_association_request(priv, 1); 2976 send_association_request(priv, 1);
2954 return; 2977 return;
2955 } 2978 }
2956 2979
2957 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 2980 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
2958 priv->station_is_associated = 0; 2981 priv->station_is_associated = 0;
2959 2982
-	if(priv->connect_to_any_BSS) {
+	if (priv->connect_to_any_BSS) {
2961 int bss_index; 2984 int bss_index;
2962 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 2985 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
2963 2986
2964 if ((bss_index = retrieve_bss(priv)) != -1) 2987 if ((bss_index = retrieve_bss(priv)) != -1)
2965 atmel_join_bss(priv, bss_index); 2988 atmel_join_bss(priv, bss_index);
-
2967 } 2989 }
2968} 2990}
2969 2991
@@ -2977,7 +2999,7 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
2977 /* The WPA stuff cares about the current AP address */ 2999 /* The WPA stuff cares about the current AP address */
2978 if (priv->use_wpa) 3000 if (priv->use_wpa)
2979 build_wpa_mib(priv); 3001 build_wpa_mib(priv);
2980 3002
2981 /* When switching to AdHoc turn OFF Power Save if needed */ 3003 /* When switching to AdHoc turn OFF Power Save if needed */
2982 3004
2983 if (bss->BSStype == IW_MODE_ADHOC && 3005 if (bss->BSStype == IW_MODE_ADHOC &&
@@ -2985,25 +3007,28 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
2985 priv->power_mode) { 3007 priv->power_mode) {
2986 priv->power_mode = 0; 3008 priv->power_mode = 0;
2987 priv->listen_interval = 1; 3009 priv->listen_interval = 1;
-		atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
-		atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
+		atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
+			       MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
+		atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
+				MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
2990 } 3014 }
2991 3015
2992 priv->operating_mode = bss->BSStype; 3016 priv->operating_mode = bss->BSStype;
2993 priv->channel = bss->channel & 0x7f; 3017 priv->channel = bss->channel & 0x7f;
2994 priv->beacon_period = bss->beacon_period; 3018 priv->beacon_period = bss->beacon_period;
2995 3019
2996 if (priv->preamble != bss->preamble) { 3020 if (priv->preamble != bss->preamble) {
2997 priv->preamble = bss->preamble; 3021 priv->preamble = bss->preamble;
-		atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
+		atmel_set_mib8(priv, Local_Mib_Type,
+			       LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
2999 } 3024 }
3000 3025
3001 if (!priv->wep_is_on && bss->UsingWEP) { 3026 if (!priv->wep_is_on && bss->UsingWEP) {
3002 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 3027 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3003 priv->station_is_associated = 0; 3028 priv->station_is_associated = 0;
3004 return; 3029 return;
3005 } 3030 }
3006 3031
3007 if (priv->wep_is_on && !bss->UsingWEP) { 3032 if (priv->wep_is_on && !bss->UsingWEP) {
3008 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 3033 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3009 priv->station_is_associated = 0; 3034 priv->station_is_associated = 0;
@@ -3011,30 +3036,28 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
3011 } 3036 }
3012 3037
3013 atmel_enter_state(priv, STATION_STATE_JOINNING); 3038 atmel_enter_state(priv, STATION_STATE_JOINNING);
3014 3039
3015 if (priv->operating_mode == IW_MODE_INFRA) 3040 if (priv->operating_mode == IW_MODE_INFRA)
3016 join(priv, BSS_TYPE_INFRASTRUCTURE); 3041 join(priv, BSS_TYPE_INFRASTRUCTURE);
3017 else 3042 else
3018 join(priv, BSS_TYPE_AD_HOC); 3043 join(priv, BSS_TYPE_AD_HOC);
3019} 3044}
3020 3045
-
3022static void restart_search(struct atmel_private *priv) 3046static void restart_search(struct atmel_private *priv)
3023{ 3047{
3024 int bss_index; 3048 int bss_index;
3025 3049
3026 if (!priv->connect_to_any_BSS) { 3050 if (!priv->connect_to_any_BSS) {
3027 atmel_scan(priv, 1); 3051 atmel_scan(priv, 1);
3028 } else { 3052 } else {
3029 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 3053 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
3030 3054
3031 if ((bss_index = retrieve_bss(priv)) != -1) 3055 if ((bss_index = retrieve_bss(priv)) != -1)
3032 atmel_join_bss(priv, bss_index); 3056 atmel_join_bss(priv, bss_index);
3033 else 3057 else
3034 atmel_scan(priv, 0); 3058 atmel_scan(priv, 0);
-
-	}
-}
+	}
+}
3038 3061
3039static void smooth_rssi(struct atmel_private *priv, u8 rssi) 3062static void smooth_rssi(struct atmel_private *priv, u8 rssi)
3040{ 3063{
@@ -3050,21 +3073,21 @@ static void smooth_rssi(struct atmel_private *priv, u8 rssi)
3050 } 3073 }
3051 3074
3052 rssi = rssi * 100 / max_rssi; 3075 rssi = rssi * 100 / max_rssi;
-	if((rssi + old) % 2)
-		priv->wstats.qual.level = ((rssi + old)/2) + 1;
+	if ((rssi + old) % 2)
+		priv->wstats.qual.level = (rssi + old) / 2 + 1;
 	else
-		priv->wstats.qual.level = ((rssi + old)/2);
+		priv->wstats.qual.level = (rssi + old) / 2;
3057 priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; 3080 priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
3058 priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID; 3081 priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID;
3059} 3082}
3060 3083
3061static void atmel_smooth_qual(struct atmel_private *priv) 3084static void atmel_smooth_qual(struct atmel_private *priv)
3062{ 3085{
-	unsigned long time_diff = (jiffies - priv->last_qual)/HZ;
+	unsigned long time_diff = (jiffies - priv->last_qual) / HZ;
 	while (time_diff--) {
 		priv->last_qual += HZ;
-		priv->wstats.qual.qual = priv->wstats.qual.qual/2;
+		priv->wstats.qual.qual = priv->wstats.qual.qual / 2;
3067 priv->wstats.qual.qual += 3090 priv->wstats.qual.qual +=
3068 priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000; 3091 priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000;
3069 priv->beacons_this_sec = 0; 3092 priv->beacons_this_sec = 0;
3070 } 3093 }
@@ -3073,15 +3096,17 @@ static void atmel_smooth_qual(struct atmel_private *priv)
3073} 3096}
3074 3097
3075/* deals with incoming managment frames. */ 3098/* deals with incoming managment frames. */
-static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
-				   u16 frame_len, u8 rssi)
+static void atmel_management_frame(struct atmel_private *priv,
+				   struct ieee80211_hdr_4addr *header,
+				   u16 frame_len, u8 rssi)
 {
 	u16 subtype;
 
-	switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE) {
-	case C80211_SUBTYPE_MGMT_BEACON :
+	subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE;
+	switch (subtype) {
+	case C80211_SUBTYPE_MGMT_BEACON:
3083 case C80211_SUBTYPE_MGMT_ProbeResponse: 3108 case C80211_SUBTYPE_MGMT_ProbeResponse:
3084 3109
3085 /* beacon frame has multiple variable-length fields - 3110 /* beacon frame has multiple variable-length fields -
3086 never let an engineer loose with a data structure design. */ 3111 never let an engineer loose with a data structure design. */
3087 { 3112 {
@@ -3099,7 +3124,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3099 u8 ds_length; 3124 u8 ds_length;
3100 /* ds here */ 3125 /* ds here */
3101 } *beacon = (struct beacon_format *)priv->rx_buf; 3126 } *beacon = (struct beacon_format *)priv->rx_buf;
3102 3127
3103 u8 channel, rates_length, ssid_length; 3128 u8 channel, rates_length, ssid_length;
3104 u64 timestamp = le64_to_cpu(beacon->timestamp); 3129 u64 timestamp = le64_to_cpu(beacon->timestamp);
3105 u16 beacon_interval = le16_to_cpu(beacon->interval); 3130 u16 beacon_interval = le16_to_cpu(beacon->interval);
@@ -3107,7 +3132,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3107 u8 *beaconp = priv->rx_buf; 3132 u8 *beaconp = priv->rx_buf;
3108 ssid_length = beacon->ssid_length; 3133 ssid_length = beacon->ssid_length;
3109 /* this blows chunks. */ 3134 /* this blows chunks. */
3110 if (frame_len < 14 || frame_len < ssid_length + 15) 3135 if (frame_len < 14 || frame_len < ssid_length + 15)
3111 return; 3136 return;
3112 rates_length = beaconp[beacon->ssid_length + 15]; 3137 rates_length = beaconp[beacon->ssid_length + 15];
3113 if (frame_len < ssid_length + rates_length + 18) 3138 if (frame_len < ssid_length + rates_length + 18)
@@ -3115,10 +3140,10 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3115 if (ssid_length > MAX_SSID_LENGTH) 3140 if (ssid_length > MAX_SSID_LENGTH)
3116 return; 3141 return;
3117 channel = beaconp[ssid_length + rates_length + 18]; 3142 channel = beaconp[ssid_length + rates_length + 18];
3118 3143
3119 if (priv->station_state == STATION_STATE_READY) { 3144 if (priv->station_state == STATION_STATE_READY) {
3120 smooth_rssi(priv, rssi); 3145 smooth_rssi(priv, rssi);
3121 if (is_frame_from_current_bss(priv, header)) { 3146 if (is_frame_from_current_bss(priv, header)) {
3122 priv->beacons_this_sec++; 3147 priv->beacons_this_sec++;
3123 atmel_smooth_qual(priv); 3148 atmel_smooth_qual(priv);
3124 if (priv->last_beacon_timestamp) { 3149 if (priv->last_beacon_timestamp) {
@@ -3132,41 +3157,43 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3132 handle_beacon_probe(priv, capability, channel); 3157 handle_beacon_probe(priv, capability, channel);
3133 } 3158 }
3134 } 3159 }
3135 3160
-		if (priv->station_state == STATION_STATE_SCANNING )
-			store_bss_info(priv, header, capability, beacon_interval, channel,
-				       rssi, ssid_length, &beacon->rates_el_id,
-				       subtype == C80211_SUBTYPE_MGMT_BEACON) ;
+		if (priv->station_state == STATION_STATE_SCANNING)
+			store_bss_info(priv, header, capability,
+				       beacon_interval, channel, rssi,
+				       ssid_length,
+				       &beacon->rates_el_id,
+				       subtype == C80211_SUBTYPE_MGMT_BEACON);
3140 } 3167 }
3141 break; 3168 break;
3142 3169
3143 case C80211_SUBTYPE_MGMT_Authentication: 3170 case C80211_SUBTYPE_MGMT_Authentication:
3144 3171
3145 if (priv->station_state == STATION_STATE_AUTHENTICATING) 3172 if (priv->station_state == STATION_STATE_AUTHENTICATING)
3146 authenticate(priv, frame_len); 3173 authenticate(priv, frame_len);
3147 3174
3148 break; 3175 break;
3149 3176
3150 case C80211_SUBTYPE_MGMT_ASS_RESPONSE: 3177 case C80211_SUBTYPE_MGMT_ASS_RESPONSE:
3151 case C80211_SUBTYPE_MGMT_REASS_RESPONSE: 3178 case C80211_SUBTYPE_MGMT_REASS_RESPONSE:
3152 3179
3153 if (priv->station_state == STATION_STATE_ASSOCIATING || 3180 if (priv->station_state == STATION_STATE_ASSOCIATING ||
3154 priv->station_state == STATION_STATE_REASSOCIATING) 3181 priv->station_state == STATION_STATE_REASSOCIATING)
3155 associate(priv, frame_len, subtype); 3182 associate(priv, frame_len, subtype);
3156 3183
3157 break; 3184 break;
3158 3185
3159 case C80211_SUBTYPE_MGMT_DISASSOSIATION: 3186 case C80211_SUBTYPE_MGMT_DISASSOSIATION:
3160 if (priv->station_is_associated && 3187 if (priv->station_is_associated &&
3161 priv->operating_mode == IW_MODE_INFRA && 3188 priv->operating_mode == IW_MODE_INFRA &&
3162 is_frame_from_current_bss(priv, header)) { 3189 is_frame_from_current_bss(priv, header)) {
3163 priv->station_was_associated = 0; 3190 priv->station_was_associated = 0;
3164 priv->station_is_associated = 0; 3191 priv->station_is_associated = 0;
3165 3192
3166 atmel_enter_state(priv, STATION_STATE_JOINNING); 3193 atmel_enter_state(priv, STATION_STATE_JOINNING);
3167 join(priv, BSS_TYPE_INFRASTRUCTURE); 3194 join(priv, BSS_TYPE_INFRASTRUCTURE);
3168 } 3195 }
3169 3196
3170 break; 3197 break;
3171 3198
3172 case C80211_SUBTYPE_MGMT_Deauthentication: 3199 case C80211_SUBTYPE_MGMT_Deauthentication:
@@ -3177,7 +3204,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3177 atmel_enter_state(priv, STATION_STATE_JOINNING); 3204 atmel_enter_state(priv, STATION_STATE_JOINNING);
3178 join(priv, BSS_TYPE_INFRASTRUCTURE); 3205 join(priv, BSS_TYPE_INFRASTRUCTURE);
3179 } 3206 }
3180 3207
3181 break; 3208 break;
3182 } 3209 }
3183} 3210}
@@ -3185,76 +3212,73 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
 /* run when timer expires */
 static void atmel_management_timer(u_long a)
 {
 	struct net_device *dev = (struct net_device *) a;
 	struct atmel_private *priv = netdev_priv(dev);
 	unsigned long flags;
-
-	/* Check if the card has been yanked. */
-	if (priv->card && priv->present_callback &&
-	    !(*priv->present_callback)(priv->card))
-		return;
-
-	spin_lock_irqsave(&priv->irqlock, flags);
-
-	switch (priv->station_state) {
-
-	case STATION_STATE_AUTHENTICATING:
-		if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
-			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
-			priv->station_is_associated = 0;
-			priv->AuthenticationRequestRetryCnt = 0;
-			restart_search(priv);
-		} else {
-			priv->AuthenticationRequestRetryCnt++;
-			priv->CurrentAuthentTransactionSeqNum = 0x0001;
-			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
-			send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0);
-		}
-
-		break;
-
-	case STATION_STATE_ASSOCIATING:
-		if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
-			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
-			priv->station_is_associated = 0;
-			priv->AssociationRequestRetryCnt = 0;
-			restart_search(priv);
-		} else {
-			priv->AssociationRequestRetryCnt++;
-			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
-			send_association_request(priv, 0);
-		}
-
-		break;
-
-	case STATION_STATE_REASSOCIATING:
-		if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
-			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
-			priv->station_is_associated = 0;
-			priv->ReAssociationRequestRetryCnt = 0;
-			restart_search(priv);
-		} else {
-			priv->ReAssociationRequestRetryCnt++;
-			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
-			send_association_request(priv, 1);
-		}
-
-		break;
-
-	default:
-		break;
-	}
-
-	spin_unlock_irqrestore(&priv->irqlock, flags);
+
+	/* Check if the card has been yanked. */
+	if (priv->card && priv->present_callback &&
+	    !(*priv->present_callback)(priv->card))
+		return;
+
+	spin_lock_irqsave(&priv->irqlock, flags);
+
+	switch (priv->station_state) {
+
+	case STATION_STATE_AUTHENTICATING:
+		if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
+			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+			priv->station_is_associated = 0;
+			priv->AuthenticationRequestRetryCnt = 0;
+			restart_search(priv);
+		} else {
+			priv->AuthenticationRequestRetryCnt++;
+			priv->CurrentAuthentTransactionSeqNum = 0x0001;
+			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+			send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0);
+		}
+		break;
+
+	case STATION_STATE_ASSOCIATING:
+		if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
+			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+			priv->station_is_associated = 0;
+			priv->AssociationRequestRetryCnt = 0;
+			restart_search(priv);
+		} else {
+			priv->AssociationRequestRetryCnt++;
+			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+			send_association_request(priv, 0);
+		}
+		break;
+
+	case STATION_STATE_REASSOCIATING:
+		if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
+			atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+			priv->station_is_associated = 0;
+			priv->ReAssociationRequestRetryCnt = 0;
+			restart_search(priv);
+		} else {
+			priv->ReAssociationRequestRetryCnt++;
+			mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+			send_association_request(priv, 1);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&priv->irqlock, flags);
 }
 
 static void atmel_command_irq(struct atmel_private *priv)
 {
 	u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
 	u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
 	int fast_scan;
 
 	if (status == CMD_STATUS_IDLE ||
 	    status == CMD_STATUS_IN_PROGRESS)
 		return;
 
@@ -3266,20 +3290,20 @@ static void atmel_command_irq(struct atmel_private *priv)
3266 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS, 3290 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
3267 (u8 *)priv->CurrentBSSID, 6); 3291 (u8 *)priv->CurrentBSSID, 6);
3268 atmel_enter_state(priv, STATION_STATE_READY); 3292 atmel_enter_state(priv, STATION_STATE_READY);
3269 } 3293 }
3270 break; 3294 break;
3271 3295
3272 case CMD_Scan: 3296 case CMD_Scan:
3273 fast_scan = priv->fast_scan; 3297 fast_scan = priv->fast_scan;
3274 priv->fast_scan = 0; 3298 priv->fast_scan = 0;
3275 3299
3276 if (status != CMD_STATUS_COMPLETE) { 3300 if (status != CMD_STATUS_COMPLETE) {
3277 atmel_scan(priv, 1); 3301 atmel_scan(priv, 1);
3278 } else { 3302 } else {
3279 int bss_index = retrieve_bss(priv); 3303 int bss_index = retrieve_bss(priv);
3280 if (bss_index != -1) { 3304 if (bss_index != -1) {
3281 atmel_join_bss(priv, bss_index); 3305 atmel_join_bss(priv, bss_index);
3282 } else if (priv->operating_mode == IW_MODE_ADHOC && 3306 } else if (priv->operating_mode == IW_MODE_ADHOC &&
3283 priv->SSID_size != 0) { 3307 priv->SSID_size != 0) {
3284 start(priv, BSS_TYPE_AD_HOC); 3308 start(priv, BSS_TYPE_AD_HOC);
3285 } else { 3309 } else {
@@ -3289,16 +3313,16 @@ static void atmel_command_irq(struct atmel_private *priv)
3289 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3313 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3290 } 3314 }
3291 break; 3315 break;
3292 3316
3293 case CMD_SiteSurvey: 3317 case CMD_SiteSurvey:
3294 priv->fast_scan = 0; 3318 priv->fast_scan = 0;
3295 3319
3296 if (status != CMD_STATUS_COMPLETE) 3320 if (status != CMD_STATUS_COMPLETE)
3297 return; 3321 return;
3298 3322
3299 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3323 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3300 if (priv->station_is_associated) { 3324 if (priv->station_is_associated) {
3301 atmel_enter_state(priv, STATION_STATE_READY); 3325 atmel_enter_state(priv, STATION_STATE_READY);
3302 } else { 3326 } else {
3303 atmel_scan(priv, 1); 3327 atmel_scan(priv, 1);
3304 } 3328 }
@@ -3312,16 +3336,15 @@ static void atmel_command_irq(struct atmel_private *priv)
3312 } else { 3336 } else {
3313 priv->AuthenticationRequestRetryCnt = 0; 3337 priv->AuthenticationRequestRetryCnt = 0;
3314 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); 3338 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
3315 3339
3316 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3340 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3317 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3341 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3318 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 3342 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
3319 } 3343 }
3320 return; 3344 return;
3321 } 3345 }
3322 3346
3323 atmel_scan(priv, 1); 3347 atmel_scan(priv, 1);
-
3325 } 3348 }
3326} 3349}
3327 3350
@@ -3333,20 +3356,20 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3333 3356
3334 if (priv->card_type == CARD_TYPE_SPI_FLASH) 3357 if (priv->card_type == CARD_TYPE_SPI_FLASH)
3335 atmel_set_gcr(priv->dev, GCR_REMAP); 3358 atmel_set_gcr(priv->dev, GCR_REMAP);
3336 3359
3337 /* wake up on-board processor */ 3360 /* wake up on-board processor */
3338 atmel_clear_gcr(priv->dev, 0x0040); 3361 atmel_clear_gcr(priv->dev, 0x0040);
3339 atmel_write16(priv->dev, BSR, BSS_SRAM); 3362 atmel_write16(priv->dev, BSR, BSS_SRAM);
3340 3363
3341 if (priv->card_type == CARD_TYPE_SPI_FLASH) 3364 if (priv->card_type == CARD_TYPE_SPI_FLASH)
3342 mdelay(100); 3365 mdelay(100);
3343 3366
3344 /* and wait for it */ 3367 /* and wait for it */
3345 for (i = LOOP_RETRY_LIMIT; i; i--) { 3368 for (i = LOOP_RETRY_LIMIT; i; i--) {
3346 mr1 = atmel_read16(priv->dev, MR1); 3369 mr1 = atmel_read16(priv->dev, MR1);
3347 mr3 = atmel_read16(priv->dev, MR3); 3370 mr3 = atmel_read16(priv->dev, MR3);
3348 3371
3349 if (mr3 & MAC_BOOT_COMPLETE) 3372 if (mr3 & MAC_BOOT_COMPLETE)
3350 break; 3373 break;
3351 if (mr1 & MAC_BOOT_COMPLETE && 3374 if (mr1 & MAC_BOOT_COMPLETE &&
3352 priv->bus_type == BUS_TYPE_PCCARD) 3375 priv->bus_type == BUS_TYPE_PCCARD)
@@ -3357,35 +3380,36 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3357 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); 3380 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
3358 return 0; 3381 return 0;
3359 } 3382 }
3360 3383
3361 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { 3384 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
3362 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); 3385 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
3363 return 0; 3386 return 0;
3364 } 3387 }
3365 3388
3366 /* now check for completion of MAC initialization through 3389 /* now check for completion of MAC initialization through
3367 the FunCtrl field of the IFACE, poll MR1 to detect completion of 3390 the FunCtrl field of the IFACE, poll MR1 to detect completion of
3368 MAC initialization, check completion status, set interrupt mask, 3391 MAC initialization, check completion status, set interrupt mask,
3369 enables interrupts and calls Tx and Rx initialization functions */ 3392 enables interrupts and calls Tx and Rx initialization functions */
3370 3393
3371 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE); 3394 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE);
3372 3395
3373 for (i = LOOP_RETRY_LIMIT; i; i--) { 3396 for (i = LOOP_RETRY_LIMIT; i; i--) {
3374 mr1 = atmel_read16(priv->dev, MR1); 3397 mr1 = atmel_read16(priv->dev, MR1);
3375 mr3 = atmel_read16(priv->dev, MR3); 3398 mr3 = atmel_read16(priv->dev, MR3);
3376 3399
3377 if (mr3 & MAC_INIT_COMPLETE) 3400 if (mr3 & MAC_INIT_COMPLETE)
3378 break; 3401 break;
3379 if (mr1 & MAC_INIT_COMPLETE && 3402 if (mr1 & MAC_INIT_COMPLETE &&
3380 priv->bus_type == BUS_TYPE_PCCARD) 3403 priv->bus_type == BUS_TYPE_PCCARD)
3381 break; 3404 break;
3382 } 3405 }
3383 3406
3384 if (i == 0) { 3407 if (i == 0) {
-		printk(KERN_ALERT "%s: MAC failed to initialise.\n", priv->dev->name);
+		printk(KERN_ALERT "%s: MAC failed to initialise.\n",
+		       priv->dev->name);
3386 return 0; 3410 return 0;
3387 } 3411 }
3388 3412
3389 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ 3413 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
3390 if ((mr3 & MAC_INIT_COMPLETE) && 3414 if ((mr3 & MAC_INIT_COMPLETE) &&
3391 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { 3415 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
@@ -3398,9 +3422,9 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3398 return 0; 3422 return 0;
3399 } 3423 }
3400 3424
3401 atmel_copy_to_host(priv->dev, (unsigned char *)iface, 3425 atmel_copy_to_host(priv->dev, (unsigned char *)iface,
3402 priv->host_info_base, sizeof(*iface)); 3426 priv->host_info_base, sizeof(*iface));
3403 3427
3404 iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos); 3428 iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos);
3405 iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size); 3429 iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size);
3406 iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos); 3430 iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos);
@@ -3424,16 +3448,16 @@ static int probe_atmel_card(struct net_device *dev)
3424{ 3448{
3425 int rc = 0; 3449 int rc = 0;
3426 struct atmel_private *priv = netdev_priv(dev); 3450 struct atmel_private *priv = netdev_priv(dev);
3427 3451
3428 /* reset pccard */ 3452 /* reset pccard */
3429 if (priv->bus_type == BUS_TYPE_PCCARD) 3453 if (priv->bus_type == BUS_TYPE_PCCARD)
3430 atmel_write16(dev, GCR, 0x0060); 3454 atmel_write16(dev, GCR, 0x0060);
3431 3455
3432 atmel_write16(dev, GCR, 0x0040); 3456 atmel_write16(dev, GCR, 0x0040);
3433 mdelay(500); 3457 mdelay(500);
3434 3458
3435 if (atmel_read16(dev, MR2) == 0) { 3459 if (atmel_read16(dev, MR2) == 0) {
3436 /* No stored firmware so load a small stub which just 3460 /* No stored firmware so load a small stub which just
3437 tells us the MAC address */ 3461 tells us the MAC address */
3438 int i; 3462 int i;
3439 priv->card_type = CARD_TYPE_EEPROM; 3463 priv->card_type = CARD_TYPE_EEPROM;
@@ -3442,7 +3466,7 @@ static int probe_atmel_card(struct net_device *dev)
3442 atmel_set_gcr(dev, GCR_REMAP); 3466 atmel_set_gcr(dev, GCR_REMAP);
3443 atmel_clear_gcr(priv->dev, 0x0040); 3467 atmel_clear_gcr(priv->dev, 0x0040);
3444 atmel_write16(dev, BSR, BSS_SRAM); 3468 atmel_write16(dev, BSR, BSS_SRAM);
3445 for (i = LOOP_RETRY_LIMIT; i; i--) 3469 for (i = LOOP_RETRY_LIMIT; i; i--)
3446 if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE) 3470 if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE)
3447 break; 3471 break;
3448 if (i == 0) { 3472 if (i == 0) {
@@ -3451,7 +3475,7 @@ static int probe_atmel_card(struct net_device *dev)
3451 atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6); 3475 atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6);
3452 /* got address, now squash it again until the network 3476 /* got address, now squash it again until the network
3453 interface is opened */ 3477 interface is opened */
3454 if (priv->bus_type == BUS_TYPE_PCCARD) 3478 if (priv->bus_type == BUS_TYPE_PCCARD)
3455 atmel_write16(dev, GCR, 0x0060); 3479 atmel_write16(dev, GCR, 0x0060);
3456 atmel_write16(dev, GCR, 0x0040); 3480 atmel_write16(dev, GCR, 0x0040);
3457 rc = 1; 3481 rc = 1;
@@ -3459,7 +3483,7 @@ static int probe_atmel_card(struct net_device *dev)
3459 } else if (atmel_read16(dev, MR4) == 0) { 3483 } else if (atmel_read16(dev, MR4) == 0) {
3460 /* Mac address easy in this case. */ 3484 /* Mac address easy in this case. */
3461 priv->card_type = CARD_TYPE_PARALLEL_FLASH; 3485 priv->card_type = CARD_TYPE_PARALLEL_FLASH;
3462 atmel_write16(dev, BSR, 1); 3486 atmel_write16(dev, BSR, 1);
3463 atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6); 3487 atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6);
3464 atmel_write16(dev, BSR, 0x200); 3488 atmel_write16(dev, BSR, 0x200);
3465 rc = 1; 3489 rc = 1;
@@ -3469,16 +3493,16 @@ static int probe_atmel_card(struct net_device *dev)
3469 priv->card_type = CARD_TYPE_SPI_FLASH; 3493 priv->card_type = CARD_TYPE_SPI_FLASH;
3470 if (atmel_wakeup_firmware(priv)) { 3494 if (atmel_wakeup_firmware(priv)) {
3471 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); 3495 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
3472 3496
3473 /* got address, now squash it again until the network 3497 /* got address, now squash it again until the network
3474 interface is opened */ 3498 interface is opened */
3475 if (priv->bus_type == BUS_TYPE_PCCARD) 3499 if (priv->bus_type == BUS_TYPE_PCCARD)
3476 atmel_write16(dev, GCR, 0x0060); 3500 atmel_write16(dev, GCR, 0x0060);
3477 atmel_write16(dev, GCR, 0x0040); 3501 atmel_write16(dev, GCR, 0x0040);
3478 rc = 1; 3502 rc = 1;
3479 } 3503 }
3480 } 3504 }
3481 3505
3482 if (rc) { 3506 if (rc) {
3483 if (dev->dev_addr[0] == 0xFF) { 3507 if (dev->dev_addr[0] == 0xFF) {
3484 u8 default_mac[] = {0x00,0x04, 0x25, 0x00, 0x00, 0x00}; 3508 u8 default_mac[] = {0x00,0x04, 0x25, 0x00, 0x00, 0x00};
@@ -3486,27 +3510,27 @@ static int probe_atmel_card(struct net_device *dev)
3486 memcpy(dev->dev_addr, default_mac, 6); 3510 memcpy(dev->dev_addr, default_mac, 6);
3487 } 3511 }
3488 } 3512 }
3489 3513
3490 return rc; 3514 return rc;
3491} 3515}
3492 3516
3493static void build_wep_mib(struct atmel_private *priv)
3494/* Move the encryption information into the MIB structure. 3517/* Move the encryption information into the MIB structure.
3495 This routine is for the pre-WPA firmware: later firmware has 3518 This routine is for the pre-WPA firmware: later firmware has
3496 a different format MIB and a different routine. */ 3519 a different format MIB and a different routine. */
3520static void build_wep_mib(struct atmel_private *priv)
3497{ 3521{
3498 struct { /* NB this is matched to the hardware, don't change. */ 3522 struct { /* NB this is matched to the hardware, don't change. */
3499 u8 wep_is_on; 3523 u8 wep_is_on;
3500 u8 default_key; /* 0..3 */ 3524 u8 default_key; /* 0..3 */
3501 u8 reserved; 3525 u8 reserved;
3502 u8 exclude_unencrypted; 3526 u8 exclude_unencrypted;
3503 3527
3504 u32 WEPICV_error_count; 3528 u32 WEPICV_error_count;
3505 u32 WEP_excluded_count; 3529 u32 WEP_excluded_count;
3506 3530
3507 u8 wep_keys[MAX_ENCRYPTION_KEYS][13]; 3531 u8 wep_keys[MAX_ENCRYPTION_KEYS][13];
3508 u8 encryption_level; /* 0, 1, 2 */ 3532 u8 encryption_level; /* 0, 1, 2 */
3509 u8 reserved2[3]; 3533 u8 reserved2[3];
3510 } mib; 3534 } mib;
3511 int i; 3535 int i;
3512 3536
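
Because the block defined above is consumed by the firmware byte for byte, a common hardening of the same pattern is to force the layout with a packed attribute and clear the whole block before filling in the live fields, so the reserved bytes never carry stack garbage. A minimal sketch of that idea (illustrative only, not something this patch changes):

        struct {
                u8  wep_is_on;
                u8  default_key;        /* 0..3 */
                u8  reserved;
                u8  exclude_unencrypted;
                u32 WEPICV_error_count;
                u32 WEP_excluded_count;
                u8  wep_keys[MAX_ENCRYPTION_KEYS][13];
                u8  encryption_level;   /* 0, 1, 2 */
                u8  reserved2[3];
        } __attribute__ ((packed)) mib;

        memset(&mib, 0, sizeof(mib));   /* reserved fields and counters start at zero */
        /* ...fill wep_is_on, default_key, the key array, etc. as build_wep_mib() does... */
        atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
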
@@ -3515,54 +3539,55 @@ static void build_wep_mib(struct atmel_private *priv)
3515 if (priv->wep_key_len[priv->default_key] > 5) 3539 if (priv->wep_key_len[priv->default_key] > 5)
3516 mib.encryption_level = 2; 3540 mib.encryption_level = 2;
3517 else 3541 else
3518 mib.encryption_level = 1; 3542 mib.encryption_level = 1;
3519 } else { 3543 } else {
3520 mib.encryption_level = 0; 3544 mib.encryption_level = 0;
3521 } 3545 }
3522 3546
3523 mib.default_key = priv->default_key; 3547 mib.default_key = priv->default_key;
3524 mib.exclude_unencrypted = priv->exclude_unencrypted; 3548 mib.exclude_unencrypted = priv->exclude_unencrypted;
3525 3549
3526 for(i = 0; i < MAX_ENCRYPTION_KEYS; i++) 3550 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++)
3527 memcpy(mib.wep_keys[i], priv->wep_keys[i], 13); 3551 memcpy(mib.wep_keys[i], priv->wep_keys[i], 13);
3528 3552
3529 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib)); 3553 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
3530} 3554}
3531 3555
3532static void build_wpa_mib(struct atmel_private *priv) 3556static void build_wpa_mib(struct atmel_private *priv)
3533{ 3557{
3534 /* This is for the later (WPA enabled) firmware. */ 3558 /* This is for the later (WPA enabled) firmware. */
3535 3559
3536 struct { /* NB this is matched to the hardware, don't change. */ 3560 struct { /* NB this is matched to the hardware, don't change. */
3537 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE]; 3561 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
3538 u8 receiver_address[6]; 3562 u8 receiver_address[6];
3539 u8 wep_is_on; 3563 u8 wep_is_on;
3540 u8 default_key; /* 0..3 */ 3564 u8 default_key; /* 0..3 */
3541 u8 group_key; 3565 u8 group_key;
3542 u8 exclude_unencrypted; 3566 u8 exclude_unencrypted;
3543 u8 encryption_type; 3567 u8 encryption_type;
3544 u8 reserved; 3568 u8 reserved;
3545 3569
3546 u32 WEPICV_error_count; 3570 u32 WEPICV_error_count;
3547 u32 WEP_excluded_count; 3571 u32 WEP_excluded_count;
3548 3572
3549 u8 key_RSC[4][8]; 3573 u8 key_RSC[4][8];
3550 } mib; 3574 } mib;
3551 3575
3552 int i; 3576 int i;
3553 3577
3554 mib.wep_is_on = priv->wep_is_on; 3578 mib.wep_is_on = priv->wep_is_on;
3555 mib.exclude_unencrypted = priv->exclude_unencrypted; 3579 mib.exclude_unencrypted = priv->exclude_unencrypted;
3556 memcpy(mib.receiver_address, priv->CurrentBSSID, 6); 3580 memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
3557 3581
3558 /* zero all the keys before adding in valid ones. */ 3582 /* zero all the keys before adding in valid ones. */
3559 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value)); 3583 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
3560 3584
3561 if (priv->wep_is_on) { 3585 if (priv->wep_is_on) {
3562 /* There's a comment in the Atmel code to the effect that this is only valid 3586 /* There's a comment in the Atmel code to the effect that this
3563 when still using WEP; it may need to be set to something to use WPA */ 3587 is only valid when still using WEP; it may need to be set to
3588 something to use WPA */
3564 memset(mib.key_RSC, 0, sizeof(mib.key_RSC)); 3589 memset(mib.key_RSC, 0, sizeof(mib.key_RSC));
3565 3590
3566 mib.default_key = mib.group_key = 255; 3591 mib.default_key = mib.group_key = 255;
3567 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) { 3592 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) {
3568 if (priv->wep_key_len[i] > 0) { 3593 if (priv->wep_key_len[i] > 0) {
@@ -3570,12 +3595,12 @@ static void build_wpa_mib(struct atmel_private *priv)
3570 if (i == priv->default_key) { 3595 if (i == priv->default_key) {
3571 mib.default_key = i; 3596 mib.default_key = i;
3572 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7; 3597 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7;
3573 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite; 3598 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite;
3574 } else { 3599 } else {
3575 mib.group_key = i; 3600 mib.group_key = i;
3576 priv->group_cipher_suite = priv->pairwise_cipher_suite; 3601 priv->group_cipher_suite = priv->pairwise_cipher_suite;
3577 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1; 3602 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1;
3578 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite; 3603 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite;
3579 } 3604 }
3580 } 3605 }
3581 } 3606 }
@@ -3583,47 +3608,47 @@ static void build_wpa_mib(struct atmel_private *priv)
3583 mib.default_key = mib.group_key != 255 ? mib.group_key : 0; 3608 mib.default_key = mib.group_key != 255 ? mib.group_key : 0;
3584 if (mib.group_key == 255) 3609 if (mib.group_key == 255)
3585 mib.group_key = mib.default_key; 3610 mib.group_key = mib.default_key;
3586 3611
3587 } 3612 }
3588 3613
3589 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib)); 3614 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
3590} 3615}
3591 3616
3592static int reset_atmel_card(struct net_device *dev) 3617static int reset_atmel_card(struct net_device *dev)
3593{ 3618{
3594 /* do everything necessary to wake up the hardware, including 3619 /* do everything necessary to wake up the hardware, including
3595 waiting for the lightning strike and throwing the knife switch.... 3620 waiting for the lightning strike and throwing the knife switch....
3596 3621
3597 set all the Mib values which matter in the card to match 3622 set all the Mib values which matter in the card to match
3598 their settings in the atmel_private structure. Some of these 3623 their settings in the atmel_private structure. Some of these
3599 can be altered on the fly, but many (WEP, infrastructure or ad-hoc) 3624 can be altered on the fly, but many (WEP, infrastructure or ad-hoc)
3600 can only be changed by tearing down the world and coming back through 3625 can only be changed by tearing down the world and coming back through
3601 here. 3626 here.
3602 3627
3603 This routine is also responsible for initialising some 3628 This routine is also responsible for initialising some
3604 hardware-specific fields in the atmel_private structure, 3629 hardware-specific fields in the atmel_private structure,
3605 including a copy of the firmware's hostinfo structure 3630 including a copy of the firmware's hostinfo structure
3606 which is the route into the rest of the firmware datastructures. */ 3631 which is the route into the rest of the firmware datastructures. */
3607 3632
3608 struct atmel_private *priv = netdev_priv(dev); 3633 struct atmel_private *priv = netdev_priv(dev);
3609 u8 configuration; 3634 u8 configuration;
3610 3635
3611 /* data to add to the firmware names, in priority order 3636 /* data to add to the firmware names, in priority order
3612 this implements firmware versioning */ 3637 this implements firmware versioning */
3613 3638
3614 static char *firmware_modifier[] = { 3639 static char *firmware_modifier[] = {
3615 "-wpa", 3640 "-wpa",
3616 "", 3641 "",
3617 NULL 3642 NULL
3618 }; 3643 };
3619 3644
3620 /* reset pccard */ 3645 /* reset pccard */
3621 if (priv->bus_type == BUS_TYPE_PCCARD) 3646 if (priv->bus_type == BUS_TYPE_PCCARD)
3622 atmel_write16(priv->dev, GCR, 0x0060); 3647 atmel_write16(priv->dev, GCR, 0x0060);
3623 3648
3624 /* stop card, disable interrupts */ 3649 /* stop card, disable interrupts */
3625 atmel_write16(priv->dev, GCR, 0x0040); 3650 atmel_write16(priv->dev, GCR, 0x0040);
3626 3651
3627 if (priv->card_type == CARD_TYPE_EEPROM) { 3652 if (priv->card_type == CARD_TYPE_EEPROM) {
3628 /* copy in firmware if needed */ 3653 /* copy in firmware if needed */
3629 const struct firmware *fw_entry = NULL; 3654 const struct firmware *fw_entry = NULL;
@@ -3636,13 +3661,13 @@ static int reset_atmel_card(struct net_device *dev)
3636 "%s: card type is unknown: assuming at76c502 firmware is OK.\n", 3661 "%s: card type is unknown: assuming at76c502 firmware is OK.\n",
3637 dev->name); 3662 dev->name);
3638 printk(KERN_INFO 3663 printk(KERN_INFO
3639 "%s: if not, use the firmware= module parameter.\n", 3664 "%s: if not, use the firmware= module parameter.\n",
3640 dev->name); 3665 dev->name);
3641 strcpy(priv->firmware_id, "atmel_at76c502.bin"); 3666 strcpy(priv->firmware_id, "atmel_at76c502.bin");
3642 } 3667 }
3643 if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) { 3668 if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) {
3644 printk(KERN_ALERT 3669 printk(KERN_ALERT
3645 "%s: firmware %s is missing, cannot continue.\n", 3670 "%s: firmware %s is missing, cannot continue.\n",
3646 dev->name, priv->firmware_id); 3671 dev->name, priv->firmware_id);
3647 return 0; 3672 return 0;
3648 } 3673 }
@@ -3654,7 +3679,7 @@ static int reset_atmel_card(struct net_device *dev)
3654 while (fw_table[fw_index].fw_type != priv->firmware_type 3679 while (fw_table[fw_index].fw_type != priv->firmware_type
3655 && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) 3680 && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE)
3656 fw_index++; 3681 fw_index++;
3657 3682
3658 /* construct the actual firmware file name */ 3683 /* construct the actual firmware file name */
3659 if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) { 3684 if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) {
3660 int i; 3685 int i;
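
The firmware_modifier table declared earlier in reset_atmel_card() drives a try-most-specific-first lookup: each suffix is appended to the base file name and handed to request_firmware() until one load succeeds. The loop itself is elided from these hunks; a rough sketch of its shape follows (the fw_file / fw_file_ext member names are guesses, purely illustrative):

        int i, success = 0;

        for (i = 0; firmware_modifier[i] && !success; i++) {
                snprintf(priv->firmware_id, sizeof(priv->firmware_id), "%s%s.%s",
                         fw_table[fw_index].fw_file,       /* hypothetical member name */
                         firmware_modifier[i],             /* "-wpa" first, then "" */
                         fw_table[fw_index].fw_file_ext);  /* hypothetical member name */
                if (request_firmware(&fw_entry, priv->firmware_id,
                                     priv->sys_dev) == 0)
                        success = 1;
        }
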
@@ -3669,24 +3694,24 @@ static int reset_atmel_card(struct net_device *dev)
3669 } 3694 }
3670 } 3695 }
3671 if (!success) { 3696 if (!success) {
3672 printk(KERN_ALERT 3697 printk(KERN_ALERT
3673 "%s: firmware %s is missing, cannot start.\n", 3698 "%s: firmware %s is missing, cannot start.\n",
3674 dev->name, priv->firmware_id); 3699 dev->name, priv->firmware_id);
3675 priv->firmware_id[0] = '\0'; 3700 priv->firmware_id[0] = '\0';
3676 return 0; 3701 return 0;
3677 } 3702 }
3678 } 3703 }
3679 3704
3680 fw = fw_entry->data; 3705 fw = fw_entry->data;
3681 len = fw_entry->size; 3706 len = fw_entry->size;
3682 } 3707 }
3683 3708
3684 if (len <= 0x6000) { 3709 if (len <= 0x6000) {
3685 atmel_write16(priv->dev, BSR, BSS_IRAM); 3710 atmel_write16(priv->dev, BSR, BSS_IRAM);
3686 atmel_copy_to_card(priv->dev, 0, fw, len); 3711 atmel_copy_to_card(priv->dev, 0, fw, len);
3687 atmel_set_gcr(priv->dev, GCR_REMAP); 3712 atmel_set_gcr(priv->dev, GCR_REMAP);
3688 } else { 3713 } else {
3689 /* Remap */ 3714 /* Remap */
3690 atmel_set_gcr(priv->dev, GCR_REMAP); 3715 atmel_set_gcr(priv->dev, GCR_REMAP);
3691 atmel_write16(priv->dev, BSR, BSS_IRAM); 3716 atmel_write16(priv->dev, BSR, BSS_IRAM);
3692 atmel_copy_to_card(priv->dev, 0, fw, 0x6000); 3717 atmel_copy_to_card(priv->dev, 0, fw, 0x6000);
@@ -3708,45 +3733,45 @@ static int reset_atmel_card(struct net_device *dev)
3708 the 3com broken-ness filter. */ 3733 the 3com broken-ness filter. */
3709 priv->use_wpa = (priv->host_info.major_version == 4); 3734 priv->use_wpa = (priv->host_info.major_version == 4);
3710 priv->radio_on_broken = (priv->host_info.major_version == 5); 3735 priv->radio_on_broken = (priv->host_info.major_version == 5);
3711 3736
3712 /* unmask all irq sources */ 3737 /* unmask all irq sources */
3713 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff); 3738 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff);
3714 3739
3715 /* init Tx system and enable Tx */ 3740 /* init Tx system and enable Tx */
3716 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0); 3741 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0);
3717 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L); 3742 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L);
3718 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0); 3743 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0);
3719 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0); 3744 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0);
3720 3745
3721 priv->tx_desc_free = priv->host_info.tx_desc_count; 3746 priv->tx_desc_free = priv->host_info.tx_desc_count;
3722 priv->tx_desc_head = 0; 3747 priv->tx_desc_head = 0;
3723 priv->tx_desc_tail = 0; 3748 priv->tx_desc_tail = 0;
3724 priv->tx_desc_previous = 0; 3749 priv->tx_desc_previous = 0;
3725 priv->tx_free_mem = priv->host_info.tx_buff_size; 3750 priv->tx_free_mem = priv->host_info.tx_buff_size;
3726 priv->tx_buff_head = 0; 3751 priv->tx_buff_head = 0;
3727 priv->tx_buff_tail = 0; 3752 priv->tx_buff_tail = 0;
3728 3753
3729 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET)); 3754 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
3730 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), 3755 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
3731 configuration | FUNC_CTRL_TxENABLE); 3756 configuration | FUNC_CTRL_TxENABLE);
3732 3757
3733 /* init Rx system and enable */ 3758 /* init Rx system and enable */
3734 priv->rx_desc_head = 0; 3759 priv->rx_desc_head = 0;
3735 3760
3736 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET)); 3761 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
3737 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), 3762 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
3738 configuration | FUNC_CTRL_RxENABLE); 3763 configuration | FUNC_CTRL_RxENABLE);
3739 3764
3740 if (!priv->radio_on_broken) { 3765 if (!priv->radio_on_broken) {
3741 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == 3766 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
3742 CMD_STATUS_REJECTED_RADIO_OFF) { 3767 CMD_STATUS_REJECTED_RADIO_OFF) {
3743 printk(KERN_INFO 3768 printk(KERN_INFO
3744 "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n", 3769 "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
3745 dev->name); 3770 dev->name);
3746 return 0; 3771 return 0;
3747 } 3772 }
3748 } 3773 }
3749 3774
3750 /* set up enough MIB values to run. */ 3775 /* set up enough MIB values to run. */
3751 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate); 3776 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate);
3752 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF); 3777 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF);
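
The Tx and Rx enables above use the same read-modify-write of IFACE_FUNC_CTRL_OFFSET; if that pattern grew more users it could be factored into a small helper along these lines (a sketch only, not something this patch adds):

        /* hypothetical helper: set bits in the shared function-control byte */
        static void atmel_func_ctrl_set(struct atmel_private *priv, u8 bits)
        {
                u8 cfg = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));

                atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), cfg | bits);
        }

        /* usage equivalent to the two enables above */
        atmel_func_ctrl_set(priv, FUNC_CTRL_TxENABLE);
        atmel_func_ctrl_set(priv, FUNC_CTRL_RxENABLE);
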
@@ -3755,7 +3780,7 @@ static int reset_atmel_card(struct net_device *dev)
3755 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry); 3780 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry);
3756 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry); 3781 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry);
3757 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble); 3782 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble);
3758 atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS, 3783 atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS,
3759 priv->dev->dev_addr, 6); 3784 priv->dev->dev_addr, 6);
3760 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE); 3785 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
3761 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1); 3786 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
@@ -3766,42 +3791,44 @@ static int reset_atmel_card(struct net_device *dev)
3766 build_wpa_mib(priv); 3791 build_wpa_mib(priv);
3767 else 3792 else
3768 build_wep_mib(priv); 3793 build_wep_mib(priv);
3769 3794
3770 return 1; 3795 return 1;
3771} 3796}
3772 3797
3773static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size) 3798static void atmel_send_command(struct atmel_private *priv, int command,
3799 void *cmd, int cmd_size)
3774{ 3800{
3775 if (cmd) 3801 if (cmd)
3776 atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET), 3802 atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET),
3777 cmd, cmd_size); 3803 cmd, cmd_size);
3778 3804
3779 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command); 3805 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command);
3780 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0); 3806 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0);
3781} 3807}
3782 3808
3783static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size) 3809static int atmel_send_command_wait(struct atmel_private *priv, int command,
3810 void *cmd, int cmd_size)
3784{ 3811{
3785 int i, status; 3812 int i, status;
3786 3813
3787 atmel_send_command(priv, command, cmd, cmd_size); 3814 atmel_send_command(priv, command, cmd, cmd_size);
3788 3815
3789 for (i = 5000; i; i--) { 3816 for (i = 5000; i; i--) {
3790 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET)); 3817 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
3791 if (status != CMD_STATUS_IDLE && 3818 if (status != CMD_STATUS_IDLE &&
3792 status != CMD_STATUS_IN_PROGRESS) 3819 status != CMD_STATUS_IN_PROGRESS)
3793 break; 3820 break;
3794 udelay(20); 3821 udelay(20);
3795 } 3822 }
3796 3823
3797 if (i == 0) { 3824 if (i == 0) {
3798 printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name); 3825 printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
3799 status = CMD_STATUS_HOST_ERROR; 3826 status = CMD_STATUS_HOST_ERROR;
3800 } else { 3827 } else {
3801 if (command != CMD_EnableRadio) 3828 if (command != CMD_EnableRadio)
3802 status = CMD_STATUS_COMPLETE; 3829 status = CMD_STATUS_COMPLETE;
3803 } 3830 }
3804 3831
3805 return status; 3832 return status;
3806} 3833}
3807 3834
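
atmel_send_command_wait() polls the command-block status every 20 microseconds for at most 5000 iterations, so a silent MAC is given roughly 100 ms before the helper gives up with CMD_STATUS_HOST_ERROR. Callers only inspect the returned status; a sketch modelled on the CMD_EnableRadio call in reset_atmel_card():

        switch (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0)) {
        case CMD_STATUS_COMPLETE:                       /* command accepted */
                break;
        case CMD_STATUS_REJECTED_RADIO_OFF:             /* firmware said no */
                printk(KERN_INFO "%s: radio is disabled.\n", priv->dev->name);
                break;
        case CMD_STATUS_HOST_ERROR:                     /* ~100 ms with no reply */
        default:
                printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
                break;
        }
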
@@ -3827,7 +3854,8 @@ static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 dat
3827 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1); 3854 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
3828} 3855}
3829 3856
3830static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data) 3857static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
3858 u16 data)
3831{ 3859{
3832 struct get_set_mib m; 3860 struct get_set_mib m;
3833 m.type = type; 3861 m.type = type;
@@ -3839,7 +3867,8 @@ static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 d
3839 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2); 3867 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2);
3840} 3868}
3841 3869
3842static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len) 3870static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
3871 u8 *data, int data_len)
3843{ 3872{
3844 struct get_set_mib m; 3873 struct get_set_mib m;
3845 m.type = type; 3874 m.type = type;
@@ -3848,23 +3877,24 @@ static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *dat
3848 3877
3849 if (data_len > MIB_MAX_DATA_BYTES) 3878 if (data_len > MIB_MAX_DATA_BYTES)
3850 printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name); 3879 printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
3851 3880
3852 memcpy(m.data, data, data_len); 3881 memcpy(m.data, data, data_len);
3853 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len); 3882 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
3854} 3883}
3855 3884
3856static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len) 3885static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
3886 u8 *data, int data_len)
3857{ 3887{
3858 struct get_set_mib m; 3888 struct get_set_mib m;
3859 m.type = type; 3889 m.type = type;
3860 m.size = data_len; 3890 m.size = data_len;
3861 m.index = index; 3891 m.index = index;
3862 3892
3863 if (data_len > MIB_MAX_DATA_BYTES) 3893 if (data_len > MIB_MAX_DATA_BYTES)
3864 printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name); 3894 printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
3865 3895
3866 atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len); 3896 atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
3867 atmel_copy_to_host(priv->dev, data, 3897 atmel_copy_to_host(priv->dev, data,
3868 atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len); 3898 atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len);
3869} 3899}
3870 3900
@@ -3873,11 +3903,12 @@ static void atmel_writeAR(struct net_device *dev, u16 data)
3873 int i; 3903 int i;
3874 outw(data, dev->base_addr + AR); 3904 outw(data, dev->base_addr + AR);
3875 /* Address register appears to need some convincing..... */ 3905 /* Address register appears to need some convincing..... */
3876 for (i = 0; data != inw(dev->base_addr + AR) && i<10; i++) 3906 for (i = 0; data != inw(dev->base_addr + AR) && i < 10; i++)
3877 outw(data, dev->base_addr + AR); 3907 outw(data, dev->base_addr + AR);
3878} 3908}
3879 3909
3880static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len) 3910static void atmel_copy_to_card(struct net_device *dev, u16 dest,
3911 unsigned char *src, u16 len)
3881{ 3912{
3882 int i; 3913 int i;
3883 atmel_writeAR(dev, dest); 3914 atmel_writeAR(dev, dest);
@@ -3894,7 +3925,8 @@ static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *
3894 atmel_write8(dev, DR, *src); 3925 atmel_write8(dev, DR, *src);
3895} 3926}
3896 3927
3897static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len) 3928static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
3929 u16 src, u16 len)
3898{ 3930{
3899 int i; 3931 int i;
3900 atmel_writeAR(dev, src); 3932 atmel_writeAR(dev, src);
@@ -3930,22 +3962,24 @@ static int atmel_lock_mac(struct atmel_private *priv)
3930 break; 3962 break;
3931 udelay(20); 3963 udelay(20);
3932 } 3964 }
3933 3965
3934 if (!i) return 0; /* timed out */ 3966 if (!i)
3935 3967 return 0; /* timed out */
3968
3936 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1); 3969 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1);
3937 if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) { 3970 if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) {
3938 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0); 3971 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
3939 if (!j--) return 0; /* timed out */ 3972 if (!j--)
3973 return 0; /* timed out */
3940 goto retry; 3974 goto retry;
3941 } 3975 }
3942 3976
3943 return 1; 3977 return 1;
3944} 3978}
3945 3979
3946static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data) 3980static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
3947{ 3981{
3948 atmel_writeAR(priv->dev, pos); 3982 atmel_writeAR(priv->dev, pos);
3949 atmel_write16(priv->dev, DR, data); /* card is little-endian */ 3983 atmel_write16(priv->dev, DR, data); /* card is little-endian */
3950 atmel_write16(priv->dev, DR, data >> 16); 3984 atmel_write16(priv->dev, DR, data >> 16);
3951} 3985}
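
atmel_wmem32() leans on the card's auto-incrementing data port (the copy helpers above make the same assumption): one atmel_writeAR() positions the pointer, then two 16-bit writes deliver the low and high halves in little-endian order. Spelled out on the 0x80000000 marker value that reset_atmel_card() writes into TX_DESC_NEXT (a worked illustration, not extra driver code):

        atmel_writeAR(priv->dev, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0));
        atmel_write16(priv->dev, DR, 0x80000000L & 0xffff);    /* 0x0000 - low half first */
        atmel_write16(priv->dev, DR, 0x80000000L >> 16);       /* 0x8000 - high half */
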
@@ -4017,9 +4051,9 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
4017 serial output, since SO is normally high. But it 4051 serial output, since SO is normally high. But it
4018 does cause 8 clock cycles and thus 8 bits to be 4052 does cause 8 clock cycles and thus 8 bits to be
4019 clocked in to the chip. See Atmel's SPI 4053 clocked in to the chip. See Atmel's SPI
4020 controller (e.g. AT91M55800) timing and 4K 4054 controller (e.g. AT91M55800) timing and 4K
4021 SPI EEPROM manuals */ 4055 SPI EEPROM manuals */
4022 4056
4023 .set NVRAM_SCRATCH, 0x02000100 /* arbitrary area for scratchpad memory */ 4057 .set NVRAM_SCRATCH, 0x02000100 /* arbitrary area for scratchpad memory */
4024 .set NVRAM_IMAGE, 0x02000200 4058 .set NVRAM_IMAGE, 0x02000200
4025 .set NVRAM_LENGTH, 0x0200 4059 .set NVRAM_LENGTH, 0x0200
@@ -4032,24 +4066,24 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
4032 .set MR4, 0xC 4066 .set MR4, 0xC
4033RESET_VECTOR: 4067RESET_VECTOR:
4034 b RESET_HANDLER 4068 b RESET_HANDLER
4035UNDEF_VECTOR: 4069UNDEF_VECTOR:
4036 b HALT1 4070 b HALT1
4037SWI_VECTOR: 4071SWI_VECTOR:
4038 b HALT1 4072 b HALT1
4039IABORT_VECTOR: 4073IABORT_VECTOR:
4040 b HALT1 4074 b HALT1
4041DABORT_VECTOR: 4075DABORT_VECTOR:
4042RESERVED_VECTOR: 4076RESERVED_VECTOR:
4043 b HALT1 4077 b HALT1
4044IRQ_VECTOR: 4078IRQ_VECTOR:
4045 b HALT1 4079 b HALT1
4046FIQ_VECTOR: 4080FIQ_VECTOR:
4047 b HALT1 4081 b HALT1
4048HALT1: b HALT1 4082HALT1: b HALT1
4049RESET_HANDLER: 4083RESET_HANDLER:
4050 mov r0, #CPSR_INITIAL 4084 mov r0, #CPSR_INITIAL
4051 msr CPSR_c, r0 /* This is probably unnecessary */ 4085 msr CPSR_c, r0 /* This is probably unnecessary */
4052 4086
4053/* I'm guessing this is initializing clock generator electronics for SPI */ 4087/* I'm guessing this is initializing clock generator electronics for SPI */
4054 ldr r0, =SPI_CGEN_BASE 4088 ldr r0, =SPI_CGEN_BASE
4055 mov r1, #0 4089 mov r1, #0
@@ -4061,7 +4095,7 @@ RESET_HANDLER:
4061 str r1, [r0, #28] 4095 str r1, [r0, #28]
4062 mov r1, #1 4096 mov r1, #1
4063 str r1, [r0, #8] 4097 str r1, [r0, #8]
4064 4098
4065 ldr r0, =MRBASE 4099 ldr r0, =MRBASE
4066 mov r1, #0 4100 mov r1, #0
4067 strh r1, [r0, #MR1] 4101 strh r1, [r0, #MR1]
@@ -4094,7 +4128,7 @@ GET_WHOLE_NVRAM:
4094 ldmia sp!, {lr} 4128 ldmia sp!, {lr}
4095 bx lr 4129 bx lr
4096.endfunc 4130.endfunc
4097 4131
4098.func Get_MAC_Addr, GET_MAC_ADDR 4132.func Get_MAC_Addr, GET_MAC_ADDR
4099GET_MAC_ADDR: 4133GET_MAC_ADDR:
4100 stmdb sp!, {lr} 4134 stmdb sp!, {lr}
@@ -4110,13 +4144,13 @@ GET_MAC_ADDR:
4110.func Delay9, DELAY9 4144.func Delay9, DELAY9
4111DELAY9: 4145DELAY9:
4112 adds r0, r0, r0, LSL #3 /* r0 = r0 * 9 */ 4146 adds r0, r0, r0, LSL #3 /* r0 = r0 * 9 */
4113DELAYLOOP: 4147DELAYLOOP:
4114 beq DELAY9_done 4148 beq DELAY9_done
4115 subs r0, r0, #1 4149 subs r0, r0, #1
4116 b DELAYLOOP 4150 b DELAYLOOP
4117DELAY9_done: 4151DELAY9_done:
4118 bx lr 4152 bx lr
4119.endfunc 4153.endfunc
4120 4154
4121.func SP_Init, SP_INIT 4155.func SP_Init, SP_INIT
4122SP_INIT: 4156SP_INIT:
@@ -4145,26 +4179,26 @@ SP_INIT:
4145 ldr r0, [r0, #SP_RDR] 4179 ldr r0, [r0, #SP_RDR]
4146 bx lr 4180 bx lr
4147.endfunc 4181.endfunc
4148.func NVRAM_Init, NVRAM_INIT 4182.func NVRAM_Init, NVRAM_INIT
4149NVRAM_INIT: 4183NVRAM_INIT:
4150 ldr r1, =SP_BASE 4184 ldr r1, =SP_BASE
4151 ldr r0, [r1, #SP_RDR] 4185 ldr r0, [r1, #SP_RDR]
4152 mov r0, #NVRAM_CMD_RDSR 4186 mov r0, #NVRAM_CMD_RDSR
4153 str r0, [r1, #SP_TDR] 4187 str r0, [r1, #SP_TDR]
4154SP_loop1: 4188SP_loop1:
4155 ldr r0, [r1, #SP_SR] 4189 ldr r0, [r1, #SP_SR]
4156 tst r0, #SP_TDRE 4190 tst r0, #SP_TDRE
4157 beq SP_loop1 4191 beq SP_loop1
4158 4192
4159 mov r0, #SPI_8CLOCKS 4193 mov r0, #SPI_8CLOCKS
4160 str r0, [r1, #SP_TDR] 4194 str r0, [r1, #SP_TDR]
4161SP_loop2: 4195SP_loop2:
4162 ldr r0, [r1, #SP_SR] 4196 ldr r0, [r1, #SP_SR]
4163 tst r0, #SP_TDRE 4197 tst r0, #SP_TDRE
4164 beq SP_loop2 4198 beq SP_loop2
4165 4199
4166 ldr r0, [r1, #SP_RDR] 4200 ldr r0, [r1, #SP_RDR]
4167SP_loop3: 4201SP_loop3:
4168 ldr r0, [r1, #SP_SR] 4202 ldr r0, [r1, #SP_SR]
4169 tst r0, #SP_RDRF 4203 tst r0, #SP_RDRF
4170 beq SP_loop3 4204 beq SP_loop3
@@ -4173,7 +4207,7 @@ SP_loop3:
4173 and r0, r0, #255 4207 and r0, r0, #255
4174 bx lr 4208 bx lr
4175.endfunc 4209.endfunc
4176 4210
4177.func NVRAM_Xfer, NVRAM_XFER 4211.func NVRAM_Xfer, NVRAM_XFER
4178 /* r0 = dest address */ 4212 /* r0 = dest address */
4179 /* r1 = not used */ 4213 /* r1 = not used */
@@ -4185,11 +4219,11 @@ NVRAM_XFER:
4185 mov r4, r3 /* save r3 (length) */ 4219 mov r4, r3 /* save r3 (length) */
4186 mov r0, r2, LSR #5 /* SPI memories put A8 in the command field */ 4220 mov r0, r2, LSR #5 /* SPI memories put A8 in the command field */
4187 and r0, r0, #8 4221 and r0, r0, #8
4188 add r0, r0, #NVRAM_CMD_READ 4222 add r0, r0, #NVRAM_CMD_READ
4189 ldr r1, =NVRAM_SCRATCH 4223 ldr r1, =NVRAM_SCRATCH
4190 strb r0, [r1, #0] /* save command in NVRAM_SCRATCH[0] */ 4224 strb r0, [r1, #0] /* save command in NVRAM_SCRATCH[0] */
4191 strb r2, [r1, #1] /* save low byte of source address in NVRAM_SCRATCH[1] */ 4225 strb r2, [r1, #1] /* save low byte of source address in NVRAM_SCRATCH[1] */
4192_local1: 4226_local1:
4193 bl NVRAM_INIT 4227 bl NVRAM_INIT
4194 tst r0, #NVRAM_SR_RDY 4228 tst r0, #NVRAM_SR_RDY
4195 bne _local1 4229 bne _local1
@@ -4211,7 +4245,7 @@ NVRAM_XFER2:
4211 cmp r0, #0 4245 cmp r0, #0
4212 bls _local2 4246 bls _local2
4213 ldr r5, =NVRAM_SCRATCH 4247 ldr r5, =NVRAM_SCRATCH
4214_local4: 4248_local4:
4215 ldrb r6, [r5, r3] 4249 ldrb r6, [r5, r3]
4216 str r6, [r4, #SP_TDR] 4250 str r6, [r4, #SP_TDR]
4217_local3: 4251_local3:
@@ -4225,7 +4259,7 @@ _local2:
4225 mov r3, #SPI_8CLOCKS 4259 mov r3, #SPI_8CLOCKS
4226 str r3, [r4, #SP_TDR] 4260 str r3, [r4, #SP_TDR]
4227 ldr r0, [r4, #SP_RDR] 4261 ldr r0, [r4, #SP_RDR]
4228_local5: 4262_local5:
4229 ldr r0, [r4, #SP_SR] 4263 ldr r0, [r4, #SP_SR]
4230 tst r0, #SP_RDRF 4264 tst r0, #SP_RDRF
4231 beq _local5 4265 beq _local5
@@ -4233,12 +4267,12 @@ _local5:
4233 mov r0, #0 4267 mov r0, #0
4234 cmp r2, #0 /* r2 is # of bytes to copy in */ 4268 cmp r2, #0 /* r2 is # of bytes to copy in */
4235 bls _local6 4269 bls _local6
4236_local7: 4270_local7:
4237 ldr r5, [r4, #SP_SR] 4271 ldr r5, [r4, #SP_SR]
4238 tst r5, #SP_TDRE 4272 tst r5, #SP_TDRE
4239 beq _local7 4273 beq _local7
4240 str r3, [r4, #SP_TDR] /* r3 has SPI_8CLOCKS */ 4274 str r3, [r4, #SP_TDR] /* r3 has SPI_8CLOCKS */
4241_local8: 4275_local8:
4242 ldr r5, [r4, #SP_SR] 4276 ldr r5, [r4, #SP_SR]
4243 tst r5, #SP_RDRF 4277 tst r5, #SP_RDRF
4244 beq _local8 4278 beq _local8
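
Stripped of the register juggling, the assembly stub above is a textbook SPI-EEPROM READ: poll the status register until the part is idle, send the READ opcode with address bit A8 folded into it, send the low address byte, then clock the data out with dummy writes. The same exchange in C, as a pure illustration (spi_xfer_byte() is a hypothetical byte-out/byte-in helper, not part of the driver):

        u8 spi_xfer_byte(u8 out);       /* hypothetical: shift one byte out, return the byte shifted in */

        static void nvram_read_sketch(u16 addr, u8 *buf, int len)
        {
                u8 status;
                int i;

                /* poll the status register until the part is idle
                   (mirrors the stub's NVRAM_INIT/_local1 loop) */
                do {
                        spi_xfer_byte(NVRAM_CMD_RDSR);          /* status-read opcode */
                        status = spi_xfer_byte(SPI_8CLOCKS);    /* dummy write clocks the status back */
                } while (status & NVRAM_SR_RDY);

                spi_xfer_byte(NVRAM_CMD_READ | ((addr >> 5) & 0x08));   /* A8 rides in the command byte */
                spi_xfer_byte(addr & 0xff);                             /* low address byte */

                for (i = 0; i < len; i++)
                        buf[i] = spi_xfer_byte(SPI_8CLOCKS);            /* each dummy write clocks a byte in */
        }
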
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index fc62235bfc24..353ccb93134b 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,3 +1,4 @@
1hostap-y := hostap_main.o
1obj-$(CONFIG_HOSTAP) += hostap.o 2obj-$(CONFIG_HOSTAP) += hostap.o
2 3
3obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o 4obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..3d2ea61033be 100644
--- a/drivers/net/wireless/hostap/hostap.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 77d2a21d4cd0..44cd3fcd1572 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -175,7 +175,7 @@ that only one external action is invoked at a time.
175#define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation"
176 176
177/* Debugging stuff */ 177/* Debugging stuff */
178#ifdef CONFIG_IPW_DEBUG 178#ifdef CONFIG_IPW2100_DEBUG
179#define CONFIG_IPW2100_RX_DEBUG /* Reception debugging */ 179#define CONFIG_IPW2100_RX_DEBUG /* Reception debugging */
180#endif 180#endif
181 181
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
208 208
209static u32 ipw2100_debug_level = IPW_DL_NONE; 209static u32 ipw2100_debug_level = IPW_DL_NONE;
210 210
211#ifdef CONFIG_IPW_DEBUG 211#ifdef CONFIG_IPW2100_DEBUG
212#define IPW_DEBUG(level, message...) \ 212#define IPW_DEBUG(level, message...) \
213do { \ 213do { \
214 if (ipw2100_debug_level & (level)) { \ 214 if (ipw2100_debug_level & (level)) { \
@@ -219,9 +219,9 @@ do { \
219} while (0) 219} while (0)
220#else 220#else
221#define IPW_DEBUG(level, message...) do {} while (0) 221#define IPW_DEBUG(level, message...) do {} while (0)
222#endif /* CONFIG_IPW_DEBUG */ 222#endif /* CONFIG_IPW2100_DEBUG */
223 223
224#ifdef CONFIG_IPW_DEBUG 224#ifdef CONFIG_IPW2100_DEBUG
225static const char *command_types[] = { 225static const char *command_types[] = {
226 "undefined", 226 "undefined",
227 "unused", /* HOST_ATTENTION */ 227 "unused", /* HOST_ATTENTION */
@@ -2081,7 +2081,7 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2081 priv->status &= ~STATUS_SCANNING; 2081 priv->status &= ~STATUS_SCANNING;
2082} 2082}
2083 2083
2084#ifdef CONFIG_IPW_DEBUG 2084#ifdef CONFIG_IPW2100_DEBUG
2085#define IPW2100_HANDLER(v, f) { v, f, # v } 2085#define IPW2100_HANDLER(v, f) { v, f, # v }
2086struct ipw2100_status_indicator { 2086struct ipw2100_status_indicator {
2087 int status; 2087 int status;
@@ -2094,7 +2094,7 @@ struct ipw2100_status_indicator {
2094 int status; 2094 int status;
2095 void (*cb) (struct ipw2100_priv * priv, u32 status); 2095 void (*cb) (struct ipw2100_priv * priv, u32 status);
2096}; 2096};
2097#endif /* CONFIG_IPW_DEBUG */ 2097#endif /* CONFIG_IPW2100_DEBUG */
2098 2098
2099static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status) 2099static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
2100{ 2100{
@@ -2149,7 +2149,7 @@ static void isr_status_change(struct ipw2100_priv *priv, int status)
2149static void isr_rx_complete_command(struct ipw2100_priv *priv, 2149static void isr_rx_complete_command(struct ipw2100_priv *priv,
2150 struct ipw2100_cmd_header *cmd) 2150 struct ipw2100_cmd_header *cmd)
2151{ 2151{
2152#ifdef CONFIG_IPW_DEBUG 2152#ifdef CONFIG_IPW2100_DEBUG
2153 if (cmd->host_command_reg < ARRAY_SIZE(command_types)) { 2153 if (cmd->host_command_reg < ARRAY_SIZE(command_types)) {
2154 IPW_DEBUG_HC("Command completed '%s (%d)'\n", 2154 IPW_DEBUG_HC("Command completed '%s (%d)'\n",
2155 command_types[cmd->host_command_reg], 2155 command_types[cmd->host_command_reg],
@@ -2167,7 +2167,7 @@ static void isr_rx_complete_command(struct ipw2100_priv *priv,
2167 wake_up_interruptible(&priv->wait_command_queue); 2167 wake_up_interruptible(&priv->wait_command_queue);
2168} 2168}
2169 2169
2170#ifdef CONFIG_IPW_DEBUG 2170#ifdef CONFIG_IPW2100_DEBUG
2171static const char *frame_types[] = { 2171static const char *frame_types[] = {
2172 "COMMAND_STATUS_VAL", 2172 "COMMAND_STATUS_VAL",
2173 "STATUS_CHANGE_VAL", 2173 "STATUS_CHANGE_VAL",
@@ -2290,7 +2290,7 @@ static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
2290 2290
2291static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) 2291static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
2292{ 2292{
2293#ifdef CONFIG_IPW_DEBUG_C3 2293#ifdef CONFIG_IPW2100_DEBUG_C3
2294 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2294 struct ipw2100_status *status = &priv->status_queue.drv[i];
2295 u32 match, reg; 2295 u32 match, reg;
2296 int j; 2296 int j;
@@ -2312,7 +2312,7 @@ static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
2312 } 2312 }
2313#endif 2313#endif
2314 2314
2315#ifdef CONFIG_IPW_DEBUG_C3 2315#ifdef CONFIG_IPW2100_DEBUG_C3
2316 /* Halt the firmware so we can get a good image */ 2316 /* Halt the firmware so we can get a good image */
2317 write_register(priv->net_dev, IPW_REG_RESET_REG, 2317 write_register(priv->net_dev, IPW_REG_RESET_REG,
2318 IPW_AUX_HOST_RESET_REG_STOP_MASTER); 2318 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
@@ -2716,7 +2716,7 @@ static inline int __ipw2100_tx_process(struct ipw2100_priv *priv)
2716 list_del(element); 2716 list_del(element);
2717 DEC_STAT(&priv->fw_pend_stat); 2717 DEC_STAT(&priv->fw_pend_stat);
2718 2718
2719#ifdef CONFIG_IPW_DEBUG 2719#ifdef CONFIG_IPW2100_DEBUG
2720 { 2720 {
2721 int i = txq->oldest; 2721 int i = txq->oldest;
2722 IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i, 2722 IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i,
@@ -2782,7 +2782,7 @@ static inline int __ipw2100_tx_process(struct ipw2100_priv *priv)
2782 "something else: ids %d=%d.\n", 2782 "something else: ids %d=%d.\n",
2783 priv->net_dev->name, txq->oldest, packet->index); 2783 priv->net_dev->name, txq->oldest, packet->index);
2784 2784
2785#ifdef CONFIG_IPW_DEBUG 2785#ifdef CONFIG_IPW2100_DEBUG
2786 if (packet->info.c_struct.cmd->host_command_reg < 2786 if (packet->info.c_struct.cmd->host_command_reg <
2787 sizeof(command_types) / sizeof(*command_types)) 2787 sizeof(command_types) / sizeof(*command_types))
2788 IPW_DEBUG_TX("Command '%s (%d)' processed: %d.\n", 2788 IPW_DEBUG_TX("Command '%s (%d)' processed: %d.\n",
@@ -2975,7 +2975,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
2975 2975
2976 IPW_DEBUG_TX("data header tbd TX%d P=%08x L=%d\n", 2976 IPW_DEBUG_TX("data header tbd TX%d P=%08x L=%d\n",
2977 packet->index, tbd->host_addr, tbd->buf_length); 2977 packet->index, tbd->host_addr, tbd->buf_length);
2978#ifdef CONFIG_IPW_DEBUG 2978#ifdef CONFIG_IPW2100_DEBUG
2979 if (packet->info.d_struct.txb->nr_frags > 1) 2979 if (packet->info.d_struct.txb->nr_frags > 1)
2980 IPW_DEBUG_FRAG("fragment Tx: %d frames\n", 2980 IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
2981 packet->info.d_struct.txb->nr_frags); 2981 packet->info.d_struct.txb->nr_frags);
@@ -3827,7 +3827,7 @@ static ssize_t show_stats(struct device *d, struct device_attribute *attr,
3827 priv->rx_interrupts, priv->inta_other); 3827 priv->rx_interrupts, priv->inta_other);
3828 out += sprintf(out, "firmware resets: %d\n", priv->resets); 3828 out += sprintf(out, "firmware resets: %d\n", priv->resets);
3829 out += sprintf(out, "firmware hangs: %d\n", priv->hangs); 3829 out += sprintf(out, "firmware hangs: %d\n", priv->hangs);
3830#ifdef CONFIG_IPW_DEBUG 3830#ifdef CONFIG_IPW2100_DEBUG
3831 out += sprintf(out, "packet mismatch image: %s\n", 3831 out += sprintf(out, "packet mismatch image: %s\n",
3832 priv->snapshot[0] ? "YES" : "NO"); 3832 priv->snapshot[0] ? "YES" : "NO");
3833#endif 3833#endif
@@ -3982,7 +3982,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
3982 3982
3983static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL); 3983static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL);
3984 3984
3985#ifdef CONFIG_IPW_DEBUG 3985#ifdef CONFIG_IPW2100_DEBUG
3986static ssize_t show_debug_level(struct device_driver *d, char *buf) 3986static ssize_t show_debug_level(struct device_driver *d, char *buf)
3987{ 3987{
3988 return sprintf(buf, "0x%08X\n", ipw2100_debug_level); 3988 return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
@@ -4011,7 +4011,7 @@ static ssize_t store_debug_level(struct device_driver *d,
4011 4011
4012static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level, 4012static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level,
4013 store_debug_level); 4013 store_debug_level);
4014#endif /* CONFIG_IPW_DEBUG */ 4014#endif /* CONFIG_IPW2100_DEBUG */
4015 4015
4016static ssize_t show_fatal_error(struct device *d, 4016static ssize_t show_fatal_error(struct device *d,
4017 struct device_attribute *attr, char *buf) 4017 struct device_attribute *attr, char *buf)
@@ -4937,7 +4937,7 @@ static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid,
4937 }; 4937 };
4938 int err; 4938 int err;
4939 4939
4940#ifdef CONFIG_IPW_DEBUG 4940#ifdef CONFIG_IPW2100_DEBUG
4941 if (bssid != NULL) 4941 if (bssid != NULL)
4942 IPW_DEBUG_HC("MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n", 4942 IPW_DEBUG_HC("MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n",
4943 bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], 4943 bssid[0], bssid[1], bssid[2], bssid[3], bssid[4],
@@ -6858,7 +6858,7 @@ static int __init ipw2100_init(void)
6858 6858
6859 ret = pci_module_init(&ipw2100_pci_driver); 6859 ret = pci_module_init(&ipw2100_pci_driver);
6860 6860
6861#ifdef CONFIG_IPW_DEBUG 6861#ifdef CONFIG_IPW2100_DEBUG
6862 ipw2100_debug_level = debug; 6862 ipw2100_debug_level = debug;
6863 driver_create_file(&ipw2100_pci_driver.driver, 6863 driver_create_file(&ipw2100_pci_driver.driver,
6864 &driver_attr_debug_level); 6864 &driver_attr_debug_level);
@@ -6873,7 +6873,7 @@ static int __init ipw2100_init(void)
6873static void __exit ipw2100_exit(void) 6873static void __exit ipw2100_exit(void)
6874{ 6874{
6875 /* FIXME: IPG: check that we have no instances of the devices open */ 6875 /* FIXME: IPG: check that we have no instances of the devices open */
6876#ifdef CONFIG_IPW_DEBUG 6876#ifdef CONFIG_IPW2100_DEBUG
6877 driver_remove_file(&ipw2100_pci_driver.driver, 6877 driver_remove_file(&ipw2100_pci_driver.driver,
6878 &driver_attr_debug_level); 6878 &driver_attr_debug_level);
6879#endif 6879#endif
@@ -8558,7 +8558,7 @@ static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev)
8558 8558
8559 quality = min(beacon_qual, min(tx_qual, rssi_qual)); 8559 quality = min(beacon_qual, min(tx_qual, rssi_qual));
8560 8560
8561#ifdef CONFIG_IPW_DEBUG 8561#ifdef CONFIG_IPW2100_DEBUG
8562 if (beacon_qual == quality) 8562 if (beacon_qual == quality)
8563 IPW_DEBUG_WX("Quality clamped by Missed Beacons\n"); 8563 IPW_DEBUG_WX("Quality clamped by Missed Beacons\n");
8564 else if (tx_qual == quality) 8564 else if (tx_qual == quality)
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 7c65b10bb164..f6c51441fa87 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -73,7 +73,7 @@ struct ipw2100_rx_packet;
73 * you simply need to add your entry to the ipw2100_debug_levels array. 73 * you simply need to add your entry to the ipw2100_debug_levels array.
74 * 74 *
75 * If you do not see debug_level in /proc/net/ipw2100 then you do not have 75 * If you do not see debug_level in /proc/net/ipw2100 then you do not have
76 * CONFIG_IPW_DEBUG defined in your kernel configuration 76 * CONFIG_IPW2100_DEBUG defined in your kernel configuration
77 * 77 *
78 */ 78 */
79 79
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 5e7c7e944c9d..cdfe50207757 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -462,7 +462,7 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv)
462 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 462 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
463} 463}
464 464
465#ifdef CONFIG_IPW_DEBUG 465#ifdef CONFIG_IPW2200_DEBUG
466static char *ipw_error_desc(u32 val) 466static char *ipw_error_desc(u32 val)
467{ 467{
468 switch (val) { 468 switch (val) {
@@ -1235,7 +1235,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1235 const char *buf, size_t count) 1235 const char *buf, size_t count)
1236{ 1236{
1237 struct ipw_priv *priv = dev_get_drvdata(d); 1237 struct ipw_priv *priv = dev_get_drvdata(d);
1238#ifdef CONFIG_IPW_DEBUG 1238#ifdef CONFIG_IPW2200_DEBUG
1239 struct net_device *dev = priv->net_dev; 1239 struct net_device *dev = priv->net_dev;
1240#endif 1240#endif
1241 char buffer[] = "00000000"; 1241 char buffer[] = "00000000";
@@ -1754,7 +1754,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1754 IPW_ERROR("Firmware error detected. Restarting.\n"); 1754 IPW_ERROR("Firmware error detected. Restarting.\n");
1755 if (priv->error) { 1755 if (priv->error) {
1756 IPW_ERROR("Sysfs 'error' log already exists.\n"); 1756 IPW_ERROR("Sysfs 'error' log already exists.\n");
1757#ifdef CONFIG_IPW_DEBUG 1757#ifdef CONFIG_IPW2200_DEBUG
1758 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 1758 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1759 struct ipw_fw_error *error = 1759 struct ipw_fw_error *error =
1760 ipw_alloc_error_log(priv); 1760 ipw_alloc_error_log(priv);
@@ -1770,7 +1770,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1770 else 1770 else
1771 IPW_ERROR("Error allocating sysfs 'error' " 1771 IPW_ERROR("Error allocating sysfs 'error' "
1772 "log.\n"); 1772 "log.\n");
1773#ifdef CONFIG_IPW_DEBUG 1773#ifdef CONFIG_IPW2200_DEBUG
1774 if (ipw_debug_level & IPW_DL_FW_ERRORS) 1774 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1775 ipw_dump_error_log(priv, priv->error); 1775 ipw_dump_error_log(priv, priv->error);
1776#endif 1776#endif
@@ -3778,7 +3778,7 @@ static const struct ipw_status_code ipw_status_codes[] = {
3778 {0x2E, "Cipher suite is rejected per security policy"}, 3778 {0x2E, "Cipher suite is rejected per security policy"},
3779}; 3779};
3780 3780
3781#ifdef CONFIG_IPW_DEBUG 3781#ifdef CONFIG_IPW2200_DEBUG
3782static const char *ipw_get_status_code(u16 status) 3782static const char *ipw_get_status_code(u16 status)
3783{ 3783{
3784 int i; 3784 int i;
@@ -4250,7 +4250,7 @@ static inline void ipw_rx_notification(struct ipw_priv *priv,
4250 if (priv-> 4250 if (priv->
4251 status & (STATUS_ASSOCIATED | 4251 status & (STATUS_ASSOCIATED |
4252 STATUS_AUTH)) { 4252 STATUS_AUTH)) {
4253#ifdef CONFIG_IPW_DEBUG 4253#ifdef CONFIG_IPW2200_DEBUG
4254 struct notif_authenticate *auth 4254 struct notif_authenticate *auth
4255 = &notif->u.auth; 4255 = &notif->u.auth;
4256 IPW_DEBUG(IPW_DL_NOTIF | 4256 IPW_DEBUG(IPW_DL_NOTIF |
@@ -4944,12 +4944,11 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4944 struct ipw_rx_queue *rxq; 4944 struct ipw_rx_queue *rxq;
4945 int i; 4945 int i;
4946 4946
4947 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL); 4947 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4948 if (unlikely(!rxq)) { 4948 if (unlikely(!rxq)) {
4949 IPW_ERROR("memory allocation failed\n"); 4949 IPW_ERROR("memory allocation failed\n");
4950 return NULL; 4950 return NULL;
4951 } 4951 }
4952 memset(rxq, 0, sizeof(*rxq));
4953 spin_lock_init(&rxq->lock); 4952 spin_lock_init(&rxq->lock);
4954 INIT_LIST_HEAD(&rxq->rx_free); 4953 INIT_LIST_HEAD(&rxq->rx_free);
4955 INIT_LIST_HEAD(&rxq->rx_used); 4954 INIT_LIST_HEAD(&rxq->rx_used);
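
This hunk is the standard conversion from kmalloc()-then-memset() to kzalloc(): the allocation comes back already zeroed, and the cast of the void * return value was never needed in C. Reduced to its essentials:

        /* before: allocate, then clear by hand */
        rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
        if (!rxq)
                return NULL;
        memset(rxq, 0, sizeof(*rxq));

        /* after: one call, memory already zeroed */
        rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
        if (!rxq)
                return NULL;
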
@@ -5828,7 +5827,7 @@ static void ipw_bg_adhoc_check(void *data)
5828 up(&priv->sem); 5827 up(&priv->sem);
5829} 5828}
5830 5829
5831#ifdef CONFIG_IPW_DEBUG 5830#ifdef CONFIG_IPW2200_DEBUG
5832static void ipw_debug_config(struct ipw_priv *priv) 5831static void ipw_debug_config(struct ipw_priv *priv)
5833{ 5832{
5834 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 5833 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
@@ -7456,8 +7455,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
7456 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ 7455 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7457 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data; 7456 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7458 if (priv->ieee->iw_mode != IW_MODE_MONITOR && 7457 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7459 ((is_multicast_ether_addr(hdr->addr1) || 7458 (is_multicast_ether_addr(hdr->addr1) ?
7460 is_broadcast_ether_addr(hdr->addr1)) ?
7461 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) 7459 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7462 ipw_rebuild_decrypted_skb(priv, rxb->skb); 7460 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7463 7461
@@ -7648,8 +7646,7 @@ static inline int is_network_packet(struct ipw_priv *priv,
7648 return 0; 7646 return 0;
7649 7647
7650 /* {broad,multi}cast packets to our BSSID go through */ 7648 /* {broad,multi}cast packets to our BSSID go through */
7651 if (is_multicast_ether_addr(header->addr1) || 7649 if (is_multicast_ether_addr(header->addr1))
7652 is_broadcast_ether_addr(header->addr1))
7653 return !memcmp(header->addr3, priv->bssid, ETH_ALEN); 7650 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7654 7651
7655 /* packets to our adapter go through */ 7652 /* packets to our adapter go through */
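
These ipw2200 simplifications (more follow below) all rely on the same property: the all-ones broadcast address has the individual/group bit set in its first octet, so is_multicast_ether_addr() from <linux/etherdevice.h> already returns true for it and the separate is_broadcast_ether_addr() test was redundant. In essence the predicate reduces to a single bit test, roughly:

        /* sketch of the test the helper performs; ff:ff:ff:ff:ff:ff passes too */
        static inline int is_multicast_sketch(const u8 *addr)
        {
                return addr[0] & 0x01;          /* I/G bit of the first octet */
        }
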
@@ -7662,8 +7659,7 @@ static inline int is_network_packet(struct ipw_priv *priv,
7662 return 0; 7659 return 0;
7663 7660
7664 /* {broad,multi}cast packets to our BSS go through */ 7661 /* {broad,multi}cast packets to our BSS go through */
7665 if (is_multicast_ether_addr(header->addr1) || 7662 if (is_multicast_ether_addr(header->addr1))
7666 is_broadcast_ether_addr(header->addr1))
7667 return !memcmp(header->addr2, priv->bssid, ETH_ALEN); 7663 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7668 7664
7669 /* packets to our adapter go through */ 7665 /* packets to our adapter go through */
@@ -7815,7 +7811,7 @@ static void ipw_rx(struct ipw_priv *priv)
7815 7811
7816 while (i != r) { 7812 while (i != r) {
7817 rxb = priv->rxq->queue[i]; 7813 rxb = priv->rxq->queue[i];
7818#ifdef CONFIG_IPW_DEBUG 7814#ifdef CONFIG_IPW2200_DEBUG
7819 if (unlikely(rxb == NULL)) { 7815 if (unlikely(rxb == NULL)) {
7820 printk(KERN_CRIT "Queue not allocated!\n"); 7816 printk(KERN_CRIT "Queue not allocated!\n");
7821 break; 7817 break;
@@ -9657,8 +9653,7 @@ static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9657 switch (priv->ieee->iw_mode) { 9653 switch (priv->ieee->iw_mode) {
9658 case IW_MODE_ADHOC: 9654 case IW_MODE_ADHOC:
9659 hdr_len = IEEE80211_3ADDR_LEN; 9655 hdr_len = IEEE80211_3ADDR_LEN;
9660 unicast = !(is_multicast_ether_addr(hdr->addr1) || 9656 unicast = !is_multicast_ether_addr(hdr->addr1);
9661 is_broadcast_ether_addr(hdr->addr1));
9662 id = ipw_find_station(priv, hdr->addr1); 9657 id = ipw_find_station(priv, hdr->addr1);
9663 if (id == IPW_INVALID_STATION) { 9658 if (id == IPW_INVALID_STATION) {
9664 id = ipw_add_station(priv, hdr->addr1); 9659 id = ipw_add_station(priv, hdr->addr1);
@@ -9673,8 +9668,7 @@ static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9673 9668
9674 case IW_MODE_INFRA: 9669 case IW_MODE_INFRA:
9675 default: 9670 default:
9676 unicast = !(is_multicast_ether_addr(hdr->addr3) || 9671 unicast = !is_multicast_ether_addr(hdr->addr3);
9677 is_broadcast_ether_addr(hdr->addr3));
9678 hdr_len = IEEE80211_3ADDR_LEN; 9672 hdr_len = IEEE80211_3ADDR_LEN;
9679 id = 0; 9673 id = 0;
9680 break; 9674 break;
@@ -10956,7 +10950,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10956 10950
10957 priv->net_dev = net_dev; 10951 priv->net_dev = net_dev;
10958 priv->pci_dev = pdev; 10952 priv->pci_dev = pdev;
10959#ifdef CONFIG_IPW_DEBUG 10953#ifdef CONFIG_IPW2200_DEBUG
10960 ipw_debug_level = debug; 10954 ipw_debug_level = debug;
10961#endif 10955#endif
10962 spin_lock_init(&priv->lock); 10956 spin_lock_init(&priv->lock);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 1c98db0652c9..e65620a4d79e 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1301,14 +1301,14 @@ struct ipw_priv {
1301 1301
1302/* debug macros */ 1302/* debug macros */
1303 1303
1304#ifdef CONFIG_IPW_DEBUG 1304#ifdef CONFIG_IPW2200_DEBUG
1305#define IPW_DEBUG(level, fmt, args...) \ 1305#define IPW_DEBUG(level, fmt, args...) \
1306do { if (ipw_debug_level & (level)) \ 1306do { if (ipw_debug_level & (level)) \
1307 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1307 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1308 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1308 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
1309#else 1309#else
1310#define IPW_DEBUG(level, fmt, args...) do {} while (0) 1310#define IPW_DEBUG(level, fmt, args...) do {} while (0)
1311#endif /* CONFIG_IPW_DEBUG */ 1311#endif /* CONFIG_IPW2200_DEBUG */
1312 1312
1313/* 1313/*
1314 * To use the debug system; 1314 * To use the debug system;
@@ -1332,7 +1332,7 @@ do { if (ipw_debug_level & (level)) \
1332 * you simply need to add your entry to the ipw_debug_levels array. 1332 * you simply need to add your entry to the ipw_debug_levels array.
1333 * 1333 *
1334 * If you do not see debug_level in /proc/net/ipw then you do not have 1334 * If you do not see debug_level in /proc/net/ipw then you do not have
1335 * CONFIG_IPW_DEBUG defined in your kernel configuration 1335 * CONFIG_IPW2200_DEBUG defined in your kernel configuration
1336 * 1336 *
1337 */ 1337 */
1338 1338
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 488ab06fb79f..6fd0bf736830 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -3512,9 +3512,8 @@ static int orinoco_ioctl_setpower(struct net_device *dev,
3512 break; 3512 break;
3513 default: 3513 default:
3514 err = -EINVAL; 3514 err = -EINVAL;
3515 }
3516 if (err)
3517 goto out; 3515 goto out;
3516 }
3518 3517
3519 if (prq->flags & IW_POWER_TIMEOUT) { 3518 if (prq->flags & IW_POWER_TIMEOUT) {
3520 priv->pm_on = 1; 3519 priv->pm_on = 1;
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index d8afd51ff8a5..d1a670b35338 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -1,6 +1,8 @@
1/* orinoco_nortel.c 1/* orinoco_nortel.c
2 * 2 *
3 * Driver for Prism II devices which would usually be driven by orinoco_cs, 3 * Driver for Prism II devices which would usually be driven by orinoco_cs,
4 * but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in
5 * Nortel emobility, Symbol LA-4113 and Symbol LA-4123.
4 * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter. 6 * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter.
5 * 7 *
6 * Copyright (C) 2002 Tobias Hoffmann 8 * Copyright (C) 2002 Tobias Hoffmann
@@ -165,7 +167,7 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
165 goto fail_resources; 167 goto fail_resources;
166 } 168 }
167 169
168 iomem = pci_iomap(pdev, 3, 0); 170 iomem = pci_iomap(pdev, 2, 0);
169 if (!iomem) { 171 if (!iomem) {
170 err = -ENOMEM; 172 err = -ENOMEM;
171 goto fail_map_io; 173 goto fail_map_io;
@@ -265,6 +267,8 @@ static void __devexit nortel_pci_remove_one(struct pci_dev *pdev)
265static struct pci_device_id nortel_pci_id_table[] = { 267static struct pci_device_id nortel_pci_id_table[] = {
266 /* Nortel emobility PCI */ 268 /* Nortel emobility PCI */
267 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, 269 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
270 /* Symbol LA-4123 PCI */
271 {0x1562, 0x0001, PCI_ANY_ID, PCI_ANY_ID,},
268 {0,}, 272 {0,},
269}; 273};
270 274
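
The new Symbol LA-4123 entry above makes the driver claim the board once the module is loaded; for the module to be loaded automatically on hotplug, the same table is normally wired into the pci_driver and exported as well. A sketch of that usual pattern, assuming orinoco_nortel.c follows it (field values here are illustrative):

        static struct pci_driver nortel_pci_driver = {
                .name           = "orinoco_nortel",
                .id_table       = nortel_pci_id_table, /* the new ID is picked up here */
                .probe          = nortel_pci_init_one,
                .remove         = __devexit_p(nortel_pci_remove_one),
        };

        MODULE_DEVICE_TABLE(pci, nortel_pci_id_table);
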