Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/8139too.c | 86
-rw-r--r--  drivers/net/Kconfig | 15
-rw-r--r--  drivers/net/Makefile | 7
-rw-r--r--  drivers/net/bonding/Makefile | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 106
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 13
-rw-r--r--  drivers/net/bonding/bond_alb.c | 75
-rw-r--r--  drivers/net/bonding/bond_alb.h | 9
-rw-r--r--  drivers/net/bonding/bond_main.c | 781
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 1358
-rw-r--r--  drivers/net/bonding/bonding.h | 52
-rw-r--r--  drivers/net/gianfar.c | 231
-rw-r--r--  drivers/net/gianfar.h | 69
-rw-r--r--  drivers/net/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/gianfar_mii.h | 1
-rw-r--r--  drivers/net/gianfar_sysfs.c | 311
-rw-r--r--  drivers/net/ixp2000/Kconfig | 6
-rw-r--r--  drivers/net/ixp2000/Makefile | 3
-rw-r--r--  drivers/net/ixp2000/caleb.c | 136
-rw-r--r--  drivers/net/ixp2000/caleb.h | 22
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 238
-rw-r--r--  drivers/net/ixp2000/ixp2400-msf.c | 213
-rw-r--r--  drivers/net/ixp2000/ixp2400-msf.h | 115
-rw-r--r--  drivers/net/ixp2000/ixp2400_rx.uc | 408
-rw-r--r--  drivers/net/ixp2000/ixp2400_rx.ucode | 130
-rw-r--r--  drivers/net/ixp2000/ixp2400_tx.uc | 272
-rw-r--r--  drivers/net/ixp2000/ixp2400_tx.ucode | 98
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 404
-rw-r--r--  drivers/net/ixp2000/ixpdev.h | 27
-rw-r--r--  drivers/net/ixp2000/ixpdev_priv.h | 57
-rw-r--r--  drivers/net/ixp2000/pm3386.c | 304
-rw-r--r--  drivers/net/ixp2000/pm3386.h | 26
-rw-r--r--  drivers/net/s2io.c | 186
-rw-r--r--  drivers/net/s2io.h | 3
-rw-r--r--  drivers/net/sis900.c | 73
-rw-r--r--  drivers/net/sis900.h | 45
-rw-r--r--  drivers/net/sk98lin/h/skdrv2nd.h | 1
-rw-r--r--  drivers/net/sk98lin/h/skvpd.h | 8
-rw-r--r--  drivers/net/sk98lin/skge.c | 43
-rw-r--r--  drivers/net/sky2.c | 3123
-rw-r--r--  drivers/net/sky2.h | 1917
-rw-r--r--  drivers/net/wan/lmc/lmc_prot.h | 15
-rw-r--r--  drivers/net/wireless/atmel.c | 1490
-rw-r--r--  drivers/net/wireless/hostap/Makefile | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c (renamed from drivers/net/wireless/hostap/hostap.c) | 0
45 files changed, 10730 insertions, 1752 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 30bee11c48bd..d2102a27d307 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -586,16 +586,16 @@ struct rtl8139_private {
 	dma_addr_t tx_bufs_dma;
 	signed char phys[4];		/* MII device addresses. */
 	char twistie, twist_row, twist_col;	/* Twister tune state. */
-	unsigned int default_port:4;	/* Last dev->if_port value. */
+	unsigned int default_port : 4;	/* Last dev->if_port value. */
+	unsigned int have_thread : 1;
 	spinlock_t lock;
 	spinlock_t rx_lock;
 	chip_t chipset;
-	pid_t thr_pid;
-	wait_queue_head_t thr_wait;
-	struct completion thr_exited;
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
-	int time_to_die;
+
+	struct work_struct thread;
+
 	struct mii_if_info mii;
 	unsigned int regs_len;
 	unsigned long fifo_copy_timeout;
@@ -620,7 +620,7 @@ static int rtl8139_open (struct net_device *dev);
 static int mdio_read (struct net_device *dev, int phy_id, int location);
 static void mdio_write (struct net_device *dev, int phy_id, int location,
 			int val);
-static void rtl8139_start_thread(struct net_device *dev);
+static void rtl8139_start_thread(struct rtl8139_private *tp);
 static void rtl8139_tx_timeout (struct net_device *dev);
 static void rtl8139_init_ring (struct net_device *dev);
 static int rtl8139_start_xmit (struct sk_buff *skb,
@@ -637,6 +637,7 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
+static void rtl8139_thread (void *_data);
 static struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1007,8 +1008,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	init_waitqueue_head (&tp->thr_wait);
-	init_completion (&tp->thr_exited);
+	INIT_WORK(&tp->thread, rtl8139_thread, dev);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1345,7 +1345,7 @@ static int rtl8139_open (struct net_device *dev)
 			 dev->irq, RTL_R8 (MediaStatus),
 			 tp->mii.full_duplex ? "full" : "half");
 
-	rtl8139_start_thread(dev);
+	rtl8139_start_thread(tp);
 
 	return 0;
 }
@@ -1594,55 +1594,43 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
 		 RTL_R8 (Config1));
 }
 
-static int rtl8139_thread (void *data)
+static void rtl8139_thread (void *_data)
 {
-	struct net_device *dev = data;
+	struct net_device *dev = _data;
 	struct rtl8139_private *tp = netdev_priv(dev);
-	unsigned long timeout;
-
-	daemonize("%s", dev->name);
-	allow_signal(SIGTERM);
-
-	while (1) {
-		timeout = next_tick;
-		do {
-			timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
-			/* make swsusp happy with our thread */
-			try_to_freeze();
-		} while (!signal_pending (current) && (timeout > 0));
-
-		if (signal_pending (current)) {
-			flush_signals(current);
-		}
+	unsigned long thr_delay;
 
-		if (tp->time_to_die)
-			break;
-
-		if (rtnl_lock_interruptible ())
-			break;
+	if (rtnl_shlock_nowait() == 0) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
+
+		thr_delay = next_tick;
+	} else {
+		/* unlikely race. mitigate with fast poll. */
+		thr_delay = HZ / 2;
 	}
 
-	complete_and_exit (&tp->thr_exited, 0);
+	schedule_delayed_work(&tp->thread, thr_delay);
 }
 
-static void rtl8139_start_thread(struct net_device *dev)
+static void rtl8139_start_thread(struct rtl8139_private *tp)
 {
-	struct rtl8139_private *tp = netdev_priv(dev);
-
-	tp->thr_pid = -1;
 	tp->twistie = 0;
-	tp->time_to_die = 0;
 	if (tp->chipset == CH_8139_K)
 		tp->twistie = 1;
 	else if (tp->drv_flags & HAS_LNK_CHNG)
 		return;
 
-	tp->thr_pid = kernel_thread(rtl8139_thread, dev, CLONE_FS|CLONE_FILES);
-	if (tp->thr_pid < 0) {
-		printk (KERN_WARNING "%s: unable to start kernel thread\n",
-			dev->name);
+	tp->have_thread = 1;
+
+	schedule_delayed_work(&tp->thread, next_tick);
+}
+
+static void rtl8139_stop_thread(struct rtl8139_private *tp)
+{
+	if (tp->have_thread) {
+		cancel_rearming_delayed_work(&tp->thread);
+		tp->have_thread = 0;
 	}
 }
 
@@ -2224,22 +2212,12 @@ static int rtl8139_close (struct net_device *dev)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	int ret = 0;
 	unsigned long flags;
 
 	netif_stop_queue (dev);
 
-	if (tp->thr_pid >= 0) {
-		tp->time_to_die = 1;
-		wmb();
-		ret = kill_proc (tp->thr_pid, SIGTERM, 1);
-		if (ret) {
-			printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
-			return ret;
-		}
-		wait_for_completion (&tp->thr_exited);
-	}
-
+	rtl8139_stop_thread(tp);
+
 	if (netif_msg_ifdown(tp))
 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n",
 			dev->name, RTL_R16 (IntrStatus));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ebd7313d7fc1..0f2e4c11f80f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1901,6 +1901,8 @@ config E1000_NAPI
 
 	  If in doubt, say N.
 
+source "drivers/net/ixp2000/Kconfig"
+
 config MYRI_SBUS
 	tristate "MyriCOM Gigabit Ethernet support"
 	depends on SBUS
@@ -2008,7 +2010,18 @@ config SKGE
 
 	  It does not support the link failover and network management
 	  features that "portable" vendor supplied sk98lin driver does.
 
+
+config SKY2
+	tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2. This is recommended.
+
 config SK98LIN
 	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4cffd34442aa..b74a7cb5bae6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -13,7 +13,10 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
-gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o
+gianfar_driver-objs := gianfar.o \
+		gianfar_ethtool.o \
+		gianfar_mii.o \
+		gianfar_sysfs.o
 
 #
 # link order important here
@@ -59,6 +62,7 @@ spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
@@ -202,6 +206,7 @@ obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
 obj-$(CONFIG_IRDA) += irda/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
 
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
 
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index cf50384b469e..5cdae2bc055a 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d2f34d5a8083..f3f5825469d6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -18,38 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
- * Amir Noam <amir.noam at intel dot com>
- * - Added support for lacp_rate module param.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Based on discussion on mailing list, changed locking scheme
- * to use lock/unlock or lock_bh/unlock_bh appropriately instead
- * of lock_irqsave/unlock_irqrestore. The new scheme helps exposing
- * hidden bugs and solves system hangs that occurred due to the fact
- * that holding lock_irqsave doesn't prevent softirqs from running.
- * This also increases total throughput since interrupts are not
- * blocked on each transmitted packets or monitor timeout.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Renamed bond_3ad_link_status_changed() to
- * bond_3ad_handle_link_change() for compatibility with TLB.
- *
- * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
- * - Fix long fail over time when releasing last slave of an active
- * aggregator - send LACPDU on unbind of slave to tell partner this
- * port is no longer aggregatable.
- *
- * 2003/06/25 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
- * - Send LACPDU as highest priority packet to further fix the above
- * problem on very high Tx traffic load where packets may get dropped
- * by the slave.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 //#define BONDING_DEBUG 1
@@ -1198,10 +1166,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 		// detect loopback situation
 		if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
 			// INFO_RECEIVED_LOOPBACK_FRAMES
-			printk(KERN_ERR DRV_NAME ": An illegal loopback occurred on adapter (%s)\n",
-			       port->slave->dev->name);
-			printk(KERN_ERR "Check the configuration to verify that all Adapters "
-			       "are connected to 802.3ad compliant switch ports\n");
+			printk(KERN_ERR DRV_NAME ": %s: An illegal loopback occurred on "
+			       "adapter (%s). Check the configuration to verify that all "
+			       "Adapters are connected to 802.3ad compliant switch ports\n",
+			       port->slave->dev->master->name, port->slave->dev->name);
 			__release_rx_machine_lock(port);
 			return;
 		}
@@ -1378,8 +1346,9 @@ static void ad_port_selection_logic(struct port *port)
 		}
 	}
 	if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
-		printk(KERN_WARNING DRV_NAME ": Warning: Port %d (on %s) was "
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: Port %d (on %s) was "
 		       "related to aggregator %d but was not on its port list\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name,
 		       port->aggregator->aggregator_identifier);
 	}
@@ -1450,7 +1419,8 @@ static void ad_port_selection_logic(struct port *port)
 
 		dprintk("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
 	} else {
-		printk(KERN_ERR DRV_NAME ": Port %d (on %s) did not find a suitable aggregator\n",
+		printk(KERN_ERR DRV_NAME ": %s: Port %d (on %s) did not find a suitable aggregator\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name);
 	}
 }
@@ -1582,8 +1552,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator)
 
 	// check if any partner replys
 	if (best_aggregator->is_individual) {
-		printk(KERN_WARNING DRV_NAME ": Warning: No 802.3ad response from the link partner "
-		       "for any adapters in the bond\n");
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: No 802.3ad response from "
+		       "the link partner for any adapters in the bond\n",
+		       best_aggregator->slave->dev->master->name);
 	}
 
 	// check if there are more than one aggregator
@@ -1915,7 +1886,8 @@ int bond_3ad_bind_slave(struct slave *slave)
 	struct aggregator *aggregator;
 
 	if (bond == NULL) {
-		printk(KERN_ERR "The slave %s is not attached to its bond\n", slave->dev->name);
+		printk(KERN_ERR DRV_NAME ": %s: The slave %s is not attached to its bond\n",
+		       slave->dev->master->name, slave->dev->name);
 		return -1;
 	}
 
@@ -1990,7 +1962,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Trying to unbind an uninitialized port on %s\n", slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: Trying to "
+		       "unbind an uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2021,7 +1995,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 			dprintk("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
 
 			if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
-				printk(KERN_INFO DRV_NAME ": Removing an active aggregator\n");
+				printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+				       aggregator->slave->dev->master->name);
 				// select new active aggregator
 				select_new_active_agg = 1;
 			}
@@ -2051,15 +2026,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				ad_agg_selection_logic(__get_first_agg(port));
 			}
 		} else {
-			printk(KERN_WARNING DRV_NAME ": Warning: unbinding aggregator, "
-			       "and could not find a new aggregator for its ports\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: unbinding aggregator, "
+			       "and could not find a new aggregator for its ports\n",
+			       slave->dev->master->name);
 		}
 	} else { // in case that the only port related to this aggregator is the one we want to remove
 		select_new_active_agg = aggregator->is_active;
 		// clear the aggregator
 		ad_clear_agg(aggregator);
 		if (select_new_active_agg) {
-			printk(KERN_INFO "Removing an active aggregator\n");
+			printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+			       slave->dev->master->name);
 			// select new active aggregator
 			ad_agg_selection_logic(__get_first_agg(port));
 		}
@@ -2085,7 +2062,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 					// clear the aggregator
 					ad_clear_agg(temp_aggregator);
 					if (select_new_active_agg) {
-						printk(KERN_INFO "Removing an active aggregator\n");
+						printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+						       slave->dev->master->name);
 						// select new active aggregator
 						ad_agg_selection_logic(__get_first_agg(port));
 					}
@@ -2131,7 +2109,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// select the active aggregator for the bond
 	if ((port = __get_first_port(bond))) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: bond's first port is uninitialized\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: bond's first port is "
+			       "uninitialized\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2143,7 +2122,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// for each port run the state machines
 	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: Found an uninitialized port\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: Found an uninitialized "
+			       "port\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2184,7 +2164,8 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 	port = &(SLAVE_AD_INFO(slave).port);
 
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: port of slave %s is uninitialized\n", slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: port of slave %s is "
+		       "uninitialized\n", slave->dev->name, slave->dev->master->name);
 		return;
 	}
 
@@ -2230,8 +2211,9 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: speed changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: speed "
+		       "changed for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2257,8 +2239,9 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: duplex changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: duplex changed "
+		       "for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2285,8 +2268,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: link status changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: link status changed for "
+		       "uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2363,7 +2347,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-		printk(KERN_DEBUG "ERROR: bond_3ad_get_active_agg_info failed\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: "
+		       "bond_3ad_get_active_agg_info failed\n", dev->name);
 		goto out;
 	}
 
@@ -2372,7 +2357,9 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 
 	if (slaves_in_agg == 0) {
 		/*the aggregator is empty*/
-		printk(KERN_DEBUG "ERROR: active aggregator is empty\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: active "
+		       "aggregator is empty\n",
+		       dev->name);
 		goto out;
 	}
 
@@ -2390,7 +2377,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (slave_agg_no >= 0) {
-		printk(KERN_ERR DRV_NAME ": Error: Couldn't find a slave to tx on for aggregator ID %d\n", agg_id);
+		printk(KERN_ERR DRV_NAME ": %s: Error: Couldn't find a slave to tx on "
+		       "for aggregator ID %d\n", dev->name, agg_id);
 		goto out;
 	}
 
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 673a30af5660..5ee2cef5b037 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -18,19 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
- * Amir Noam <amir.noam at intel dot com>
- * - Added support for lacp_rate module param.
- *
- * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Renamed bond_3ad_link_status_changed() to
- * bond_3ad_handle_link_change() for compatibility with TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 #ifndef __BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f8fce3961197..854ddfb90da1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -18,25 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/06/25 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Fixed signed/unsigned calculation errors that caused load sharing
- * to collapse to one slave under very heavy UDP Tx stress.
- *
- * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
- * - Add support for setting bond's MAC address with special
- * handling required for ALB/TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
- *
- * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
- * - Fixed: Cannot remove and re-enslave the original active slave.
- *
- * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Add capability to tag self generated packets in ALB/TLB modes.
  */
 
 //#define BONDING_DEBUG 1
@@ -198,20 +179,21 @@ static int tlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
+	struct tlb_client_info *new_hashtbl;
 	int i;
 
 	spin_lock_init(&(bond_info->tx_hashtbl_lock));
 
-	_lock_tx_hashtbl(bond);
-
-	bond_info->tx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->tx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate TLB hash table\n",
+		       ": %s: Error: Failed to allocate TLB hash table\n",
 		       bond->dev->name);
-		_unlock_tx_hashtbl(bond);
 		return -1;
 	}
+	_lock_tx_hashtbl(bond);
+
+	bond_info->tx_hashtbl = new_hashtbl;
 
 	memset(bond_info->tx_hashtbl, 0, size);
 
@@ -513,7 +495,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 				 client_info->mac_dst);
 		if (!skb) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: failed to create an ARP packet\n");
+			       ": %s: Error: failed to create an ARP packet\n",
+			       client_info->slave->dev->master->name);
 			continue;
 		}
 
@@ -523,7 +506,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 			skb = vlan_put_tag(skb, client_info->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       client_info->slave->dev->master->name);
 				continue;
 			}
 		}
@@ -606,8 +590,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
 
 		if (!client_info->slave) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: found a client with no channel in "
-			       "the client's hash table\n");
+			       ": %s: Error: found a client with no channel in "
+			       "the client's hash table\n",
+			       bond->dev->name);
 			continue;
 		}
 		/*update all clients using this src_ip, that are not assigned
@@ -797,21 +782,22 @@ static int rlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
+	struct rlb_client_info *new_hashtbl;
 	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
 	int i;
 
 	spin_lock_init(&(bond_info->rx_hashtbl_lock));
 
-	_lock_rx_hashtbl(bond);
-
-	bond_info->rx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->rx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate RLB hash table\n",
+		       ": %s: Error: Failed to allocate RLB hash table\n",
 		       bond->dev->name);
-		_unlock_rx_hashtbl(bond);
 		return -1;
 	}
+	_lock_rx_hashtbl(bond);
+
+	bond_info->rx_hashtbl = new_hashtbl;
 
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
@@ -927,7 +913,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 			skb = vlan_put_tag(skb, vlan->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       bond->dev->name);
 				continue;
 			}
 		}
@@ -956,11 +943,11 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
 	s_addr.sa_family = dev->type;
 	if (dev_set_mac_address(dev, &s_addr)) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: dev_set_mac_address of dev %s failed! ALB "
+		       ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
 		       "mode requires that the base driver support setting "
 		       "the hw address also when the network device's "
 		       "interface is open\n",
-		       dev->name);
+		       dev->master->name, dev->name);
 		return -EOPNOTSUPP;
 	}
 	return 0;
@@ -1153,16 +1140,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 			 bond->alb_info.rlb_enabled);
 
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: the hw address of slave %s is in use by "
+		       ": %s: Warning: the hw address of slave %s is in use by "
 		       "the bond; giving it the hw address of %s\n",
-		       slave->dev->name, free_mac_slave->dev->name);
+		       bond->dev->name, slave->dev->name, free_mac_slave->dev->name);
 
 	} else if (has_bond_addr) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: the hw address of slave %s is in use by the "
+		       ": %s: Error: the hw address of slave %s is in use by the "
 		       "bond; couldn't find a slave with a free hw address to "
 		       "give it (this should not have happened)\n",
-		       slave->dev->name);
+		       bond->dev->name, slave->dev->name);
 		return -EFAULT;
 	}
 
@@ -1250,6 +1237,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
 			tlb_deinitialize(bond);
 			return res;
 		}
+	} else {
+		bond->alb_info.rlb_enabled = 0;
 	}
 
 	return 0;
@@ -1409,7 +1398,7 @@ void bond_alb_monitor(struct bonding *bond)
 		read_lock(&bond->curr_slave_lock);
 
 		bond_for_each_slave(bond, slave, i) {
-			alb_send_learning_packets(slave,slave->dev->dev_addr);
+			alb_send_learning_packets(slave, slave->dev->dev_addr);
 		}
 
 		read_unlock(&bond->curr_slave_lock);
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e4091cd8d654..28f2a2fd1b5a 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -18,15 +18,6 @@
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
- *
- * Changes:
- *
- * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
- * - Add support for setting bond's MAC address with special
- * handling required for ALB/TLB.
- *
- * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- * - Code cleanup and style changes
  */
 
 #ifndef __BOND_ALB_H__
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94cec3cf2a13..2582d98ef5c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -29,466 +29,6 @@
29 * b: if a hw mac address already is there, eth0's hw mac address 29 * b: if a hw mac address already is there, eth0's hw mac address
30 * will then be set from bond0. 30 * will then be set from bond0.
31 * 31 *
32 * v0.1 - first working version.
33 * v0.2 - changed stats to be calculated by summing slaves stats.
34 *
35 * Changes:
36 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
37 * - fix leaks on failure at bond_init
38 *
39 * 2000/09/30 - Willy Tarreau <willy at meta-x.org>
40 * - added trivial code to release a slave device.
41 * - fixed security bug (CAP_NET_ADMIN not checked)
42 * - implemented MII link monitoring to disable dead links :
43 * All MII capable slaves are checked every <miimon> milliseconds
44 * (100 ms seems good). This value can be changed by passing it to
45 * insmod. A value of zero disables the monitoring (default).
46 * - fixed an infinite loop in bond_xmit_roundrobin() when there's no
47 * good slave.
48 * - made the code hopefully SMP safe
49 *
50 * 2000/10/03 - Willy Tarreau <willy at meta-x.org>
51 * - optimized slave lists based on relevant suggestions from Thomas Davis
52 * - implemented active-backup method to obtain HA with two switches:
53 * stay as long as possible on the same active interface, while we
54 * also monitor the backup one (MII link status) because we want to know
55 * if we are able to switch at any time. ( pass "mode=1" to insmod )
56 * - lots of stress testings because we need it to be more robust than the
57 * wires ! :->
58 *
59 * 2000/10/09 - Willy Tarreau <willy at meta-x.org>
60 * - added up and down delays after link state change.
61 * - optimized the slaves chaining so that when we run forward, we never
62 * repass through the bond itself, but we can find it by searching
63 * backwards. Renders the deletion more difficult, but accelerates the
64 * scan.
65 * - smarter enslaving and releasing.
66 * - finer and more robust SMP locking
67 *
68 * 2000/10/17 - Willy Tarreau <willy at meta-x.org>
69 * - fixed two potential SMP race conditions
70 *
71 * 2000/10/18 - Willy Tarreau <willy at meta-x.org>
72 * - small fixes to the monitoring FSM in case of zero delays
73 * 2000/11/01 - Willy Tarreau <willy at meta-x.org>
74 * - fixed first slave not automatically used in trunk mode.
75 * 2000/11/10 : spelling of "EtherChannel" corrected.
76 * 2000/11/13 : fixed a race condition in case of concurrent accesses to ioctl().
77 * 2000/12/16 : fixed improper usage of rtnl_exlock_nowait().
78 *
79 * 2001/1/3 - Chad N. Tindel <ctindel at ieee dot org>
80 * - The bonding driver now simulates MII status monitoring, just like
81 * a normal network device. It will show that the link is down iff
82 * every slave in the bond shows that their links are down. If at least
83 * one slave is up, the bond's MII status will appear as up.
84 *
85 * 2001/2/7 - Chad N. Tindel <ctindel at ieee dot org>
86 * - Applications can now query the bond from user space to get
87 * information which may be useful. They do this by calling
88 * the BOND_INFO_QUERY ioctl. Once the app knows how many slaves
89 * are in the bond, it can call the BOND_SLAVE_INFO_QUERY ioctl to
90 * get slave specific information (# link failures, etc). See
91 * <linux/if_bonding.h> for more details. The structs of interest
92 * are ifbond and ifslave.
93 *
94 * 2001/4/5 - Chad N. Tindel <ctindel at ieee dot org>
95 * - Ported to 2.4 Kernel
96 *
97 * 2001/5/2 - Jeffrey E. Mast <jeff at mastfamily dot com>
98 * - When a device is detached from a bond, the slave device is no longer
99 * left thinking that is has a master.
100 *
101 * 2001/5/16 - Jeffrey E. Mast <jeff at mastfamily dot com>
102 * - memset did not appropriately initialized the bond rw_locks. Used
103 * rwlock_init to initialize to unlocked state to prevent deadlock when
104 * first attempting a lock
105 * - Called SET_MODULE_OWNER for bond device
106 *
107 * 2001/5/17 - Tim Anderson <tsa at mvista.com>
108 * - 2 paths for releasing for slave release; 1 through ioctl
109 * and 2) through close. Both paths need to release the same way.
110 * - the free slave in bond release is changing slave status before
111 * the free. The netdev_set_master() is intended to change slave state
112 * so it should not be done as part of the release process.
113 * - Simple rule for slave state at release: only the active in A/B and
114 * only one in the trunked case.
115 *
116 * 2001/6/01 - Tim Anderson <tsa at mvista.com>
117 * - Now call dev_close when releasing a slave so it doesn't screw up
118 * out routing table.
119 *
120 * 2001/6/01 - Chad N. Tindel <ctindel at ieee dot org>
121 * - Added /proc support for getting bond and slave information.
122 * Information is in /proc/net/<bond device>/info.
123 * - Changed the locking when calling bond_close to prevent deadlock.
124 *
125 * 2001/8/05 - Janice Girouard <girouard at us.ibm.com>
126 * - correct problem where refcnt of slave is not incremented in bond_ioctl
127 * so the system hangs when halting.
128 * - correct locking problem when unable to malloc in bond_enslave.
129 * - adding bond_xmit_xor logic.
130 * - adding multiple bond device support.
131 *
132 * 2001/8/13 - Erik Habbinga <erik_habbinga at hp dot com>
133 * - correct locking problem with rtnl_exlock_nowait
134 *
135 * 2001/8/23 - Janice Girouard <girouard at us.ibm.com>
136 * - bzero initial dev_bonds, to correct oops
137 * - convert SIOCDEVPRIVATE to new MII ioctl calls
138 *
139 * 2001/9/13 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
140 * - Add the BOND_CHANGE_ACTIVE ioctl implementation
141 *
142 * 2001/9/14 - Mark Huth <mhuth at mvista dot com>
143 * - Change MII_LINK_READY to not check for end of auto-negotiation,
144 * but only for an up link.
145 *
146 * 2001/9/20 - Chad N. Tindel <ctindel at ieee dot org>
147 * - Add the device field to bonding_t. Previously the net_device
148 * corresponding to a bond wasn't available from the bonding_t
149 * structure.
150 *
151 * 2001/9/25 - Janice Girouard <girouard at us.ibm.com>
152 * - add arp_monitor for active backup mode
153 *
154 * 2001/10/23 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
155 * - Various memory leak fixes
156 *
157 * 2001/11/5 - Mark Huth <mark dot huth at mvista dot com>
158 * - Don't take rtnl lock in bond_mii_monitor as it deadlocks under
159 * certain hotswap conditions.
160 * Note: this same change may be required in bond_arp_monitor ???
161 * - Remove possibility of calling bond_sethwaddr with NULL slave_dev ptr
162 * - Handle hot swap ethernet interface deregistration events to remove
163 * kernel oops following hot swap of enslaved interface
164 *
165 * 2002/1/2 - Chad N. Tindel <ctindel at ieee dot org>
166 * - Restore original slave flags at release time.
167 *
168 * 2002/02/18 - Erik Habbinga <erik_habbinga at hp dot com>
169 * - bond_release(): calling kfree on our_slave after call to
170 * bond_restore_slave_flags, not before
171 * - bond_enslave(): saving slave flags into original_flags before
172 * call to netdev_set_master, so the IFF_SLAVE flag doesn't end
173 * up in original_flags
174 *
175 * 2002/04/05 - Mark Smith <mark.smith at comdev dot cc> and
176 * Steve Mead <steve.mead at comdev dot cc>
177 * - Port Gleb Natapov's multicast support patchs from 2.4.12
178 * to 2.4.18 adding support for multicast.
179 *
180 * 2002/06/10 - Tony Cureington <tony.cureington * hp_com>
181 * - corrected uninitialized pointer (ifr.ifr_data) in bond_check_dev_link;
182 * actually changed function to use MIIPHY, then MIIREG, and finally
183 * ETHTOOL to determine the link status
184 * - fixed bad ifr_data pointer assignments in bond_ioctl
185 * - corrected mode 1 being reported as active-backup in bond_get_info;
186 * also added text to distinguish type of load balancing (rr or xor)
187 * - change arp_ip_target module param from "1-12s" (array of 12 ptrs)
188 * to "s" (a single ptr)
189 *
190 * 2002/08/30 - Jay Vosburgh <fubar at us dot ibm dot com>
191 * - Removed acquisition of xmit_lock in set_multicast_list; caused
192 * deadlock on SMP (lock is held by caller).
193 * - Revamped SIOCGMIIPHY, SIOCGMIIREG portion of bond_check_dev_link().
194 *
195 * 2002/09/18 - Jay Vosburgh <fubar at us dot ibm dot com>
196 * - Fixed up bond_check_dev_link() (and callers): removed some magic
197 * numbers, banished local MII_ defines, wrapped ioctl calls to
198 * prevent EFAULT errors
199 *
200 * 2002/9/30 - Jay Vosburgh <fubar at us dot ibm dot com>
201 * - make sure the ip target matches the arp_target before saving the
202 * hw address.
203 *
204 * 2002/9/30 - Dan Eisner <eisner at 2robots dot com>
205 * - make sure my_ip is set before taking down the link, since
206 * not all switches respond if the source ip is not set.
207 *
208 * 2002/10/8 - Janice Girouard <girouard at us dot ibm dot com>
209 * - read in the local ip address when enslaving a device
210 * - add primary support
211 * - make sure 2*arp_interval has passed when a new device
212 * is brought on-line before taking it down.
213 *
214 * 2002/09/11 - Philippe De Muyter <phdm at macqel dot be>
215 * - Added bond_xmit_broadcast logic.
216 * - Added bond_mode() support function.
217 *
218 * 2002/10/26 - Laurent Deniel <laurent.deniel at free.fr>
219 * - allow to register multicast addresses only on active slave
220 * (useful in active-backup mode)
221 * - add multicast module parameter
222 * - fix deletion of multicast groups after unloading module
223 *
224 * 2002/11/06 - Kameshwara Rayaprolu <kameshwara.rao * wipro_com>
225 * - Changes to prevent panic from closing the device twice; if we close
226 * the device in bond_release, we must set the original_flags to down
227 * so it won't be closed again by the network layer.
228 *
229 * 2002/11/07 - Tony Cureington <tony.cureington * hp_com>
230 * - Fix arp_target_hw_addr memory leak
231 * - Created activebackup_arp_monitor function to handle arp monitoring
232 * in active backup mode - the bond_arp_monitor had several problems...
233 * such as allowing slaves to tx arps sequentially without any delay
234 * for a response
235 * - Renamed bond_arp_monitor to loadbalance_arp_monitor and re-wrote
236 * this function to just handle arp monitoring in load-balancing mode;
237 * it is a lot more compact now
238 * - Changes to ensure one and only one slave transmits in active-backup
239 * mode
240 * - Robustesize parameters; warn users about bad combinations of
241 * parameters; also if miimon is specified and a network driver does
242 * not support MII or ETHTOOL, inform the user of this
243 * - Changes to support link_failure_count when in arp monitoring mode
244 * - Fix up/down delay reported in /proc
245 * - Added version; log version; make version available from "modinfo -d"
246 * - Fixed problem in bond_check_dev_link - if the first IOCTL (SIOCGMIIPH)
247 * failed, the ETHTOOL ioctl never got a chance
248 *
249 * 2002/11/16 - Laurent Deniel <laurent.deniel at free.fr>
250 * - fix multicast handling in activebackup_arp_monitor
251 * - remove one unnecessary and confusing curr_active_slave == slave test
252 * in activebackup_arp_monitor
253 *
254 * 2002/11/17 - Laurent Deniel <laurent.deniel at free.fr>
255 * - fix bond_slave_info_query when slave_id = num_slaves
256 *
257 * 2002/11/19 - Janice Girouard <girouard at us dot ibm dot com>
258 * - correct ifr_data reference. Update ifr_data reference
259 * to mii_ioctl_data struct values to avoid confusion.
260 *
261 * 2002/11/22 - Bert Barbe <bert.barbe at oracle dot com>
262 * - Add support for multiple arp_ip_target
263 *
264 * 2002/12/13 - Jay Vosburgh <fubar at us dot ibm dot com>
265 * - Changed to allow text strings for mode and multicast, e.g.,
266 * insmod bonding mode=active-backup. The numbers still work.
267 * One change: an invalid choice will cause module load failure,
268 * rather than the previous behavior of just picking one.
269 * - Minor cleanups; got rid of dup ctype stuff, atoi function
270 *
271 * 2003/02/07 - Jay Vosburgh <fubar at us dot ibm dot com>
272 * - Added use_carrier module parameter that causes miimon to
273 * use netif_carrier_ok() test instead of MII/ETHTOOL ioctls.
274 * - Minor cleanups; consolidated ioctl calls to one function.
275 *
276 * 2003/02/07 - Tony Cureington <tony.cureington * hp_com>
277 * - Fix bond_mii_monitor() logic error that could result in
278 * bonding round-robin mode ignoring links after failover/recovery
279 *
280 * 2003/03/17 - Jay Vosburgh <fubar at us dot ibm dot com>
281 * - kmalloc fix (GFP_KERNEL to GFP_ATOMIC) reported by
282 * Shmulik dot Hen at intel.com.
283 * - Based on discussion on mailing list, changed use of
284 * update_slave_cnt(), created wrapper functions for adding/removing
285 * slaves, changed bond_xmit_xor() to check slave_cnt instead of
286 * checking slave and slave->dev (which only worked by accident).
287 * - Misc code cleanup: get arp_send() prototype from header file,
288 * add max_bonds to bonding.txt.
289 *
290 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
291 * Shmulik Hen <shmulik.hen at intel dot com>
292 * - Make sure only bond_attach_slave() and bond_detach_slave() can
293 * manipulate the slave list, including slave_cnt, even when in
294 * bond_release_all().
295 * - Fixed hang in bond_release() with traffic running:
296 * netdev_set_master() must not be called from within the bond lock.
297 *
298 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
299 * Shmulik Hen <shmulik.hen at intel dot com>
300 * - Fixed hang in bond_enslave() with traffic running:
301 * netdev_set_master() must not be called from within the bond lock.
302 *
303 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>
304 * - Added support for getting slave's speed and duplex via ethtool.
305 * Needed for 802.3ad and other future modes.
306 *
307 * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
308 * Shmulik Hen <shmulik.hen at intel dot com>
309 * - Enable support of modes that need to use the unique mac address of
310 * each slave.
311 * * bond_enslave(): Moved setting the slave's mac address, and
312 * openning it, from the application to the driver. This breaks
313 * backward comaptibility with old versions of ifenslave that open
314 * the slave before enalsving it !!!.
315 * * bond_release(): The driver also takes care of closing the slave
316 * and restoring its original mac address.
317 * - Removed the code that restores all base driver's flags.
318 * Flags are automatically restored once all undo stages are done
319 * properly.
320 * - Block possibility of enslaving before the master is up. This
321 * prevents putting the system in an unstable state.
322 *
323 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
324 * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
325 * Shmulik Hen <shmulik.hen at intel dot com>
326 * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
327 *
328 * 2003/05/01 - Amir Noam <amir.noam at intel dot com>
329 * - Added ABI version control to restore compatibility between
330 * new/old ifenslave and new/old bonding.
331 *
332 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
333 * - Fixed bug in bond_release_all(): save old value of curr_active_slave
334 * before setting it to NULL.
335 * - Changed driver versioning scheme to include version number instead
336 * of release date (that is already in another field). There are 3
337 * fields X.Y.Z where:
338 * X - Major version - big behavior changes
339 * Y - Minor version - addition of features
340 * Z - Extra version - minor changes and bug fixes
341 * The current version is 1.0.0 as a base line.
342 *
343 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
344 * Amir Noam <amir.noam at intel dot com>
345 * - Added support for lacp_rate module param.
346 * - Code beautification and style changes (mainly in comments).
347 * new version - 1.0.1
348 *
349 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
350 * - Based on discussion on mailing list, changed locking scheme
351 * to use lock/unlock or lock_bh/unlock_bh appropriately instead
352 * of lock_irqsave/unlock_irqrestore. The new scheme helps exposing
353 * hidden bugs and solves system hangs that occurred due to the fact
354 * that holding lock_irqsave doesn't prevent softirqs from running.
355 * This also increases total throughput since interrupts are not
356 * blocked on each transmitted packets or monitor timeout.
357 * new version - 2.0.0
358 *
359 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
360 * - Added support for Transmit load balancing mode.
361 * - Concentrate all assignments of curr_active_slave to a single point
362 * so specific modes can take actions when the primary adapter is
363 * changed.
364 * - Take the updelay parameter into consideration during bond_enslave
365 * since some adapters loose their link during setting the device.
366 * - Renamed bond_3ad_link_status_changed() to
367 * bond_3ad_handle_link_change() for compatibility with TLB.
368 * new version - 2.1.0
369 *
370 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
371 * - Added support for Adaptive load balancing mode which is
372 * equivalent to Transmit load balancing + Receive load balancing.
373 * new version - 2.2.0
374 *
375 * 2003/05/15 - Jay Vosburgh <fubar at us dot ibm dot com>
376 * - Applied fix to activebackup_arp_monitor posted to bonding-devel
377 * by Tony Cureington <tony.cureington * hp_com>. Fixes ARP
378 * monitor endless failover bug. Version to 2.2.10
379 *
380 * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
381 * - Fixed bug in ABI version control - Don't commit to a specific
382 * ABI version if receiving unsupported ioctl commands.
383 *
384 * 2003/05/22 - Jay Vosburgh <fubar at us dot ibm dot com>
385 * - Fix ifenslave -c causing bond to loose existing routes;
386 * added bond_set_mac_address() that doesn't require the
387 * bond to be down.
388 * - In conjunction with fix for ifenslave -c, in
389 * bond_change_active(), changing to the already active slave
390 * is no longer an error (it successfully does nothing).
391 *
392 * 2003/06/30 - Amir Noam <amir.noam at intel dot com>
393 * - Fixed bond_change_active() for ALB/TLB modes.
394 * Version to 2.2.14.
395 *
396 * 2003/07/29 - Amir Noam <amir.noam at intel dot com>
397 * - Fixed ARP monitoring bug.
398 * Version to 2.2.15.
399 *
400 * 2003/07/31 - Willy Tarreau <willy at ods dot org>
401 * - Fixed kernel panic when using ARP monitoring without
402 * setting bond's IP address.
403 * Version to 2.2.16.
404 *
405 * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
406 * - Back port from 2.6: use alloc_netdev(); fix /proc handling;
407 * made stats a part of bond struct so no need to allocate
408 * and free it separately; use standard list operations instead
409 * of pre-allocated array of bonds.
410 * Version to 2.3.0.
411 *
412 * 2003/08/07 - Jay Vosburgh <fubar at us dot ibm dot com>,
413 * Amir Noam <amir.noam at intel dot com> and
414 * Shmulik Hen <shmulik.hen at intel dot com>
415 * - Propagating master's settings: Distinguish between modes that
416 * use a primary slave from those that don't, and propagate settings
417 * accordingly; Consolidate change_active opeartions and add
418 * reselect_active and find_best opeartions; Decouple promiscuous
419 * handling from the multicast mode setting; Add support for changing
420 * HW address and MTU with proper unwind; Consolidate procfs code,
421 * add CHANGENAME handler; Enhance netdev notification handling.
422 * Version to 2.4.0.
423 *
424 * 2003/09/15 - Stephen Hemminger <shemminger at osdl dot org>,
425 * Amir Noam <amir.noam at intel dot com>
426 * - Convert /proc to seq_file interface.
427 * Change /proc/net/bondX/info to /proc/net/bonding/bondX.
428 * Set version to 2.4.1.
429 *
430 * 2003/11/20 - Amir Noam <amir.noam at intel dot com>
431 * - Fix /proc creation/destruction.
432 *
433 * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
434 * - Massive cleanup - Set version to 2.5.0
435 * Code changes:
436 * o Consolidate format of prints and debug prints.
437 * o Remove bonding_t/slave_t typedefs and consolidate all casts.
438 * o Remove dead code and unnecessary checks.
439 * o Consolidate starting/stopping timers.
440 * o Consolidate handling of primary module param throughout the code.
441 * o Removed multicast module param support - all settings are done
442 * according to mode.
443 * o Slave list iteration - bond is no longer part of the list,
444 * added cyclic list iteration macros.
445 * o Consolidate error handling in all xmit functions.
446 * Style changes:
447 * o Consolidate function naming and declarations.
448 * o Consolidate function params and local variables names.
449 * o Consolidate return values.
450 * o Consolidate curly braces.
451 * o Consolidate conditionals format.
452 * o Change struct member names and types.
453 * o Chomp trailing spaces, remove empty lines, fix indentations.
454 * o Re-organize code according to context.
455 *
456 * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
457 * - Fixed: Cannot remove and re-enslave the original active slave.
458 * - Fixed: Releasing the original active slave causes mac address
459 * duplication.
460 * - Add support for slaves that use ethtool_ops.
461 * Set version to 2.5.3.
462 *
463 * 2004/01/05 - Amir Noam <amir.noam at intel dot com>
464 * - Save bonding parameters per bond instead of using the global values.
465 * Set version to 2.5.4.
466 *
467 * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
468 * - Enhance VLAN support:
469 * * Add support for VLAN hardware acceleration capable slaves.
470 * * Add capability to tag self generated packets in ALB/TLB modes.
471 * Set version to 2.6.0.
472 * 2004/10/29 - Mitch Williams <mitch.a.williams at intel dot com>
473 * - Fixed bug when unloading module while using 802.3ad. If
474 * spinlock debugging is turned on, this causes a stack dump.
475 * Solution is to move call to dev_remove_pack outside of the
476 * spinlock.
477 * Set version to 2.6.1.
478 * 2005/06/05 - Jay Vosburgh <fubar@us.ibm.com>
479 * - Support for generating gratuitous ARPs in active-backup mode.
480 * Includes support for VLAN tagging all bonding-generated ARPs
481 * as needed. Set version to 2.6.2.
482 * 2005/06/08 - Jason Gabler <jygabler at lbl dot gov>
483 * - alternate hashing policy support for mode 2
484 * * Added kernel parameter "xmit_hash_policy" to allow the selection
485 * of different hashing policies for mode 2. The original mode 2
486 * policy is the default, now found in xmit_hash_policy_layer2().
487 * * Added xmit_hash_policy_layer34()
488 * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
489 * Set version to 2.6.3.
490 * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
491 * - Removed backwards compatibility for old ifenslaves. Version 2.6.4.
492 */ 32 */
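For illustration, a minimal sketch of the two transmit hash policies described in the 2005/06/08 changelog entry above. These helpers are hypothetical stand-ins, not the driver's xmit_hash_policy_layer2()/xmit_hash_policy_layer34(), which operate on the outgoing sk_buff:

/* Illustrative only: layer 2 folds the Ethernet addresses, layer 3+4 also
 * folds IP addresses and ports so distinct flows can land on different
 * slaves.  Assumes slave_count > 0.
 */
static inline int example_hash_layer2(const unsigned char *src_mac,
				      const unsigned char *dst_mac,
				      int slave_count)
{
	return (src_mac[5] ^ dst_mac[5]) % slave_count;
}

static inline int example_hash_layer34(unsigned int saddr, unsigned int daddr,
				       unsigned short sport, unsigned short dport,
				       int slave_count)
{
	unsigned int fold = saddr ^ daddr ^ ((unsigned int)sport ^ dport);

	return fold % slave_count;
}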
493 33
494//#define BONDING_DEBUG 1 34//#define BONDING_DEBUG 1
@@ -557,6 +97,7 @@ static char *lacp_rate = NULL;
557static char *xmit_hash_policy = NULL; 97static char *xmit_hash_policy = NULL;
558static int arp_interval = BOND_LINK_ARP_INTERV; 98static int arp_interval = BOND_LINK_ARP_INTERV;
559static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, }; 99static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
100struct bond_params bonding_defaults;
560 101
561module_param(max_bonds, int, 0); 102module_param(max_bonds, int, 0);
562MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 103MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -565,17 +106,24 @@ MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
565module_param(updelay, int, 0); 106module_param(updelay, int, 0);
566MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds"); 107MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
567module_param(downdelay, int, 0); 108module_param(downdelay, int, 0);
568MODULE_PARM_DESC(downdelay, "Delay before considering link down, in milliseconds"); 109MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
110 "in milliseconds");
569module_param(use_carrier, int, 0); 111module_param(use_carrier, int, 0);
570MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; 0 for off, 1 for on (default)"); 112MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
113 "0 for off, 1 for on (default)");
571module_param(mode, charp, 0); 114module_param(mode, charp, 0);
572MODULE_PARM_DESC(mode, "Mode of operation : 0 for round robin, 1 for active-backup, 2 for xor"); 115MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
116 "1 for active-backup, 2 for balance-xor, "
117 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
118 "6 for balance-alb");
573module_param(primary, charp, 0); 119module_param(primary, charp, 0);
574MODULE_PARM_DESC(primary, "Primary network device to use"); 120MODULE_PARM_DESC(primary, "Primary network device to use");
575module_param(lacp_rate, charp, 0); 121module_param(lacp_rate, charp, 0);
576MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner (slow/fast)"); 122MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
123 "(slow/fast)");
577module_param(xmit_hash_policy, charp, 0); 124module_param(xmit_hash_policy, charp, 0);
578MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method : 0 for layer 2 (default), 1 for layer 3+4"); 125MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
126 ", 1 for layer 3+4");
579module_param(arp_interval, int, 0); 127module_param(arp_interval, int, 0);
580MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); 128MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
581module_param_array(arp_ip_target, charp, NULL, 0); 129module_param_array(arp_ip_target, charp, NULL, 0);
@@ -586,30 +134,27 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
586static const char *version = 134static const char *version =
587 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 135 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
588 136
589static LIST_HEAD(bond_dev_list); 137LIST_HEAD(bond_dev_list);
590 138
591#ifdef CONFIG_PROC_FS 139#ifdef CONFIG_PROC_FS
592static struct proc_dir_entry *bond_proc_dir = NULL; 140static struct proc_dir_entry *bond_proc_dir = NULL;
593#endif 141#endif
594 142
143extern struct rw_semaphore bonding_rwsem;
595static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; 144static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
596static int arp_ip_count = 0; 145static int arp_ip_count = 0;
597static int bond_mode = BOND_MODE_ROUNDROBIN; 146static int bond_mode = BOND_MODE_ROUNDROBIN;
598static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; 147static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
599static int lacp_fast = 0; 148static int lacp_fast = 0;
600 149
601struct bond_parm_tbl {
602 char *modename;
603 int mode;
604};
605 150
606static struct bond_parm_tbl bond_lacp_tbl[] = { 151struct bond_parm_tbl bond_lacp_tbl[] = {
607{ "slow", AD_LACP_SLOW}, 152{ "slow", AD_LACP_SLOW},
608{ "fast", AD_LACP_FAST}, 153{ "fast", AD_LACP_FAST},
609{ NULL, -1}, 154{ NULL, -1},
610}; 155};
611 156
612static struct bond_parm_tbl bond_mode_tbl[] = { 157struct bond_parm_tbl bond_mode_tbl[] = {
613{ "balance-rr", BOND_MODE_ROUNDROBIN}, 158{ "balance-rr", BOND_MODE_ROUNDROBIN},
614{ "active-backup", BOND_MODE_ACTIVEBACKUP}, 159{ "active-backup", BOND_MODE_ACTIVEBACKUP},
615{ "balance-xor", BOND_MODE_XOR}, 160{ "balance-xor", BOND_MODE_XOR},
@@ -620,7 +165,7 @@ static struct bond_parm_tbl bond_mode_tbl[] = {
620{ NULL, -1}, 165{ NULL, -1},
621}; 166};
622 167
623static struct bond_parm_tbl xmit_hashtype_tbl[] = { 168struct bond_parm_tbl xmit_hashtype_tbl[] = {
624{ "layer2", BOND_XMIT_POLICY_LAYER2}, 169{ "layer2", BOND_XMIT_POLICY_LAYER2},
625{ "layer3+4", BOND_XMIT_POLICY_LAYER34}, 170{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
626{ NULL, -1}, 171{ NULL, -1},
@@ -628,12 +173,11 @@ static struct bond_parm_tbl xmit_hashtype_tbl[] = {
628 173
629/*-------------------------- Forward declarations ---------------------------*/ 174/*-------------------------- Forward declarations ---------------------------*/
630 175
631static inline void bond_set_mode_ops(struct bonding *bond, int mode);
632static void bond_send_gratuitous_arp(struct bonding *bond); 176static void bond_send_gratuitous_arp(struct bonding *bond);
633 177
634/*---------------------------- General routines -----------------------------*/ 178/*---------------------------- General routines -----------------------------*/
635 179
636static const char *bond_mode_name(int mode) 180const char *bond_mode_name(int mode)
637{ 181{
638 switch (mode) { 182 switch (mode) {
639 case BOND_MODE_ROUNDROBIN : 183 case BOND_MODE_ROUNDROBIN :
@@ -910,7 +454,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
910 res = bond_add_vlan(bond, vid); 454 res = bond_add_vlan(bond, vid);
911 if (res) { 455 if (res) {
912 printk(KERN_ERR DRV_NAME 456 printk(KERN_ERR DRV_NAME
913 ": %s: Failed to add vlan id %d\n", 457 ": %s: Error: Failed to add vlan id %d\n",
914 bond_dev->name, vid); 458 bond_dev->name, vid);
915 } 459 }
916} 460}
@@ -944,7 +488,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
944 res = bond_del_vlan(bond, vid); 488 res = bond_del_vlan(bond, vid);
945 if (res) { 489 if (res) {
946 printk(KERN_ERR DRV_NAME 490 printk(KERN_ERR DRV_NAME
947 ": %s: Failed to remove vlan id %d\n", 491 ": %s: Error: Failed to remove vlan id %d\n",
948 bond_dev->name, vid); 492 bond_dev->name, vid);
949 } 493 }
950} 494}
@@ -1449,7 +993,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1449 * 993 *
1450 * Warning: Caller must hold curr_slave_lock for writing. 994 * Warning: Caller must hold curr_slave_lock for writing.
1451 */ 995 */
1452static void bond_change_active_slave(struct bonding *bond, struct slave *new_active) 996void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1453{ 997{
1454 struct slave *old_active = bond->curr_active_slave; 998 struct slave *old_active = bond->curr_active_slave;
1455 999
@@ -1523,7 +1067,7 @@ static void bond_change_active_slave(struct bonding *bond, struct slave *new_act
1523 * 1067 *
1524 * Warning: Caller must hold curr_slave_lock for writing. 1068 * Warning: Caller must hold curr_slave_lock for writing.
1525 */ 1069 */
1526static void bond_select_active_slave(struct bonding *bond) 1070void bond_select_active_slave(struct bonding *bond)
1527{ 1071{
1528 struct slave *best_slave; 1072 struct slave *best_slave;
1529 1073
@@ -1591,7 +1135,7 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1591 1135
1592/*---------------------------------- IOCTL ----------------------------------*/ 1136/*---------------------------------- IOCTL ----------------------------------*/
1593 1137
1594static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev) 1138int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
1595{ 1139{
1596 dprintk("bond_dev=%p\n", bond_dev); 1140 dprintk("bond_dev=%p\n", bond_dev);
1597 dprintk("slave_dev=%p\n", slave_dev); 1141 dprintk("slave_dev=%p\n", slave_dev);
@@ -1631,7 +1175,7 @@ static int bond_compute_features(struct bonding *bond)
1631} 1175}
1632 1176
1633/* enslave device <slave> to bond device <master> */ 1177/* enslave device <slave> to bond device <master> */
1634static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1178int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1635{ 1179{
1636 struct bonding *bond = bond_dev->priv; 1180 struct bonding *bond = bond_dev->priv;
1637 struct slave *new_slave = NULL; 1181 struct slave *new_slave = NULL;
@@ -1644,8 +1188,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1644 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1188 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1645 slave_dev->do_ioctl == NULL) { 1189 slave_dev->do_ioctl == NULL) {
1646 printk(KERN_WARNING DRV_NAME 1190 printk(KERN_WARNING DRV_NAME
1647 ": Warning : no link monitoring support for %s\n", 1191 ": %s: Warning: no link monitoring support for %s\n",
1648 slave_dev->name); 1192 bond_dev->name, slave_dev->name);
1649 } 1193 }
1650 1194
1651 /* bond must be initialized by bond_open() before enslaving */ 1195 /* bond must be initialized by bond_open() before enslaving */
@@ -1666,17 +1210,17 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1666 dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1210 dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1667 if (!list_empty(&bond->vlan_list)) { 1211 if (!list_empty(&bond->vlan_list)) {
1668 printk(KERN_ERR DRV_NAME 1212 printk(KERN_ERR DRV_NAME
1669 ": Error: cannot enslave VLAN " 1213 ": %s: Error: cannot enslave VLAN "
1670 "challenged slave %s on VLAN enabled " 1214 "challenged slave %s on VLAN enabled "
1671 "bond %s\n", slave_dev->name, 1215 "bond %s\n", bond_dev->name, slave_dev->name,
1672 bond_dev->name); 1216 bond_dev->name);
1673 return -EPERM; 1217 return -EPERM;
1674 } else { 1218 } else {
1675 printk(KERN_WARNING DRV_NAME 1219 printk(KERN_WARNING DRV_NAME
1676 ": Warning: enslaved VLAN challenged " 1220 ": %s: Warning: enslaved VLAN challenged "
1677 "slave %s. Adding VLANs will be blocked as " 1221 "slave %s. Adding VLANs will be blocked as "
1678 "long as %s is part of bond %s\n", 1222 "long as %s is part of bond %s\n",
1679 slave_dev->name, slave_dev->name, 1223 bond_dev->name, slave_dev->name, slave_dev->name,
1680 bond_dev->name); 1224 bond_dev->name);
1681 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1225 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1682 } 1226 }
@@ -1706,12 +1250,11 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1706 1250
1707 if (slave_dev->set_mac_address == NULL) { 1251 if (slave_dev->set_mac_address == NULL) {
1708 printk(KERN_ERR DRV_NAME 1252 printk(KERN_ERR DRV_NAME
1709 ": Error: The slave device you specified does " 1253 ": %s: Error: The slave device you specified does "
1710 "not support setting the MAC address.\n"); 1254 "not support setting the MAC address. "
1711 printk(KERN_ERR 1255 "Your kernel likely does not support slave "
1712 "Your kernel likely does not support slave devices.\n"); 1256 "devices.\n", bond_dev->name);
1713 1257 res = -EOPNOTSUPP;
1714 res = -EOPNOTSUPP;
1715 goto err_undo_flags; 1258 goto err_undo_flags;
1716 } 1259 }
1717 1260
@@ -1827,21 +1370,21 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1827 * the messages for netif_carrier. 1370 * the messages for netif_carrier.
1828 */ 1371 */
1829 printk(KERN_WARNING DRV_NAME 1372 printk(KERN_WARNING DRV_NAME
1830 ": Warning: MII and ETHTOOL support not " 1373 ": %s: Warning: MII and ETHTOOL support not "
1831 "available for interface %s, and " 1374 "available for interface %s, and "
1832 "arp_interval/arp_ip_target module parameters " 1375 "arp_interval/arp_ip_target module parameters "
1833 "not specified, thus bonding will not detect " 1376 "not specified, thus bonding will not detect "
1834 "link failures! see bonding.txt for details.\n", 1377 "link failures! see bonding.txt for details.\n",
1835 slave_dev->name); 1378 bond_dev->name, slave_dev->name);
1836 } else if (link_reporting == -1) { 1379 } else if (link_reporting == -1) {
1837			/* unable to get link status using mii/ethtool */ 1380			/* unable to get link status using mii/ethtool */
1838 printk(KERN_WARNING DRV_NAME 1381 printk(KERN_WARNING DRV_NAME
1839 ": Warning: can't get link status from " 1382 ": %s: Warning: can't get link status from "
1840 "interface %s; the network driver associated " 1383 "interface %s; the network driver associated "
1841 "with this interface does not support MII or " 1384 "with this interface does not support MII or "
1842 "ETHTOOL link status reporting, thus miimon " 1385 "ETHTOOL link status reporting, thus miimon "
1843 "has no effect on this interface.\n", 1386 "has no effect on this interface.\n",
1844 slave_dev->name); 1387 bond_dev->name, slave_dev->name);
1845 } 1388 }
1846 } 1389 }
1847 1390
@@ -1868,15 +1411,15 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1868 if (bond_update_speed_duplex(new_slave) && 1411 if (bond_update_speed_duplex(new_slave) &&
1869 (new_slave->link != BOND_LINK_DOWN)) { 1412 (new_slave->link != BOND_LINK_DOWN)) {
1870 printk(KERN_WARNING DRV_NAME 1413 printk(KERN_WARNING DRV_NAME
1871 ": Warning: failed to get speed and duplex from %s, " 1414 ": %s: Warning: failed to get speed and duplex from %s, "
1872 "assumed to be 100Mb/sec and Full.\n", 1415 "assumed to be 100Mb/sec and Full.\n",
1873 new_slave->dev->name); 1416 bond_dev->name, new_slave->dev->name);
1874 1417
1875 if (bond->params.mode == BOND_MODE_8023AD) { 1418 if (bond->params.mode == BOND_MODE_8023AD) {
1876 printk(KERN_WARNING 1419 printk(KERN_WARNING DRV_NAME
1877 "Operation of 802.3ad mode requires ETHTOOL " 1420 ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
1878 "support in base driver for proper aggregator " 1421 "support in base driver for proper aggregator "
1879 "selection.\n"); 1422 "selection.\n", bond_dev->name);
1880 } 1423 }
1881 } 1424 }
1882 1425
@@ -1958,6 +1501,10 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1958 1501
1959 write_unlock_bh(&bond->lock); 1502 write_unlock_bh(&bond->lock);
1960 1503
1504 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1505 if (res)
1506 goto err_unset_master;
1507
1961 printk(KERN_INFO DRV_NAME 1508 printk(KERN_INFO DRV_NAME
1962 ": %s: enslaving %s as a%s interface with a%s link.\n", 1509 ": %s: enslaving %s as a%s interface with a%s link.\n",
1963 bond_dev->name, slave_dev->name, 1510 bond_dev->name, slave_dev->name,
@@ -1999,7 +1546,7 @@ err_undo_flags:
1999 * for Bonded connections: 1546 * for Bonded connections:
2000 * The first up interface should be left on and all others downed. 1547 * The first up interface should be left on and all others downed.
2001 */ 1548 */
2002static int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 1549int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2003{ 1550{
2004 struct bonding *bond = bond_dev->priv; 1551 struct bonding *bond = bond_dev->priv;
2005 struct slave *slave, *oldcurrent; 1552 struct slave *slave, *oldcurrent;
@@ -2010,7 +1557,7 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2010 if (!(slave_dev->flags & IFF_SLAVE) || 1557 if (!(slave_dev->flags & IFF_SLAVE) ||
2011 (slave_dev->master != bond_dev)) { 1558 (slave_dev->master != bond_dev)) {
2012 printk(KERN_ERR DRV_NAME 1559 printk(KERN_ERR DRV_NAME
2013 ": Error: %s: cannot release %s.\n", 1560 ": %s: Error: cannot release %s.\n",
2014 bond_dev->name, slave_dev->name); 1561 bond_dev->name, slave_dev->name);
2015 return -EINVAL; 1562 return -EINVAL;
2016 } 1563 }
@@ -2031,11 +1578,12 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2031 ETH_ALEN); 1578 ETH_ALEN);
2032 if (!mac_addr_differ && (bond->slave_cnt > 1)) { 1579 if (!mac_addr_differ && (bond->slave_cnt > 1)) {
2033 printk(KERN_WARNING DRV_NAME 1580 printk(KERN_WARNING DRV_NAME
2034 ": Warning: the permanent HWaddr of %s " 1581 ": %s: Warning: the permanent HWaddr of %s "
2035 "- %02X:%02X:%02X:%02X:%02X:%02X - is " 1582 "- %02X:%02X:%02X:%02X:%02X:%02X - is "
2036 "still in use by %s. Set the HWaddr of " 1583 "still in use by %s. Set the HWaddr of "
2037 "%s to a different address to avoid " 1584 "%s to a different address to avoid "
2038 "conflicts.\n", 1585 "conflicts.\n",
1586 bond_dev->name,
2039 slave_dev->name, 1587 slave_dev->name,
2040 slave->perm_hwaddr[0], 1588 slave->perm_hwaddr[0],
2041 slave->perm_hwaddr[1], 1589 slave->perm_hwaddr[1],
@@ -2111,24 +1659,28 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2111 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1659 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2112 } else { 1660 } else {
2113 printk(KERN_WARNING DRV_NAME 1661 printk(KERN_WARNING DRV_NAME
2114 ": Warning: clearing HW address of %s while it " 1662 ": %s: Warning: clearing HW address of %s while it "
2115 "still has VLANs.\n", 1663 "still has VLANs.\n",
2116 bond_dev->name); 1664 bond_dev->name, bond_dev->name);
2117 printk(KERN_WARNING DRV_NAME 1665 printk(KERN_WARNING DRV_NAME
2118 ": When re-adding slaves, make sure the bond's " 1666 ": %s: When re-adding slaves, make sure the bond's "
2119 "HW address matches its VLANs'.\n"); 1667 "HW address matches its VLANs'.\n",
1668 bond_dev->name);
2120 } 1669 }
2121 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 1670 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2122 !bond_has_challenged_slaves(bond)) { 1671 !bond_has_challenged_slaves(bond)) {
2123 printk(KERN_INFO DRV_NAME 1672 printk(KERN_INFO DRV_NAME
2124 ": last VLAN challenged slave %s " 1673 ": %s: last VLAN challenged slave %s "
2125 "left bond %s. VLAN blocking is removed\n", 1674 "left bond %s. VLAN blocking is removed\n",
2126 slave_dev->name, bond_dev->name); 1675 bond_dev->name, slave_dev->name, bond_dev->name);
2127 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; 1676 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
2128 } 1677 }
2129 1678
2130 write_unlock_bh(&bond->lock); 1679 write_unlock_bh(&bond->lock);
2131 1680
1681 /* must do this from outside any spinlocks */
1682 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1683
2132 bond_del_vlans_from_slave(bond, slave_dev); 1684 bond_del_vlans_from_slave(bond, slave_dev);
2133 1685
2134 /* If the mode USES_PRIMARY, then we should only remove its 1686 /* If the mode USES_PRIMARY, then we should only remove its
@@ -2220,6 +1772,7 @@ static int bond_release_all(struct net_device *bond_dev)
2220 */ 1772 */
2221 write_unlock_bh(&bond->lock); 1773 write_unlock_bh(&bond->lock);
2222 1774
1775 bond_destroy_slave_symlinks(bond_dev, slave_dev);
2223 bond_del_vlans_from_slave(bond, slave_dev); 1776 bond_del_vlans_from_slave(bond, slave_dev);
2224 1777
2225 /* If the mode USES_PRIMARY, then we should only remove its 1778 /* If the mode USES_PRIMARY, then we should only remove its
@@ -2274,12 +1827,13 @@ static int bond_release_all(struct net_device *bond_dev)
2274 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1827 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2275 } else { 1828 } else {
2276 printk(KERN_WARNING DRV_NAME 1829 printk(KERN_WARNING DRV_NAME
2277 ": Warning: clearing HW address of %s while it " 1830 ": %s: Warning: clearing HW address of %s while it "
2278 "still has VLANs.\n", 1831 "still has VLANs.\n",
2279 bond_dev->name); 1832 bond_dev->name, bond_dev->name);
2280 printk(KERN_WARNING DRV_NAME 1833 printk(KERN_WARNING DRV_NAME
2281 ": When re-adding slaves, make sure the bond's " 1834 ": %s: When re-adding slaves, make sure the bond's "
2282 "HW address matches its VLANs'.\n"); 1835 "HW address matches its VLANs'.\n",
1836 bond_dev->name);
2283 } 1837 }
2284 1838
2285 printk(KERN_INFO DRV_NAME 1839 printk(KERN_INFO DRV_NAME
@@ -2397,7 +1951,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2397/*-------------------------------- Monitoring -------------------------------*/ 1951/*-------------------------------- Monitoring -------------------------------*/
2398 1952
2399/* this function is called regularly to monitor each slave's link. */ 1953/* this function is called regularly to monitor each slave's link. */
2400static void bond_mii_monitor(struct net_device *bond_dev) 1954void bond_mii_monitor(struct net_device *bond_dev)
2401{ 1955{
2402 struct bonding *bond = bond_dev->priv; 1956 struct bonding *bond = bond_dev->priv;
2403 struct slave *slave, *oldcurrent; 1957 struct slave *slave, *oldcurrent;
@@ -2596,8 +2150,11 @@ static void bond_mii_monitor(struct net_device *bond_dev)
2596 break; 2150 break;
2597 default: 2151 default:
2598 /* Should not happen */ 2152 /* Should not happen */
2599 printk(KERN_ERR "bonding: Error: %s Illegal value (link=%d)\n", 2153 printk(KERN_ERR DRV_NAME
2600 slave->dev->name, slave->link); 2154 ": %s: Error: %s Illegal value (link=%d)\n",
2155 bond_dev->name,
2156 slave->dev->name,
2157 slave->link);
2601 goto out; 2158 goto out;
2602 } /* end of switch (slave->link) */ 2159 } /* end of switch (slave->link) */
2603 2160
@@ -2721,7 +2278,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2721 struct flowi fl; 2278 struct flowi fl;
2722 struct rtable *rt; 2279 struct rtable *rt;
2723 2280
2724 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2281 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
2282 if (!targets[i])
2283 continue;
2725 dprintk("basa: target %x\n", targets[i]); 2284 dprintk("basa: target %x\n", targets[i]);
2726 if (list_empty(&bond->vlan_list)) { 2285 if (list_empty(&bond->vlan_list)) {
2727 dprintk("basa: empty vlan: arp_send\n"); 2286 dprintk("basa: empty vlan: arp_send\n");
@@ -2825,7 +2384,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2825 * arp is transmitted to generate traffic. see activebackup_arp_monitor for 2384 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
2826 * arp monitoring in active backup mode. 2385 * arp monitoring in active backup mode.
2827 */ 2386 */
2828static void bond_loadbalance_arp_mon(struct net_device *bond_dev) 2387void bond_loadbalance_arp_mon(struct net_device *bond_dev)
2829{ 2388{
2830 struct bonding *bond = bond_dev->priv; 2389 struct bonding *bond = bond_dev->priv;
2831 struct slave *slave, *oldcurrent; 2390 struct slave *slave, *oldcurrent;
@@ -2963,7 +2522,7 @@ out:
2963 * may have received. 2522 * may have received.
2964 * see loadbalance_arp_monitor for arp monitoring in load balancing mode 2523 * see loadbalance_arp_monitor for arp monitoring in load balancing mode
2965 */ 2524 */
2966static void bond_activebackup_arp_mon(struct net_device *bond_dev) 2525void bond_activebackup_arp_mon(struct net_device *bond_dev)
2967{ 2526{
2968 struct bonding *bond = bond_dev->priv; 2527 struct bonding *bond = bond_dev->priv;
2969 struct slave *slave; 2528 struct slave *slave;
@@ -3249,6 +2808,8 @@ static void bond_info_show_master(struct seq_file *seq)
3249{ 2808{
3250 struct bonding *bond = seq->private; 2809 struct bonding *bond = seq->private;
3251 struct slave *curr; 2810 struct slave *curr;
2811 int i;
2812 u32 target;
3252 2813
3253 read_lock(&bond->curr_slave_lock); 2814 read_lock(&bond->curr_slave_lock);
3254 curr = bond->curr_active_slave; 2815 curr = bond->curr_active_slave;
@@ -3257,10 +2818,17 @@ static void bond_info_show_master(struct seq_file *seq)
3257 seq_printf(seq, "Bonding Mode: %s\n", 2818 seq_printf(seq, "Bonding Mode: %s\n",
3258 bond_mode_name(bond->params.mode)); 2819 bond_mode_name(bond->params.mode));
3259 2820
2821 if (bond->params.mode == BOND_MODE_XOR ||
2822 bond->params.mode == BOND_MODE_8023AD) {
2823 seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
2824 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
2825 bond->params.xmit_policy);
2826 }
2827
3260 if (USES_PRIMARY(bond->params.mode)) { 2828 if (USES_PRIMARY(bond->params.mode)) {
3261 seq_printf(seq, "Primary Slave: %s\n", 2829 seq_printf(seq, "Primary Slave: %s\n",
3262 (bond->params.primary[0]) ? 2830 (bond->primary_slave) ?
3263 bond->params.primary : "None"); 2831 bond->primary_slave->dev->name : "None");
3264 2832
3265 seq_printf(seq, "Currently Active Slave: %s\n", 2833 seq_printf(seq, "Currently Active Slave: %s\n",
3266 (curr) ? curr->dev->name : "None"); 2834 (curr) ? curr->dev->name : "None");
@@ -3273,6 +2841,27 @@ static void bond_info_show_master(struct seq_file *seq)
3273 seq_printf(seq, "Down Delay (ms): %d\n", 2841 seq_printf(seq, "Down Delay (ms): %d\n",
3274 bond->params.downdelay * bond->params.miimon); 2842 bond->params.downdelay * bond->params.miimon);
3275 2843
2844
2845 /* ARP information */
2846 if(bond->params.arp_interval > 0) {
2847 int printed=0;
2848 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
2849 bond->params.arp_interval);
2850
2851 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
2852
2853 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
2854 if (!bond->params.arp_targets[i])
2855 continue;
2856 if (printed)
2857 seq_printf(seq, ",");
2858 target = ntohl(bond->params.arp_targets[i]);
2859 seq_printf(seq, " %d.%d.%d.%d", HIPQUAD(target));
2860 printed = 1;
2861 }
2862 seq_printf(seq, "\n");
2863 }
2864
3276 if (bond->params.mode == BOND_MODE_8023AD) { 2865 if (bond->params.mode == BOND_MODE_8023AD) {
3277 struct ad_info ad_info; 2866 struct ad_info ad_info;
3278 2867
@@ -3478,7 +3067,10 @@ static int bond_event_changename(struct bonding *bond)
3478 bond_remove_proc_entry(bond); 3067 bond_remove_proc_entry(bond);
3479 bond_create_proc_entry(bond); 3068 bond_create_proc_entry(bond);
3480#endif 3069#endif
3481 3070 down_write(&(bonding_rwsem));
3071 bond_destroy_sysfs_entry(bond);
3072 bond_create_sysfs_entry(bond);
3073 up_write(&(bonding_rwsem));
3482 return NOTIFY_DONE; 3074 return NOTIFY_DONE;
3483} 3075}
3484 3076
@@ -3955,6 +3547,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3955 return -EPERM; 3547 return -EPERM;
3956 } 3548 }
3957 3549
3550 down_write(&(bonding_rwsem));
3958 slave_dev = dev_get_by_name(ifr->ifr_slave); 3551 slave_dev = dev_get_by_name(ifr->ifr_slave);
3959 3552
3960 dprintk("slave_dev=%p: \n", slave_dev); 3553 dprintk("slave_dev=%p: \n", slave_dev);
@@ -3987,6 +3580,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3987 dev_put(slave_dev); 3580 dev_put(slave_dev);
3988 } 3581 }
3989 3582
3583 up_write(&(bonding_rwsem));
3990 return res; 3584 return res;
3991} 3585}
3992 3586
@@ -4071,6 +3665,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4071 bond_for_each_slave(bond, slave, i) { 3665 bond_for_each_slave(bond, slave, i) {
4072 dprintk("s %p s->p %p c_m %p\n", slave, 3666 dprintk("s %p s->p %p c_m %p\n", slave,
4073 slave->prev, slave->dev->change_mtu); 3667 slave->prev, slave->dev->change_mtu);
3668
4074 res = dev_set_mtu(slave->dev, new_mtu); 3669 res = dev_set_mtu(slave->dev, new_mtu);
4075 3670
4076 if (res) { 3671 if (res) {
@@ -4397,8 +3992,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4397 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3992 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4398 if (!skb2) { 3993 if (!skb2) {
4399 printk(KERN_ERR DRV_NAME 3994 printk(KERN_ERR DRV_NAME
4400 ": Error: bond_xmit_broadcast(): " 3995 ": %s: Error: bond_xmit_broadcast(): "
4401 "skb_clone() failed\n"); 3996 "skb_clone() failed\n",
3997 bond_dev->name);
4402 continue; 3998 continue;
4403 } 3999 }
4404 4000
@@ -4431,7 +4027,7 @@ out:
4431/* 4027/*
4432 * set bond mode specific net device operations 4028 * set bond mode specific net device operations
4433 */ 4029 */
4434static inline void bond_set_mode_ops(struct bonding *bond, int mode) 4030void bond_set_mode_ops(struct bonding *bond, int mode)
4435{ 4031{
4436 struct net_device *bond_dev = bond->dev; 4032 struct net_device *bond_dev = bond->dev;
4437 4033
@@ -4467,7 +4063,8 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
4467 default: 4063 default:
4468 /* Should never happen, mode already checked */ 4064 /* Should never happen, mode already checked */
4469 printk(KERN_ERR DRV_NAME 4065 printk(KERN_ERR DRV_NAME
4470 ": Error: Unknown bonding mode %d\n", 4066 ": %s: Error: Unknown bonding mode %d\n",
4067 bond_dev->name,
4471 mode); 4068 mode);
4472 break; 4069 break;
4473 } 4070 }
@@ -4491,7 +4088,7 @@ static struct ethtool_ops bond_ethtool_ops = {
4491 * Does not allocate but creates a /proc entry. 4088 * Does not allocate but creates a /proc entry.
4492 * Allowed to fail. 4089 * Allowed to fail.
4493 */ 4090 */
4494static int __init bond_init(struct net_device *bond_dev, struct bond_params *params) 4091static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4495{ 4092{
4496 struct bonding *bond = bond_dev->priv; 4093 struct bonding *bond = bond_dev->priv;
4497 4094
@@ -4565,7 +4162,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
4565/* De-initialize device specific data. 4162/* De-initialize device specific data.
4566 * Caller must hold rtnl_lock. 4163 * Caller must hold rtnl_lock.
4567 */ 4164 */
4568static inline void bond_deinit(struct net_device *bond_dev) 4165void bond_deinit(struct net_device *bond_dev)
4569{ 4166{
4570 struct bonding *bond = bond_dev->priv; 4167 struct bonding *bond = bond_dev->priv;
4571 4168
@@ -4601,7 +4198,7 @@ static void bond_free_all(void)
4601 * Convert string input module parms. Accept either the 4198 * Convert string input module parms. Accept either the
4602 * number of the mode or its string name. 4199 * number of the mode or its string name.
4603 */ 4200 */
4604static inline int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl) 4201int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl)
4605{ 4202{
4606 int i; 4203 int i;
4607 4204
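As the comment above notes, bond_parse_parm() accepts either a parameter's name or its number, so for example both "balance-xor" and "2" resolve to BOND_MODE_XOR through bond_mode_tbl; the sysfs store handlers in the new bond_sysfs.c below (bonding_store_mode(), bonding_store_xmit_hash()) rely on the same helper now that it is no longer static.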
@@ -4670,7 +4267,7 @@ static int bond_check_params(struct bond_params *params)
4670 if (max_bonds < 1 || max_bonds > INT_MAX) { 4267 if (max_bonds < 1 || max_bonds > INT_MAX) {
4671 printk(KERN_WARNING DRV_NAME 4268 printk(KERN_WARNING DRV_NAME
4672 ": Warning: max_bonds (%d) not in range %d-%d, so it " 4269 ": Warning: max_bonds (%d) not in range %d-%d, so it "
4673 "was reset to BOND_DEFAULT_MAX_BONDS (%d)", 4270 "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4674 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS); 4271 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4675 max_bonds = BOND_DEFAULT_MAX_BONDS; 4272 max_bonds = BOND_DEFAULT_MAX_BONDS;
4676 } 4273 }
@@ -4881,81 +4478,96 @@ static int bond_check_params(struct bond_params *params)
4881 return 0; 4478 return 0;
4882} 4479}
4883 4480
4481/* Create a new bond based on the specified name and bonding parameters.
4482 * Caller must NOT hold rtnl_lock; we need to release it here before we
4483 * set up our sysfs entries.
4484 */
4485int bond_create(char *name, struct bond_params *params, struct bonding **newbond)
4486{
4487 struct net_device *bond_dev;
4488 int res;
4489
4490 rtnl_lock();
4491 bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup);
4492 if (!bond_dev) {
4493 printk(KERN_ERR DRV_NAME
4494 ": %s: eek! can't alloc netdev!\n",
4495 name);
4496 res = -ENOMEM;
4497 goto out_rtnl;
4498 }
4499
4500 /* bond_init() must be called after dev_alloc_name() (for the
4501 * /proc files), but before register_netdevice(), because we
4502 * need to set function pointers.
4503 */
4504
4505 res = bond_init(bond_dev, params);
4506 if (res < 0) {
4507 goto out_netdev;
4508 }
4509
4510 SET_MODULE_OWNER(bond_dev);
4511
4512 res = register_netdevice(bond_dev);
4513 if (res < 0) {
4514 goto out_bond;
4515 }
4516 if (newbond)
4517 *newbond = bond_dev->priv;
4518
4519 rtnl_unlock(); /* allows sysfs registration of net device */
4520 res = bond_create_sysfs_entry(bond_dev->priv);
4521 goto done;
4522out_bond:
4523 bond_deinit(bond_dev);
4524out_netdev:
4525 free_netdev(bond_dev);
4526out_rtnl:
4527 rtnl_unlock();
4528done:
4529 return res;
4530}
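Usage note: bond_create() becomes the single way a bond is instantiated from here on. bonding_init() below calls it once per requested "bondN" at module load, and bonding_store_bonds() in the new bond_sysfs.c calls it when "+ifname" is written to the bonding_masters attribute; both pass &bonding_defaults as the parameters.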
4531
4884static int __init bonding_init(void) 4532static int __init bonding_init(void)
4885{ 4533{
4886 struct bond_params params;
4887 int i; 4534 int i;
4888 int res; 4535 int res;
4536 char new_bond_name[8]; /* Enough room for 999 bonds at init. */
4889 4537
4890 printk(KERN_INFO "%s", version); 4538 printk(KERN_INFO "%s", version);
4891 4539
4892 res = bond_check_params(&params); 4540 res = bond_check_params(&bonding_defaults);
4893 if (res) { 4541 if (res) {
4894 return res; 4542 goto out;
4895 } 4543 }
4896 4544
4897 rtnl_lock();
4898
4899#ifdef CONFIG_PROC_FS 4545#ifdef CONFIG_PROC_FS
4900 bond_create_proc_dir(); 4546 bond_create_proc_dir();
4901#endif 4547#endif
4902
4903 for (i = 0; i < max_bonds; i++) { 4548 for (i = 0; i < max_bonds; i++) {
4904 struct net_device *bond_dev; 4549 sprintf(new_bond_name, "bond%d",i);
4905 4550 res = bond_create(new_bond_name,&bonding_defaults, NULL);
4906 bond_dev = alloc_netdev(sizeof(struct bonding), "", ether_setup); 4551 if (res)
4907 if (!bond_dev) { 4552 goto err;
4908 res = -ENOMEM;
4909 goto out_err;
4910 }
4911
4912 res = dev_alloc_name(bond_dev, "bond%d");
4913 if (res < 0) {
4914 free_netdev(bond_dev);
4915 goto out_err;
4916 }
4917
4918 /* bond_init() must be called after dev_alloc_name() (for the
4919 * /proc files), but before register_netdevice(), because we
4920 * need to set function pointers.
4921 */
4922 res = bond_init(bond_dev, &params);
4923 if (res < 0) {
4924 free_netdev(bond_dev);
4925 goto out_err;
4926 }
4927
4928 SET_MODULE_OWNER(bond_dev);
4929
4930 res = register_netdevice(bond_dev);
4931 if (res < 0) {
4932 bond_deinit(bond_dev);
4933 free_netdev(bond_dev);
4934 goto out_err;
4935 }
4936 } 4553 }
4937 4554
4938 rtnl_unlock(); 4555 res = bond_create_sysfs();
4556 if (res)
4557 goto err;
4558
4939 register_netdevice_notifier(&bond_netdev_notifier); 4559 register_netdevice_notifier(&bond_netdev_notifier);
4940 register_inetaddr_notifier(&bond_inetaddr_notifier); 4560 register_inetaddr_notifier(&bond_inetaddr_notifier);
4941 4561
4942 return 0; 4562 goto out;
4943 4563err:
4944out_err:
4945 /*
4946 * rtnl_unlock() will run netdev_run_todo(), putting the
4947 * thus-far-registered bonding devices into a state which
4948 * unregister_netdevice() will accept
4949 */
4950 rtnl_unlock();
4951 rtnl_lock(); 4564 rtnl_lock();
4952
4953 /* free and unregister all bonds that were successfully added */
4954 bond_free_all(); 4565 bond_free_all();
4955 4566 bond_destroy_sysfs();
4956 rtnl_unlock(); 4567 rtnl_unlock();
4957 4568out:
4958 return res; 4569 return res;
4570
4959} 4571}
4960 4572
4961static void __exit bonding_exit(void) 4573static void __exit bonding_exit(void)
@@ -4965,6 +4577,7 @@ static void __exit bonding_exit(void)
4965 4577
4966 rtnl_lock(); 4578 rtnl_lock();
4967 bond_free_all(); 4579 bond_free_all();
4580 bond_destroy_sysfs();
4968 rtnl_unlock(); 4581 rtnl_unlock();
4969} 4582}
4970 4583
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
new file mode 100644
index 000000000000..32d13da43a0b
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -0,0 +1,1358 @@
1
2/*
3 * Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 */
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/device.h>
28#include <linux/sysdev.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/string.h>
32#include <linux/netdevice.h>
33#include <linux/inetdevice.h>
34#include <linux/in.h>
35#include <linux/sysfs.h>
36#include <linux/string.h>
37#include <linux/ctype.h>
38#include <linux/inet.h>
39#include <linux/rtnetlink.h>
40
41/* #define BONDING_DEBUG 1 */
42#include "bonding.h"
43#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
44#define to_net_dev(class) container_of(class, struct net_device, class_dev)
45#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv))
46
47/*---------------------------- Declarations -------------------------------*/
48
49
50extern struct list_head bond_dev_list;
51extern struct bond_params bonding_defaults;
52extern struct bond_parm_tbl bond_mode_tbl[];
53extern struct bond_parm_tbl bond_lacp_tbl[];
54extern struct bond_parm_tbl xmit_hashtype_tbl[];
55
56static int expected_refcount = -1;
57static struct class *netdev_class;
58/*--------------------------- Data Structures -----------------------------*/
59
60 /* Bonding sysfs lock. Why can't we just use the subsystem lock?
61 * Because kobject_register tries to acquire the subsystem lock. If
62 * we already hold the lock (which we would if the user was creating
63 * a new bond through the sysfs interface), we deadlock.
64 * This lock is only needed when deleting a bond - we need to make sure
65 * that we don't collide with an ongoing ioctl.
66 */
67
68struct rw_semaphore bonding_rwsem;
69
70
71
72
73/*------------------------------ Functions --------------------------------*/
74
75/*
76 * "show" function for the bond_masters attribute.
77 * The class parameter is ignored.
78 */
79static ssize_t bonding_show_bonds(struct class *cls, char *buffer)
80{
81 int res = 0;
82 struct bonding *bond;
83
84 down_read(&(bonding_rwsem));
85
86 list_for_each_entry(bond, &bond_dev_list, bond_list) {
87 if (res > (PAGE_SIZE - IFNAMSIZ)) {
88 /* not enough space for another interface name */
89 if ((PAGE_SIZE - res) > 10)
90 res = PAGE_SIZE - 10;
91 res += sprintf(buffer + res, "++more++");
92 break;
93 }
94 res += sprintf(buffer + res, "%s ",
95 bond->dev->name);
96 }
97 res += sprintf(buffer + res, "\n");
98 res++;
99 up_read(&(bonding_rwsem));
100 return res;
101}
102
103/*
104 * "store" function for the bond_masters attribute. This is what
105 * creates and deletes entire bonds.
106 *
107 * The class parameter is ignored.
108 *
109 */
110
111static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t count)
112{
113 char command[IFNAMSIZ + 1] = {0, };
114 char *ifname;
115 int res = count;
116 struct bonding *bond;
117 struct bonding *nxt;
118
119 down_write(&(bonding_rwsem));
120 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
121 ifname = command + 1;
122 if ((strlen(command) <= 1) ||
123 !dev_valid_name(ifname))
124 goto err_no_cmd;
125
126 if (command[0] == '+') {
127
128 /* Check to see if the bond already exists. */
129 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
130 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
131 printk(KERN_ERR DRV_NAME
132 ": cannot add bond %s; it already exists\n",
133 ifname);
134 res = -EPERM;
135 goto out;
136 }
137
138 printk(KERN_INFO DRV_NAME
139 ": %s is being created...\n", ifname);
140 if (bond_create(ifname, &bonding_defaults, &bond)) {
141 printk(KERN_INFO DRV_NAME
142 ": %s interface already exists. Bond creation failed.\n",
143 ifname);
144 res = -EPERM;
145 }
146 goto out;
147 }
148
149 if (command[0] == '-') {
150 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
151 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
152 rtnl_lock();
153 /* check the ref count on the bond's kobject.
154 * If it's > expected, then there's a file open,
155 * and we have to fail.
156 */
157 if (atomic_read(&bond->dev->class_dev.kobj.kref.refcount)
158 > expected_refcount){
159 rtnl_unlock();
160 printk(KERN_INFO DRV_NAME
161 ": Unable remove bond %s due to open references.\n",
162 ifname);
163 res = -EPERM;
164 goto out;
165 }
166 printk(KERN_INFO DRV_NAME
167 ": %s is being deleted...\n",
168 bond->dev->name);
169 unregister_netdevice(bond->dev);
170 bond_deinit(bond->dev);
171 bond_destroy_sysfs_entry(bond);
172 rtnl_unlock();
173 goto out;
174 }
175
176 printk(KERN_ERR DRV_NAME
177 ": unable to delete non-existent bond %s\n", ifname);
178 res = -ENODEV;
179 goto out;
180 }
181
182err_no_cmd:
183 printk(KERN_ERR DRV_NAME
184 ": no command found in bonding_masters. Use +ifname or -ifname.\n");
185 res = -EPERM;
186
187 /* Always return either count or an error. If you return 0, you'll
188 * get called forever, which is bad.
189 */
190out:
191 up_write(&(bonding_rwsem));
192 return res;
193}
194/* class attribute for bond_masters file. This ends up in /sys/class/net */
195static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
196 bonding_show_bonds, bonding_store_bonds);
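A minimal user-space sketch of exercising this attribute (illustrative only; it assumes the file appears as /sys/class/net/bonding_masters, per the comment above):

/* Hypothetical example program, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/net/bonding_masters", O_WRONLY);

	if (fd < 0) {
		perror("open bonding_masters");
		return 1;
	}
	/* "+ifname" creates a bond, "-ifname" deletes one,
	 * mirroring bonding_store_bonds() above. */
	if (write(fd, "+bond1", strlen("+bond1")) < 0)
		perror("write bonding_masters");
	close(fd);
	return 0;
}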
197
198int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave)
199{
200 char linkname[IFNAMSIZ+7];
201 int ret = 0;
202
203 /* first, create a link from the slave back to the master */
204 ret = sysfs_create_link(&(slave->class_dev.kobj), &(master->class_dev.kobj),
205 "master");
206 if (ret)
207 return ret;
208 /* next, create a link from the master to the slave */
209 sprintf(linkname,"slave_%s",slave->name);
210 ret = sysfs_create_link(&(master->class_dev.kobj), &(slave->class_dev.kobj),
211 linkname);
212 return ret;
213
214}
215
216void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave)
217{
218 char linkname[IFNAMSIZ+7];
219
220 sysfs_remove_link(&(slave->class_dev.kobj), "master");
221 sprintf(linkname,"slave_%s",slave->name);
222 sysfs_remove_link(&(master->class_dev.kobj), linkname);
223}
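For example, assuming a bond named bond0 enslaving eth0, these helpers create /sys/class/net/eth0/master pointing at bond0 and /sys/class/net/bond0/slave_eth0 pointing back at eth0, and remove both links again when the slave is released.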
224
225
226/*
227 * Show the slaves in the current bond.
228 */
229static ssize_t bonding_show_slaves(struct class_device *cd, char *buf)
230{
231 struct slave *slave;
232 int i, res = 0;
233 struct bonding *bond = to_bond(cd);
234
235 read_lock_bh(&bond->lock);
236 bond_for_each_slave(bond, slave, i) {
237 if (res > (PAGE_SIZE - IFNAMSIZ)) {
238 /* not enough space for another interface name */
239 if ((PAGE_SIZE - res) > 10)
240 res = PAGE_SIZE - 10;
241 res += sprintf(buf + res, "++more++");
242 break;
243 }
244 res += sprintf(buf + res, "%s ", slave->dev->name);
245 }
246 read_unlock_bh(&bond->lock);
247 res += sprintf(buf + res, "\n");
248 res++;
249 return res;
250}
251
252/*
253 * Set the slaves in the current bond. The bond interface must be
254 * up for this to succeed.
255 * This function is largely the same flow as bonding_update_bonds().
256 */
257static ssize_t bonding_store_slaves(struct class_device *cd, const char *buffer, size_t count)
258{
259 char command[IFNAMSIZ + 1] = { 0, };
260 char *ifname;
261 int i, res, found, ret = count;
262 struct slave *slave;
263	struct net_device *dev = NULL;
264 struct bonding *bond = to_bond(cd);
265
266 /* Quick sanity check -- is the bond interface up? */
267 if (!(bond->dev->flags & IFF_UP)) {
268 printk(KERN_ERR DRV_NAME
269 ": %s: Unable to update slaves because interface is down.\n",
270 bond->dev->name);
271 ret = -EPERM;
272 goto out;
273 }
274
275 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
276
277 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
278 ifname = command + 1;
279 if ((strlen(command) <= 1) ||
280 !dev_valid_name(ifname))
281 goto err_no_cmd;
282
283 if (command[0] == '+') {
284
285 /* Got a slave name in ifname. Is it already in the list? */
286 found = 0;
287 read_lock_bh(&bond->lock);
288 bond_for_each_slave(bond, slave, i)
289 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
290 printk(KERN_ERR DRV_NAME
291 ": %s: Interface %s is already enslaved!\n",
292 bond->dev->name, ifname);
293 ret = -EPERM;
294 read_unlock_bh(&bond->lock);
295 goto out;
296 }
297
298 read_unlock_bh(&bond->lock);
299 printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
300 bond->dev->name, ifname);
301 dev = dev_get_by_name(ifname);
302 if (!dev) {
303 printk(KERN_INFO DRV_NAME
304 ": %s: Interface %s does not exist!\n",
305 bond->dev->name, ifname);
306 ret = -EPERM;
307 goto out;
308 }
309 else
310 dev_put(dev);
311
312 if (dev->flags & IFF_UP) {
313 printk(KERN_ERR DRV_NAME
314 ": %s: Error: Unable to enslave %s "
315 "because it is already up.\n",
316 bond->dev->name, dev->name);
317 ret = -EPERM;
318 goto out;
319 }
320 /* If this is the first slave, then we need to set
321 the master's hardware address to be the same as the
322 slave's. */
323 if (!(*((u32 *) & (bond->dev->dev_addr[0])))) {
324 memcpy(bond->dev->dev_addr, dev->dev_addr,
325 dev->addr_len);
326 }
327
328 /* Set the slave's MTU to match the bond */
329 if (dev->mtu != bond->dev->mtu) {
330 if (dev->change_mtu) {
331 res = dev->change_mtu(dev,
332 bond->dev->mtu);
333 if (res) {
334 ret = res;
335 goto out;
336 }
337 } else {
338 dev->mtu = bond->dev->mtu;
339 }
340 }
341 rtnl_lock();
342 res = bond_enslave(bond->dev, dev);
343 rtnl_unlock();
344 if (res) {
345 ret = res;
346 }
347 goto out;
348 }
349
350 if (command[0] == '-') {
351 dev = NULL;
352 bond_for_each_slave(bond, slave, i)
353 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
354 dev = slave->dev;
355 break;
356 }
357 if (dev) {
358 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
359 bond->dev->name, dev->name);
360 rtnl_lock();
361 res = bond_release(bond->dev, dev);
362 rtnl_unlock();
363 if (res) {
364 ret = res;
365 goto out;
366 }
367 /* set the slave MTU to the default */
368 if (dev->change_mtu) {
369 dev->change_mtu(dev, 1500);
370 } else {
371 dev->mtu = 1500;
372 }
373 }
374 else {
375 printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n",
376 ifname, bond->dev->name);
377 ret = -ENODEV;
378 }
379 goto out;
380 }
381
382err_no_cmd:
383 printk(KERN_ERR DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
384 ret = -EPERM;
385
386out:
387 return ret;
388}
389
390static CLASS_DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
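Usage note (the exact sysfs location is an assumption, since bond_create_sysfs_entry() is not shown in this hunk): once the per-bond attributes are registered, writing "+eth0" to the bond's slaves file enslaves eth0, provided the bond is up and eth0 is down, and writing "-eth0" releases it and restores a 1500-byte MTU, exactly as bonding_store_slaves() above implements.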
391
392/*
393 * Show and set the bonding mode. The bond interface must be down to
394 * change the mode.
395 */
396static ssize_t bonding_show_mode(struct class_device *cd, char *buf)
397{
398 struct bonding *bond = to_bond(cd);
399
400 return sprintf(buf, "%s %d\n",
401 bond_mode_tbl[bond->params.mode].modename,
402 bond->params.mode) + 1;
403}
404
405static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size_t count)
406{
407 int new_value, ret = count;
408 struct bonding *bond = to_bond(cd);
409
410 if (bond->dev->flags & IFF_UP) {
411 printk(KERN_ERR DRV_NAME
412 ": unable to update mode of %s because interface is up.\n",
413 bond->dev->name);
414 ret = -EPERM;
415 goto out;
416 }
417
418 new_value = bond_parse_parm((char *)buf, bond_mode_tbl);
419 if (new_value < 0) {
420 printk(KERN_ERR DRV_NAME
421 ": %s: Ignoring invalid mode value %.*s.\n",
422 bond->dev->name,
423 (int)strlen(buf) - 1, buf);
424 ret = -EINVAL;
425 goto out;
426 } else {
427 bond->params.mode = new_value;
428 bond_set_mode_ops(bond, bond->params.mode);
429 printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n",
430 bond->dev->name, bond_mode_tbl[new_value].modename, new_value);
431 }
432out:
433 return ret;
434}
435static CLASS_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
436
437/*
438 * Show and set the bonding transmit hash method. The bond interface must be down to
439 * change the xmit hash policy.
440 */
441static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf)
442{
443 int count;
444 struct bonding *bond = to_bond(cd);
445
446 if ((bond->params.mode != BOND_MODE_XOR) &&
447 (bond->params.mode != BOND_MODE_8023AD)) {
448 // Not Applicable
449 count = sprintf(buf, "NA\n") + 1;
450 } else {
451 count = sprintf(buf, "%s %d\n",
452 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
453 bond->params.xmit_policy) + 1;
454 }
455
456 return count;
457}
458
459static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf, size_t count)
460{
461 int new_value, ret = count;
462 struct bonding *bond = to_bond(cd);
463
464 if (bond->dev->flags & IFF_UP) {
465 printk(KERN_ERR DRV_NAME
466 "%s: Interface is up. Unable to update xmit policy.\n",
467 bond->dev->name);
468 ret = -EPERM;
469 goto out;
470 }
471
472 if ((bond->params.mode != BOND_MODE_XOR) &&
473 (bond->params.mode != BOND_MODE_8023AD)) {
474 printk(KERN_ERR DRV_NAME
475 "%s: Transmit hash policy is irrelevant in this mode.\n",
476 bond->dev->name);
477 ret = -EPERM;
478 goto out;
479 }
480
481 new_value = bond_parse_parm((char *)buf, xmit_hashtype_tbl);
482 if (new_value < 0) {
483 printk(KERN_ERR DRV_NAME
484 ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
485 bond->dev->name,
486 (int)strlen(buf) - 1, buf);
487 ret = -EINVAL;
488 goto out;
489 } else {
490 bond->params.xmit_policy = new_value;
491 bond_set_mode_ops(bond, bond->params.mode);
492 printk(KERN_INFO DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
493 bond->dev->name, xmit_hashtype_tbl[new_value].modename, new_value);
494 }
495out:
496 return ret;
497}
498static CLASS_DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
499
500/*
501 * Show and set the arp timer interval. There are two tricky bits
502 * here. First, if ARP monitoring is activated, then we must disable
503 * MII monitoring. Second, if the ARP timer isn't running, we must
504 * start it.
505 */
506static ssize_t bonding_show_arp_interval(struct class_device *cd, char *buf)
507{
508 struct bonding *bond = to_bond(cd);
509
510 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1;
511}
512
513static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *buf, size_t count)
514{
515 int new_value, ret = count;
516 struct bonding *bond = to_bond(cd);
517
518 if (sscanf(buf, "%d", &new_value) != 1) {
519 printk(KERN_ERR DRV_NAME
520 ": %s: no arp_interval value specified.\n",
521 bond->dev->name);
522 ret = -EINVAL;
523 goto out;
524 }
525 if (new_value < 0) {
526 printk(KERN_ERR DRV_NAME
527 ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
528 bond->dev->name, new_value, INT_MAX);
529 ret = -EINVAL;
530 goto out;
531 }
532
533 printk(KERN_INFO DRV_NAME
534 ": %s: Setting ARP monitoring interval to %d.\n",
535 bond->dev->name, new_value);
536 bond->params.arp_interval = new_value;
537 if (bond->params.miimon) {
538 printk(KERN_INFO DRV_NAME
539 ": %s: ARP monitoring cannot be used with MII monitoring. "
540 "%s Disabling MII monitoring.\n",
541 bond->dev->name, bond->dev->name);
542 bond->params.miimon = 0;
543 /* Kill MII timer, else it brings bond's link down */
544		if (bond->mii_timer.function) {
545 printk(KERN_INFO DRV_NAME
546 ": %s: Kill MII timer, else it brings bond's link down...\n",
547 bond->dev->name);
548 del_timer_sync(&bond->mii_timer);
549 }
550 }
551 if (!bond->params.arp_targets[0]) {
552 printk(KERN_INFO DRV_NAME
553 ": %s: ARP monitoring has been set up, "
554 "but no ARP targets have been specified.\n",
555 bond->dev->name);
556 }
557 if (bond->dev->flags & IFF_UP) {
558 /* If the interface is up, we may need to fire off
559 * the ARP timer. If the interface is down, the
560 * timer will get fired off when the open function
561 * is called.
562 */
563 if (bond->arp_timer.function) {
564 /* The timer's already set up, so fire it off */
565 mod_timer(&bond->arp_timer, jiffies + 1);
566 } else {
567 /* Set up the timer. */
568 init_timer(&bond->arp_timer);
569 bond->arp_timer.expires = jiffies + 1;
570 bond->arp_timer.data =
571 (unsigned long) bond->dev;
572 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
573 bond->arp_timer.function =
574 (void *)
575 &bond_activebackup_arp_mon;
576 } else {
577 bond->arp_timer.function =
578 (void *)
579 &bond_loadbalance_arp_mon;
580 }
581 add_timer(&bond->arp_timer);
582 }
583 }
584
585out:
586 return ret;
587}
588static CLASS_DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
589
590/*
591 * Show and set the arp targets.
592 */
593static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf)
594{
595 int i, res = 0;
596 struct bonding *bond = to_bond(cd);
597
598 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
599 if (bond->params.arp_targets[i])
600 res += sprintf(buf + res, "%u.%u.%u.%u ",
601 NIPQUAD(bond->params.arp_targets[i]));
602 }
603 if (res)
604 res--; /* eat the leftover space */
605 res += sprintf(buf + res, "\n");
606 res++;
607 return res;
608}
609
610static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *buf, size_t count)
611{
612 u32 newtarget;
613 int i = 0, done = 0, ret = count;
614 struct bonding *bond = to_bond(cd);
615 u32 *targets;
616
617 targets = bond->params.arp_targets;
618 newtarget = in_aton(buf + 1);
619 /* look for adds */
620 if (buf[0] == '+') {
621 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
622 printk(KERN_ERR DRV_NAME
623 ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n",
624 bond->dev->name, NIPQUAD(newtarget));
625 ret = -EINVAL;
626 goto out;
627 }
628 /* look for an empty slot to put the target in, and check for dupes */
629 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
630 if (targets[i] == newtarget) { /* duplicate */
631 printk(KERN_ERR DRV_NAME
632 ": %s: ARP target %u.%u.%u.%u is already present\n",
633 bond->dev->name, NIPQUAD(newtarget));
634 if (done)
635 targets[i] = 0;
636 ret = -EINVAL;
637 goto out;
638 }
639 if (targets[i] == 0 && !done) {
640 printk(KERN_INFO DRV_NAME
641 					": %s: adding ARP target %u.%u.%u.%u.\n",
642 bond->dev->name, NIPQUAD(newtarget));
643 done = 1;
644 targets[i] = newtarget;
645 }
646 }
647 if (!done) {
648 printk(KERN_ERR DRV_NAME
649 ": %s: ARP target table is full!\n",
650 bond->dev->name);
651 ret = -EINVAL;
652 goto out;
653 }
654
655 }
656 else if (buf[0] == '-') {
657 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
658 printk(KERN_ERR DRV_NAME
659 				": %s: invalid ARP target %u.%u.%u.%u specified for removal\n",
660 bond->dev->name, NIPQUAD(newtarget));
661 ret = -EINVAL;
662 goto out;
663 }
664
665 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
666 if (targets[i] == newtarget) {
667 printk(KERN_INFO DRV_NAME
668 					": %s: removing ARP target %u.%u.%u.%u.\n",
669 bond->dev->name, NIPQUAD(newtarget));
670 targets[i] = 0;
671 done = 1;
672 }
673 }
674 if (!done) {
675 printk(KERN_INFO DRV_NAME
676 				": %s: unable to remove nonexistent ARP target %u.%u.%u.%u.\n",
677 bond->dev->name, NIPQUAD(newtarget));
678 ret = -EINVAL;
679 goto out;
680 }
681 }
682 else {
683 		printk(KERN_ERR DRV_NAME ": no command found in arp_ip_target file for bond %s. Use +<addr> or -<addr>.\n",
684 bond->dev->name);
685 ret = -EPERM;
686 goto out;
687 }
688
689out:
690 return ret;
691}
692static CLASS_DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
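For illustration, a minimal userspace sketch of the write format the arp_ip_target file above expects: one dotted-quad target per write, prefixed with '+' to add or '-' to remove. inet_addr() stands in for the kernel's in_aton(); all names and values here are example assumptions, not driver code.

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	const char *buf = "+10.0.0.1";		/* as written to the sysfs file */
	in_addr_t target = inet_addr(buf + 1);	/* skip the '+'/'-' command byte */

	/* Mirror the store routine's rejection of bad/broadcast targets */
	if (target == INADDR_NONE || target == INADDR_ANY)
		printf("invalid target\n");
	else if (buf[0] == '+')
		printf("add target 0x%08x\n", (unsigned int)target);
	else if (buf[0] == '-')
		printf("remove target 0x%08x\n", (unsigned int)target);
	else
		printf("use +<addr> or -<addr>\n");
	return 0;
}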
693
694/*
695 * Show and set the up and down delays. These must be multiples of the
696 * MII monitoring value, and are stored internally as the multiplier.
697 * Thus, we must translate to ms for the real world.
698 */
699static ssize_t bonding_show_downdelay(struct class_device *cd, char *buf)
700{
701 struct bonding *bond = to_bond(cd);
702
703 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1;
704}
705
706static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf, size_t count)
707{
708 int new_value, ret = count;
709 struct bonding *bond = to_bond(cd);
710
711 if (!(bond->params.miimon)) {
712 printk(KERN_ERR DRV_NAME
713 ": %s: Unable to set down delay as MII monitoring is disabled\n",
714 bond->dev->name);
715 ret = -EPERM;
716 goto out;
717 }
718
719 if (sscanf(buf, "%d", &new_value) != 1) {
720 printk(KERN_ERR DRV_NAME
721 ": %s: no down delay value specified.\n",
722 bond->dev->name);
723 ret = -EINVAL;
724 goto out;
725 }
726 if (new_value < 0) {
727 printk(KERN_ERR DRV_NAME
728 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
729 bond->dev->name, new_value, 1, INT_MAX);
730 ret = -EINVAL;
731 goto out;
732 } else {
733 if ((new_value % bond->params.miimon) != 0) {
734 printk(KERN_WARNING DRV_NAME
735 ": %s: Warning: down delay (%d) is not a multiple "
736 "of miimon (%d), delay rounded to %d ms\n",
737 bond->dev->name, new_value, bond->params.miimon,
738 (new_value / bond->params.miimon) *
739 bond->params.miimon);
740 }
741 bond->params.downdelay = new_value / bond->params.miimon;
742 printk(KERN_INFO DRV_NAME ": %s: Setting down delay to %d.\n",
743 bond->dev->name, bond->params.downdelay * bond->params.miimon);
744
745 }
746
747out:
748 return ret;
749}
750static CLASS_DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
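A worked example of the rounding above may help: delays are stored as a multiple of miimon, so a requested value that is not an exact multiple is silently rounded down. Minimal userspace sketch, with illustrative names and numbers only:

#include <stdio.h>

static int effective_delay_ms(int requested_ms, int miimon_ms)
{
	/* Stored internally as requested_ms / miimon_ms; converted back
	 * to milliseconds here, matching what the driver prints. */
	return (requested_ms / miimon_ms) * miimon_ms;
}

int main(void)
{
	/* With miimon = 100, asking for a 250 ms down delay stores a
	 * multiplier of 2, i.e. an effective delay of 200 ms. */
	printf("%d ms\n", effective_delay_ms(250, 100));	/* prints 200 ms */
	return 0;
}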
751
752static ssize_t bonding_show_updelay(struct class_device *cd, char *buf)
753{
754 struct bonding *bond = to_bond(cd);
755
756 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1;
757
758}
759
760static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, size_t count)
761{
762 int new_value, ret = count;
763 struct bonding *bond = to_bond(cd);
764
765 if (!(bond->params.miimon)) {
766 printk(KERN_ERR DRV_NAME
767 ": %s: Unable to set up delay as MII monitoring is disabled\n",
768 bond->dev->name);
769 ret = -EPERM;
770 goto out;
771 }
772
773 if (sscanf(buf, "%d", &new_value) != 1) {
774 printk(KERN_ERR DRV_NAME
775 ": %s: no up delay value specified.\n",
776 bond->dev->name);
777 ret = -EINVAL;
778 goto out;
779 }
780 if (new_value < 0) {
781 printk(KERN_ERR DRV_NAME
782 			": %s: Invalid up delay value %d not in range %d-%d; rejected.\n",
783 bond->dev->name, new_value, 1, INT_MAX);
784 ret = -EINVAL;
785 goto out;
786 } else {
787 if ((new_value % bond->params.miimon) != 0) {
788 printk(KERN_WARNING DRV_NAME
789 ": %s: Warning: up delay (%d) is not a multiple "
790 "of miimon (%d), updelay rounded to %d ms\n",
791 bond->dev->name, new_value, bond->params.miimon,
792 (new_value / bond->params.miimon) *
793 bond->params.miimon);
794 }
795 bond->params.updelay = new_value / bond->params.miimon;
796 printk(KERN_INFO DRV_NAME ": %s: Setting up delay to %d.\n",
797 bond->dev->name, bond->params.updelay * bond->params.miimon);
798
799 }
800
801out:
802 return ret;
803}
804static CLASS_DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
805
806/*
807 * Show and set the LACP interval. Interface must be down, and the mode
808 * must be set to 802.3ad mode.
809 */
810static ssize_t bonding_show_lacp(struct class_device *cd, char *buf)
811{
812 struct bonding *bond = to_bond(cd);
813
814 return sprintf(buf, "%s %d\n",
815 bond_lacp_tbl[bond->params.lacp_fast].modename,
816 bond->params.lacp_fast) + 1;
817}
818
819static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size_t count)
820{
821 int new_value, ret = count;
822 struct bonding *bond = to_bond(cd);
823
824 if (bond->dev->flags & IFF_UP) {
825 printk(KERN_ERR DRV_NAME
826 ": %s: Unable to update LACP rate because interface is up.\n",
827 bond->dev->name);
828 ret = -EPERM;
829 goto out;
830 }
831
832 if (bond->params.mode != BOND_MODE_8023AD) {
833 printk(KERN_ERR DRV_NAME
834 ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
835 bond->dev->name);
836 ret = -EPERM;
837 goto out;
838 }
839
840 new_value = bond_parse_parm((char *)buf, bond_lacp_tbl);
841
842 if ((new_value == 1) || (new_value == 0)) {
843 bond->params.lacp_fast = new_value;
844 printk(KERN_INFO DRV_NAME
845 ": %s: Setting LACP rate to %s (%d).\n",
846 bond->dev->name, bond_lacp_tbl[new_value].modename, new_value);
847 } else {
848 printk(KERN_ERR DRV_NAME
849 ": %s: Ignoring invalid LACP rate value %.*s.\n",
850 bond->dev->name, (int)strlen(buf) - 1, buf);
851 ret = -EINVAL;
852 }
853out:
854 return ret;
855}
856static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
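bond_parse_parm() itself is not part of this hunk; the following is only a plausible sketch of the kind of table lookup it performs over bond_lacp_tbl (the table contents and the prefix-matching rule are assumptions for illustration):

#include <stdio.h>
#include <string.h>

struct parm_tbl { const char *modename; int mode; };

/* Assumed to mirror bond_lacp_tbl: "slow" = 0, "fast" = 1 */
static const struct parm_tbl lacp_tbl[] = {
	{ "slow", 0 },
	{ "fast", 1 },
	{ NULL,  -1 },
};

static int parse_parm(const char *arg, const struct parm_tbl *tbl)
{
	int i;

	for (i = 0; tbl[i].modename; i++)
		if (strncmp(arg, tbl[i].modename, strlen(tbl[i].modename)) == 0)
			return tbl[i].mode;
	return -1;	/* not found */
}

int main(void)
{
	printf("%d\n", parse_parm("fast\n", lacp_tbl));	/* prints 1 */
	return 0;
}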
857
858/*
859 * Show and set the MII monitor interval. There are two tricky bits
860 * here. First, if MII monitoring is activated, then we must disable
861 * ARP monitoring. Second, if the timer isn't running, we must
862 * start it.
863 */
864static ssize_t bonding_show_miimon(struct class_device *cd, char *buf)
865{
866 struct bonding *bond = to_bond(cd);
867
868 return sprintf(buf, "%d\n", bond->params.miimon) + 1;
869}
870
871static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, size_t count)
872{
873 int new_value, ret = count;
874 struct bonding *bond = to_bond(cd);
875
876 if (sscanf(buf, "%d", &new_value) != 1) {
877 printk(KERN_ERR DRV_NAME
878 ": %s: no miimon value specified.\n",
879 bond->dev->name);
880 ret = -EINVAL;
881 goto out;
882 }
883 if (new_value < 0) {
884 printk(KERN_ERR DRV_NAME
885 ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
886 bond->dev->name, new_value, 1, INT_MAX);
887 ret = -EINVAL;
888 goto out;
889 } else {
890 printk(KERN_INFO DRV_NAME
891 ": %s: Setting MII monitoring interval to %d.\n",
892 bond->dev->name, new_value);
893 bond->params.miimon = new_value;
894 if(bond->params.updelay)
895 printk(KERN_INFO DRV_NAME
896 ": %s: Note: Updating updelay (to %d) "
897 "since it is a multiple of the miimon value.\n",
898 bond->dev->name,
899 bond->params.updelay * bond->params.miimon);
900 if(bond->params.downdelay)
901 printk(KERN_INFO DRV_NAME
902 ": %s: Note: Updating downdelay (to %d) "
903 "since it is a multiple of the miimon value.\n",
904 bond->dev->name,
905 bond->params.downdelay * bond->params.miimon);
906 if (bond->params.arp_interval) {
907 printk(KERN_INFO DRV_NAME
908 ": %s: MII monitoring cannot be used with "
909 "ARP monitoring. Disabling ARP monitoring...\n",
910 bond->dev->name);
911 bond->params.arp_interval = 0;
912 /* Kill ARP timer, else it brings bond's link down */
913 			if (bond->arp_timer.function) {
914 printk(KERN_INFO DRV_NAME
915 ": %s: Kill ARP timer, else it brings bond's link down...\n",
916 bond->dev->name);
917 del_timer_sync(&bond->arp_timer);
918 }
919 }
920
921 if (bond->dev->flags & IFF_UP) {
922 /* If the interface is up, we may need to fire off
923 * the MII timer. If the interface is down, the
924 * timer will get fired off when the open function
925 * is called.
926 */
927 if (bond->mii_timer.function) {
928 /* The timer's already set up, so fire it off */
929 mod_timer(&bond->mii_timer, jiffies + 1);
930 } else {
931 /* Set up the timer. */
932 init_timer(&bond->mii_timer);
933 bond->mii_timer.expires = jiffies + 1;
934 bond->mii_timer.data =
935 (unsigned long) bond->dev;
936 bond->mii_timer.function =
937 (void *) &bond_mii_monitor;
938 add_timer(&bond->mii_timer);
939 }
940 }
941 }
942out:
943 return ret;
944}
945static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
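Because updelay and downdelay are kept as multiples of miimon, changing miimon rescales their effective values, which is what the two Note messages above warn about. A small sketch with illustrative numbers only:

#include <stdio.h>

int main(void)
{
	int updelay_mult = 2;			/* stored multiplier */
	int old_miimon = 100, new_miimon = 50;	/* ms */

	printf("updelay: %d ms -> %d ms\n",
	       updelay_mult * old_miimon,	/* 200 ms before the change */
	       updelay_mult * new_miimon);	/* 100 ms after miimon = 50 */
	return 0;
}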
946
947/*
948 * Show and set the primary slave. The store function is much
949 * simpler than the bonding_store_slaves function because it only needs to
950 * handle one interface name.
951 * The bond must be in a mode that supports a primary slave for this to
952 * be set.
953 */
954static ssize_t bonding_show_primary(struct class_device *cd, char *buf)
955{
956 int count = 0;
957 struct bonding *bond = to_bond(cd);
958
959 if (bond->primary_slave)
960 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1;
961 else
962 count = sprintf(buf, "\n") + 1;
963
964 return count;
965}
966
967static ssize_t bonding_store_primary(struct class_device *cd, const char *buf, size_t count)
968{
969 int i;
970 struct slave *slave;
971 struct bonding *bond = to_bond(cd);
972
973 write_lock_bh(&bond->lock);
974 if (!USES_PRIMARY(bond->params.mode)) {
975 printk(KERN_INFO DRV_NAME
976 ": %s: Unable to set primary slave; %s is in mode %d\n",
977 bond->dev->name, bond->dev->name, bond->params.mode);
978 } else {
979 bond_for_each_slave(bond, slave, i) {
980 if (strnicmp
981 (slave->dev->name, buf,
982 strlen(slave->dev->name)) == 0) {
983 printk(KERN_INFO DRV_NAME
984 ": %s: Setting %s as primary slave.\n",
985 bond->dev->name, slave->dev->name);
986 bond->primary_slave = slave;
987 bond_select_active_slave(bond);
988 goto out;
989 }
990 }
991
992 /* if we got here, then we didn't match the name of any slave */
993
994 if (strlen(buf) == 0 || buf[0] == '\n') {
995 printk(KERN_INFO DRV_NAME
996 ": %s: Setting primary slave to None.\n",
997 bond->dev->name);
998 bond->primary_slave = 0;
999 bond_select_active_slave(bond);
1000 } else {
1001 printk(KERN_INFO DRV_NAME
1002 ": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
1003 bond->dev->name, (int)strlen(buf) - 1, buf);
1004 }
1005 }
1006out:
1007 write_unlock_bh(&bond->lock);
1008 return count;
1009}
1010static CLASS_DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
1011
1012/*
1013 * Show and set the use_carrier flag.
1014 */
1015static ssize_t bonding_show_carrier(struct class_device *cd, char *buf)
1016{
1017 struct bonding *bond = to_bond(cd);
1018
1019 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1;
1020}
1021
1022static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, size_t count)
1023{
1024 int new_value, ret = count;
1025 struct bonding *bond = to_bond(cd);
1026
1027
1028 if (sscanf(buf, "%d", &new_value) != 1) {
1029 printk(KERN_ERR DRV_NAME
1030 ": %s: no use_carrier value specified.\n",
1031 bond->dev->name);
1032 ret = -EINVAL;
1033 goto out;
1034 }
1035 if ((new_value == 0) || (new_value == 1)) {
1036 bond->params.use_carrier = new_value;
1037 printk(KERN_INFO DRV_NAME ": %s: Setting use_carrier to %d.\n",
1038 bond->dev->name, new_value);
1039 } else {
1040 printk(KERN_INFO DRV_NAME
1041 ": %s: Ignoring invalid use_carrier value %d.\n",
1042 bond->dev->name, new_value);
1043 }
1044out:
1045 return count;
1046}
1047static CLASS_DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
1048
1049
1050/*
1051 * Show and set the currently active slave.
1052 */
1053static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf)
1054{
1055 struct slave *curr;
1056 struct bonding *bond = to_bond(cd);
1057 int count;
1058
1059
1060 read_lock(&bond->curr_slave_lock);
1061 curr = bond->curr_active_slave;
1062 read_unlock(&bond->curr_slave_lock);
1063
1064 if (USES_PRIMARY(bond->params.mode) && curr)
1065 count = sprintf(buf, "%s\n", curr->dev->name) + 1;
1066 else
1067 count = sprintf(buf, "\n") + 1;
1068 return count;
1069}
1070
1071static ssize_t bonding_store_active_slave(struct class_device *cd, const char *buf, size_t count)
1072{
1073 int i;
1074 struct slave *slave;
1075 struct slave *old_active = NULL;
1076 struct slave *new_active = NULL;
1077 struct bonding *bond = to_bond(cd);
1078
1079 write_lock_bh(&bond->lock);
1080 if (!USES_PRIMARY(bond->params.mode)) {
1081 printk(KERN_INFO DRV_NAME
1082 ": %s: Unable to change active slave; %s is in mode %d\n",
1083 bond->dev->name, bond->dev->name, bond->params.mode);
1084 } else {
1085 bond_for_each_slave(bond, slave, i) {
1086 if (strnicmp
1087 (slave->dev->name, buf,
1088 strlen(slave->dev->name)) == 0) {
1089 old_active = bond->curr_active_slave;
1090 new_active = slave;
1091 if (new_active && (new_active == old_active)) {
1092 /* do nothing */
1093 printk(KERN_INFO DRV_NAME
1094 ": %s: %s is already the current active slave.\n",
1095 bond->dev->name, slave->dev->name);
1096 goto out;
1097 }
1098 else {
1099 if ((new_active) &&
1100 (old_active) &&
1101 (new_active->link == BOND_LINK_UP) &&
1102 IS_UP(new_active->dev)) {
1103 printk(KERN_INFO DRV_NAME
1104 ": %s: Setting %s as active slave.\n",
1105 bond->dev->name, slave->dev->name);
1106 bond_change_active_slave(bond, new_active);
1107 }
1108 else {
1109 printk(KERN_INFO DRV_NAME
1110 ": %s: Could not set %s as active slave; "
1111 "either %s is down or the link is down.\n",
1112 bond->dev->name, slave->dev->name,
1113 slave->dev->name);
1114 }
1115 goto out;
1116 }
1117 }
1118 }
1119
1120 /* if we got here, then we didn't match the name of any slave */
1121
1122 if (strlen(buf) == 0 || buf[0] == '\n') {
1123 printk(KERN_INFO DRV_NAME
1124 ": %s: Setting active slave to None.\n",
1125 bond->dev->name);
1126 bond->primary_slave = 0;
1127 bond_select_active_slave(bond);
1128 } else {
1129 printk(KERN_INFO DRV_NAME
1130 ": %s: Unable to set %.*s as active slave as it is not a slave.\n",
1131 bond->dev->name, (int)strlen(buf) - 1, buf);
1132 }
1133 }
1134out:
1135 write_unlock_bh(&bond->lock);
1136 return count;
1137
1138}
1139static CLASS_DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
1140
1141
1142/*
1143 * Show link status of the bond interface.
1144 */
1145static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf)
1146{
1147 struct slave *curr;
1148 struct bonding *bond = to_bond(cd);
1149
1150 read_lock(&bond->curr_slave_lock);
1151 curr = bond->curr_active_slave;
1152 read_unlock(&bond->curr_slave_lock);
1153
1154 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1;
1155}
1156static CLASS_DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1157
1158
1159/*
1160 * Show current 802.3ad aggregator ID.
1161 */
1162static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf)
1163{
1164 int count = 0;
1165 struct bonding *bond = to_bond(cd);
1166
1167 if (bond->params.mode == BOND_MODE_8023AD) {
1168 struct ad_info ad_info;
1169 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id) + 1;
1170 }
1171 else
1172 count = sprintf(buf, "\n") + 1;
1173
1174 return count;
1175}
1176static CLASS_DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
1177
1178
1179/*
1180 * Show number of active 802.3ad ports.
1181 */
1182static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf)
1183{
1184 int count = 0;
1185 struct bonding *bond = to_bond(cd);
1186
1187 if (bond->params.mode == BOND_MODE_8023AD) {
1188 struct ad_info ad_info;
1189 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports) + 1;
1190 }
1191 else
1192 count = sprintf(buf, "\n") + 1;
1193
1194 return count;
1195}
1196static CLASS_DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
1197
1198
1199/*
1200 * Show current 802.3ad actor key.
1201 */
1202static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf)
1203{
1204 int count = 0;
1205 struct bonding *bond = to_bond(cd);
1206
1207 if (bond->params.mode == BOND_MODE_8023AD) {
1208 struct ad_info ad_info;
1209 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key) + 1;
1210 }
1211 else
1212 count = sprintf(buf, "\n") + 1;
1213
1214 return count;
1215}
1216static CLASS_DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
1217
1218
1219/*
1220 * Show current 802.3ad partner key.
1221 */
1222static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf)
1223{
1224 int count = 0;
1225 struct bonding *bond = to_bond(cd);
1226
1227 if (bond->params.mode == BOND_MODE_8023AD) {
1228 struct ad_info ad_info;
1229 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key) + 1;
1230 }
1231 else
1232 count = sprintf(buf, "\n") + 1;
1233
1234 return count;
1235}
1236static CLASS_DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
1237
1238
1239/*
1240 * Show current 802.3ad partner mac.
1241 */
1242static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf)
1243{
1244 int count = 0;
1245 struct bonding *bond = to_bond(cd);
1246
1247 if (bond->params.mode == BOND_MODE_8023AD) {
1248 struct ad_info ad_info;
1249 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) {
1250 count = sprintf(buf,"%02x:%02x:%02x:%02x:%02x:%02x\n",
1251 ad_info.partner_system[0],
1252 ad_info.partner_system[1],
1253 ad_info.partner_system[2],
1254 ad_info.partner_system[3],
1255 ad_info.partner_system[4],
1256 ad_info.partner_system[5]) + 1;
1257 }
1258 }
1259 else
1260 count = sprintf(buf, "\n") + 1;
1261
1262 return count;
1263}
1264static CLASS_DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1265
1266
1267
1268static struct attribute *per_bond_attrs[] = {
1269 &class_device_attr_slaves.attr,
1270 &class_device_attr_mode.attr,
1271 &class_device_attr_arp_interval.attr,
1272 &class_device_attr_arp_ip_target.attr,
1273 &class_device_attr_downdelay.attr,
1274 &class_device_attr_updelay.attr,
1275 &class_device_attr_lacp_rate.attr,
1276 &class_device_attr_xmit_hash_policy.attr,
1277 &class_device_attr_miimon.attr,
1278 &class_device_attr_primary.attr,
1279 &class_device_attr_use_carrier.attr,
1280 &class_device_attr_active_slave.attr,
1281 &class_device_attr_mii_status.attr,
1282 &class_device_attr_ad_aggregator.attr,
1283 &class_device_attr_ad_num_ports.attr,
1284 &class_device_attr_ad_actor_key.attr,
1285 &class_device_attr_ad_partner_key.attr,
1286 &class_device_attr_ad_partner_mac.attr,
1287 NULL,
1288};
1289
1290static struct attribute_group bonding_group = {
1291 .name = "bonding",
1292 .attrs = per_bond_attrs,
1293};
1294
1295/*
1296 * Initialize sysfs. This sets up the bonding_masters file in
1297 * /sys/class/net.
1298 */
1299int bond_create_sysfs(void)
1300{
1301 int ret = 0;
1302 struct bonding *firstbond;
1303
1304 init_rwsem(&bonding_rwsem);
1305
1306 /* get the netdev class pointer */
1307 firstbond = container_of(bond_dev_list.next, struct bonding, bond_list);
1308 if (!firstbond)
1309 return -ENODEV;
1310
1311 netdev_class = firstbond->dev->class_dev.class;
1312 if (!netdev_class)
1313 return -ENODEV;
1314
1315 ret = class_create_file(netdev_class, &class_attr_bonding_masters);
1316
1317 return ret;
1318
1319}
1320
1321/*
1322 * Remove /sys/class/net/bonding_masters.
1323 */
1324void bond_destroy_sysfs(void)
1325{
1326 if (netdev_class)
1327 class_remove_file(netdev_class, &class_attr_bonding_masters);
1328}
1329
1330/*
1331 * Initialize sysfs for each bond. This sets up and registers
1332 * the 'bonding' directory for each individual bond under /sys/class/net.
1333 */
1334int bond_create_sysfs_entry(struct bonding *bond)
1335{
1336 struct net_device *dev = bond->dev;
1337 int err;
1338
1339 err = sysfs_create_group(&(dev->class_dev.kobj), &bonding_group);
1340 if (err) {
1341 		printk(KERN_ERR DRV_NAME ": %s: unable to create sysfs group.\n", dev->name);
1342 }
1343
1344 if (expected_refcount < 1)
1345 expected_refcount = atomic_read(&bond->dev->class_dev.kobj.kref.refcount);
1346
1347 return err;
1348}
1349/*
1350 * Remove sysfs entries for each bond.
1351 */
1352void bond_destroy_sysfs_entry(struct bonding *bond)
1353{
1354 struct net_device *dev = bond->dev;
1355
1356 sysfs_remove_group(&(dev->class_dev.kobj), &bonding_group);
1357}
1358
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 1433e91db0f7..015c7f1d1bc0 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -10,25 +10,6 @@
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU Public License, incorporated herein by reference. 11 * of the GNU Public License, incorporated herein by reference.
12 * 12 *
13 *
14 * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
15 * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
16 * Shmulik Hen <shmulik.hen at intel dot com>
17 * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
18 *
19 * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
20 * Amir Noam <amir.noam at intel dot com>
21 * - Code beautification and style changes (mainly in comments).
22 *
23 * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
24 * - Added support for Transmit load balancing mode.
25 *
26 * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
27 * - Code cleanup and style changes
28 *
29 * 2005/05/05 - Jason Gabler <jygabler at lbl dot gov>
30 * - added "xmit_policy" kernel parameter for alternate hashing policy
31 * support for mode 2
32 */ 13 */
33 14
34#ifndef _LINUX_BONDING_H 15#ifndef _LINUX_BONDING_H
@@ -37,11 +18,12 @@
37#include <linux/timer.h> 18#include <linux/timer.h>
38#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
39#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/kobject.h>
40#include "bond_3ad.h" 22#include "bond_3ad.h"
41#include "bond_alb.h" 23#include "bond_alb.h"
42 24
43#define DRV_VERSION "2.6.5" 25#define DRV_VERSION "3.0.0"
44#define DRV_RELDATE "November 4, 2005" 26#define DRV_RELDATE "November 8, 2005"
45#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
46#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
47 29
@@ -152,6 +134,11 @@ struct bond_params {
152 u32 arp_targets[BOND_MAX_ARP_TARGETS]; 134 u32 arp_targets[BOND_MAX_ARP_TARGETS];
153}; 135};
154 136
137struct bond_parm_tbl {
138 char *modename;
139 int mode;
140};
141
155struct vlan_entry { 142struct vlan_entry {
156 struct list_head vlan_list; 143 struct list_head vlan_list;
157 u32 vlan_ip; 144 u32 vlan_ip;
@@ -159,7 +146,7 @@ struct vlan_entry {
159}; 146};
160 147
161struct slave { 148struct slave {
162 struct net_device *dev; /* first - usefull for panic debug */ 149 struct net_device *dev; /* first - useful for panic debug */
163 struct slave *next; 150 struct slave *next;
164 struct slave *prev; 151 struct slave *prev;
165 s16 delay; 152 s16 delay;
@@ -185,7 +172,7 @@ struct slave {
185 * beforehand. 172 * beforehand.
186 */ 173 */
187struct bonding { 174struct bonding {
188 struct net_device *dev; /* first - usefull for panic debug */ 175 struct net_device *dev; /* first - useful for panic debug */
189 struct slave *first_slave; 176 struct slave *first_slave;
190 struct slave *curr_active_slave; 177 struct slave *curr_active_slave;
191 struct slave *current_arp_slave; 178 struct slave *current_arp_slave;
@@ -255,6 +242,25 @@ extern inline void bond_set_slave_active_flags(struct slave *slave)
255 242
256struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 243struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
257int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 244int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
245int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
246void bond_deinit(struct net_device *bond_dev);
247int bond_create_sysfs(void);
248void bond_destroy_sysfs(void);
249void bond_destroy_sysfs_entry(struct bonding *bond);
250int bond_create_sysfs_entry(struct bonding *bond);
251int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
252void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
253int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
254int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
255int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev);
256void bond_mii_monitor(struct net_device *bond_dev);
257void bond_loadbalance_arp_mon(struct net_device *bond_dev);
258void bond_activebackup_arp_mon(struct net_device *bond_dev);
259void bond_set_mode_ops(struct bonding *bond, int mode);
260int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
261const char *bond_mode_name(int mode);
262void bond_select_active_slave(struct bonding *bond);
263void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
258 264
259#endif /* _LINUX_BONDING_H */ 265#endif /* _LINUX_BONDING_H */
260 266
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0f030b73cbb3..146f9513aea5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2,7 +2,8 @@
2 * drivers/net/gianfar.c 2 * drivers/net/gianfar.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c 7 * Based on 8260_io/fcc_enet.c
7 * 8 *
8 * Author: Andy Fleming 9 * Author: Andy Fleming
@@ -22,8 +23,6 @@
22 * B-V +1.62 23 * B-V +1.62
23 * 24 *
24 * Theory of operation 25 * Theory of operation
25 * This driver is designed for the non-CPM ethernet controllers
26 * on the 85xx and 83xx family of integrated processors
27 * 26 *
28 * The driver is initialized through platform_device. Structures which 27 * The driver is initialized through platform_device. Structures which
29 * define the configuration needed by the board are defined in a 28 * define the configuration needed by the board are defined in a
@@ -110,7 +109,7 @@
110#endif 109#endif
111 110
112const char gfar_driver_name[] = "Gianfar Ethernet"; 111const char gfar_driver_name[] = "Gianfar Ethernet";
113const char gfar_driver_version[] = "1.2"; 112const char gfar_driver_version[] = "1.3";
114 113
115static int gfar_enet_open(struct net_device *dev); 114static int gfar_enet_open(struct net_device *dev);
116static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -139,6 +138,10 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l
139static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
140 struct vlan_group *grp); 139 struct vlan_group *grp);
141static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 140static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
141void gfar_halt(struct net_device *dev);
142void gfar_start(struct net_device *dev);
143static void gfar_clear_exact_match(struct net_device *dev);
144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
142 145
143extern struct ethtool_ops gfar_ethtool_ops; 146extern struct ethtool_ops gfar_ethtool_ops;
144 147
@@ -146,12 +149,10 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
146MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
147MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
148 151
149int gfar_uses_fcb(struct gfar_private *priv) 152/* Returns 1 if incoming frames use an FCB */
153static inline int gfar_uses_fcb(struct gfar_private *priv)
150{ 154{
151 if (priv->vlan_enable || priv->rx_csum_enable) 155 return (priv->vlan_enable || priv->rx_csum_enable);
152 return 1;
153 else
154 return 0;
155} 156}
156 157
157/* Set up the ethernet device structure, private data, 158/* Set up the ethernet device structure, private data,
@@ -320,15 +321,10 @@ static int gfar_probe(struct platform_device *pdev)
320 else 321 else
321 priv->padding = 0; 322 priv->padding = 0;
322 323
323 dev->hard_header_len += priv->padding;
324
325 if (dev->features & NETIF_F_IP_CSUM) 324 if (dev->features & NETIF_F_IP_CSUM)
326 dev->hard_header_len += GMAC_FCB_LEN; 325 dev->hard_header_len += GMAC_FCB_LEN;
327 326
328 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 327 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
329#ifdef CONFIG_GFAR_BUFSTASH
330 priv->rx_stash_size = STASH_LENGTH;
331#endif
332 priv->tx_ring_size = DEFAULT_TX_RING_SIZE; 328 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
333 priv->rx_ring_size = DEFAULT_RX_RING_SIZE; 329 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
334 330
@@ -350,6 +346,9 @@ static int gfar_probe(struct platform_device *pdev)
350 goto register_fail; 346 goto register_fail;
351 } 347 }
352 348
349 /* Create all the sysfs files */
350 gfar_init_sysfs(dev);
351
353 /* Print out the device info */ 352 /* Print out the device info */
354 printk(KERN_INFO DEVICE_NAME, dev->name); 353 printk(KERN_INFO DEVICE_NAME, dev->name);
355 for (idx = 0; idx < 6; idx++) 354 for (idx = 0; idx < 6; idx++)
@@ -357,8 +356,7 @@ static int gfar_probe(struct platform_device *pdev)
357 printk("\n"); 356 printk("\n");
358 357
359 /* Even more device info helps when determining which kernel */ 358 /* Even more device info helps when determining which kernel */
360 /* provided which set of benchmarks. Since this is global for all */ 359 /* provided which set of benchmarks. */
361 /* devices, we only print it once */
362#ifdef CONFIG_GFAR_NAPI 360#ifdef CONFIG_GFAR_NAPI
363 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 361 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
364#else 362#else
@@ -463,19 +461,9 @@ static void init_registers(struct net_device *dev)
463 /* Initialize the max receive buffer length */ 461 /* Initialize the max receive buffer length */
464 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 462 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
465 463
466#ifdef CONFIG_GFAR_BUFSTASH
467 /* If we are stashing buffers, we need to set the
468 * extraction length to the size of the buffer */
469 gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
470#endif
471
472 /* Initialize the Minimum Frame Length Register */ 464 /* Initialize the Minimum Frame Length Register */
473 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 465 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
474 466
475 /* Setup Attributes so that snooping is on for rx */
476 gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
477 gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
478
479 /* Assign the TBI an address which won't conflict with the PHYs */ 467 /* Assign the TBI an address which won't conflict with the PHYs */
480 gfar_write(&priv->regs->tbipa, TBIPA_VALUE); 468 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
481} 469}
@@ -577,8 +565,7 @@ static void free_skb_resources(struct gfar_private *priv)
577 for (i = 0; i < priv->rx_ring_size; i++) { 565 for (i = 0; i < priv->rx_ring_size; i++) {
578 if (priv->rx_skbuff[i]) { 566 if (priv->rx_skbuff[i]) {
579 dma_unmap_single(NULL, rxbdp->bufPtr, 567 dma_unmap_single(NULL, rxbdp->bufPtr,
580 priv->rx_buffer_size 568 priv->rx_buffer_size,
581 + RXBUF_ALIGNMENT,
582 DMA_FROM_DEVICE); 569 DMA_FROM_DEVICE);
583 570
584 dev_kfree_skb_any(priv->rx_skbuff[i]); 571 dev_kfree_skb_any(priv->rx_skbuff[i]);
@@ -636,6 +623,7 @@ int startup_gfar(struct net_device *dev)
636 struct gfar *regs = priv->regs; 623 struct gfar *regs = priv->regs;
637 int err = 0; 624 int err = 0;
638 u32 rctrl = 0; 625 u32 rctrl = 0;
626 u32 attrs = 0;
639 627
640 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 628 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
641 629
@@ -795,18 +783,50 @@ int startup_gfar(struct net_device *dev)
795 if (priv->rx_csum_enable) 783 if (priv->rx_csum_enable)
796 rctrl |= RCTRL_CHECKSUMMING; 784 rctrl |= RCTRL_CHECKSUMMING;
797 785
798 if (priv->extended_hash) 786 if (priv->extended_hash) {
799 rctrl |= RCTRL_EXTHASH; 787 rctrl |= RCTRL_EXTHASH;
800 788
789 gfar_clear_exact_match(dev);
790 rctrl |= RCTRL_EMEN;
791 }
792
801 if (priv->vlan_enable) 793 if (priv->vlan_enable)
802 rctrl |= RCTRL_VLAN; 794 rctrl |= RCTRL_VLAN;
803 795
796 if (priv->padding) {
797 rctrl &= ~RCTRL_PAL_MASK;
798 rctrl |= RCTRL_PADDING(priv->padding);
799 }
800
804 /* Init rctrl based on our settings */ 801 /* Init rctrl based on our settings */
805 gfar_write(&priv->regs->rctrl, rctrl); 802 gfar_write(&priv->regs->rctrl, rctrl);
806 803
807 if (dev->features & NETIF_F_IP_CSUM) 804 if (dev->features & NETIF_F_IP_CSUM)
808 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); 805 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
809 806
807 /* Set the extraction length and index */
808 attrs = ATTRELI_EL(priv->rx_stash_size) |
809 ATTRELI_EI(priv->rx_stash_index);
810
811 gfar_write(&priv->regs->attreli, attrs);
812
813 /* Start with defaults, and add stashing or locking
814 	 * depending on the appropriate variables */
815 attrs = ATTR_INIT_SETTINGS;
816
817 if (priv->bd_stash_en)
818 attrs |= ATTR_BDSTASH;
819
820 if (priv->rx_stash_size != 0)
821 attrs |= ATTR_BUFSTASH;
822
823 gfar_write(&priv->regs->attr, attrs);
824
825 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
826 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
827 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
828
829 /* Start the controller */
810 gfar_start(dev); 830 gfar_start(dev);
811 831
812 return 0; 832 return 0;
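As a worked example of the register values composed above, assuming the defaults this patch introduces (a 64-byte stash length, stash index 0, and BD stashing enabled), the attribute words come out as follows; the macros are copied from the gianfar.h hunk further below:

#include <stdio.h>

#define ATTRELI_EL(x)	((x) << 16)
#define ATTRELI_EI(x)	(x)
#define ATTR_SNOOPING	0x000000c0
#define ATTR_BDSTASH	0x00000800
#define ATTR_BUFSTASH	0x00004000

int main(void)
{
	unsigned int attreli = ATTRELI_EL(64) | ATTRELI_EI(0);
	unsigned int attr = ATTR_SNOOPING | ATTR_BDSTASH | ATTR_BUFSTASH;

	/* prints attreli=0x00400000 attr=0x000048c0 */
	printf("attreli=0x%08x attr=0x%08x\n", attreli, attr);
	return 0;
}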
@@ -851,34 +871,32 @@ static int gfar_enet_open(struct net_device *dev)
851 return err; 871 return err;
852} 872}
853 873
854static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp) 874static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
855{ 875{
856 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN); 876 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
857 877
858 memset(fcb, 0, GMAC_FCB_LEN); 878 memset(fcb, 0, GMAC_FCB_LEN);
859 879
860 /* Flag the bd so the controller looks for the FCB */
861 bdp->status |= TXBD_TOE;
862
863 return fcb; 880 return fcb;
864} 881}
865 882
866static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) 883static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
867{ 884{
868 int len; 885 u8 flags = 0;
869 886
870 /* If we're here, it's a IP packet with a TCP or UDP 887 /* If we're here, it's a IP packet with a TCP or UDP
871 * payload. We set it to checksum, using a pseudo-header 888 * payload. We set it to checksum, using a pseudo-header
872 * we provide 889 * we provide
873 */ 890 */
874 fcb->ip = 1; 891 flags = TXFCB_DEFAULT;
875 fcb->tup = 1;
876 fcb->ctu = 1;
877 fcb->nph = 1;
878 892
879 /* Notify the controller what the protocol is */ 893 /* Tell the controller what the protocol is */
880 if (skb->nh.iph->protocol == IPPROTO_UDP) 894 /* And provide the already calculated phcs */
881 fcb->udp = 1; 895 if (skb->nh.iph->protocol == IPPROTO_UDP) {
896 flags |= TXFCB_UDP;
897 fcb->phcs = skb->h.uh->check;
898 } else
899 fcb->phcs = skb->h.th->check;
882 900
883 /* l3os is the distance between the start of the 901 /* l3os is the distance between the start of the
884 * frame (skb->data) and the start of the IP hdr. 902 * frame (skb->data) and the start of the IP hdr.
@@ -887,17 +905,12 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
887 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); 905 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
888 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); 906 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
889 907
890 len = skb->nh.iph->tot_len - fcb->l4os; 908 fcb->flags = flags;
891
892 /* Provide the pseudoheader csum */
893 fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
894 skb->nh.iph->daddr, len,
895 skb->nh.iph->protocol, 0);
896} 909}
897 910
898void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 911inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
899{ 912{
900 fcb->vln = 1; 913 fcb->flags |= TXFCB_VLN;
901 fcb->vlctl = vlan_tx_tag_get(skb); 914 fcb->vlctl = vlan_tx_tag_get(skb);
902} 915}
903 916
@@ -908,6 +921,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
908 struct gfar_private *priv = netdev_priv(dev); 921 struct gfar_private *priv = netdev_priv(dev);
909 struct txfcb *fcb = NULL; 922 struct txfcb *fcb = NULL;
910 struct txbd8 *txbdp; 923 struct txbd8 *txbdp;
924 u16 status;
911 925
912 /* Update transmit stats */ 926 /* Update transmit stats */
913 priv->stats.tx_bytes += skb->len; 927 priv->stats.tx_bytes += skb->len;
@@ -919,19 +933,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
919 txbdp = priv->cur_tx; 933 txbdp = priv->cur_tx;
920 934
921 /* Clear all but the WRAP status flags */ 935 /* Clear all but the WRAP status flags */
922 txbdp->status &= TXBD_WRAP; 936 status = txbdp->status & TXBD_WRAP;
923 937
924 /* Set up checksumming */ 938 /* Set up checksumming */
925 if ((dev->features & NETIF_F_IP_CSUM) 939 if (likely((dev->features & NETIF_F_IP_CSUM)
926 && (CHECKSUM_HW == skb->ip_summed)) { 940 && (CHECKSUM_HW == skb->ip_summed))) {
927 fcb = gfar_add_fcb(skb, txbdp); 941 fcb = gfar_add_fcb(skb, txbdp);
942 status |= TXBD_TOE;
928 gfar_tx_checksum(skb, fcb); 943 gfar_tx_checksum(skb, fcb);
929 } 944 }
930 945
931 if (priv->vlan_enable && 946 if (priv->vlan_enable &&
932 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) { 947 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
933 if (NULL == fcb) 948 if (unlikely(NULL == fcb)) {
934 fcb = gfar_add_fcb(skb, txbdp); 949 fcb = gfar_add_fcb(skb, txbdp);
950 status |= TXBD_TOE;
951 }
935 952
936 gfar_tx_vlan(skb, fcb); 953 gfar_tx_vlan(skb, fcb);
937 } 954 }
@@ -949,14 +966,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
949 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 966 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
950 967
951 /* Flag the BD as interrupt-causing */ 968 /* Flag the BD as interrupt-causing */
952 txbdp->status |= TXBD_INTERRUPT; 969 status |= TXBD_INTERRUPT;
953 970
954 /* Flag the BD as ready to go, last in frame, and */ 971 /* Flag the BD as ready to go, last in frame, and */
955 /* in need of CRC */ 972 /* in need of CRC */
956 txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC); 973 status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
957 974
958 dev->trans_start = jiffies; 975 dev->trans_start = jiffies;
959 976
977 txbdp->status = status;
978
960 /* If this was the last BD in the ring, the next one */ 979 /* If this was the last BD in the ring, the next one */
961 /* is at the beginning of the ring */ 980 /* is at the beginning of the ring */
962 if (txbdp->status & TXBD_WRAP) 981 if (txbdp->status & TXBD_WRAP)
@@ -1010,21 +1029,7 @@ static struct net_device_stats * gfar_get_stats(struct net_device *dev)
1010/* Changes the mac address if the controller is not running. */ 1029/* Changes the mac address if the controller is not running. */
1011int gfar_set_mac_address(struct net_device *dev) 1030int gfar_set_mac_address(struct net_device *dev)
1012{ 1031{
1013 struct gfar_private *priv = netdev_priv(dev); 1032 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1014 int i;
1015 char tmpbuf[MAC_ADDR_LEN];
1016 u32 tempval;
1017
1018 /* Now copy it into the mac registers backwards, cuz */
1019 /* little endian is silly */
1020 for (i = 0; i < MAC_ADDR_LEN; i++)
1021 tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
1022
1023 gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
1024
1025 tempval = *((u32 *) (tmpbuf + 4));
1026
1027 gfar_write(&priv->regs->macstnaddr2, tempval);
1028 1033
1029 return 0; 1034 return 0;
1030} 1035}
@@ -1110,7 +1115,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1110 INCREMENTAL_BUFFER_SIZE; 1115 INCREMENTAL_BUFFER_SIZE;
1111 1116
1112 /* Only stop and start the controller if it isn't already 1117 /* Only stop and start the controller if it isn't already
1113 * stopped */ 1118 * stopped, and we changed something */
1114 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 1119 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1115 stop_gfar(dev); 1120 stop_gfar(dev);
1116 1121
@@ -1220,6 +1225,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1220 1225
1221struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) 1226struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1222{ 1227{
1228 unsigned int alignamount;
1223 struct gfar_private *priv = netdev_priv(dev); 1229 struct gfar_private *priv = netdev_priv(dev);
1224 struct sk_buff *skb = NULL; 1230 struct sk_buff *skb = NULL;
1225 unsigned int timeout = SKB_ALLOC_TIMEOUT; 1231 unsigned int timeout = SKB_ALLOC_TIMEOUT;
@@ -1231,18 +1237,18 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1231 if (NULL == skb) 1237 if (NULL == skb)
1232 return NULL; 1238 return NULL;
1233 1239
1240 alignamount = RXBUF_ALIGNMENT -
1241 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));
1242
1234 /* We need the data buffer to be aligned properly. We will reserve 1243 /* We need the data buffer to be aligned properly. We will reserve
1235 * as many bytes as needed to align the data properly 1244 * as many bytes as needed to align the data properly
1236 */ 1245 */
1237 skb_reserve(skb, 1246 skb_reserve(skb, alignamount);
1238 RXBUF_ALIGNMENT -
1239 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
1240 1247
1241 skb->dev = dev; 1248 skb->dev = dev;
1242 1249
1243 bdp->bufPtr = dma_map_single(NULL, skb->data, 1250 bdp->bufPtr = dma_map_single(NULL, skb->data,
1244 priv->rx_buffer_size + RXBUF_ALIGNMENT, 1251 priv->rx_buffer_size, DMA_FROM_DEVICE);
1245 DMA_FROM_DEVICE);
1246 1252
1247 bdp->length = 0; 1253 bdp->length = 0;
1248 1254
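The alignment reserve computed above can be illustrated with concrete numbers; the RXBUF_ALIGNMENT value used here (64, a power of two) is an assumption for the example, not taken from this hunk:

#include <stdio.h>

#define RXBUF_ALIGNMENT 64UL	/* assumed value, for illustration */

int main(void)
{
	unsigned long data = 0x1008;	/* hypothetical skb->data address */
	unsigned long alignamount =
		RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1));

	/* 0x1008 & 0x3f = 0x08, so 56 bytes are reserved and the buffer
	 * starts at 0x1040, a 64-byte boundary.  Note that an already
	 * aligned address reserves a full RXBUF_ALIGNMENT bytes. */
	printf("reserve %lu -> buffer at 0x%lx\n", alignamount, data + alignamount);
	return 0;
}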
@@ -1350,7 +1356,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1350 /* If valid headers were found, and valid sums 1356 /* If valid headers were found, and valid sums
1351 * were verified, then we tell the kernel that no 1357 * were verified, then we tell the kernel that no
1352 * checksumming is necessary. Otherwise, it is */ 1358 * checksumming is necessary. Otherwise, it is */
1353 if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu) 1359 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1354 skb->ip_summed = CHECKSUM_UNNECESSARY; 1360 skb->ip_summed = CHECKSUM_UNNECESSARY;
1355 else 1361 else
1356 skb->ip_summed = CHECKSUM_NONE; 1362 skb->ip_summed = CHECKSUM_NONE;
@@ -1401,7 +1407,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1401 skb->protocol = eth_type_trans(skb, dev); 1407 skb->protocol = eth_type_trans(skb, dev);
1402 1408
1403 /* Send the packet up the stack */ 1409 /* Send the packet up the stack */
1404 if (unlikely(priv->vlgrp && fcb->vln)) 1410 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1405 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl); 1411 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1406 else 1412 else
1407 ret = RECEIVE(skb); 1413 ret = RECEIVE(skb);
@@ -1620,6 +1626,7 @@ static void adjust_link(struct net_device *dev)
1620 spin_lock_irqsave(&priv->lock, flags); 1626 spin_lock_irqsave(&priv->lock, flags);
1621 if (phydev->link) { 1627 if (phydev->link) {
1622 u32 tempval = gfar_read(&regs->maccfg2); 1628 u32 tempval = gfar_read(&regs->maccfg2);
1629 u32 ecntrl = gfar_read(&regs->ecntrl);
1623 1630
1624 /* Now we make sure that we can be in full duplex mode. 1631 /* Now we make sure that we can be in full duplex mode.
1625 * If not, we operate in half-duplex mode. */ 1632 * If not, we operate in half-duplex mode. */
@@ -1644,6 +1651,13 @@ static void adjust_link(struct net_device *dev)
1644 case 10: 1651 case 10:
1645 tempval = 1652 tempval =
1646 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 1653 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1654
1655 /* Reduced mode distinguishes
1656 * between 10 and 100 */
1657 if (phydev->speed == SPEED_100)
1658 ecntrl |= ECNTRL_R100;
1659 else
1660 ecntrl &= ~(ECNTRL_R100);
1647 break; 1661 break;
1648 default: 1662 default:
1649 if (netif_msg_link(priv)) 1663 if (netif_msg_link(priv))
@@ -1657,6 +1671,7 @@ static void adjust_link(struct net_device *dev)
1657 } 1671 }
1658 1672
1659 gfar_write(&regs->maccfg2, tempval); 1673 gfar_write(&regs->maccfg2, tempval);
1674 gfar_write(&regs->ecntrl, ecntrl);
1660 1675
1661 if (!priv->oldlink) { 1676 if (!priv->oldlink) {
1662 new_state = 1; 1677 new_state = 1;
@@ -1721,6 +1736,9 @@ static void gfar_set_multi(struct net_device *dev)
1721 gfar_write(&regs->gaddr6, 0xffffffff); 1736 gfar_write(&regs->gaddr6, 0xffffffff);
1722 gfar_write(&regs->gaddr7, 0xffffffff); 1737 gfar_write(&regs->gaddr7, 0xffffffff);
1723 } else { 1738 } else {
1739 int em_num;
1740 int idx;
1741
1724 /* zero out the hash */ 1742 /* zero out the hash */
1725 gfar_write(&regs->igaddr0, 0x0); 1743 gfar_write(&regs->igaddr0, 0x0);
1726 gfar_write(&regs->igaddr1, 0x0); 1744 gfar_write(&regs->igaddr1, 0x0);
@@ -1739,18 +1757,47 @@ static void gfar_set_multi(struct net_device *dev)
1739 gfar_write(&regs->gaddr6, 0x0); 1757 gfar_write(&regs->gaddr6, 0x0);
1740 gfar_write(&regs->gaddr7, 0x0); 1758 gfar_write(&regs->gaddr7, 0x0);
1741 1759
1760 /* If we have extended hash tables, we need to
1761 * clear the exact match registers to prepare for
1762 * setting them */
1763 if (priv->extended_hash) {
1764 em_num = GFAR_EM_NUM + 1;
1765 gfar_clear_exact_match(dev);
1766 idx = 1;
1767 } else {
1768 idx = 0;
1769 em_num = 0;
1770 }
1771
1742 if(dev->mc_count == 0) 1772 if(dev->mc_count == 0)
1743 return; 1773 return;
1744 1774
1745 /* Parse the list, and set the appropriate bits */ 1775 /* Parse the list, and set the appropriate bits */
1746 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 1776 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1747 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); 1777 if (idx < em_num) {
1778 gfar_set_mac_for_addr(dev, idx,
1779 mc_ptr->dmi_addr);
1780 idx++;
1781 } else
1782 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1748 } 1783 }
1749 } 1784 }
1750 1785
1751 return; 1786 return;
1752} 1787}
1753 1788
1789
1790/* Clears each of the exact match registers to zero, so they
1791 * don't interfere with normal reception */
1792static void gfar_clear_exact_match(struct net_device *dev)
1793{
1794 int idx;
1795 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
1796
1797 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
1798 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1799}
1800
1754/* Set the appropriate hash bit for the given addr */ 1801/* Set the appropriate hash bit for the given addr */
1755/* The algorithm works like so: 1802/* The algorithm works like so:
1756 * 1) Take the Destination Address (ie the multicast address), and 1803 * 1) Take the Destination Address (ie the multicast address), and
@@ -1781,6 +1828,32 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1781 return; 1828 return;
1782} 1829}
1783 1830
1831
1832/* There are multiple MAC Address register pairs on some controllers
1833 * This function sets the numth pair to a given address
1834 */
1835static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1836{
1837 struct gfar_private *priv = netdev_priv(dev);
1838 int idx;
1839 char tmpbuf[MAC_ADDR_LEN];
1840 u32 tempval;
1841 u32 *macptr = &priv->regs->macstnaddr1;
1842
1843 macptr += num*2;
1844
1845 /* Now copy it into the mac registers backwards, cuz */
1846 /* little endian is silly */
1847 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1848 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
1849
1850 gfar_write(macptr, *((u32 *) (tmpbuf)));
1851
1852 tempval = *((u32 *) (tmpbuf + 4));
1853
1854 gfar_write(macptr+1, tempval);
1855}
1856
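A userspace sketch of the byte reversal gfar_set_mac_for_addr() performs before writing the two station-address registers; the MAC address below is just an example value:

#include <stdio.h>

#define MAC_ADDR_LEN 6

int main(void)
{
	unsigned char addr[MAC_ADDR_LEN] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	unsigned char tmpbuf[MAC_ADDR_LEN];
	int idx;

	/* Same reversal as above: the last address byte ends up first */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		printf("%02x ", tmpbuf[idx]);	/* 03 02 01 9f 04 00 */
	printf("\n");
	return 0;
}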
1784/* GFAR error interrupt handler */ 1857/* GFAR error interrupt handler */
1785static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs) 1858static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1786{ 1859{
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 5065ba82cb76..94a91da84fbb 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -90,12 +90,26 @@ extern const char gfar_driver_version[];
90#define GFAR_RX_MAX_RING_SIZE 256 90#define GFAR_RX_MAX_RING_SIZE 256
91#define GFAR_TX_MAX_RING_SIZE 256 91#define GFAR_TX_MAX_RING_SIZE 256
92 92
93#define GFAR_MAX_FIFO_THRESHOLD 511
94#define GFAR_MAX_FIFO_STARVE 511
95#define GFAR_MAX_FIFO_STARVE_OFF 511
96
93#define DEFAULT_RX_BUFFER_SIZE 1536 97#define DEFAULT_RX_BUFFER_SIZE 1536
94#define TX_RING_MOD_MASK(size) (size-1) 98#define TX_RING_MOD_MASK(size) (size-1)
95#define RX_RING_MOD_MASK(size) (size-1) 99#define RX_RING_MOD_MASK(size) (size-1)
96#define JUMBO_BUFFER_SIZE 9728 100#define JUMBO_BUFFER_SIZE 9728
97#define JUMBO_FRAME_SIZE 9600 101#define JUMBO_FRAME_SIZE 9600
98 102
103#define DEFAULT_FIFO_TX_THR 0x100
104#define DEFAULT_FIFO_TX_STARVE 0x40
105#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
106#define DEFAULT_BD_STASH 1
107#define DEFAULT_STASH_LENGTH 64
108#define DEFAULT_STASH_INDEX 0
109
110/* The number of Exact Match registers */
111#define GFAR_EM_NUM 15
112
99/* Latency of interface clock in nanoseconds */ 113/* Latency of interface clock in nanoseconds */
100/* Interface clock latency , in this case, means the 114/* Interface clock latency , in this case, means the
101 * time described by a value of 1 in the interrupt 115 * time described by a value of 1 in the interrupt
@@ -112,11 +126,11 @@ extern const char gfar_driver_version[];
112 126
113#define DEFAULT_TX_COALESCE 1 127#define DEFAULT_TX_COALESCE 1
114#define DEFAULT_TXCOUNT 16 128#define DEFAULT_TXCOUNT 16
115#define DEFAULT_TXTIME 400 129#define DEFAULT_TXTIME 4
116 130
117#define DEFAULT_RX_COALESCE 1 131#define DEFAULT_RX_COALESCE 1
118#define DEFAULT_RXCOUNT 16 132#define DEFAULT_RXCOUNT 16
119#define DEFAULT_RXTIME 400 133#define DEFAULT_RXTIME 4
120 134
121#define TBIPA_VALUE 0x1f 135#define TBIPA_VALUE 0x1f
122#define MIIMCFG_INIT_VALUE 0x00000007 136#define MIIMCFG_INIT_VALUE 0x00000007
@@ -147,6 +161,7 @@ extern const char gfar_driver_version[];
147 161
148#define ECNTRL_INIT_SETTINGS 0x00001000 162#define ECNTRL_INIT_SETTINGS 0x00001000
149#define ECNTRL_TBI_MODE 0x00000020 163#define ECNTRL_TBI_MODE 0x00000020
164#define ECNTRL_R100 0x00000008
150 165
151#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE 166#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
152 167
@@ -181,10 +196,12 @@ extern const char gfar_driver_version[];
181#define RCTRL_PRSDEP_MASK 0x000000c0 196#define RCTRL_PRSDEP_MASK 0x000000c0
182#define RCTRL_PRSDEP_INIT 0x000000c0 197#define RCTRL_PRSDEP_INIT 0x000000c0
183#define RCTRL_PROM 0x00000008 198#define RCTRL_PROM 0x00000008
199#define RCTRL_EMEN 0x00000002
184#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \ 200#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \
185 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT) 201 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT)
186#define RCTRL_EXTHASH (RCTRL_GHTX) 202#define RCTRL_EXTHASH (RCTRL_GHTX)
187#define RCTRL_VLAN (RCTRL_PRSDEP_INIT) 203#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
204#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
188 205
189 206
190#define RSTAT_CLEAR_RHALT 0x00800000 207#define RSTAT_CLEAR_RHALT 0x00800000
@@ -251,28 +268,26 @@ extern const char gfar_driver_version[];
251 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 268 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
252 | IMASK_PERR) 269 | IMASK_PERR)
253 270
271/* Fifo management */
272#define FIFO_TX_THR_MASK 0x01ff
273#define FIFO_TX_STARVE_MASK 0x01ff
274#define FIFO_TX_STARVE_OFF_MASK 0x01ff
254 275
255/* Attribute fields */ 276/* Attribute fields */
256 277
257/* This enables rx snooping for buffers and descriptors */ 278/* This enables rx snooping for buffers and descriptors */
258#ifdef CONFIG_GFAR_BDSTASH
259#define ATTR_BDSTASH 0x00000800 279#define ATTR_BDSTASH 0x00000800
260#else
261#define ATTR_BDSTASH 0x00000000
262#endif
263 280
264#ifdef CONFIG_GFAR_BUFSTASH
265#define ATTR_BUFSTASH 0x00004000 281#define ATTR_BUFSTASH 0x00004000
266#define STASH_LENGTH 64
267#else
268#define ATTR_BUFSTASH 0x00000000
269#endif
270 282
271#define ATTR_SNOOPING 0x000000c0 283#define ATTR_SNOOPING 0x000000c0
272#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \ 284#define ATTR_INIT_SETTINGS ATTR_SNOOPING
273 | ATTR_BDSTASH | ATTR_BUFSTASH)
274 285
275#define ATTRELI_INIT_SETTINGS 0x0 286#define ATTRELI_INIT_SETTINGS 0x0
287#define ATTRELI_EL_MASK 0x3fff0000
288#define ATTRELI_EL(x) (x << 16)
289#define ATTRELI_EI_MASK 0x00003fff
290#define ATTRELI_EI(x) (x)
276 291
277 292
278/* TxBD status field bits */ 293/* TxBD status field bits */
@@ -328,6 +343,7 @@ extern const char gfar_driver_version[];
328#define RXFCB_CTU 0x0400 343#define RXFCB_CTU 0x0400
329#define RXFCB_EIP 0x0200 344#define RXFCB_EIP 0x0200
330#define RXFCB_ETU 0x0100 345#define RXFCB_ETU 0x0100
346#define RXFCB_CSUM_MASK 0x0f00
331#define RXFCB_PERR_MASK 0x000c 347#define RXFCB_PERR_MASK 0x000c
332#define RXFCB_PERR_BADL3 0x0008 348#define RXFCB_PERR_BADL3 0x0008
333 349
@@ -339,14 +355,7 @@ struct txbd8
339}; 355};
340 356
341struct txfcb { 357struct txfcb {
342 u8 vln:1, 358 u8 flags;
343 ip:1,
344 ip6:1,
345 tup:1,
346 udp:1,
347 cip:1,
348 ctu:1,
349 nph:1;
350 u8 reserved; 359 u8 reserved;
351 u8 l4os; /* Level 4 Header Offset */ 360 u8 l4os; /* Level 4 Header Offset */
352 u8 l3os; /* Level 3 Header Offset */ 361 u8 l3os; /* Level 3 Header Offset */
@@ -362,14 +371,7 @@ struct rxbd8
362}; 371};
363 372
364struct rxfcb { 373struct rxfcb {
365 u16 vln:1, 374 u16 flags;
366 ip:1,
367 ip6:1,
368 tup:1,
369 cip:1,
370 ctu:1,
371 eip:1,
372 etu:1;
373 u8 rq; /* Receive Queue index */ 375 u8 rq; /* Receive Queue index */
374 u8 pro; /* Layer 4 Protocol */ 376 u8 pro; /* Layer 4 Protocol */
375 u16 reserved; 377 u16 reserved;
@@ -688,12 +690,17 @@ struct gfar_private {
688 spinlock_t lock; 690 spinlock_t lock;
689 unsigned int rx_buffer_size; 691 unsigned int rx_buffer_size;
690 unsigned int rx_stash_size; 692 unsigned int rx_stash_size;
693 unsigned int rx_stash_index;
691 unsigned int tx_ring_size; 694 unsigned int tx_ring_size;
692 unsigned int rx_ring_size; 695 unsigned int rx_ring_size;
696 unsigned int fifo_threshold;
697 unsigned int fifo_starve;
698 unsigned int fifo_starve_off;
693 699
694 unsigned char vlan_enable:1, 700 unsigned char vlan_enable:1,
695 rx_csum_enable:1, 701 rx_csum_enable:1,
696 extended_hash:1; 702 extended_hash:1,
703 bd_stash_en:1;
697 unsigned short padding; 704 unsigned short padding;
698 struct vlan_group *vlgrp; 705 struct vlan_group *vlgrp;
699 /* Info structure initialized by board setup code */ 706 /* Info structure initialized by board setup code */
@@ -731,6 +738,6 @@ extern void stop_gfar(struct net_device *dev);
731extern void gfar_halt(struct net_device *dev); 738extern void gfar_halt(struct net_device *dev);
732extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 739extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
733 int enable, u32 regnum, u32 read); 740 int enable, u32 regnum, u32 read);
734void gfar_setup_stashing(struct net_device *dev); 741void gfar_init_sysfs(struct net_device *dev);
735 742
736#endif /* __GIANFAR_H */ 743#endif /* __GIANFAR_H */
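With the txfcb/rxfcb bitfields above collapsed into a single flags word, callers now test the RXFCB_*/TXFCB_* mask constants instead of named bitfield members. A minimal sketch of an RX checksum check built on the masks in this hunk follows; RXFCB_CIP (assumed to be 0x0800, defined just above the range shown here) and the helper name are assumptions for illustration, not the driver's actual code.

    /* Sketch only: RXFCB_CIP is assumed to be 0x0800; together with
     * RXFCB_CTU it fills out RXFCB_CSUM_MASK (0x0f00). */
    #define RXFCB_CIP 0x0800

    static inline void gfar_rx_checksum_sketch(struct sk_buff *skb,
                                               struct rxfcb *fcb)
    {
            /* Both the IP and the TCP/UDP checksum were verified in
             * hardware and neither error bit (EIP/ETU) is set. */
            if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
            else
                    skb->ip_summed = CHECKSUM_NONE;
    }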
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index cfa3cd7c91a0..765e810620fe 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -125,7 +125,7 @@ static char stat_gstrings[][ETH_GSTRING_LEN] = {
125static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) 125static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
126{ 126{
127 struct gfar_private *priv = netdev_priv(dev); 127 struct gfar_private *priv = netdev_priv(dev);
128 128
129 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 129 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
131 else 131 else
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index e85eb216fb5b..d527cf2f9c1d 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -24,6 +24,7 @@
24#define MII_READ_COMMAND 0x00000001 24#define MII_READ_COMMAND 0x00000001
25 25
26#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ 26#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
27 | SUPPORTED_10baseT_Full \
27 | SUPPORTED_100baseT_Half \ 28 | SUPPORTED_100baseT_Half \
28 | SUPPORTED_100baseT_Full \ 29 | SUPPORTED_100baseT_Full \
29 | SUPPORTED_Autoneg \ 30 | SUPPORTED_Autoneg \
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
new file mode 100644
index 000000000000..10d34cb19192
--- /dev/null
+++ b/drivers/net/gianfar_sysfs.c
@@ -0,0 +1,311 @@
1/*
2 * drivers/net/gianfar_sysfs.c
3 *
4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
11 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 *
19 * Sysfs file creation and management
20 */
21
22#include <linux/config.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/string.h>
26#include <linux/errno.h>
27#include <linux/unistd.h>
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/etherdevice.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/device.h>
35
36#include <asm/uaccess.h>
37#include <linux/module.h>
38#include <linux/version.h>
39
40#include "gianfar.h"
41
42#define GFAR_ATTR(_name) \
43static ssize_t gfar_show_##_name(struct class_device *cdev, char *buf); \
44static ssize_t gfar_set_##_name(struct class_device *cdev, \
45 const char *buf, size_t count); \
46static CLASS_DEVICE_ATTR(_name, 0644, gfar_show_##_name, gfar_set_##_name)
47
48#define GFAR_CREATE_FILE(_dev, _name) \
49 class_device_create_file(&_dev->class_dev, &class_device_attr_##_name)
50
51GFAR_ATTR(bd_stash);
52GFAR_ATTR(rx_stash_size);
53GFAR_ATTR(rx_stash_index);
54GFAR_ATTR(fifo_threshold);
55GFAR_ATTR(fifo_starve);
56GFAR_ATTR(fifo_starve_off);
57
58#define to_net_dev(cd) container_of(cd, struct net_device, class_dev)
59
60static ssize_t gfar_show_bd_stash(struct class_device *cdev, char *buf)
61{
62 struct net_device *dev = to_net_dev(cdev);
63 struct gfar_private *priv = netdev_priv(dev);
64
65 return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
66}
67
68static ssize_t gfar_set_bd_stash(struct class_device *cdev,
69 const char *buf, size_t count)
70{
71 struct net_device *dev = to_net_dev(cdev);
72 struct gfar_private *priv = netdev_priv(dev);
73 int new_setting = 0;
74 u32 temp;
75 unsigned long flags;
76
77 /* Find out the new setting */
78 if (!strncmp("on", buf, count-1) || !strncmp("1", buf, count-1))
79 new_setting = 1;
80 else if (!strncmp("off", buf, count-1) || !strncmp("0", buf, count-1))
81 new_setting = 0;
82 else
83 return count;
84
85 spin_lock_irqsave(&priv->lock, flags);
86
87 /* Set the new stashing value */
88 priv->bd_stash_en = new_setting;
89
90 temp = gfar_read(&priv->regs->attr);
91
92 if (new_setting)
93 temp |= ATTR_BDSTASH;
94 else
95 temp &= ~(ATTR_BDSTASH);
96
97 gfar_write(&priv->regs->attr, temp);
98
99 spin_unlock_irqrestore(&priv->lock, flags);
100
101 return count;
102}
103
104static ssize_t gfar_show_rx_stash_size(struct class_device *cdev, char *buf)
105{
106 struct net_device *dev = to_net_dev(cdev);
107 struct gfar_private *priv = netdev_priv(dev);
108
109 return sprintf(buf, "%d\n", priv->rx_stash_size);
110}
111
112static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
113 const char *buf, size_t count)
114{
115 struct net_device *dev = to_net_dev(cdev);
116 struct gfar_private *priv = netdev_priv(dev);
117 unsigned int length = simple_strtoul(buf, NULL, 0);
118 u32 temp;
119 unsigned long flags;
120
121 if (length > priv->rx_buffer_size)
122 return count;
123
124 if (length == priv->rx_stash_size)
125 return count;
126 spin_lock_irqsave(&priv->lock, flags);
127
128 priv->rx_stash_size = length;
129
130 temp = gfar_read(&priv->regs->attreli);
131 temp &= ~ATTRELI_EL_MASK;
132 temp |= ATTRELI_EL(length);
133 gfar_write(&priv->regs->attreli, temp);
134
135 /* Turn stashing on/off as appropriate */
136 temp = gfar_read(&priv->regs->attr);
137
138 if (length)
139 temp |= ATTR_BUFSTASH;
140 else
141 temp &= ~(ATTR_BUFSTASH);
142
143 gfar_write(&priv->regs->attr, temp);
144
145 spin_unlock_irqrestore(&priv->lock, flags);
146
147 return count;
148}
149
150
151/* Stashing will only be enabled when rx_stash_size != 0 */
152static ssize_t gfar_show_rx_stash_index(struct class_device *cdev, char *buf)
153{
154 struct net_device *dev = to_net_dev(cdev);
155 struct gfar_private *priv = netdev_priv(dev);
156
157 return sprintf(buf, "%d\n", priv->rx_stash_index);
158}
159
160static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
161 const char *buf, size_t count)
162{
163 struct net_device *dev = to_net_dev(cdev);
164 struct gfar_private *priv = netdev_priv(dev);
165 unsigned short index = simple_strtoul(buf, NULL, 0);
166 u32 temp;
167 unsigned long flags;
168
169 if (index > priv->rx_stash_size)
170 return count;
171
172 if (index == priv->rx_stash_index)
173 return count;
174 spin_lock_irqsave(&priv->lock, flags);
175
176 priv->rx_stash_index = index;
177
178 temp = gfar_read(&priv->regs->attreli);
179 temp &= ~ATTRELI_EI_MASK;
180 temp |= ATTRELI_EI(index);
181 gfar_write(&priv->regs->attreli, temp);
182
183 spin_unlock_irqrestore(&priv->lock, flags);
184
185 return count;
186}
187
188static ssize_t gfar_show_fifo_threshold(struct class_device *cdev, char *buf)
189{
190 struct net_device *dev = to_net_dev(cdev);
191 struct gfar_private *priv = netdev_priv(dev);
192
193 return sprintf(buf, "%d\n", priv->fifo_threshold);
194}
195
196static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
197 const char *buf, size_t count)
198{
199 struct net_device *dev = to_net_dev(cdev);
200 struct gfar_private *priv = netdev_priv(dev);
201 unsigned int length = simple_strtoul(buf, NULL, 0);
202 u32 temp;
203 unsigned long flags;
204
205 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count;
207
208 spin_lock_irqsave(&priv->lock, flags);
209
210 priv->fifo_threshold = length;
211
212 temp = gfar_read(&priv->regs->fifo_tx_thr);
213 temp &= ~FIFO_TX_THR_MASK;
214 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp);
216
217 spin_unlock_irqrestore(&priv->lock, flags);
218
219 return count;
220}
221
222static ssize_t gfar_show_fifo_starve(struct class_device *cdev, char *buf)
223{
224 struct net_device *dev = to_net_dev(cdev);
225 struct gfar_private *priv = netdev_priv(dev);
226
227 return sprintf(buf, "%d\n", priv->fifo_starve);
228}
229
230
231static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
232 const char *buf, size_t count)
233{
234 struct net_device *dev = to_net_dev(cdev);
235 struct gfar_private *priv = netdev_priv(dev);
236 unsigned int num = simple_strtoul(buf, NULL, 0);
237 u32 temp;
238 unsigned long flags;
239
240 if (num > GFAR_MAX_FIFO_STARVE)
241 return count;
242
243 spin_lock_irqsave(&priv->lock, flags);
244
245 priv->fifo_starve = num;
246
247 temp = gfar_read(&priv->regs->fifo_tx_starve);
248 temp &= ~FIFO_TX_STARVE_MASK;
249 temp |= num;
250 gfar_write(&priv->regs->fifo_tx_starve, temp);
251
252 spin_unlock_irqrestore(&priv->lock, flags);
253
254 return count;
255}
256
257static ssize_t gfar_show_fifo_starve_off(struct class_device *cdev, char *buf)
258{
259 struct net_device *dev = to_net_dev(cdev);
260 struct gfar_private *priv = netdev_priv(dev);
261
262 return sprintf(buf, "%d\n", priv->fifo_starve_off);
263}
264
265static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
266 const char *buf, size_t count)
267{
268 struct net_device *dev = to_net_dev(cdev);
269 struct gfar_private *priv = netdev_priv(dev);
270 unsigned int num = simple_strtoul(buf, NULL, 0);
271 u32 temp;
272 unsigned long flags;
273
274 if (num > GFAR_MAX_FIFO_STARVE_OFF)
275 return count;
276
277 spin_lock_irqsave(&priv->lock, flags);
278
279 priv->fifo_starve_off = num;
280
281 temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff);
282 temp &= ~FIFO_TX_STARVE_OFF_MASK;
283 temp |= num;
284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
285
286 spin_unlock_irqrestore(&priv->lock, flags);
287
288 return count;
289}
290
291void gfar_init_sysfs(struct net_device *dev)
292{
293 struct gfar_private *priv = netdev_priv(dev);
294
295 /* Initialize the default values */
296 priv->rx_stash_size = DEFAULT_STASH_LENGTH;
297 priv->rx_stash_index = DEFAULT_STASH_INDEX;
298 priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
299 priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
300 priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
301 priv->bd_stash_en = DEFAULT_BD_STASH;
302
303 /* Create our sysfs files */
304 GFAR_CREATE_FILE(dev, bd_stash);
305 GFAR_CREATE_FILE(dev, rx_stash_size);
306 GFAR_CREATE_FILE(dev, rx_stash_index);
307 GFAR_CREATE_FILE(dev, fifo_threshold);
308 GFAR_CREATE_FILE(dev, fifo_starve);
309 GFAR_CREATE_FILE(dev, fifo_starve_off);
310
311}
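The GFAR_ATTR()/GFAR_CREATE_FILE() macros above are thin wrappers around the 2.6-era class_device attribute API. As a rough sketch (not a literal preprocessor dump), GFAR_ATTR(bd_stash) expands to:

    static ssize_t gfar_show_bd_stash(struct class_device *cdev, char *buf);
    static ssize_t gfar_set_bd_stash(struct class_device *cdev,
                                     const char *buf, size_t count);
    static CLASS_DEVICE_ATTR(bd_stash, 0644,
                             gfar_show_bd_stash, gfar_set_bd_stash);

and GFAR_CREATE_FILE(dev, bd_stash) then calls class_device_create_file(&dev->class_dev, &class_device_attr_bd_stash), so the attribute shows up under the interface's class directory (e.g. /sys/class/net/eth0/bd_stash; the exact interface name is board-specific).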
diff --git a/drivers/net/ixp2000/Kconfig b/drivers/net/ixp2000/Kconfig
new file mode 100644
index 000000000000..2fec2415651f
--- /dev/null
+++ b/drivers/net/ixp2000/Kconfig
@@ -0,0 +1,6 @@
1config ENP2611_MSF_NET
2 tristate "Radisys ENP2611 MSF network interface support"
3 depends on ARCH_ENP2611
4 help
5 This is a driver for the MSF network interface unit in
6 the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ixp2000/Makefile b/drivers/net/ixp2000/Makefile
new file mode 100644
index 000000000000..fd38351ceaa7
--- /dev/null
+++ b/drivers/net/ixp2000/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
2
3enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ixp2000/caleb.c b/drivers/net/ixp2000/caleb.c
new file mode 100644
index 000000000000..d70530adb1e2
--- /dev/null
+++ b/drivers/net/ixp2000/caleb.c
@@ -0,0 +1,136 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <asm/io.h>
16
17#define CALEB_IDLO 0x00
18#define CALEB_IDHI 0x01
19#define CALEB_RID 0x02
20#define CALEB_RESET 0x03
21#define CALEB_INTREN0 0x04
22#define CALEB_INTREN1 0x05
23#define CALEB_INTRSTAT0 0x06
24#define CALEB_INTRSTAT1 0x07
25#define CALEB_PORTEN 0x08
26#define CALEB_BURST 0x09
27#define CALEB_PORTPAUS 0x0A
28#define CALEB_PORTPAUSD 0x0B
29#define CALEB_PHY0RX 0x10
30#define CALEB_PHY1RX 0x11
31#define CALEB_PHY0TX 0x12
32#define CALEB_PHY1TX 0x13
33#define CALEB_IXPRX_HI_CNTR 0x15
34#define CALEB_PHY0RX_HI_CNTR 0x16
35#define CALEB_PHY1RX_HI_CNTR 0x17
36#define CALEB_IXPRX_CNTR 0x18
37#define CALEB_PHY0RX_CNTR 0x19
38#define CALEB_PHY1RX_CNTR 0x1A
39#define CALEB_IXPTX_CNTR 0x1B
40#define CALEB_PHY0TX_CNTR 0x1C
41#define CALEB_PHY1TX_CNTR 0x1D
42#define CALEB_DEBUG0 0x1E
43#define CALEB_DEBUG1 0x1F
44
45
46static u8 caleb_reg_read(int reg)
47{
48 u8 value;
49
50 value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
51
52// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
53
54 return value;
55}
56
57static void caleb_reg_write(int reg, u8 value)
58{
59 u8 dummy;
60
61// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
62
63 *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
64
65 dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
66 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
67}
68
69
70void caleb_reset(void)
71{
72 /*
73 * Perform a chip reset.
74 */
75 caleb_reg_write(CALEB_RESET, 0x02);
76 udelay(1);
77
78 /*
79 * Enable all interrupt sources. This is needed to get
80 * meaningful results out of the status bits (register 6
81 * and 7.)
82 */
83 caleb_reg_write(CALEB_INTREN0, 0xff);
84 caleb_reg_write(CALEB_INTREN1, 0x07);
85
86 /*
87 * Set RX and TX FIFO thresholds to 1.5kb.
88 */
89 caleb_reg_write(CALEB_PHY0RX, 0x11);
90 caleb_reg_write(CALEB_PHY1RX, 0x11);
91 caleb_reg_write(CALEB_PHY0TX, 0x11);
92 caleb_reg_write(CALEB_PHY1TX, 0x11);
93
94 /*
95 * Program SPI-3 burst size.
96 */
97 caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
98// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
99// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
100}
101
102void caleb_enable_rx(int port)
103{
104 u8 temp;
105
106 temp = caleb_reg_read(CALEB_PORTEN);
107 temp |= 1 << port;
108 caleb_reg_write(CALEB_PORTEN, temp);
109}
110
111void caleb_disable_rx(int port)
112{
113 u8 temp;
114
115 temp = caleb_reg_read(CALEB_PORTEN);
116 temp &= ~(1 << port);
117 caleb_reg_write(CALEB_PORTEN, temp);
118}
119
120void caleb_enable_tx(int port)
121{
122 u8 temp;
123
124 temp = caleb_reg_read(CALEB_PORTEN);
125 temp |= 1 << (port + 4);
126 caleb_reg_write(CALEB_PORTEN, temp);
127}
128
129void caleb_disable_tx(int port)
130{
131 u8 temp;
132
133 temp = caleb_reg_read(CALEB_PORTEN);
134 temp &= ~(1 << (port + 4));
135 caleb_reg_write(CALEB_PORTEN, temp);
136}
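The PORTEN handling above implies the following bit layout (a reading of the helpers, not a datasheet excerpt): bit N enables RX on port N, and bit N+4 enables TX on port N. A sketch of equivalent helper macros, with hypothetical names:

    /* Sketch of the CALEB_PORTEN layout implied by caleb_enable_rx()/
     * caleb_enable_tx() above; macro names are illustrative. */
    #define CALEB_PORTEN_RX(port)   (1 << (port))
    #define CALEB_PORTEN_TX(port)   (1 << ((port) + 4))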
diff --git a/drivers/net/ixp2000/caleb.h b/drivers/net/ixp2000/caleb.h
new file mode 100644
index 000000000000..e93a1ef5b8a3
--- /dev/null
+++ b/drivers/net/ixp2000/caleb.h
@@ -0,0 +1,22 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __CALEB_H
13#define __CALEB_H
14
15void caleb_reset(void);
16void caleb_enable_rx(int port);
17void caleb_disable_rx(int port);
18void caleb_enable_tx(int port);
19void caleb_disable_tx(int port);
20
21
22#endif
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
new file mode 100644
index 000000000000..3262e70ede61
--- /dev/null
+++ b/drivers/net/ixp2000/enp2611.c
@@ -0,0 +1,238 @@
1/*
2 * IXP2400 MSF network device driver for the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/init.h>
18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h>
20#include <asm/mach-types.h>
21#include <asm/io.h>
22#include "ixpdev.h"
23#include "caleb.h"
24#include "ixp2400-msf.h"
25#include "pm3386.h"
26
27/***********************************************************************
28 * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
29 * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
30 * to the IXP2400.
31 *
32 * +-------------+
33 * SFP GBIC #0 ---+ | +---------+
34 * | PM3386 #0 +-------+ |
35 * SFP GBIC #1 ---+ | | "Caleb" | +---------+
36 * +-------------+ | | | |
37 * | SPI-3 +---------+ IXP2400 |
38 * +-------------+ | bridge | | |
39 * SFP GBIC #2 ---+ | | FPGA | +---------+
40 * | PM3386 #1 +-------+ |
41 * | | +---------+
42 * +-------------+
43 * ^ ^ ^
44 * | 1.25Gbaud | 104MHz | 104MHz
45 * | SERDES ea. | SPI-3 ea. | SPI-3
46 *
47 ***********************************************************************/
48static struct ixp2400_msf_parameters enp2611_msf_parameters =
49{
50 .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
51 IXP2400_RX_MODE_1x32 |
52 IXP2400_RX_MODE_MPHY |
53 IXP2400_RX_MODE_MPHY_32 |
54 IXP2400_RX_MODE_MPHY_POLLED_STATUS |
55 IXP2400_RX_MODE_MPHY_LEVEL3 |
56 IXP2400_RX_MODE_RBUF_SIZE_64,
57
58 .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
59
60 .rx_poll_ports = 3,
61
62 .rx_channel_mode = {
63 IXP2400_PORT_RX_MODE_MASTER |
64 IXP2400_PORT_RX_MODE_POS_PHY |
65 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
66 IXP2400_PORT_RX_MODE_ODD_PARITY |
67 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
68
69 IXP2400_PORT_RX_MODE_MASTER |
70 IXP2400_PORT_RX_MODE_POS_PHY |
71 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
72 IXP2400_PORT_RX_MODE_ODD_PARITY |
73 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
74
75 IXP2400_PORT_RX_MODE_MASTER |
76 IXP2400_PORT_RX_MODE_POS_PHY |
77 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
78 IXP2400_PORT_RX_MODE_ODD_PARITY |
79 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
80
81 IXP2400_PORT_RX_MODE_MASTER |
82 IXP2400_PORT_RX_MODE_POS_PHY |
83 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
84 IXP2400_PORT_RX_MODE_ODD_PARITY |
85 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
86 },
87
88 .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
89 IXP2400_TX_MODE_1x32 |
90 IXP2400_TX_MODE_MPHY |
91 IXP2400_TX_MODE_MPHY_32 |
92 IXP2400_TX_MODE_MPHY_POLLED_STATUS |
93 IXP2400_TX_MODE_MPHY_LEVEL3 |
94 IXP2400_TX_MODE_TBUF_SIZE_64,
95
96 .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
97
98 .tx_poll_ports = 3,
99
100 .tx_channel_mode = {
101 IXP2400_PORT_TX_MODE_MASTER |
102 IXP2400_PORT_TX_MODE_POS_PHY |
103 IXP2400_PORT_TX_MODE_ODD_PARITY |
104 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
105
106 IXP2400_PORT_TX_MODE_MASTER |
107 IXP2400_PORT_TX_MODE_POS_PHY |
108 IXP2400_PORT_TX_MODE_ODD_PARITY |
109 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
110
111 IXP2400_PORT_TX_MODE_MASTER |
112 IXP2400_PORT_TX_MODE_POS_PHY |
113 IXP2400_PORT_TX_MODE_ODD_PARITY |
114 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
115
116 IXP2400_PORT_TX_MODE_MASTER |
117 IXP2400_PORT_TX_MODE_POS_PHY |
118 IXP2400_PORT_TX_MODE_ODD_PARITY |
119 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
120 }
121};
122
123struct enp2611_ixpdev_priv
124{
125 struct ixpdev_priv ixpdev_priv;
126 struct net_device_stats stats;
127};
128
129static struct net_device *nds[3];
130static struct timer_list link_check_timer;
131
132static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
133{
134 struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
135
136 pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
137
138 return &(ip->stats);
139}
140
141/* @@@ Poll the SFP moddef0 line too. */
142/* @@@ Try to use the pm3386 DOOL interrupt as well. */
143static void enp2611_check_link_status(unsigned long __dummy)
144{
145 int i;
146
147 for (i = 0; i < 3; i++) {
148 struct net_device *dev;
149 int status;
150
151 if (!netif_running(nds[i]))
152 continue;
153
154 dev = nds[i];
155
156 status = pm3386_is_link_up(i);
157 if (status && !netif_carrier_ok(nds[i])) {
158 pm3386_enable_tx(i);
159 caleb_enable_tx(i);
160 netif_carrier_on(nds[i]);
161 } else if (!status && netif_carrier_ok(nds[i])) {
162 netif_carrier_off(nds[i]);
163 caleb_disable_tx(i);
164 pm3386_disable_tx(i);
165 }
166 }
167
168 link_check_timer.expires = jiffies + HZ / 10;
169 add_timer(&link_check_timer);
170}
171
172static void enp2611_set_port_admin_status(int port, int up)
173{
174 if (up) {
175 caleb_enable_rx(port);
176 pm3386_enable_rx(port);
177 } else {
178 caleb_disable_tx(port);
179 pm3386_disable_tx(port);
180 pm3386_disable_rx(port);
181 caleb_disable_rx(port);
182 }
183}
184
185static int __init enp2611_init_module(void)
186{
187 int i;
188
189 if (!machine_is_enp2611())
190 return -ENODEV;
191
192 caleb_reset();
193 pm3386_reset();
194
195 for (i = 0; i < 3; i++) {
196 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
197 if (nds[i] == NULL) {
198 while (--i >= 0)
199 free_netdev(nds[i]);
200 return -ENOMEM;
201 }
202
203 SET_MODULE_OWNER(nds[i]);
204 nds[i]->get_stats = enp2611_get_stats;
205 pm3386_init_port(i);
206 pm3386_get_mac(i, nds[i]->dev_addr);
207 }
208
209 ixp2400_msf_init(&enp2611_msf_parameters);
210
211 if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
212 for (i = 0; i < 3; i++)
213 free_netdev(nds[i]);
214 return -EINVAL;
215 }
216
217 init_timer(&link_check_timer);
218 link_check_timer.function = enp2611_check_link_status;
219 link_check_timer.expires = jiffies;
220 add_timer(&link_check_timer);
221
222 return 0;
223}
224
225static void __exit enp2611_cleanup_module(void)
226{
227 int i;
228
229 del_timer_sync(&link_check_timer);
230
231 ixpdev_deinit();
232 for (i = 0; i < 3; i++)
233 free_netdev(nds[i]);
234}
235
236module_init(enp2611_init_module);
237module_exit(enp2611_cleanup_module);
238MODULE_LICENSE("GPL");
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ixp2000/ixp2400-msf.c
new file mode 100644
index 000000000000..48a3a891d3a4
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400-msf.c
@@ -0,0 +1,213 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <asm/hardware.h>
18#include <asm/arch/ixp2000-regs.h>
19#include <asm/delay.h>
20#include <asm/io.h>
21#include "ixp2400-msf.h"
22
23/*
24 * This is the Intel recommended PLL init procedure as described on
25 * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
26 */
27static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
28{
29 int rx_dual_clock;
30 int tx_dual_clock;
31 u32 value;
32
33 /*
34 * If the RX mode is not 1x32, we have to enable both RX PLLs
35 * (#0 and #1). The same goes for the TX direction.
36 */
37 rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
38 tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
39
40 /*
41 * Read initial value.
42 */
43 value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
44
45 /*
46 * Put PLLs in powerdown and bypass mode.
47 */
48 value |= 0x0000f0f0;
49 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
50
51 /*
52 * Set single or dual clock mode bits.
53 */
54 value &= ~0x03000000;
55 value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
56
57 /*
58 * Set multipliers.
59 */
60 value &= ~0x00ff0000;
61 value |= mp->rxclk01_multiplier << 16;
62 value |= mp->rxclk23_multiplier << 18;
63 value |= mp->txclk01_multiplier << 20;
64 value |= mp->txclk23_multiplier << 22;
65
66 /*
67 * And write value.
68 */
69 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
70
71 /*
72 * Disable PLL bypass mode.
73 */
74 value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
75 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
76
77 /*
78 * Turn on PLLs.
79 */
80 value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
81 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
82
83 /*
84 * Wait for PLLs to lock. There are lock status bits, but IXP2400
85 * erratum #65 says that these lock bits should not be relied upon
86 * as they might not accurately reflect the true state of the PLLs.
87 */
88 udelay(100);
89}
90
91/*
92 * Needed according to p480 of the Programmer's Reference Manual.
93 */
94static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
95{
96 int size_bits;
97 int i;
98
99 /*
100 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
101 * corruption) in the Intel-recommended way: do not add the RBUF
102 * elements susceptible to corruption to the freelist.
103 */
104 size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
105 if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
106 for (i = 1; i < 128; i++) {
107 if (i == 9 || i == 18 || i == 27)
108 continue;
109 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
110 }
111 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
112 for (i = 1; i < 64; i++) {
113 if (i == 4 || i == 9 || i == 13)
114 continue;
115 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
116 }
117 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
118 for (i = 1; i < 32; i++) {
119 if (i == 2 || i == 4 || i == 6)
120 continue;
121 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
122 }
123 }
124}
125
126static u32 ixp2400_msf_valid_channels(u32 reg)
127{
128 u32 channels;
129
130 channels = 0;
131 switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
132 case IXP2400_RX_MODE_1x32:
133 channels = 0x1;
134 if (reg & IXP2400_RX_MODE_MPHY &&
135 !(reg & IXP2400_RX_MODE_MPHY_32))
136 channels = 0xf;
137 break;
138
139 case IXP2400_RX_MODE_2x16:
140 channels = 0x5;
141 break;
142
143 case IXP2400_RX_MODE_4x8:
144 channels = 0xf;
145 break;
146
147 case IXP2400_RX_MODE_1x16_2x8:
148 channels = 0xd;
149 break;
150 }
151
152 return channels;
153}
154
155static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
156{
157 u32 value;
158
159 value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
160 value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
161 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
162}
163
164static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
165{
166 u32 value;
167
168 value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
169 value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
170 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
171}
172
173
174void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
175{
176 u32 value;
177 int i;
178
179 /*
180 * Init the RX/TX PLLs based on the passed parameter block.
181 */
182 ixp2400_pll_init(mp);
183
184 /*
185 * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
186 */
187 value = ixp2000_reg_read(IXP2000_RESET0);
188 ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
189 ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
190
191 /*
192 * Initialise the RX section.
193 */
194 ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
195 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
196 for (i = 0; i < 4; i++) {
197 ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
198 mp->rx_channel_mode[i]);
199 }
200 ixp2400_msf_free_rbuf_entries(mp);
201 ixp2400_msf_enable_rx(mp);
202
203 /*
204 * Initialise the TX section.
205 */
206 ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
207 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
208 for (i = 0; i < 4; i++) {
209 ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
210 mp->tx_channel_mode[i]);
211 }
212 ixp2400_msf_enable_tx(mp);
213}
diff --git a/drivers/net/ixp2000/ixp2400-msf.h b/drivers/net/ixp2000/ixp2400-msf.h
new file mode 100644
index 000000000000..3ac1af2771da
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400-msf.h
@@ -0,0 +1,115 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#ifndef __IXP2400_MSF_H
15#define __IXP2400_MSF_H
16
17struct ixp2400_msf_parameters
18{
19 u32 rx_mode;
20 unsigned rxclk01_multiplier:2;
21 unsigned rxclk23_multiplier:2;
22 unsigned rx_poll_ports:6;
23 u32 rx_channel_mode[4];
24
25 u32 tx_mode;
26 unsigned txclk01_multiplier:2;
27 unsigned txclk23_multiplier:2;
28 unsigned tx_poll_ports:6;
29 u32 tx_channel_mode[4];
30};
31
32void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
33
34#define IXP2400_PLL_MULTIPLIER_48 0x00
35#define IXP2400_PLL_MULTIPLIER_24 0x01
36#define IXP2400_PLL_MULTIPLIER_16 0x02
37#define IXP2400_PLL_MULTIPLIER_12 0x03
38
39#define IXP2400_RX_MODE_CSIX 0x00400000
40#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
41#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
42#define IXP2400_RX_MODE_1x16_2x8 0x00300000
43#define IXP2400_RX_MODE_4x8 0x00200000
44#define IXP2400_RX_MODE_2x16 0x00100000
45#define IXP2400_RX_MODE_1x32 0x00000000
46#define IXP2400_RX_MODE_MPHY 0x00080000
47#define IXP2400_RX_MODE_SPHY 0x00000000
48#define IXP2400_RX_MODE_MPHY_32 0x00040000
49#define IXP2400_RX_MODE_MPHY_4 0x00000000
50#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
51#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
52#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
53#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
54#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
55#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
56#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
57#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
58#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
59#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
60#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
61#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
62#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
63#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
64
65#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
66#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
67#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
68#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
69#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
70#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
71#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
72#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
73#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
74#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
75#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
76#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
77#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
78
79#define IXP2400_TX_MODE_CSIX 0x00400000
80#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
81#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
82#define IXP2400_TX_MODE_1x16_2x8 0x00300000
83#define IXP2400_TX_MODE_4x8 0x00200000
84#define IXP2400_TX_MODE_2x16 0x00100000
85#define IXP2400_TX_MODE_1x32 0x00000000
86#define IXP2400_TX_MODE_MPHY 0x00080000
87#define IXP2400_TX_MODE_SPHY 0x00000000
88#define IXP2400_TX_MODE_MPHY_32 0x00040000
89#define IXP2400_TX_MODE_MPHY_4 0x00000000
90#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
91#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
92#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
93#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
94#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
95#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
96#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
97#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
98#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
99#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
100#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
101#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
102
103#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
104#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
105#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
106#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
107#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
108#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
109#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
110#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
111#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
112#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
113
114
115#endif
diff --git a/drivers/net/ixp2000/ixp2400_rx.uc b/drivers/net/ixp2000/ixp2400_rx.uc
new file mode 100644
index 000000000000..42a73e357afa
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_rx.uc
@@ -0,0 +1,408 @@
1/*
2 * RX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one full element list is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The RBUF uses 64-byte mpackets.
17 * - RX descriptors reside in SRAM, and have the following format:
18 * struct rx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 buf_length;
23 *
24 * // from uengine
25 * u32 channel;
26 * u32 pkt_length;
27 * };
28 * - Packet data resides in DRAM.
29 * - Packet buffer addresses are 8-byte aligned.
30 * - Scratch ring 0 is rx_pending.
31 * - Scratch ring 1 is rx_done, and has status condition 'full'.
32 * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
33 * - This code is run on all eight threads of the microengine it runs on.
34 *
35 * Local memory is used for per-channel RX state.
36 */
37
38#define RX_THREAD_FREELIST_0 0x0030
39#define RBUF_ELEMENT_DONE 0x0044
40
41#define CHANNEL_FLAGS *l$index0[0]
42#define CHANNEL_FLAG_RECEIVING 1
43#define PACKET_LENGTH *l$index0[1]
44#define PACKET_CHECKSUM *l$index0[2]
45#define BUFFER_HANDLE *l$index0[3]
46#define BUFFER_START *l$index0[4]
47#define BUFFER_LENGTH *l$index0[5]
48
49#define CHANNEL_STATE_SIZE 24 // in bytes
50#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
51
52
53 .sig volatile sig1
54 .sig volatile sig2
55 .sig volatile sig3
56
57 .sig mpacket_arrived
58 .reg add_to_rx_freelist
59 .reg read $rsw0, $rsw1
60 .xfer_order $rsw0 $rsw1
61
62 .reg zero
63
64 /*
65 * Initialise add_to_rx_freelist.
66 */
67 .begin
68 .reg temp
69 .reg temp2
70
71 immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
72 immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
73
74 local_csr_rd[ACTIVE_CTX_STS]
75 immed[temp, 0]
76 alu[temp2, temp, and, 0x1f]
77 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
78 alu[temp2, temp, and, 0x80]
79 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
80 .end
81
82 immed[zero, 0]
83
84 /*
85 * Skip context 0 initialisation?
86 */
87 .begin
88 br!=ctx[0, mpacket_receive_loop#]
89 .end
90
91 /*
92 * Initialise local memory.
93 */
94 .begin
95 .reg addr
96 .reg temp
97
98 immed[temp, 0]
99 init_local_mem_loop#:
100 alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
101 local_csr_wr[ACTIVE_LM_ADDR_0, addr]
102 nop
103 nop
104 nop
105
106 immed[CHANNEL_FLAGS, 0]
107
108 alu[temp, temp, +, 1]
109 alu[--, temp, and, 0x20]
110 beq[init_local_mem_loop#]
111 .end
112
113 /*
114 * Initialise signal pipeline.
115 */
116 .begin
117 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
118 .set_sig sig1
119
120 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
121 .set_sig sig2
122
123 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
124 .set_sig sig3
125 .end
126
127mpacket_receive_loop#:
128 /*
129 * Synchronise and wait for mpacket.
130 */
131 .begin
132 ctx_arb[sig1]
133 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
134
135 msf[fast_wr, --, add_to_rx_freelist, 0]
136 .set_sig mpacket_arrived
137 ctx_arb[mpacket_arrived]
138 .set $rsw0 $rsw1
139 .end
140
141 /*
142 * We halt if we see {inbparerr,parerr,null,soperror}.
143 */
144 .begin
145 alu_shf[--, 0x1b, and, $rsw0, >>8]
146 bne[abort_rswerr#]
147 .end
148
149 /*
150 * Point local memory pointer to this channel's state area.
151 */
152 .begin
153 .reg chanaddr
154
155 alu[chanaddr, $rsw0, and, 0x1f]
156 alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
157 local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
158 nop
159 nop
160 nop
161 .end
162
163 /*
164 * Check whether we received a SOP mpacket while we were already
165 * working on a packet, or a non-SOP mpacket while there was no
166 * packet pending. (SOP == RECEIVING -> abort) If everything's
167 * okay, update the RECEIVING flag to reflect our new state.
168 */
169 .begin
170 .reg temp
171 .reg eop
172
173 #if CHANNEL_FLAG_RECEIVING != 1
174 #error CHANNEL_FLAG_RECEIVING is not 1
175 #endif
176
177 alu_shf[temp, 1, and, $rsw0, >>15]
178 alu[temp, temp, xor, CHANNEL_FLAGS]
179 alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
180 beq[abort_proterr#]
181
182 alu_shf[eop, 1, and, $rsw0, >>14]
183 alu[CHANNEL_FLAGS, temp, xor, eop]
184 .end
185
186 /*
187 * Copy the mpacket into the right spot, and in case of EOP,
188 * write back the descriptor and pass the packet on.
189 */
190 .begin
191 .reg buffer_offset
192 .reg _packet_length
193 .reg _packet_checksum
194 .reg _buffer_handle
195 .reg _buffer_start
196 .reg _buffer_length
197
198 /*
199 * Determine buffer_offset, _packet_length and
200 * _packet_checksum.
201 */
202 .begin
203 .reg temp
204
205 alu[--, 1, and, $rsw0, >>15]
206 beq[not_sop#]
207
208 immed[PACKET_LENGTH, 0]
209 immed[PACKET_CHECKSUM, 0]
210
211 not_sop#:
212 alu[buffer_offset, --, b, PACKET_LENGTH]
213 alu_shf[temp, 0xff, and, $rsw0, >>16]
214 alu[_packet_length, buffer_offset, +, temp]
215 alu[PACKET_LENGTH, --, b, _packet_length]
216
217 immed[temp, 0xffff]
218 alu[temp, $rsw1, and, temp]
219 alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
220 alu[PACKET_CHECKSUM, --, b, _packet_checksum]
221 .end
222
223 /*
224 * Allocate buffer in case of SOP.
225 */
226 .begin
227 .reg temp
228
229 alu[temp, 1, and, $rsw0, >>15]
230 beq[skip_buffer_alloc#]
231
232 .begin
233 .sig zzz
234 .reg read $stemp $stemp2
235 .xfer_order $stemp $stemp2
236
237 rx_nobufs#:
238 scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
239 alu[_buffer_handle, --, b, $stemp]
240 beq[rx_nobufs#]
241
242 sram[read, $stemp, _buffer_handle, 0, 2],
243 ctx_swap[zzz]
244 alu[_buffer_start, --, b, $stemp]
245 alu[_buffer_length, --, b, $stemp2]
246 .end
247
248 skip_buffer_alloc#:
249 .end
250
251 /*
252 * Resynchronise.
253 */
254 .begin
255 ctx_arb[sig2]
256 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
257 .end
258
259 /*
260 * Synchronise buffer state.
261 */
262 .begin
263 .reg temp
264
265 alu[temp, 1, and, $rsw0, >>15]
266 beq[copy_from_local_mem#]
267
268 alu[BUFFER_HANDLE, --, b, _buffer_handle]
269 alu[BUFFER_START, --, b, _buffer_start]
270 alu[BUFFER_LENGTH, --, b, _buffer_length]
271 br[sync_state_done#]
272
273 copy_from_local_mem#:
274 alu[_buffer_handle, --, b, BUFFER_HANDLE]
275 alu[_buffer_start, --, b, BUFFER_START]
276 alu[_buffer_length, --, b, BUFFER_LENGTH]
277
278 sync_state_done#:
279 .end
280
281#if 0
282 /*
283 * Debug buffer state management.
284 */
285 .begin
286 .reg temp
287
288 alu[temp, 1, and, $rsw0, >>14]
289 beq[no_poison#]
290 immed[BUFFER_HANDLE, 0xdead]
291 immed[BUFFER_START, 0xdead]
292 immed[BUFFER_LENGTH, 0xdead]
293 no_poison#:
294
295 immed[temp, 0xdead]
296 alu[--, _buffer_handle, -, temp]
297 beq[state_corrupted#]
298 alu[--, _buffer_start, -, temp]
299 beq[state_corrupted#]
300 alu[--, _buffer_length, -, temp]
301 beq[state_corrupted#]
302 .end
303#endif
304
305 /*
306 * Check buffer length.
307 */
308 .begin
309 alu[--, _buffer_length, -, _packet_length]
310 blo[buffer_overflow#]
311 .end
312
313 /*
314 * Copy the mpacket and give back the RBUF element.
315 */
316 .begin
317 .reg element
318 .reg xfer_size
319 .reg temp
320 .sig copy_sig
321
322 alu_shf[element, 0x7f, and, $rsw0, >>24]
323 alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
324
325 alu[xfer_size, xfer_size, -, 1]
326 alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
327 alu_shf[temp, 0x10, or, xfer_size, <<21]
328 alu_shf[temp, temp, or, element, <<11]
329 alu_shf[--, temp, or, 1, <<18]
330
331 dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
332 indirect_ref, sig_done[copy_sig]
333 ctx_arb[copy_sig]
334
335 alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
336 msf[fast_wr, --, temp, 0]
337 .end
338
339 /*
340 * If EOP, write back the packet descriptor.
341 */
342 .begin
343 .reg write $stemp $stemp2
344 .xfer_order $stemp $stemp2
345 .sig zzz
346
347 alu_shf[--, 1, and, $rsw0, >>14]
348 beq[no_writeback#]
349
350 alu[$stemp, $rsw0, and, 0x1f]
351 alu[$stemp2, --, b, _packet_length]
352 sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
353
354 no_writeback#:
355 .end
356
357 /*
358 * Resynchronise.
359 */
360 .begin
361 ctx_arb[sig3]
362 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
363 .end
364
365 /*
366 * If EOP, put the buffer back onto the scratch ring.
367 */
368 .begin
369 .reg write $stemp
370 .sig zzz
371
372 br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
373
374 alu_shf[--, 1, and, $rsw0, >>14]
375 beq[mpacket_receive_loop#]
376
377 alu[--, 1, and, $rsw0, >>10]
378 bne[rxerr#]
379
380 alu[$stemp, --, b, _buffer_handle]
381 scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
382 cap[fast_wr, 0, XSCALE_INT_A]
383 br[mpacket_receive_loop#]
384
385 rxerr#:
386 alu[$stemp, --, b, _buffer_handle]
387 scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
388 br[mpacket_receive_loop#]
389 .end
390 .end
391
392
393abort_rswerr#:
394 halt
395
396abort_proterr#:
397 halt
398
399state_corrupted#:
400 halt
401
402buffer_overflow#:
403 halt
404
405rx_done_ring_overflow#:
406 halt
407
408
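On the host side, the descriptor format assumed by this RX microcode (see the header comment above) corresponds to a C structure along these lines; the struct and field names follow that comment and are otherwise illustrative, since the driver's own definition lives in ixpdev_priv.h, which is not reproduced here.

    /* Sketch of the RX descriptor layout described in the comment at the
     * top of ixp2400_rx.uc; uses the kernel's u32 from <linux/types.h>. */
    struct ixp2400_rx_desc_sketch {
            /* filled in by the host, consumed by the microengine */
            u32 buf_phys_addr;      /* 8-byte aligned DRAM address */
            u32 buf_length;

            /* written back by the microengine on EOP */
            u32 channel;
            u32 pkt_length;
    };

Free descriptors are handed to the microengine through scratch ring 0 (rx_pending) and completed ones are returned on scratch ring 1 (rx_done); the microcode asserts XSCALE_INT_A after queueing a completed descriptor, and the host is expected to drain rx_done and refill rx_pending from its interrupt handler.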
diff --git a/drivers/net/ixp2000/ixp2400_rx.ucode b/drivers/net/ixp2000/ixp2400_rx.ucode
new file mode 100644
index 000000000000..e8aee2f81aad
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_rx.ucode
@@ -0,0 +1,130 @@
1static struct ixp2000_uengine_code ixp2400_rx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 109,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x0c, 0xc0, 0x05,
21 0xf4, 0x44, 0x0c, 0x00, 0x05,
22 0xfc, 0x04, 0x4c, 0x00, 0x00,
23 0xf0, 0x00, 0x00, 0x3b, 0x00,
24 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
25 0x8a, 0xc0, 0x50, 0x3e, 0x05,
26 0xb4, 0x40, 0xf0, 0x3b, 0x80,
27 0x9a, 0xe0, 0x00, 0x3e, 0x05,
28 0xf0, 0x00, 0x00, 0x07, 0x00,
29 0xd8, 0x05, 0xc0, 0x00, 0x11,
30 0xf0, 0x00, 0x00, 0x0f, 0x00,
31 0x91, 0xb0, 0x20, 0x0e, 0x00,
32 0xfc, 0x06, 0x60, 0x0b, 0x00,
33 0xf0, 0x00, 0x0c, 0x03, 0x00,
34 0xf0, 0x00, 0x0c, 0x03, 0x00,
35 0xf0, 0x00, 0x0c, 0x03, 0x00,
36 0xf0, 0x00, 0x0c, 0x02, 0x00,
37 0xb0, 0xc0, 0x30, 0x0f, 0x01,
38 0xa4, 0x70, 0x00, 0x0f, 0x20,
39 0xd8, 0x02, 0xc0, 0x01, 0x00,
40 0xfc, 0x10, 0xac, 0x23, 0x08,
41 0xfc, 0x10, 0xac, 0x43, 0x10,
42 0xfc, 0x10, 0xac, 0x63, 0x18,
43 0xe0, 0x00, 0x00, 0x00, 0x02,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0x3d, 0x00, 0x04, 0x03, 0x20,
46 0xe0, 0x00, 0x00, 0x00, 0x10,
47 0x84, 0x82, 0x02, 0x01, 0x3b,
48 0xd8, 0x1a, 0x00, 0x01, 0x01,
49 0xb4, 0x00, 0x8c, 0x7d, 0x80,
50 0x91, 0xb0, 0x80, 0x22, 0x00,
51 0xfc, 0x06, 0x60, 0x23, 0x00,
52 0xf0, 0x00, 0x0c, 0x03, 0x00,
53 0xf0, 0x00, 0x0c, 0x03, 0x00,
54 0xf0, 0x00, 0x0c, 0x03, 0x00,
55 0x94, 0xf0, 0x92, 0x01, 0x21,
56 0xac, 0x40, 0x60, 0x26, 0x00,
57 0xa4, 0x30, 0x0c, 0x04, 0x06,
58 0xd8, 0x1a, 0x40, 0x01, 0x00,
59 0x94, 0xe0, 0xa2, 0x01, 0x21,
60 0xac, 0x20, 0x00, 0x28, 0x06,
61 0x84, 0xf2, 0x02, 0x01, 0x21,
62 0xd8, 0x0b, 0x40, 0x01, 0x00,
63 0xf0, 0x00, 0x0c, 0x02, 0x01,
64 0xf0, 0x00, 0x0c, 0x02, 0x02,
65 0xa0, 0x00, 0x08, 0x04, 0x00,
66 0x95, 0x00, 0xc6, 0x01, 0xff,
67 0xa0, 0x80, 0x10, 0x30, 0x00,
68 0xa0, 0x60, 0x1c, 0x00, 0x01,
69 0xf0, 0x0f, 0xf0, 0x33, 0xff,
70 0xb4, 0x00, 0xc0, 0x31, 0x81,
71 0xb0, 0x80, 0xb0, 0x32, 0x02,
72 0xa0, 0x20, 0x20, 0x2c, 0x00,
73 0x94, 0xf0, 0xd2, 0x01, 0x21,
74 0xd8, 0x0f, 0x40, 0x01, 0x00,
75 0x19, 0x40, 0x10, 0x04, 0x20,
76 0xa0, 0x00, 0x26, 0x04, 0x00,
77 0xd8, 0x0d, 0xc0, 0x01, 0x00,
78 0x00, 0x42, 0x10, 0x80, 0x02,
79 0xb0, 0x00, 0x46, 0x04, 0x00,
80 0xb0, 0x00, 0x56, 0x08, 0x00,
81 0xe0, 0x00, 0x00, 0x00, 0x04,
82 0xfc, 0x10, 0xae, 0x43, 0x90,
83 0x84, 0xf0, 0x32, 0x01, 0x21,
84 0xd8, 0x11, 0x40, 0x01, 0x00,
85 0xa0, 0x60, 0x3c, 0x00, 0x02,
86 0xa0, 0x20, 0x40, 0x10, 0x00,
87 0xa0, 0x20, 0x50, 0x14, 0x00,
88 0xd8, 0x12, 0x00, 0x00, 0x18,
89 0xa0, 0x00, 0x28, 0x0c, 0x00,
90 0xb0, 0x00, 0x48, 0x10, 0x00,
91 0xb0, 0x00, 0x58, 0x14, 0x00,
92 0xaa, 0xf0, 0x00, 0x14, 0x01,
93 0xd8, 0x1a, 0xc0, 0x01, 0x05,
94 0x85, 0x80, 0x42, 0x01, 0xff,
95 0x95, 0x00, 0x66, 0x01, 0xff,
96 0xba, 0xc0, 0x60, 0x1b, 0x01,
97 0x9a, 0x30, 0x60, 0x19, 0x30,
98 0x9a, 0xb0, 0x70, 0x1a, 0x30,
99 0x9b, 0x50, 0x78, 0x1e, 0x04,
100 0x8a, 0xe2, 0x08, 0x1e, 0x21,
101 0x6a, 0x4e, 0x00, 0x13, 0x00,
102 0xe0, 0x00, 0x00, 0x00, 0x30,
103 0x9b, 0x00, 0x7a, 0x92, 0x04,
104 0x3d, 0x00, 0x04, 0x1f, 0x20,
105 0x84, 0xe2, 0x02, 0x01, 0x21,
106 0xd8, 0x16, 0x80, 0x01, 0x00,
107 0xa4, 0x18, 0x0c, 0x7d, 0x80,
108 0xa0, 0x58, 0x1c, 0x00, 0x01,
109 0x01, 0x42, 0x00, 0xa0, 0x02,
110 0xe0, 0x00, 0x00, 0x00, 0x08,
111 0xfc, 0x10, 0xae, 0x63, 0x98,
112 0xd8, 0x1b, 0x00, 0xc2, 0x14,
113 0x84, 0xe2, 0x02, 0x01, 0x21,
114 0xd8, 0x05, 0xc0, 0x01, 0x00,
115 0x84, 0xa2, 0x02, 0x01, 0x21,
116 0xd8, 0x19, 0x40, 0x01, 0x01,
117 0xa0, 0x58, 0x0c, 0x00, 0x02,
118 0x1a, 0x40, 0x00, 0x04, 0x24,
119 0x33, 0x00, 0x01, 0x2f, 0x20,
120 0xd8, 0x05, 0xc0, 0x00, 0x18,
121 0xa0, 0x58, 0x0c, 0x00, 0x02,
122 0x1a, 0x40, 0x00, 0x04, 0x20,
123 0xd8, 0x05, 0xc0, 0x00, 0x18,
124 0xe0, 0x00, 0x02, 0x00, 0x00,
125 0xe0, 0x00, 0x02, 0x00, 0x00,
126 0xe0, 0x00, 0x02, 0x00, 0x00,
127 0xe0, 0x00, 0x02, 0x00, 0x00,
128 0xe0, 0x00, 0x02, 0x00, 0x00,
129 }
130};
diff --git a/drivers/net/ixp2000/ixp2400_tx.uc b/drivers/net/ixp2000/ixp2400_tx.uc
new file mode 100644
index 000000000000..d090d1884fb7
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_tx.uc
@@ -0,0 +1,272 @@
1/*
2 * TX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one TBUF partition is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The TBUF uses 64-byte mpackets.
17 * - TX descriptors reside in SRAM, and have the following format:
18 * struct tx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 pkt_length;
23 * u32 channel;
24 * };
25 * - Packet data resides in DRAM.
26 * - Packet buffer addresses are 8-byte aligned.
27 * - Scratch ring 2 is tx_pending.
28 * - Scratch ring 3 is tx_done, and has status condition 'full'.
29 * - This code is run on all eight threads of the microengine it runs on.
30 */
31
32#define TX_SEQUENCE_0 0x0060
33#define TBUF_CTRL 0x1800
34
35#define PARTITION_SIZE 128
36#define PARTITION_THRESH 96
37
38
39 .sig volatile sig1
40 .sig volatile sig2
41 .sig volatile sig3
42
43 .reg @old_tx_seq_0
44 .reg @mpkts_in_flight
45 .reg @next_tbuf_mpacket
46
47 .reg @buffer_handle
48 .reg @buffer_start
49 .reg @packet_length
50 .reg @channel
51 .reg @packet_offset
52
53 .reg zero
54
55 immed[zero, 0]
56
57 /*
58 * Skip context 0 initialisation?
59 */
60 .begin
61 br!=ctx[0, mpacket_tx_loop#]
62 .end
63
64 /*
65 * Wait until all pending TBUF elements have been transmitted.
66 */
67 .begin
68 .reg read $tx
69 .sig zzz
70
71 loop_empty#:
72 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
73 alu_shf[--, --, b, $tx, >>31]
74 beq[loop_empty#]
75
76 alu[@old_tx_seq_0, --, b, $tx]
77 .end
78
79 immed[@mpkts_in_flight, 0]
80 alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
81
82 immed[@buffer_handle, 0]
83
84 /*
85 * Initialise signal pipeline.
86 */
87 .begin
88 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
89 .set_sig sig1
90
91 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
92 .set_sig sig2
93
94 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
95 .set_sig sig3
96 .end
97
98mpacket_tx_loop#:
99 .begin
100 .reg tbuf_element_index
101 .reg buffer_handle
102 .reg sop_eop
103 .reg packet_data
104 .reg channel
105 .reg mpacket_size
106
107 /*
108 * If there is no packet currently being transmitted,
109 * dequeue the next TX descriptor, and fetch the buffer
110 * address, packet length and destination channel number.
111 */
112 .begin
113 .reg read $stemp $stemp2 $stemp3
114 .xfer_order $stemp $stemp2 $stemp3
115 .sig zzz
116
117 ctx_arb[sig1]
118
119 alu[--, --, b, @buffer_handle]
120 bne[already_got_packet#]
121
122 tx_nobufs#:
123 scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
124 alu[@buffer_handle, --, b, $stemp]
125 beq[tx_nobufs#]
126
127 sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
128 alu[@buffer_start, --, b, $stemp]
129 alu[@packet_length, --, b, $stemp2]
130 beq[zero_byte_packet#]
131 alu[@channel, --, b, $stemp3]
132 immed[@packet_offset, 0]
133
134 already_got_packet#:
135 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
136 .end
137
138 /*
139 * Determine tbuf element index, SOP/EOP flags, mpacket
140 * offset and mpacket size and cache buffer_handle and
141 * channel number.
142 */
143 .begin
144 alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
145 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
146 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
147 (PARTITION_SIZE - 1)]
148
149 alu[buffer_handle, --, b, @buffer_handle]
150 immed[@buffer_handle, 0]
151
152 immed[sop_eop, 1]
153
154 alu[packet_data, --, b, @packet_offset]
155 bne[no_sop#]
156 alu[sop_eop, sop_eop, or, 2]
157 no_sop#:
158 alu[packet_data, packet_data, +, @buffer_start]
159
160 alu[channel, --, b, @channel]
161
162 alu[mpacket_size, @packet_length, -, @packet_offset]
163 alu[--, 64, -, mpacket_size]
164 bhs[eop#]
165 alu[@buffer_handle, --, b, buffer_handle]
166 immed[mpacket_size, 64]
167 alu[sop_eop, sop_eop, and, 2]
168 eop#:
169
170 alu[@packet_offset, @packet_offset, +, mpacket_size]
171 .end
172
173 /*
174 * Wait until there's enough space in the TBUF.
175 */
176 .begin
177 .reg read $tx
178 .reg temp
179 .sig zzz
180
181 ctx_arb[sig2]
182
183 br[test_space#]
184
185 loop_space#:
186 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
187
188 alu[temp, $tx, -, @old_tx_seq_0]
189 alu[temp, temp, and, 0xff]
190 alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
191
192 alu[@old_tx_seq_0, --, b, $tx]
193
194 test_space#:
195 alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
196 blo[loop_space#]
197
198 alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
199
200 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
201 .end
202
203 /*
204 * Copy the packet data to the TBUF.
205 */
206 .begin
207 .reg temp
208 .sig copy_sig
209
210 alu[temp, mpacket_size, -, 1]
211 alu_shf[temp, 0x10, or, temp, >>3]
212 alu_shf[temp, 0x10, or, temp, <<21]
213 alu_shf[temp, temp, or, tbuf_element_index, <<11]
214 alu_shf[--, temp, or, 1, <<18]
215
216 dram[tbuf_wr, --, packet_data, 0, max_8],
217 indirect_ref, sig_done[copy_sig]
218 ctx_arb[copy_sig]
219 .end
220
221 /*
222 * Mark TBUF element as ready-to-be-transmitted.
223 */
224 .begin
225 .reg write $tsw $tsw2
226 .xfer_order $tsw $tsw2
227 .reg temp
228 .sig zzz
229
230 alu_shf[temp, channel, or, mpacket_size, <<24]
231 alu_shf[$tsw, temp, or, sop_eop, <<8]
232 immed[$tsw2, 0]
233
234 immed[temp, TBUF_CTRL]
235 alu_shf[temp, temp, or, tbuf_element_index, <<3]
236 msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
237 .end
238
239 /*
240 * Resynchronise.
241 */
242 .begin
243 ctx_arb[sig3]
244 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
245 .end
246
247 /*
248 * If this was an EOP mpacket, recycle the TX buffer
249 * and signal the host.
250 */
251 .begin
252 .reg write $stemp
253 .sig zzz
254
255 alu[--, sop_eop, and, 1]
256 beq[mpacket_tx_loop#]
257
258 tx_done_ring_full#:
259 br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
260
261 alu[$stemp, --, b, buffer_handle]
262 scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
263 cap[fast_wr, 0, XSCALE_INT_A]
264 br[mpacket_tx_loop#]
265 .end
266 .end
267
268
269zero_byte_packet#:
270 halt
271
272
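The TX path uses the mirror-image descriptor layout from this file's header comment; again a sketch with illustrative names (the real definition is in ixpdev_priv.h):

    /* Sketch of the TX descriptor layout described in the comment at the
     * top of ixp2400_tx.uc. */
    struct ixp2400_tx_desc_sketch {
            u32 buf_phys_addr;      /* 8-byte aligned DRAM address */
            u32 pkt_length;
            u32 channel;
    };

Descriptors are queued by the host on scratch ring 2 (tx_pending; cf. ixpdev_xmit() later in this patch, which writes descriptor offsets to RING_TX_PENDING) and recycled on scratch ring 3 (tx_done) once the EOP mpacket has been pushed into the TBUF, with XSCALE_INT_A raised so the host can reclaim them.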
diff --git a/drivers/net/ixp2000/ixp2400_tx.ucode b/drivers/net/ixp2000/ixp2400_tx.ucode
new file mode 100644
index 000000000000..a433e24b0a51
--- /dev/null
+++ b/drivers/net/ixp2000/ixp2400_tx.ucode
@@ -0,0 +1,98 @@
1static struct ixp2000_uengine_code ixp2400_tx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 77,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x00, 0x07, 0x00,
21 0xd8, 0x03, 0x00, 0x00, 0x11,
22 0x3c, 0x40, 0x00, 0x04, 0xe0,
23 0x81, 0xf2, 0x02, 0x01, 0x00,
24 0xd8, 0x00, 0x80, 0x01, 0x00,
25 0xb0, 0x08, 0x06, 0x00, 0x00,
26 0xf0, 0x00, 0x0c, 0x00, 0x80,
27 0xb4, 0x49, 0x02, 0x03, 0x7f,
28 0xf0, 0x00, 0x02, 0x83, 0x00,
29 0xfc, 0x10, 0xac, 0x23, 0x08,
30 0xfc, 0x10, 0xac, 0x43, 0x10,
31 0xfc, 0x10, 0xac, 0x63, 0x18,
32 0xe0, 0x00, 0x00, 0x00, 0x02,
33 0xa0, 0x30, 0x02, 0x80, 0x00,
34 0xd8, 0x06, 0x00, 0x01, 0x01,
35 0x19, 0x40, 0x00, 0x04, 0x28,
36 0xb0, 0x0a, 0x06, 0x00, 0x00,
37 0xd8, 0x03, 0xc0, 0x01, 0x00,
38 0x00, 0x44, 0x00, 0x80, 0x80,
39 0xa0, 0x09, 0x06, 0x00, 0x00,
40 0xb0, 0x0b, 0x06, 0x04, 0x00,
41 0xd8, 0x13, 0x00, 0x01, 0x00,
42 0xb0, 0x0c, 0x06, 0x08, 0x00,
43 0xf0, 0x00, 0x0c, 0x00, 0xa0,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0xa0, 0x00, 0x12, 0x40, 0x00,
46 0xb0, 0xc9, 0x02, 0x43, 0x01,
47 0xb4, 0x49, 0x02, 0x43, 0x7f,
48 0xb0, 0x00, 0x22, 0x80, 0x00,
49 0xf0, 0x00, 0x02, 0x83, 0x00,
50 0xf0, 0x00, 0x0c, 0x04, 0x02,
51 0xb0, 0x40, 0x6c, 0x00, 0xa0,
52 0xd8, 0x08, 0x80, 0x01, 0x01,
53 0xaa, 0x00, 0x2c, 0x08, 0x02,
54 0xa0, 0xc0, 0x30, 0x18, 0x90,
55 0xa0, 0x00, 0x43, 0x00, 0x00,
56 0xba, 0xc0, 0x32, 0xc0, 0xa0,
57 0xaa, 0xb0, 0x00, 0x0f, 0x40,
58 0xd8, 0x0a, 0x80, 0x01, 0x04,
59 0xb0, 0x0a, 0x00, 0x08, 0x00,
60 0xf0, 0x00, 0x00, 0x0f, 0x40,
61 0xa4, 0x00, 0x2c, 0x08, 0x02,
62 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
63 0xe0, 0x00, 0x00, 0x00, 0x04,
64 0xd8, 0x0c, 0x80, 0x00, 0x18,
65 0x3c, 0x40, 0x00, 0x04, 0xe0,
66 0xba, 0x80, 0x42, 0x01, 0x80,
67 0xb4, 0x40, 0x40, 0x13, 0xff,
68 0xaa, 0x88, 0x00, 0x10, 0x80,
69 0xb0, 0x08, 0x06, 0x00, 0x00,
70 0xaa, 0xf0, 0x0d, 0x80, 0x80,
71 0xd8, 0x0b, 0x40, 0x01, 0x05,
72 0xa0, 0x88, 0x0c, 0x04, 0x80,
73 0xfc, 0x10, 0xae, 0x43, 0x90,
74 0xba, 0xc0, 0x50, 0x0f, 0x01,
75 0x9a, 0x30, 0x50, 0x15, 0x30,
76 0x9a, 0xb0, 0x50, 0x16, 0x30,
77 0x9b, 0x50, 0x58, 0x16, 0x01,
78 0x8a, 0xe2, 0x08, 0x16, 0x21,
79 0x6b, 0x4e, 0x00, 0x83, 0x03,
80 0xe0, 0x00, 0x00, 0x00, 0x30,
81 0x9a, 0x80, 0x70, 0x0e, 0x04,
82 0x8b, 0x88, 0x08, 0x1e, 0x02,
83 0xf0, 0x00, 0x0c, 0x01, 0x81,
84 0xf0, 0x01, 0x80, 0x1f, 0x00,
85 0x9b, 0xd0, 0x78, 0x1e, 0x01,
86 0x3d, 0x42, 0x00, 0x1c, 0x20,
87 0xe0, 0x00, 0x00, 0x00, 0x08,
88 0xfc, 0x10, 0xae, 0x63, 0x98,
89 0xa4, 0x30, 0x0c, 0x04, 0x02,
90 0xd8, 0x03, 0x00, 0x01, 0x00,
91 0xd8, 0x11, 0xc1, 0x42, 0x14,
92 0xa0, 0x18, 0x00, 0x08, 0x00,
93 0x1a, 0x40, 0x00, 0x04, 0x2c,
94 0x33, 0x00, 0x01, 0x2f, 0x20,
95 0xd8, 0x03, 0x00, 0x00, 0x18,
96 0xe0, 0x00, 0x02, 0x00, 0x00,
97 }
98};
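The generated .ucode file wraps the assembled instruction stream in a struct ixp2000_uengine_code: a CPU model/revision mask, the microengine configuration flags, a { -1, -1 }-terminated table of initial register values, and the 77 instructions as five bytes (one 40-bit control-store word) each. ixpdev.c below consumes it in two steps; a minimal sketch of just that hand-off, reusing the loader entry points ixpdev.c pulls in via <asm/arch/uengine.h>:

/* Sketch only: mirrors what ixpdev_init() below does for the TX engine. */
static void ixp2400_load_tx_engine(void)
{
	ixp2000_uengine_load(1, &ixp2400_tx);		/* program control store and CSRs of uengine 1 */
	ixp2000_uengine_start_contexts(1, 0xff);	/* start contexts 0-7 */
}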
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
new file mode 100644
index 000000000000..216aad1911e6
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -0,0 +1,404 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/init.h>
18#include <linux/moduleparam.h>
19#include <asm/arch/uengine.h>
20#include <asm/mach-types.h>
21#include <asm/io.h>
22#include "ixp2400_rx.ucode"
23#include "ixp2400_tx.ucode"
24#include "ixpdev_priv.h"
25#include "ixpdev.h"
26
27static int nds_count;
28static struct net_device **nds;
29static int nds_open;
30static void (*set_port_admin_status)(int port, int up);
31
32static struct ixpdev_rx_desc * const rx_desc =
33 (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
34static struct ixpdev_tx_desc * const tx_desc =
35 (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
36static int tx_pointer;
37
38
39static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
40{
41 struct ixpdev_priv *ip = netdev_priv(dev);
42 struct ixpdev_tx_desc *desc;
43 int entry;
44
45 if (unlikely(skb->len > PAGE_SIZE)) {
46 /* @@@ Count drops. */
47 dev_kfree_skb(skb);
48 return 0;
49 }
50
51 entry = tx_pointer;
52 tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
53
54 desc = tx_desc + entry;
55 desc->pkt_length = skb->len;
56 desc->channel = ip->channel;
57
58 skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
59 dev_kfree_skb(skb);
60
61 ixp2000_reg_write(RING_TX_PENDING,
62 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
63
64 dev->trans_start = jiffies;
65
66 local_irq_disable();
67 ip->tx_queue_entries++;
68 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
69 netif_stop_queue(dev);
70 local_irq_enable();
71
72 return 0;
73}
74
75
76static int ixpdev_rx(struct net_device *dev, int *budget)
77{
78 while (*budget > 0) {
79 struct ixpdev_rx_desc *desc;
80 struct sk_buff *skb;
81 void *buf;
82 u32 _desc;
83
84 _desc = ixp2000_reg_read(RING_RX_DONE);
85 if (_desc == 0)
86 return 0;
87
88 desc = rx_desc +
89 ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
90 buf = phys_to_virt(desc->buf_addr);
91
92 if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
93 printk(KERN_ERR "ixp2000: rx err, length %d\n",
94 desc->pkt_length);
95 goto err;
96 }
97
98 if (desc->channel < 0 || desc->channel >= nds_count) {
99 printk(KERN_ERR "ixp2000: rx err, channel %d\n",
100 desc->channel);
101 goto err;
102 }
103
104 /* @@@ Make FCS stripping configurable. */
105 desc->pkt_length -= 4;
106
107 if (unlikely(!netif_running(nds[desc->channel])))
108 goto err;
109
110 skb = dev_alloc_skb(desc->pkt_length + 2);
111 if (likely(skb != NULL)) {
112 skb->dev = nds[desc->channel];
113 skb_reserve(skb, 2);
114 eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, skb->dev);
117
118 skb->dev->last_rx = jiffies;
119
120 netif_receive_skb(skb);
121 }
122
123err:
124 ixp2000_reg_write(RING_RX_PENDING, _desc);
125 dev->quota--;
126 (*budget)--;
127 }
128
129 return 1;
130}
131
132/* dev always points to nds[0]. */
133static int ixpdev_poll(struct net_device *dev, int *budget)
134{
135 /* @@@ Have to stop polling when nds[0] is administratively
136 * downed while we are polling. */
137 do {
138 ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
139
140 if (ixpdev_rx(dev, budget))
141 return 1;
142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
143
144 netif_rx_complete(dev);
145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
146
147 return 0;
148}
149
150/* @@@ Ugly hack. */
151static inline int netif_rx_schedule_prep_notup(struct net_device *dev)
152{
153 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
154}
155
156static void ixpdev_tx_complete(void)
157{
158 int channel;
159 u32 wake;
160
161 wake = 0;
162 while (1) {
163 struct ixpdev_priv *ip;
164 u32 desc;
165 int entry;
166
167 desc = ixp2000_reg_read(RING_TX_DONE);
168 if (desc == 0)
169 break;
170
171 /* @@@ Check whether entries come back in order. */
172 entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
173 channel = tx_desc[entry].channel;
174
175 if (channel < 0 || channel >= nds_count) {
176 printk(KERN_ERR "ixp2000: txcomp channel index "
177 "out of bounds (%d, %.8i, %d)\n",
178 channel, (unsigned int)desc, entry);
179 continue;
180 }
181
182 ip = netdev_priv(nds[channel]);
183 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
184 wake |= 1 << channel;
185 ip->tx_queue_entries--;
186 }
187
188 for (channel = 0; wake != 0; channel++) {
189 if (wake & (1 << channel)) {
190 netif_wake_queue(nds[channel]);
191 wake &= ~(1 << channel);
192 }
193 }
194}
195
196static irqreturn_t ixpdev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
197{
198 u32 status;
199
200 status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
201 if (status == 0)
202 return IRQ_NONE;
203
204 /*
205 * Any of the eight receive units signaled RX?
206 */
207 if (status & 0x00ff) {
208 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
209 if (likely(netif_rx_schedule_prep_notup(nds[0]))) {
210 __netif_rx_schedule(nds[0]);
211 } else {
212 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
213 }
214 }
215
216 /*
217 * Any of the eight transmit units signaled TXdone?
218 */
219 if (status & 0xff00) {
220 ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
221 ixpdev_tx_complete();
222 }
223
224 return IRQ_HANDLED;
225}
226
227static int ixpdev_open(struct net_device *dev)
228{
229 struct ixpdev_priv *ip = netdev_priv(dev);
230 int err;
231
232 if (!nds_open++) {
233 err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
234 SA_SHIRQ, "ixp2000_eth", nds);
235 if (err) {
236 nds_open--;
237 return err;
238 }
239
240 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
241 }
242
243 set_port_admin_status(ip->channel, 1);
244 netif_start_queue(dev);
245
246 return 0;
247}
248
249static int ixpdev_close(struct net_device *dev)
250{
251 struct ixpdev_priv *ip = netdev_priv(dev);
252
253 netif_stop_queue(dev);
254 set_port_admin_status(ip->channel, 0);
255
256 if (!--nds_open) {
257 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
258 free_irq(IRQ_IXP2000_THDA0, nds);
259 }
260
261 return 0;
262}
263
264struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
265{
266 struct net_device *dev;
267 struct ixpdev_priv *ip;
268
269 dev = alloc_etherdev(sizeof_priv);
270 if (dev == NULL)
271 return NULL;
272
273 dev->hard_start_xmit = ixpdev_xmit;
274 dev->poll = ixpdev_poll;
275 dev->open = ixpdev_open;
276 dev->stop = ixpdev_close;
277
278 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
279 dev->weight = 64;
280
281 ip = netdev_priv(dev);
282 ip->channel = channel;
283 ip->tx_queue_entries = 0;
284
285 return dev;
286}
287
288int ixpdev_init(int __nds_count, struct net_device **__nds,
289 void (*__set_port_admin_status)(int port, int up))
290{
291 int i;
292 int err;
293
294 if (RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192) {
295 static void __too_many_rx_or_tx_buffers(void);
296 __too_many_rx_or_tx_buffers();
297 }
298
299 nds_count = __nds_count;
300 nds = __nds;
301 set_port_admin_status = __set_port_admin_status;
302
303 for (i = 0; i < nds_count; i++) {
304 err = register_netdev(nds[i]);
305 if (err) {
306 while (--i >= 0)
307 unregister_netdev(nds[i]);
308 goto err_out;
309 }
310 }
311
312 for (i = 0; i < RX_BUF_COUNT; i++) {
313 void *buf;
314
315 buf = (void *)get_zeroed_page(GFP_KERNEL);
316 if (buf == NULL) {
317 err = -ENOMEM;
318 while (--i >= 0)
319 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
320 goto err_unregister;
321 }
322 rx_desc[i].buf_addr = virt_to_phys(buf);
323 rx_desc[i].buf_length = PAGE_SIZE;
324 }
325
326 /* @@@ Maybe we shouldn't be preallocating TX buffers. */
327 for (i = 0; i < TX_BUF_COUNT; i++) {
328 void *buf;
329
330 buf = (void *)get_zeroed_page(GFP_KERNEL);
331 if (buf == NULL) {
332 err = -ENOMEM;
333 while (--i >= 0)
334 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
335 goto err_free_rx;
336 }
337 tx_desc[i].buf_addr = virt_to_phys(buf);
338 }
339
340 /* 256 entries, ring status set means 'empty', base address 0x0000. */
341 ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
342 ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
343 ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
344
345 /* 256 entries, ring status set means 'full', base address 0x0400. */
346 ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
347 ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
348 ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
349
350 for (i = 0; i < RX_BUF_COUNT; i++) {
351 ixp2000_reg_write(RING_RX_PENDING,
352 RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
353 }
354
355 ixp2000_uengine_load(0, &ixp2400_rx);
356 ixp2000_uengine_start_contexts(0, 0xff);
357
358
359 /* 256 entries, ring status set means 'empty', base address 0x0800. */
360 ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
361 ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
362 ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
363
364 /* 256 entries, ring status set means 'full', base address 0x0c00. */
365 ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
366 ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
367 ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
368
369 ixp2000_uengine_load(1, &ixp2400_tx);
370 ixp2000_uengine_start_contexts(1, 0xff);
371
372 return 0;
373
374err_free_rx:
375 for (i = 0; i < RX_BUF_COUNT; i++)
376 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
377
378err_unregister:
379 for (i = 0; i < nds_count; i++)
380 unregister_netdev(nds[i]);
381
382err_out:
383 return err;
384}
385
386void ixpdev_deinit(void)
387{
388 int i;
389
390 /* @@@ Flush out pending packets. */
391
392 ixp2000_uengine_stop_contexts(1, 0xff);
393 ixp2000_uengine_stop_contexts(0, 0xff);
394 ixp2000_uengine_reset(0x3);
395
396 for (i = 0; i < TX_BUF_COUNT; i++)
397 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
398
399 for (i = 0; i < RX_BUF_COUNT; i++)
400 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
401
402 for (i = 0; i < nds_count; i++)
403 unregister_netdev(nds[i]);
404}
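One detail of ixpdev_rx() above worth spelling out: the RX-done scratch ring does not carry an index but the SRAM offset of the completed descriptor, so the driver converts it back with pointer arithmetic and then recycles the very same offset into the RX-pending ring. In plain C, with the constants defined in ixpdev_priv.h below (RX_BUF_DESC_BASE = 0x1000, struct ixpdev_rx_desc = four 32-bit words):

#include <stdint.h>

/* Illustrative only: the offset-to-index conversion done by ixpdev_rx(). */
static unsigned int rx_done_entry_to_index(uint32_t ring_entry)
{
	return (ring_entry - 0x1000) / (4 * sizeof(uint32_t));
}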
diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ixp2000/ixpdev.h
new file mode 100644
index 000000000000..bd686cb63058
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev.h
@@ -0,0 +1,27 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_H
13#define __IXPDEV_H
14
15struct ixpdev_priv
16{
17 int channel;
18 int tx_queue_entries;
19};
20
21struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
22int ixpdev_init(int num_ports, struct net_device **nds,
23 void (*set_port_admin_status)(int port, int up));
24void ixpdev_deinit(void);
25
26
27#endif
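The header gives board code a three-call contract: allocate one net_device per channel with ixpdev_alloc(), hand them all to ixpdev_init() together with a callback that flips a port administratively up or down, and tear everything down with ixpdev_deinit(). A hypothetical two-port user might look like the sketch below; the my_* names are placeholders, not part of the patch, and the assumption that board-private state embeds struct ixpdev_priv first follows from ixpdev.c's use of netdev_priv().

struct my_port_priv {
	struct ixpdev_priv ixpdev;	/* kept first so ixpdev's netdev_priv() casts stay valid */
	int link_led;			/* example of board-private state */
};

static struct net_device *my_nds[2];

static void my_set_port_admin_status(int port, int up)
{
	/* enable or disable the external MAC/PHY for this port here */
}

static int __init my_board_eth_init(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		my_nds[i] = ixpdev_alloc(i, sizeof(struct my_port_priv));
		if (my_nds[i] == NULL)
			return -ENOMEM;		/* (error unwinding omitted in this sketch) */
	}

	return ixpdev_init(2, my_nds, my_set_port_admin_status);
}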
diff --git a/drivers/net/ixp2000/ixpdev_priv.h b/drivers/net/ixp2000/ixpdev_priv.h
new file mode 100644
index 000000000000..86aa08ea0c33
--- /dev/null
+++ b/drivers/net/ixp2000/ixpdev_priv.h
@@ -0,0 +1,57 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_PRIV_H
13#define __IXPDEV_PRIV_H
14
15#define RX_BUF_DESC_BASE 0x00001000
16#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
17#define TX_BUF_DESC_BASE 0x00002000
18#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
19#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
20
21#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
22#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
23#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
24#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
25
26#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
27#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
28#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
29#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
30#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
31#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
32#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
33#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
34#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
35#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
36#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
37#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
38#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
39
40struct ixpdev_rx_desc
41{
42 u32 buf_addr;
43 u32 buf_length;
44 u32 channel;
45 u32 pkt_length;
46};
47
48struct ixpdev_tx_desc
49{
50 u32 buf_addr;
51 u32 pkt_length;
52 u32 channel;
53 u32 unused;
54};
55
56
57#endif
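Assuming the 4 KiB pages used on the IXP2000's XScale core, the buffer-count macros above work out as follows, which is exactly the ceiling enforced in ixpdev_init(): the call to the undefined __too_many_rx_or_tx_buffers() is eliminated when the compile-time condition is false, and only survives (breaking the link) if a count ever exceeds 192.

	sizeof(struct ixpdev_rx_desc) = 4 * 4 bytes      = 16 bytes
	RX_BUF_COUNT  = (3 * 4096) / (4 * 16)            = 192
	TX_BUF_COUNT  = (3 * 4096) / (4 * 16)            = 192
	TX_BUF_COUNT_PER_CHAN = 192 / 4                  = 48 descriptors per port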
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
new file mode 100644
index 000000000000..cf0681fb1276
--- /dev/null
+++ b/drivers/net/ixp2000/pm3386.c
@@ -0,0 +1,304 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <linux/netdevice.h>
16#include <asm/io.h>
17
18/*
19 * Read from register 'reg' of PM3386 device 'pm'.
20 */
21static u16 pm3386_reg_read(int pm, int reg)
22{
23 void *_reg;
24 u16 value;
25
26 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
27 if (pm == 1)
28 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
29
30 value = *((volatile u16 *)(_reg + (reg << 1)));
31
32// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
33
34 return value;
35}
36
37/*
38 * Write to register 'reg' of PM3386 device 'pm', and perform
39 * a readback from the identification register.
40 */
41static void pm3386_reg_write(int pm, int reg, u16 value)
42{
43 void *_reg;
44 u16 dummy;
45
46// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
47
48 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
49 if (pm == 1)
50 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
51
52 *((volatile u16 *)(_reg + (reg << 1))) = value;
53
54 dummy = *((volatile u16 *)_reg);
55 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
56}
57
58/*
59 * Read from port 'port' register 'reg', where the registers
60 * for the different ports are 'spacing' registers apart.
61 */
62static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
63{
64 int reg;
65
66 reg = _reg;
67 if (port & 1)
68 reg += spacing;
69
70 return pm3386_reg_read(port >> 1, reg);
71}
72
73/*
74 * Write to port 'port' register 'reg', where the registers
75 * for the different ports are 'spacing' registers apart.
76 */
77static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
78{
79 int reg;
80
81 reg = _reg;
82 if (port & 1)
83 reg += spacing;
84
85 pm3386_reg_write(port >> 1, reg, value);
86}
87
88
89void pm3386_reset(void)
90{
91 /* @@@ Implement me. */
92}
93
94static u16 swaph(u16 x)
95{
96 return ((x << 8) | (x >> 8)) & 0xffff;
97}
98
99void pm3386_init_port(int port)
100{
101 int pm = port >> 1;
102
103 /*
104 * Work around ENP2611 bootloader programming MAC address
105 * in reverse.
106 */
107 if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
108 (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
109 u16 temp[3];
110
111 temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
112 temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
113 temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
114 pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
115 pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
116 pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
117 }
118
119 /*
120 * Initialise narrowbanding mode. See application note 2010486
121 * for more information. (@@@ We also need to issue a reset
122 * when ROOL or DOOL are detected.)
123 */
124 pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
125 udelay(500);
126 pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
127
128 /*
129 * SPI-3 ingress block. Set 64 bytes SPI-3 burst size
130 * towards SPI-3 bridge.
131 */
132 pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
133
134 /*
135 * Enable ingress protocol checking, and soft reset the
136 * SPI-3 ingress block.
137 */
138 pm3386_reg_write(pm, 0x103, 0x0003);
139 while (!(pm3386_reg_read(pm, 0x103) & 0x80))
140 ;
141
142 /*
143 * SPI-3 egress block. Gather 12288 bytes of the current
144 * packet in the TX fifo before initiating transmit on the
145 * SERDES interface. (Prevents TX underflows.)
146 */
147 pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
148
149 /*
150 * Enforce odd parity from the SPI-3 bridge, and soft reset
151 * the SPI-3 egress block.
152 */
153 pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
154 while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
155 ;
156
157 /*
158	 * EGMAC block. Set this channel to reject long preambles,
159 * not send or transmit PAUSE frames, enable preamble checking,
160 * disable frame length checking, enable FCS appending, enable
161 * TX frame padding.
162 */
163 pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
164
165 /*
166 * Soft reset the EGMAC block.
167 */
168 pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
169 udelay(10);
170 pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
171 udelay(10);
172
173 /*
174 * Auto-sense autonegotiation status.
175 */
176 pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
177
178 /*
179 * Allow reception of jumbo frames.
180 */
181 pm3386_port_reg_write(port, 0x310, 0x100, 9018);
182
183 /*
184 * Allow transmission of jumbo frames.
185 */
186 pm3386_port_reg_write(port, 0x336, 0x100, 9018);
187
188 /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
189
190 /*
191 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
192 */
193 pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
194 udelay(10);
195
196 /*
197 * Enable and restart autonegotiation.
198 */
199 pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
200 udelay(1000);
201 pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
202 udelay(10);
203}
204
205void pm3386_get_mac(int port, u8 *mac)
206{
207 u16 temp;
208
209 temp = pm3386_port_reg_read(port, 0x308, 0x100);
210 mac[0] = temp & 0xff;
211 mac[1] = (temp >> 8) & 0xff;
212
213 temp = pm3386_port_reg_read(port, 0x309, 0x100);
214 mac[2] = temp & 0xff;
215 mac[3] = (temp >> 8) & 0xff;
216
217 temp = pm3386_port_reg_read(port, 0x30a, 0x100);
218 mac[4] = temp & 0xff;
219 mac[5] = (temp >> 8) & 0xff;
220}
221
222static u32 pm3386_get_stat(int port, u16 base)
223{
224 u32 value;
225
226 value = pm3386_port_reg_read(port, base, 0x100);
227 value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
228
229 return value;
230}
231
232void pm3386_get_stats(int port, struct net_device_stats *stats)
233{
234 /*
235 * Snapshot statistics counters.
236 */
237 pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
238 while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
239 ;
240
241 memset(stats, 0, sizeof(stats));
242
243 stats->rx_packets = pm3386_get_stat(port, 0x510);
244 stats->tx_packets = pm3386_get_stat(port, 0x590);
245 stats->rx_bytes = pm3386_get_stat(port, 0x514);
246 stats->tx_bytes = pm3386_get_stat(port, 0x594);
247 /* @@@ Add other stats. */
248}
249
250int pm3386_is_link_up(int port)
251{
252 u16 temp;
253
254 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
255 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
256
257 return !!(temp & 0x0002);
258}
259
260void pm3386_enable_rx(int port)
261{
262 u16 temp;
263
264 temp = pm3386_port_reg_read(port, 0x303, 0x100);
265 temp |= 0x1000;
266 pm3386_port_reg_write(port, 0x303, 0x100, temp);
267
268 udelay(10);
269}
270
271void pm3386_disable_rx(int port)
272{
273 u16 temp;
274
275 temp = pm3386_port_reg_read(port, 0x303, 0x100);
276 temp &= 0xefff;
277 pm3386_port_reg_write(port, 0x303, 0x100, temp);
278
279 udelay(10);
280}
281
282void pm3386_enable_tx(int port)
283{
284 u16 temp;
285
286 temp = pm3386_port_reg_read(port, 0x303, 0x100);
287 temp |= 0x4000;
288 pm3386_port_reg_write(port, 0x303, 0x100, temp);
289
290 udelay(10);
291}
292
293void pm3386_disable_tx(int port)
294{
295 u16 temp;
296
297 temp = pm3386_port_reg_read(port, 0x303, 0x100);
298 temp &= 0xbfff;
299 pm3386_port_reg_write(port, 0x303, 0x100, temp);
300
301 udelay(10);
302}
303
304MODULE_LICENSE("GPL");
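Two conventions in the code above are easy to miss: each EGMAC station-address register holds two MAC bytes low-byte-first (which is what pm3386_get_mac() unpacks, and what the swaph()-based fixup in pm3386_init_port() relies on when the bootloader has written the address end-to-front), and each statistics counter is assembled from a pair of adjacent 16-bit registers, low word first. A standalone rendering of both, for illustration only:

#include <stdint.h>

/* Register-to-byte mapping used by pm3386_get_mac(); reg[0..2] are the
 * port's 0x308/0x309/0x30a station-address registers. */
static void pm3386_mac_from_regs(const uint16_t reg[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = reg[i] & 0xff;		/* low byte first */
		mac[2 * i + 1] = reg[i] >> 8;
	}
}

/* How pm3386_get_stat() widens a snapshotted counter: register 'base'
 * holds the low 16 bits, 'base + 1' the high 16 bits. */
static uint32_t pm3386_stat32(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}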
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
new file mode 100644
index 000000000000..55ecb1834219
--- /dev/null
+++ b/drivers/net/ixp2000/pm3386.h
@@ -0,0 +1,26 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __PM3386_H
13#define __PM3386_H
14
15void pm3386_reset(void);
16void pm3386_init_port(int port);
17void pm3386_get_mac(int port, u8 *mac);
18void pm3386_get_stats(int port, struct net_device_stats *stats);
19int pm3386_is_link_up(int port);
20void pm3386_enable_rx(int port);
21void pm3386_disable_rx(int port);
22void pm3386_enable_tx(int port);
23void pm3386_disable_tx(int port);
24
25
26#endif
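Taken together, the helpers above suggest the following bring-up order for one ENP2611 port. This is a hypothetical caller written against the declarations only, not a copy of the board code, and the example_ name is a placeholder.

static void example_bring_up_port(int port, struct net_device *dev)
{
	u8 mac[6];

	pm3386_init_port(port);		/* MAC fixup, SPI-3 and EGMAC setup */
	pm3386_get_mac(port, mac);
	memcpy(dev->dev_addr, mac, 6);

	if (pm3386_is_link_up(port)) {
		pm3386_enable_rx(port);
		pm3386_enable_tx(port);
	}
}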
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e57df8dfe6b4..5303a96b4327 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -66,7 +66,7 @@
66#include "s2io.h" 66#include "s2io.h"
67#include "s2io-regs.h" 67#include "s2io-regs.h"
68 68
69#define DRV_VERSION "Version 2.0.9.3" 69#define DRV_VERSION "Version 2.0.9.4"
70 70
71/* S2io Driver name & version. */ 71/* S2io Driver name & version. */
72static char s2io_driver_name[] = "Neterion"; 72static char s2io_driver_name[] = "Neterion";
@@ -412,7 +412,7 @@ static int init_shared_mem(struct s2io_nic *nic)
412 config->tx_cfg[i].fifo_len - 1; 412 config->tx_cfg[i].fifo_len - 1;
413 mac_control->fifos[i].fifo_no = i; 413 mac_control->fifos[i].fifo_no = i;
414 mac_control->fifos[i].nic = nic; 414 mac_control->fifos[i].nic = nic;
415 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1; 415 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
416 416
417 for (j = 0; j < page_num; j++) { 417 for (j = 0; j < page_num; j++) {
418 int k = 0; 418 int k = 0;
@@ -459,6 +459,10 @@ static int init_shared_mem(struct s2io_nic *nic)
459 } 459 }
460 } 460 }
461 461
462 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
463 if (!nic->ufo_in_band_v)
464 return -ENOMEM;
465
462 /* Allocation and initialization of RXDs in Rings */ 466 /* Allocation and initialization of RXDs in Rings */
463 size = 0; 467 size = 0;
464 for (i = 0; i < config->rx_ring_num; i++) { 468 for (i = 0; i < config->rx_ring_num; i++) {
@@ -731,6 +735,8 @@ static void free_shared_mem(struct s2io_nic *nic)
731 mac_control->stats_mem, 735 mac_control->stats_mem,
732 mac_control->stats_mem_phy); 736 mac_control->stats_mem_phy);
733 } 737 }
738 if (nic->ufo_in_band_v)
739 kfree(nic->ufo_in_band_v);
734} 740}
735 741
736/** 742/**
@@ -2003,6 +2009,49 @@ static int start_nic(struct s2io_nic *nic)
2003 2009
2004 return SUCCESS; 2010 return SUCCESS;
2005} 2011}
2012/**
2013 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2014 */
2015static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2016{
2017 nic_t *nic = fifo_data->nic;
2018 struct sk_buff *skb;
2019 TxD_t *txds;
2020 u16 j, frg_cnt;
2021
2022 txds = txdlp;
2023 if (txds->Host_Control == (u64) nic->ufo_in_band_v) {
2024 pci_unmap_single(nic->pdev, (dma_addr_t)
2025 txds->Buffer_Pointer, sizeof(u64),
2026 PCI_DMA_TODEVICE);
2027 txds++;
2028 }
2029
2030 skb = (struct sk_buff *) ((unsigned long)
2031 txds->Host_Control);
2032 if (!skb) {
2033 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2034 return NULL;
2035 }
2036 pci_unmap_single(nic->pdev, (dma_addr_t)
2037 txds->Buffer_Pointer,
2038 skb->len - skb->data_len,
2039 PCI_DMA_TODEVICE);
2040 frg_cnt = skb_shinfo(skb)->nr_frags;
2041 if (frg_cnt) {
2042 txds++;
2043 for (j = 0; j < frg_cnt; j++, txds++) {
2044 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2045 if (!txds->Buffer_Pointer)
2046 break;
2047 pci_unmap_page(nic->pdev, (dma_addr_t)
2048 txds->Buffer_Pointer,
2049 frag->size, PCI_DMA_TODEVICE);
2050 }
2051 }
2052 txdlp->Host_Control = 0;
2053 return(skb);
2054}
2006 2055
2007/** 2056/**
2008 * free_tx_buffers - Free all queued Tx buffers 2057 * free_tx_buffers - Free all queued Tx buffers
@@ -2020,7 +2069,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2020 int i, j; 2069 int i, j;
2021 mac_info_t *mac_control; 2070 mac_info_t *mac_control;
2022 struct config_param *config; 2071 struct config_param *config;
2023 int cnt = 0, frg_cnt; 2072 int cnt = 0;
2024 2073
2025 mac_control = &nic->mac_control; 2074 mac_control = &nic->mac_control;
2026 config = &nic->config; 2075 config = &nic->config;
@@ -2029,38 +2078,11 @@ static void free_tx_buffers(struct s2io_nic *nic)
2029 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2078 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2030 txdp = (TxD_t *) mac_control->fifos[i].list_info[j]. 2079 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2031 list_virt_addr; 2080 list_virt_addr;
2032 skb = 2081 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2033 (struct sk_buff *) ((unsigned long) txdp-> 2082 if (skb) {
2034 Host_Control); 2083 dev_kfree_skb(skb);
2035 if (skb == NULL) { 2084 cnt++;
2036 memset(txdp, 0, sizeof(TxD_t) *
2037 config->max_txds);
2038 continue;
2039 }
2040 frg_cnt = skb_shinfo(skb)->nr_frags;
2041 pci_unmap_single(nic->pdev, (dma_addr_t)
2042 txdp->Buffer_Pointer,
2043 skb->len - skb->data_len,
2044 PCI_DMA_TODEVICE);
2045 if (frg_cnt) {
2046 TxD_t *temp;
2047 temp = txdp;
2048 txdp++;
2049 for (j = 0; j < frg_cnt; j++, txdp++) {
2050 skb_frag_t *frag =
2051 &skb_shinfo(skb)->frags[j];
2052 pci_unmap_page(nic->pdev,
2053 (dma_addr_t)
2054 txdp->
2055 Buffer_Pointer,
2056 frag->size,
2057 PCI_DMA_TODEVICE);
2058 }
2059 txdp = temp;
2060 } 2085 }
2061 dev_kfree_skb(skb);
2062 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2063 cnt++;
2064 } 2086 }
2065 DBG_PRINT(INTR_DBG, 2087 DBG_PRINT(INTR_DBG,
2066 "%s:forcibly freeing %d skbs on FIFO%d\n", 2088 "%s:forcibly freeing %d skbs on FIFO%d\n",
@@ -2661,7 +2683,6 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2661 tx_curr_get_info_t get_info, put_info; 2683 tx_curr_get_info_t get_info, put_info;
2662 struct sk_buff *skb; 2684 struct sk_buff *skb;
2663 TxD_t *txdlp; 2685 TxD_t *txdlp;
2664 u16 j, frg_cnt;
2665 2686
2666 get_info = fifo_data->tx_curr_get_info; 2687 get_info = fifo_data->tx_curr_get_info;
2667 put_info = fifo_data->tx_curr_put_info; 2688 put_info = fifo_data->tx_curr_put_info;
@@ -2684,8 +2705,7 @@ to loss of link\n");
2684 } 2705 }
2685 } 2706 }
2686 2707
2687 skb = (struct sk_buff *) ((unsigned long) 2708 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2688 txdlp->Host_Control);
2689 if (skb == NULL) { 2709 if (skb == NULL) {
2690 DBG_PRINT(ERR_DBG, "%s: Null skb ", 2710 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2691 __FUNCTION__); 2711 __FUNCTION__);
@@ -2693,34 +2713,6 @@ to loss of link\n");
2693 return; 2713 return;
2694 } 2714 }
2695 2715
2696 frg_cnt = skb_shinfo(skb)->nr_frags;
2697 nic->tx_pkt_count++;
2698
2699 pci_unmap_single(nic->pdev, (dma_addr_t)
2700 txdlp->Buffer_Pointer,
2701 skb->len - skb->data_len,
2702 PCI_DMA_TODEVICE);
2703 if (frg_cnt) {
2704 TxD_t *temp;
2705 temp = txdlp;
2706 txdlp++;
2707 for (j = 0; j < frg_cnt; j++, txdlp++) {
2708 skb_frag_t *frag =
2709 &skb_shinfo(skb)->frags[j];
2710 if (!txdlp->Buffer_Pointer)
2711 break;
2712 pci_unmap_page(nic->pdev,
2713 (dma_addr_t)
2714 txdlp->
2715 Buffer_Pointer,
2716 frag->size,
2717 PCI_DMA_TODEVICE);
2718 }
2719 txdlp = temp;
2720 }
2721 memset(txdlp, 0,
2722 (sizeof(TxD_t) * fifo_data->max_txds));
2723
2724 /* Updating the statistics block */ 2716 /* Updating the statistics block */
2725 nic->stats.tx_bytes += skb->len; 2717 nic->stats.tx_bytes += skb->len;
2726 dev_kfree_skb_irq(skb); 2718 dev_kfree_skb_irq(skb);
@@ -3527,6 +3519,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3527 return 0; 3519 return 0;
3528 } 3520 }
3529 3521
3522 txdp->Control_1 = 0;
3523 txdp->Control_2 = 0;
3530#ifdef NETIF_F_TSO 3524#ifdef NETIF_F_TSO
3531 mss = skb_shinfo(skb)->tso_size; 3525 mss = skb_shinfo(skb)->tso_size;
3532 if (mss) { 3526 if (mss) {
@@ -3534,19 +3528,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3534 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3528 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3535 } 3529 }
3536#endif 3530#endif
3537
3538 frg_cnt = skb_shinfo(skb)->nr_frags;
3539 frg_len = skb->len - skb->data_len;
3540
3541 txdp->Buffer_Pointer = pci_map_single
3542 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3543 txdp->Host_Control = (unsigned long) skb;
3544 if (skb->ip_summed == CHECKSUM_HW) { 3531 if (skb->ip_summed == CHECKSUM_HW) {
3545 txdp->Control_2 |= 3532 txdp->Control_2 |=
3546 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3533 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3547 TXD_TX_CKO_UDP_EN); 3534 TXD_TX_CKO_UDP_EN);
3548 } 3535 }
3549 3536 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3537 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3550 txdp->Control_2 |= config->tx_intr_type; 3538 txdp->Control_2 |= config->tx_intr_type;
3551 3539
3552 if (sp->vlgrp && vlan_tx_tag_present(skb)) { 3540 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
@@ -3554,10 +3542,40 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3554 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 3542 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3555 } 3543 }
3556 3544
3557 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) | 3545 frg_len = skb->len - skb->data_len;
3558 TXD_GATHER_CODE_FIRST); 3546 if (skb_shinfo(skb)->ufo_size) {
3559 txdp->Control_1 |= TXD_LIST_OWN_XENA; 3547 int ufo_size;
3548
3549 ufo_size = skb_shinfo(skb)->ufo_size;
3550 ufo_size &= ~7;
3551 txdp->Control_1 |= TXD_UFO_EN;
3552 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3553 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3554#ifdef __BIG_ENDIAN
3555 sp->ufo_in_band_v[put_off] =
3556 (u64)skb_shinfo(skb)->ip6_frag_id;
3557#else
3558 sp->ufo_in_band_v[put_off] =
3559 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3560#endif
3561 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3562 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3563 sp->ufo_in_band_v,
3564 sizeof(u64), PCI_DMA_TODEVICE);
3565 txdp++;
3566 txdp->Control_1 = 0;
3567 txdp->Control_2 = 0;
3568 }
3569
3570 txdp->Buffer_Pointer = pci_map_single
3571 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3572 txdp->Host_Control = (unsigned long) skb;
3573 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3560 3574
3575 if (skb_shinfo(skb)->ufo_size)
3576 txdp->Control_1 |= TXD_UFO_EN;
3577
3578 frg_cnt = skb_shinfo(skb)->nr_frags;
3561 /* For fragmented SKB. */ 3579 /* For fragmented SKB. */
3562 for (i = 0; i < frg_cnt; i++) { 3580 for (i = 0; i < frg_cnt; i++) {
3563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3569,9 +3587,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3569 (sp->pdev, frag->page, frag->page_offset, 3587 (sp->pdev, frag->page, frag->page_offset,
3570 frag->size, PCI_DMA_TODEVICE); 3588 frag->size, PCI_DMA_TODEVICE);
3571 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size); 3589 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3590 if (skb_shinfo(skb)->ufo_size)
3591 txdp->Control_1 |= TXD_UFO_EN;
3572 } 3592 }
3573 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3593 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3574 3594
3595 if (skb_shinfo(skb)->ufo_size)
3596 frg_cnt++; /* as Txd0 was used for inband header */
3597
3575 tx_fifo = mac_control->tx_FIFO_start[queue]; 3598 tx_fifo = mac_control->tx_FIFO_start[queue];
3576 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr; 3599 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3577 writeq(val64, &tx_fifo->TxDL_Pointer); 3600 writeq(val64, &tx_fifo->TxDL_Pointer);
@@ -3583,6 +3606,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3583 if (mss) 3606 if (mss)
3584 val64 |= TX_FIFO_SPECIAL_FUNC; 3607 val64 |= TX_FIFO_SPECIAL_FUNC;
3585#endif 3608#endif
3609 if (skb_shinfo(skb)->ufo_size)
3610 val64 |= TX_FIFO_SPECIAL_FUNC;
3586 writeq(val64, &tx_fifo->List_Control); 3611 writeq(val64, &tx_fifo->List_Control);
3587 3612
3588 mmiowb(); 3613 mmiowb();
@@ -5190,6 +5215,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5190 .get_tso = ethtool_op_get_tso, 5215 .get_tso = ethtool_op_get_tso,
5191 .set_tso = ethtool_op_set_tso, 5216 .set_tso = ethtool_op_set_tso,
5192#endif 5217#endif
5218 .get_ufo = ethtool_op_get_ufo,
5219 .set_ufo = ethtool_op_set_ufo,
5193 .self_test_count = s2io_ethtool_self_test_count, 5220 .self_test_count = s2io_ethtool_self_test_count,
5194 .self_test = s2io_ethtool_test, 5221 .self_test = s2io_ethtool_test,
5195 .get_strings = s2io_ethtool_get_strings, 5222 .get_strings = s2io_ethtool_get_strings,
@@ -5941,7 +5968,8 @@ Defaulting to INTA\n");
5941 break; 5968 break;
5942 } 5969 }
5943 } 5970 }
5944 config->max_txds = MAX_SKB_FRAGS + 1; 5971 /* + 2 because one Txd for skb->data and one Txd for UFO */
5972 config->max_txds = MAX_SKB_FRAGS + 2;
5945 5973
5946 /* Rx side parameters. */ 5974 /* Rx side parameters. */
5947 if (rx_ring_sz[0] == 0) 5975 if (rx_ring_sz[0] == 0)
@@ -6035,6 +6063,10 @@ Defaulting to INTA\n");
6035#ifdef NETIF_F_TSO 6063#ifdef NETIF_F_TSO
6036 dev->features |= NETIF_F_TSO; 6064 dev->features |= NETIF_F_TSO;
6037#endif 6065#endif
6066 if (sp->device_type & XFRAME_II_DEVICE) {
6067 dev->features |= NETIF_F_UFO;
6068 dev->features |= NETIF_F_HW_CSUM;
6069 }
6038 6070
6039 dev->tx_timeout = &s2io_tx_watchdog; 6071 dev->tx_timeout = &s2io_tx_watchdog;
6040 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 6072 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
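The net effect of the s2io changes above, on my reading of s2io_xmit() and s2io_txdl_getskb(): a UFO frame now spends its first TxD on an eight-byte in-band buffer (the ufo_in_band_v array allocated in init_shared_mem and freed in free_shared_mem) carrying the IPv6 fragment ID, which is why max_txds grows from MAX_SKB_FRAGS + 1 to + 2 and why completion has to recognise and skip that extra descriptor.

/*
 * TxD list for one UFO frame (non-UFO frames simply start at what is
 * labelled TxD1 here):
 *
 *   TxD0   Buffer_Pointer -> DMA mapping of the 8-byte in-band header
 *          Control_1 |= TXD_UFO_EN | TXD_UFO_MSS(size) | TXD_BUFFER0_SIZE(8)
 *          Host_Control = ufo_in_band_v   <- marker s2io_txdl_getskb() tests
 *   TxD1   Buffer_Pointer -> skb->data (linear part), Host_Control = skb
 *   TxD2+  one descriptor per page fragment, TXD_GATHER_CODE_LAST on the last
 */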
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 419aad7f10e7..852a6a899d07 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -393,7 +393,9 @@ typedef struct _TxD {
393#define TXD_GATHER_CODE_LAST BIT(23) 393#define TXD_GATHER_CODE_LAST BIT(23)
394#define TXD_TCP_LSO_EN BIT(30) 394#define TXD_TCP_LSO_EN BIT(30)
395#define TXD_UDP_COF_EN BIT(31) 395#define TXD_UDP_COF_EN BIT(31)
396#define TXD_UFO_EN BIT(31) | BIT(30)
396#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14) 397#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
398#define TXD_UFO_MSS(val) vBIT(val,34,14)
397#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16) 399#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
398 400
399 u64 Control_2; 401 u64 Control_2;
@@ -789,6 +791,7 @@ struct s2io_nic {
789 791
790 spinlock_t rx_lock; 792 spinlock_t rx_lock;
791 atomic_t isr_cnt; 793 atomic_t isr_cnt;
794 u64 *ufo_in_band_v;
792}; 795};
793 796
794#define RESET_ERROR 1; 797#define RESET_ERROR 1;
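A small point about the two new macros: given the definitions visible in the hunk above, TXD_UFO_EN is simply both existing offload-enable bits at once and TXD_UFO_MSS reuses the LSO MSS field, so UFO is presumably signalled to the hardware by the combination rather than by a dedicated flag.

/*
 * For reading purposes only (not a proposed change):
 *
 *   TXD_UFO_EN       == TXD_TCP_LSO_EN | TXD_UDP_COF_EN    (BIT(30) | BIT(31))
 *   TXD_UFO_MSS(val) == TXD_TCP_LSO_MSS(val)               (vBIT(val,34,14))
 *
 * Note TXD_UFO_EN is defined without surrounding parentheses; that is
 * harmless for the |= uses in s2io.c but would bite in a masking context.
 */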
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 1d4d88680db1..3d95fa20cd88 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. 1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
2 Copyright 1999 Silicon Integrated System Corporation 2 Copyright 1999 Silicon Integrated System Corporation
3 Revision: 1.08.08 Jan. 22 2005 3 Revision: 1.08.09 Sep. 19 2005
4 4
5 Modified from the driver which is originally written by Donald Becker. 5 Modified from the driver which is originally written by Donald Becker.
6 6
@@ -17,6 +17,7 @@
17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, 17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
18 preliminary Rev. 1.0 Jan. 18, 1998 18 preliminary Rev. 1.0 Jan. 18, 1998
19 19
20 Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
20 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages 21 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
21 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support 22 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
22 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support 23 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
@@ -76,7 +77,7 @@
76#include "sis900.h" 77#include "sis900.h"
77 78
78#define SIS900_MODULE_NAME "sis900" 79#define SIS900_MODULE_NAME "sis900"
79#define SIS900_DRV_VERSION "v1.08.08 Jan. 22 2005" 80#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
80 81
81static char version[] __devinitdata = 82static char version[] __devinitdata =
82KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; 83KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -538,6 +539,11 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
538 printk("%2.2x:", (u8)net_dev->dev_addr[i]); 539 printk("%2.2x:", (u8)net_dev->dev_addr[i]);
539 printk("%2.2x.\n", net_dev->dev_addr[i]); 540 printk("%2.2x.\n", net_dev->dev_addr[i]);
540 541
542 /* Detect Wake on Lan support */
543 ret = inl(CFGPMC & PMESP);
544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
546
541 return 0; 547 return 0;
542 548
543 err_unmap_rx: 549 err_unmap_rx:
@@ -2015,6 +2021,67 @@ static int sis900_nway_reset(struct net_device *net_dev)
2015 return mii_nway_restart(&sis_priv->mii_info); 2021 return mii_nway_restart(&sis_priv->mii_info);
2016} 2022}
2017 2023
2024/**
2025 * sis900_set_wol - Set up Wake on Lan registers
2026 * @net_dev: the net device to probe
2027 * @wol: container for info passed to the driver
2028 *
2029 * Process ethtool command "wol" to setup wake on lan features.
2030 * SiS900 supports sending WoL events if a correct packet is received,
2031 * but there is no simple way to filter them to only a subset (broadcast,
2032 * multicast, unicast or arp).
2033 */
2034
2035static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2036{
2037 struct sis900_private *sis_priv = net_dev->priv;
2038 long pmctrl_addr = net_dev->base_addr + pmctrl;
2039 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2040
2041 if (wol->wolopts == 0) {
2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2043 cfgpmcsr |= ~PME_EN;
2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2045 outl(pmctrl_bits, pmctrl_addr);
2046 if (netif_msg_wol(sis_priv))
2047 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2048 return 0;
2049 }
2050
2051 if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
2052 | WAKE_BCAST | WAKE_ARP))
2053 return -EINVAL;
2054
2055 if (wol->wolopts & WAKE_MAGIC)
2056 pmctrl_bits |= MAGICPKT;
2057 if (wol->wolopts & WAKE_PHY)
2058 pmctrl_bits |= LINKON;
2059
2060 outl(pmctrl_bits, pmctrl_addr);
2061
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr |= PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 if (netif_msg_wol(sis_priv))
2066 printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);
2067
2068 return 0;
2069}
2070
2071static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2072{
2073 long pmctrl_addr = net_dev->base_addr + pmctrl;
2074 u32 pmctrl_bits;
2075
2076 pmctrl_bits = inl(pmctrl_addr);
2077 if (pmctrl_bits & MAGICPKT)
2078 wol->wolopts |= WAKE_MAGIC;
2079 if (pmctrl_bits & LINKON)
2080 wol->wolopts |= WAKE_PHY;
2081
2082 wol->supported = (WAKE_PHY | WAKE_MAGIC);
2083}
2084
2018static struct ethtool_ops sis900_ethtool_ops = { 2085static struct ethtool_ops sis900_ethtool_ops = {
2019 .get_drvinfo = sis900_get_drvinfo, 2086 .get_drvinfo = sis900_get_drvinfo,
2020 .get_msglevel = sis900_get_msglevel, 2087 .get_msglevel = sis900_get_msglevel,
@@ -2023,6 +2090,8 @@ static struct ethtool_ops sis900_ethtool_ops = {
2023 .get_settings = sis900_get_settings, 2090 .get_settings = sis900_get_settings,
2024 .set_settings = sis900_set_settings, 2091 .set_settings = sis900_set_settings,
2025 .nway_reset = sis900_nway_reset, 2092 .nway_reset = sis900_nway_reset,
2093 .get_wol = sis900_get_wol,
2094 .set_wol = sis900_set_wol
2026}; 2095};
2027 2096
2028/** 2097/**
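In short, the new ethtool hooks translate WAKE_PHY and WAKE_MAGIC into the LINKON and MAGICPKT bits of the pmctrl I/O register and gate the whole thing with PME_EN in PCI config space; every other wake type is refused with -EINVAL. (One line in the disable path, cfgpmcsr |= ~PME_EN, reads as though &= ~PME_EN was intended.) A condensed, illustrative mapping of the enable path; WAKE_* come from <linux/ethtool.h>, LINKON/MAGICPKT from sis900.h:

/* Illustrative condensation of sis900_set_wol() above. */
static u32 sis900_wolopts_to_pmctrl(u32 wolopts)
{
	u32 bits = 0;

	if (wolopts & WAKE_MAGIC)
		bits |= MAGICPKT;	/* wake on magic packet */
	if (wolopts & WAKE_PHY)
		bits |= LINKON;		/* wake on link coming up */

	return bits;			/* outl()'d to base_addr + pmctrl */
}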
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index de3c06735d15..4233ea55670f 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -33,6 +33,7 @@ enum sis900_registers {
33 rxcfg=0x34, //Receive Configuration Register 33 rxcfg=0x34, //Receive Configuration Register
34 flctrl=0x38, //Flow Control Register 34 flctrl=0x38, //Flow Control Register
35 rxlen=0x3c, //Receive Packet Length Register 35 rxlen=0x3c, //Receive Packet Length Register
36 cfgpmcsr=0x44, //Configuration Power Management Control/Status Register
36 rfcr=0x48, //Receive Filter Control Register 37 rfcr=0x48, //Receive Filter Control Register
37 rfdr=0x4C, //Receive Filter Data Register 38 rfdr=0x4C, //Receive Filter Data Register
38 pmctrl=0xB0, //Power Management Control Register 39 pmctrl=0xB0, //Power Management Control Register
@@ -140,6 +141,50 @@ enum sis96x_eeprom_command {
140 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100 141 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
141}; 142};
142 143
144/* PCI Registers */
145enum sis900_pci_registers {
146 CFGPMC = 0x40,
147 CFGPMCSR = 0x44
148};
149
150/* Power management capabilities bits */
151enum sis900_cfgpmc_register_bits {
152 PMVER = 0x00070000,
153 DSI = 0x00100000,
154 PMESP = 0xf8000000
155};
156
157enum sis900_pmesp_bits {
158 PME_D0 = 0x1,
159 PME_D1 = 0x2,
160 PME_D2 = 0x4,
161 PME_D3H = 0x8,
162 PME_D3C = 0x10
163};
164
165/* Power management control/status bits */
166enum sis900_cfgpmcsr_register_bits {
167 PMESTS = 0x00004000,
168 PME_EN = 0x00000100, // Power management enable
169 PWR_STA = 0x00000003 // Current power state
170};
171
172/* Wake-on-LAN support. */
173enum sis900_power_management_control_register_bits {
174 LINKLOSS = 0x00000001,
175 LINKON = 0x00000002,
176 MAGICPKT = 0x00000400,
177 ALGORITHM = 0x00000800,
178 FRM1EN = 0x00100000,
179 FRM2EN = 0x00200000,
180 FRM3EN = 0x00400000,
181 FRM1ACS = 0x01000000,
182 FRM2ACS = 0x02000000,
183 FRM3ACS = 0x04000000,
184 WAKEALL = 0x40000000,
185 GATECLK = 0x80000000
186};
187
143/* Management Data I/O (mdio) frame */ 188/* Management Data I/O (mdio) frame */
144#define MIIread 0x6000 189#define MIIread 0x6000
145#define MIIwrite 0x5002 190#define MIIwrite 0x5002
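The new PCI-side enums make the probe-time capability test easier to follow: CFGPMC is the power-management capability dword, PMESP its PME_Support field, and PME_D3C the "can assert PME# from D3cold" bit of that field. The probe code earlier in sis900.c goes through inl(CFGPMC & PMESP), so the sketch below shows only the config-space read that check appears to be aiming at; the >> 27 shift is my own addition, inferred from PMESP's bit position, and none of this is what the patch literally does.

/* Hedged sketch of the apparent intent, using the definitions above. */
static int sis900_pme_from_d3cold(struct pci_dev *pci_dev)
{
	u32 pmc, pmesp;

	pci_read_config_dword(pci_dev, CFGPMC, &pmc);
	pmesp = (pmc & PMESP) >> 27;		/* 5-bit PME_Support field */

	return (pmesp & PME_D3C) != 0;
}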
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
index 2dc5728e3ef6..9bdfde80c30b 100644
--- a/drivers/net/sk98lin/h/skdrv2nd.h
+++ b/drivers/net/sk98lin/h/skdrv2nd.h
@@ -60,7 +60,6 @@ extern SK_U64 SkOsGetTime(SK_AC*);
60extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*); 60extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*);
61extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*); 61extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*);
62extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*); 62extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*);
63extern int SkPciWriteCfgDWord(SK_AC*, int, SK_U32);
64extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16); 63extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16);
65extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8); 64extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8);
66extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA); 65extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA);
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h
index bdc1a5eaaae9..daa9a8d154fc 100644
--- a/drivers/net/sk98lin/h/skvpd.h
+++ b/drivers/net/sk98lin/h/skvpd.h
@@ -130,14 +130,12 @@ typedef struct s_vpd_key {
130#ifndef VPD_DO_IO 130#ifndef VPD_DO_IO
131#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val) 131#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val)
132#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val) 132#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val)
133#define VPD_OUT32(pAC,IoC,Addr,Val) (void)SkPciWriteCfgDWord(pAC,Addr,Val)
134#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal) 133#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal)
135#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal) 134#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal)
136#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal) 135#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal)
137#else /* VPD_DO_IO */ 136#else /* VPD_DO_IO */
138#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val) 137#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val)
139#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val) 138#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val)
140#define VPD_OUT32(pAC,IoC,Addr,Val) SK_OUT32(IoC,PCI_C(Addr),Val)
141#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal) 139#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal)
142#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal) 140#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal)
143#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal) 141#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal)
@@ -155,12 +153,6 @@ typedef struct s_vpd_key {
155 else \ 153 else \
156 SK_OUT16(pAC,PCI_C(Addr),Val); \ 154 SK_OUT16(pAC,PCI_C(Addr),Val); \
157 } 155 }
158#define VPD_OUT32(pAC,Ioc,Addr,Val) { \
159 if ((pAC)->DgT.DgUseCfgCycle) \
160 SkPciWriteCfgDWord(pAC,Addr,Val); \
161 else \
162 SK_OUT32(pAC,PCI_C(Addr),Val); \
163 }
164#define VPD_IN8(pAC,Ioc,Addr,pVal) { \ 156#define VPD_IN8(pAC,Ioc,Addr,pVal) { \
165 if ((pAC)->DgT.DgUseCfgCycle) \ 157 if ((pAC)->DgT.DgUseCfgCycle) \
166 SkPciReadCfgByte(pAC,Addr,pVal); \ 158 SkPciReadCfgByte(pAC,Addr,pVal); \
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 00c5d7f04c68..107c5d97546c 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -279,6 +279,27 @@ static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480};
279 279
280/***************************************************************************** 280/*****************************************************************************
281 * 281 *
282 * SkPciWriteCfgDWord - write a 32 bit value to pci config space
283 *
284 * Description:
285 * This routine writes a 32 bit value to the pci configuration
286 * space.
287 *
288 * Returns:
289 * 0 - indicate everything worked ok.
290 * != 0 - error indication
291 */
292static inline int SkPciWriteCfgDWord(
293SK_AC *pAC, /* Adapter Control structure pointer */
294int PciAddr, /* PCI register address */
295SK_U32 Val) /* pointer to store the read value */
296{
297 pci_write_config_dword(pAC->PciDev, PciAddr, Val);
298 return(0);
299} /* SkPciWriteCfgDWord */
300
301/*****************************************************************************
302 *
282 * SkGeInitPCI - Init the PCI resources 303 * SkGeInitPCI - Init the PCI resources
283 * 304 *
284 * Description: 305 * Description:
@@ -3992,28 +4013,6 @@ SK_U8 *pVal) /* pointer to store the read value */
3992 4013
3993/***************************************************************************** 4014/*****************************************************************************
3994 * 4015 *
3995 * SkPciWriteCfgDWord - write a 32 bit value to pci config space
3996 *
3997 * Description:
3998 * This routine writes a 32 bit value to the pci configuration
3999 * space.
4000 *
4001 * Returns:
4002 * 0 - indicate everything worked ok.
4003 * != 0 - error indication
4004 */
4005int SkPciWriteCfgDWord(
4006SK_AC *pAC, /* Adapter Control structure pointer */
4007int PciAddr, /* PCI register address */
4008SK_U32 Val) /* pointer to store the read value */
4009{
4010 pci_write_config_dword(pAC->PciDev, PciAddr, Val);
4011 return(0);
4012} /* SkPciWriteCfgDWord */
4013
4014
4015/*****************************************************************************
4016 *
4017 * SkPciWriteCfgWord - write a 16 bit value to pci config space 4016 * SkPciWriteCfgWord - write a 16 bit value to pci config space
4018 * 4017 *
4019 * Description: 4018 * Description:
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
new file mode 100644
index 000000000000..a1884b472cd0
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,3123 @@
1/*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on earlier sk98lin, and skge driver.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26/*
27 * TODO
28 * - coalescing setting?
29 *
30 * TOTEST
31 * - speed setting
32 * - suspend/resume
33 */
34
35#include <linux/config.h>
36#include <linux/crc32.h>
37#include <linux/kernel.h>
38#include <linux/version.h>
39#include <linux/module.h>
40#include <linux/netdevice.h>
41#include <linux/dma-mapping.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/pci.h>
45#include <linux/ip.h>
46#include <linux/tcp.h>
47#include <linux/in.h>
48#include <linux/delay.h>
49#include <linux/if_vlan.h>
50#include <linux/mii.h>
51
52#include <asm/irq.h>
53
54#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
55#define SKY2_VLAN_TAG_USED 1
56#endif
57
58#include "sky2.h"
59
60#define DRV_NAME "sky2"
61#define DRV_VERSION "0.9"
62#define PFX DRV_NAME " "
63
64/*
65 * The Yukon II chipset takes 64 bit command blocks (called list elements)
66 * that are organized into three (receive, transmit, status) different rings
67 * similar to Tigon3. A transmit can require several elements;
68 * a receive requires one (or two if using 64 bit dma).
69 */
70
71#define is_ec_a1(hw) \
72 unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
73 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
74
75#define RX_LE_SIZE 512
76#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
77#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
78#define RX_DEF_PENDING RX_MAX_PENDING
79#define RX_COPY_THRESHOLD 256
80
81#define TX_RING_SIZE 512
82#define TX_DEF_PENDING (TX_RING_SIZE - 1)
83#define TX_MIN_PENDING 64
84#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
85
86#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
87#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
88#define ETH_JUMBO_MTU 9000
89#define TX_WATCHDOG (5 * HZ)
90#define NAPI_WEIGHT 64
91#define PHY_RETRIES 1000
92
93static const u32 default_msg =
94 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
95 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
96 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
97
98static int debug = -1; /* defaults above */
99module_param(debug, int, 0);
100MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
101
102static const struct pci_device_id sky2_id_table[] = {
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
107 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
108 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
122 { 0 }
123};
124
125MODULE_DEVICE_TABLE(pci, sky2_id_table);
126
127/* Avoid conditionals by using array */
128static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
129static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
130
131static const char *yukon_name[] = {
132 [CHIP_ID_YUKON_LITE - CHIP_ID_YUKON] = "Lite", /* 0xb0 */
133 [CHIP_ID_YUKON_LP - CHIP_ID_YUKON] = "LP", /* 0xb2 */
134 [CHIP_ID_YUKON_XL - CHIP_ID_YUKON] = "XL", /* 0xb3 */
135 [CHIP_ID_YUKON_EC_U - CHIP_ID_YUKON] = "EC Ultra", /* 0xb4 */
136
137 [CHIP_ID_YUKON_EC - CHIP_ID_YUKON] = "EC", /* 0xb6 */
138 [CHIP_ID_YUKON_FE - CHIP_ID_YUKON] = "FE", /* 0xb7 */
139};
140
141
142/* Access to external PHY */
143static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
144{
145 int i;
146
147 gma_write16(hw, port, GM_SMI_DATA, val);
148 gma_write16(hw, port, GM_SMI_CTRL,
149 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
150
151 for (i = 0; i < PHY_RETRIES; i++) {
152 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
153 return 0;
154 udelay(1);
155 }
156
157 printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
158 return -ETIMEDOUT;
159}
160
161static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
162{
163 int i;
164
165 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
166 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
167
168 for (i = 0; i < PHY_RETRIES; i++) {
169 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
170 *val = gma_read16(hw, port, GM_SMI_DATA);
171 return 0;
172 }
173
174 udelay(1);
175 }
176
177 return -ETIMEDOUT;
178}
179
180static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
181{
182 u16 v;
183
184 if (__gm_phy_read(hw, port, reg, &v) != 0)
185 printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
186 return v;
187}
188
189static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
190{
191 u16 power_control;
192 u32 reg1;
193 int vaux;
194 int ret = 0;
195
196 pr_debug("sky2_set_power_state %d\n", state);
197 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
198
199 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
200 vaux = (sky2_read8(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
201 (power_control & PCI_PM_CAP_PME_D3cold);
202
203 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
204
205 power_control |= PCI_PM_CTRL_PME_STATUS;
206 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
207
208 switch (state) {
209 case PCI_D0:
210 /* switch power to VCC (WA for VAUX problem) */
211 sky2_write8(hw, B0_POWER_CTRL,
212 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
213
214		/* disable Core Clock Division */
215 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
216
217 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
218 /* enable bits are inverted */
219 sky2_write8(hw, B2_Y2_CLK_GATE,
220 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
221 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
222 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
223 else
224 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
225
226 /* Turn off phy power saving */
227 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
228 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
229
230		/* looks like this XL revision is backwards .. */
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
232 reg1 |= PCI_Y2_PHY1_COMA;
233 if (hw->ports > 1)
234 reg1 |= PCI_Y2_PHY2_COMA;
235 }
236 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
237 break;
238
239 case PCI_D3hot:
240 case PCI_D3cold:
241 /* Turn on phy power saving */
242 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
243 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
244 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
245 else
246 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
247 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
248
249 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
250 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
251 else
252 /* enable bits are inverted */
253 sky2_write8(hw, B2_Y2_CLK_GATE,
254 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
255 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
256 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
257
258 /* switch power to VAUX */
259 if (vaux && state != PCI_D3cold)
260 sky2_write8(hw, B0_POWER_CTRL,
261 (PC_VAUX_ENA | PC_VCC_ENA |
262 PC_VAUX_ON | PC_VCC_OFF));
263 break;
264 default:
265 printk(KERN_ERR PFX "Unknown power state %d\n", state);
266 ret = -1;
267 }
268
269 pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
270 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
271 return ret;
272}
273
274static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
275{
276 u16 reg;
277
278	/* disable all GMAC IRQs */
279 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
280 /* disable PHY IRQs */
281 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
282
283 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
284 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
285 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
286 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
287
288 reg = gma_read16(hw, port, GM_RX_CTRL);
289 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
290 gma_write16(hw, port, GM_RX_CTRL, reg);
291}
292
293static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
294{
295 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
296 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
297
298 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
299 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
300
301 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
302 PHY_M_EC_MAC_S_MSK);
303 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
304
305 if (hw->chip_id == CHIP_ID_YUKON_EC)
306 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
307 else
308 ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
309
310 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
311 }
312
313 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
314 if (hw->copper) {
315 if (hw->chip_id == CHIP_ID_YUKON_FE) {
316 /* enable automatic crossover */
317 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
318 } else {
319 /* disable energy detect */
320 ctrl &= ~PHY_M_PC_EN_DET_MSK;
321
322 /* enable automatic crossover */
323 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
324
325 if (sky2->autoneg == AUTONEG_ENABLE &&
326 hw->chip_id == CHIP_ID_YUKON_XL) {
327 ctrl &= ~PHY_M_PC_DSC_MSK;
328 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
329 }
330 }
331 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
332 } else {
333 /* workaround for deviation #4.88 (CRC errors) */
334 /* disable Automatic Crossover */
335
336 ctrl &= ~PHY_M_PC_MDIX_MSK;
337 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
338
339 if (hw->chip_id == CHIP_ID_YUKON_XL) {
340			/* Fiber: select 1000BASE-X only mode in MAC Specific Ctrl Reg. */
341 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
342 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
343 ctrl &= ~PHY_M_MAC_MD_MSK;
344 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
345 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
346
347 /* select page 1 to access Fiber registers */
348 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
349 }
350 }
351
352 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
353 if (sky2->autoneg == AUTONEG_DISABLE)
354 ctrl &= ~PHY_CT_ANE;
355 else
356 ctrl |= PHY_CT_ANE;
357
358 ctrl |= PHY_CT_RESET;
359 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
360
361 ctrl = 0;
362 ct1000 = 0;
363 adv = PHY_AN_CSMA;
364
365 if (sky2->autoneg == AUTONEG_ENABLE) {
366 if (hw->copper) {
367 if (sky2->advertising & ADVERTISED_1000baseT_Full)
368 ct1000 |= PHY_M_1000C_AFD;
369 if (sky2->advertising & ADVERTISED_1000baseT_Half)
370 ct1000 |= PHY_M_1000C_AHD;
371 if (sky2->advertising & ADVERTISED_100baseT_Full)
372 adv |= PHY_M_AN_100_FD;
373 if (sky2->advertising & ADVERTISED_100baseT_Half)
374 adv |= PHY_M_AN_100_HD;
375 if (sky2->advertising & ADVERTISED_10baseT_Full)
376 adv |= PHY_M_AN_10_FD;
377 if (sky2->advertising & ADVERTISED_10baseT_Half)
378 adv |= PHY_M_AN_10_HD;
379 } else /* special defines for FIBER (88E1011S only) */
380 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
381
382 /* Set Flow-control capabilities */
383 if (sky2->tx_pause && sky2->rx_pause)
384 adv |= PHY_AN_PAUSE_CAP; /* symmetric */
385 else if (sky2->rx_pause && !sky2->tx_pause)
386 adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
387 else if (!sky2->rx_pause && sky2->tx_pause)
388 adv |= PHY_AN_PAUSE_ASYM; /* local */
389
390 /* Restart Auto-negotiation */
391 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
392 } else {
393 /* forced speed/duplex settings */
394 ct1000 = PHY_M_1000C_MSE;
395
396 if (sky2->duplex == DUPLEX_FULL)
397 ctrl |= PHY_CT_DUP_MD;
398
399 switch (sky2->speed) {
400 case SPEED_1000:
401 ctrl |= PHY_CT_SP1000;
402 break;
403 case SPEED_100:
404 ctrl |= PHY_CT_SP100;
405 break;
406 }
407
408 ctrl |= PHY_CT_RESET;
409 }
410
411 if (hw->chip_id != CHIP_ID_YUKON_FE)
412 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
413
414 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
415 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
416
417	/* Setup PHY LEDs */
418 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
419 ledover = 0;
420
421 switch (hw->chip_id) {
422 case CHIP_ID_YUKON_FE:
423 /* on 88E3082 these bits are at 11..9 (shifted left) */
424 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
425
426 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
427
428 /* delete ACT LED control bits */
429 ctrl &= ~PHY_M_FELP_LED1_MSK;
430 /* change ACT LED control to blink mode */
431 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
432 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
433 break;
434
435 case CHIP_ID_YUKON_XL:
436 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
437
438 /* select page 3 to access LED control register */
439 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
440
441 /* set LED Function Control register */
442 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
443 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
444 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
445 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
446
447 /* set Polarity Control register */
448 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
449 (PHY_M_POLC_LS1_P_MIX(4) |
450 PHY_M_POLC_IS0_P_MIX(4) |
451 PHY_M_POLC_LOS_CTRL(2) |
452 PHY_M_POLC_INIT_CTRL(2) |
453 PHY_M_POLC_STA1_CTRL(2) |
454 PHY_M_POLC_STA0_CTRL(2)));
455
456 /* restore page register */
457 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
458 break;
459
460 default:
461 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
462 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
463 /* turn off the Rx LED (LED_RX) */
464 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
465 }
466
467 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
468
469 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
470 /* turn on 100 Mbps LED (LED_LINK100) */
471 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
472 }
473
474 if (ledover)
475 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
476
477 /* Enable phy interrupt on auto-negotiation complete (or link up) */
478 if (sky2->autoneg == AUTONEG_ENABLE)
479 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
480 else
481 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
482}
483
484static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
485{
486 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
487 u16 reg;
488 int i;
489 const u8 *addr = hw->dev[port]->dev_addr;
490
491 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
492 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);
493
494 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
495
496 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
497 /* WA DEV_472 -- looks like crossed wires on port 2 */
498 /* clear GMAC 1 Control reset */
499 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
500 do {
501 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
502 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
503 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
504 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
505 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
506 }
507
508 if (sky2->autoneg == AUTONEG_DISABLE) {
509 reg = gma_read16(hw, port, GM_GP_CTRL);
510 reg |= GM_GPCR_AU_ALL_DIS;
511 gma_write16(hw, port, GM_GP_CTRL, reg);
512 gma_read16(hw, port, GM_GP_CTRL);
513
514 switch (sky2->speed) {
515 case SPEED_1000:
516 reg |= GM_GPCR_SPEED_1000;
517 /* fallthru */
518 case SPEED_100:
519 reg |= GM_GPCR_SPEED_100;
520 }
521
522 if (sky2->duplex == DUPLEX_FULL)
523 reg |= GM_GPCR_DUP_FULL;
524 } else
525 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
526
527 if (!sky2->tx_pause && !sky2->rx_pause) {
528 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
529 reg |=
530 GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
531 } else if (sky2->tx_pause && !sky2->rx_pause) {
532 /* disable Rx flow-control */
533 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
534 }
535
536 gma_write16(hw, port, GM_GP_CTRL, reg);
537
538 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
539
540 spin_lock_bh(&hw->phy_lock);
541 sky2_phy_init(hw, port);
542 spin_unlock_bh(&hw->phy_lock);
543
544 /* MIB clear */
545 reg = gma_read16(hw, port, GM_PHY_ADDR);
546 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
547
548 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
549 gma_read16(hw, port, GM_MIB_CNT_BASE + 8 * i);
550 gma_write16(hw, port, GM_PHY_ADDR, reg);
551
552 /* transmit control */
553 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
554
555 /* receive control reg: unicast + multicast + no FCS */
556 gma_write16(hw, port, GM_RX_CTRL,
557 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
558
559 /* transmit flow control */
560 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
561
562 /* transmit parameter */
563 gma_write16(hw, port, GM_TX_PARAM,
564 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
565 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
566 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
567 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
568
569 /* serial mode register */
570 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
571 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
572
573 if (hw->dev[port]->mtu > ETH_DATA_LEN)
574 reg |= GM_SMOD_JUMBO_ENA;
575
576 gma_write16(hw, port, GM_SERIAL_MODE, reg);
577
578 /* virtual address for data */
579 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
580
581 /* physical address: used for pause frames */
582 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
583
584 /* ignore counter overflows */
585 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
586 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
587 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
588
589 /* Configure Rx MAC FIFO */
590 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
591 sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
592 GMF_RX_CTRL_DEF);
593
594 /* Flush Rx MAC FIFO on any flow control or error */
595 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
596
597 /* Set threshold to 0xa (64 bytes)
598	 * ASF is disabled, so there is no need to do WA dev #4.30
599 */
600 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
601
602 /* Configure Tx MAC FIFO */
603 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
604 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
605
606 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
607 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
608 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
609 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
610 /* set Tx GMAC FIFO Almost Empty Threshold */
611 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
612 /* Disable Store & Forward mode for TX */
613 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
614 }
615 }
616
617}
618
619static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
620{
621 u32 end;
622
623 start /= 8;
624 len /= 8;
625 end = start + len - 1;
626
627 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
628 sky2_write32(hw, RB_ADDR(q, RB_START), start);
629 sky2_write32(hw, RB_ADDR(q, RB_END), end);
630 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
631 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
632
633 if (q == Q_R1 || q == Q_R2) {
634 u32 rxup, rxlo;
635
636 rxlo = len/2;
637 rxup = rxlo + len/4;
638
639		/* Set thresholds on receive queues */
640 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
641 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
642 } else {
643		/* Enable store & forward on Tx queues because
644 * Tx FIFO is only 1K on Yukon
645 */
646 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
647 }
648
649 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
650 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
651}
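/* Worked example (a sketch, assuming a hypothetical 32 kB receive partition):
 * start and len are converted to 8-byte units, so len = 32768 / 8 = 4096;
 * the low-water threshold is then rxlo = 2048 units (16 kB) and the
 * high-water threshold rxup = 3072 units (24 kB).
 */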
652
653/* Setup Bus Memory Interface */
654static void sky2_qset(struct sky2_hw *hw, u16 q)
655{
656 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
657 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
658 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
659 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
660}
661
662/* Setup prefetch unit registers. This is the interface between
663 * the hardware and the driver's list elements.
664 */
665static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
666 u64 addr, u32 last)
667{
668 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
669 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
670 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
671 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
672 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
673 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
674
675 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
676}
677
678static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
679{
680 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
681
682 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
683 return le;
684}
685
686/*
687 * This is workaround code taken from the SysKonnect sk98lin driver
688 * to deal with a chip bug on Yukon EC rev 0 in the wraparound case.
689 */
690static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
691 u16 idx, u16 *last, u16 size)
692{
693 if (is_ec_a1(hw) && idx < *last) {
694 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
695
696 if (hwget == 0) {
697 /* Start prefetching again */
698 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
699 goto setnew;
700 }
701
702 if (hwget == size - 1) {
703 /* set watermark to one list element */
704 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
705
706 /* set put index to first list element */
707 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
708 } else /* have hardware go to end of list */
709 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
710 size - 1);
711 } else {
712setnew:
713 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
714 }
715 *last = idx;
716}
717
718
719static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
720{
721 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
722 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
723 return le;
724}
725
726/* Return high part of DMA address (could be 32 or 64 bit) */
727static inline u32 high32(dma_addr_t a)
728{
729 return (a >> 16) >> 16;
730}
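/* Example (a sketch, assuming a 64-bit dma_addr_t): for
 * a = 0x0000000123456789ULL, high32(a) == 0x00000001 and the low half
 * written to the list element is (u32) a == 0x23456789.  The double
 * 16-bit shift avoids an undefined 32-bit shift when dma_addr_t is only
 * 32 bits wide (high32() then simply returns 0).
 */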
731
732/* Build list element(s) telling the hardware about a receive buffer */
733static inline void sky2_rx_add(struct sky2_port *sky2, struct ring_info *re)
734{
735 struct sky2_rx_le *le;
736 u32 hi = high32(re->mapaddr);
737
738 re->idx = sky2->rx_put;
739 if (sky2->rx_addr64 != hi) {
740 le = sky2_next_rx(sky2);
741 le->addr = cpu_to_le32(hi);
742 le->ctrl = 0;
743 le->opcode = OP_ADDR64 | HW_OWNER;
744 sky2->rx_addr64 = high32(re->mapaddr + re->maplen);
745 }
746
747 le = sky2_next_rx(sky2);
748 le->addr = cpu_to_le32((u32) re->mapaddr);
749 le->length = cpu_to_le16(re->maplen);
750 le->ctrl = 0;
751 le->opcode = OP_PACKET | HW_OWNER;
752}
753
754
755/* Tell the chip where to start the receive checksum.
756 * The hardware actually has two checksums, but set both to the same
757 * offset to avoid possible byte-order problems.
758 */
759static void rx_set_checksum(struct sky2_port *sky2)
760{
761 struct sky2_rx_le *le;
762
763 le = sky2_next_rx(sky2);
764	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
765 le->ctrl = 0;
766 le->opcode = OP_TCPSTART | HW_OWNER;
767
768 sky2_write32(sky2->hw,
769 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
770 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
771
772}
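/* A sketch of what the OP_TCPSTART element above encodes (assuming the usual
 * ETH_HLEN of 14): both 16-bit halves of le->addr carry the checksum start
 * offset, so (ETH_HLEN << 16) | ETH_HLEN asks the hardware to begin summing
 * 14 bytes into the frame, i.e. just past the Ethernet header, for both
 * checksums.
 */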
773
774/*
775 * The RX Stop command will not work for Yukon-2 if the BMU does not
776 * reach the end of a packet, and since we can't make sure that we have
777 * incoming data, we must reset the BMU while it is not doing a DMA
778 * transfer. Since it is possible that the RX path is still active,
779 * the RX RAM buffer is stopped first, so any possible incoming
780 * data will not trigger a DMA. After the RAM buffer is stopped, the
781 * BMU is polled until any DMA in progress has ended, and only then
782 * is it reset.
783 */
784static void sky2_rx_stop(struct sky2_port *sky2)
785{
786 struct sky2_hw *hw = sky2->hw;
787 unsigned rxq = rxqaddr[sky2->port];
788 int i;
789
790 /* disable the RAM Buffer receive queue */
791 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
792
793 for (i = 0; i < 0xffff; i++)
794 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
795 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
796 goto stopped;
797
798 printk(KERN_WARNING PFX "%s: receiver stop failed\n",
799 sky2->netdev->name);
800stopped:
801 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
802
803 /* reset the Rx prefetch unit */
804 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
805}
806
807/* Clean out receive buffer area, assumes receiver hardware stopped */
808static void sky2_rx_clean(struct sky2_port *sky2)
809{
810 unsigned i;
811
812 memset(sky2->rx_le, 0, RX_LE_BYTES);
813 for (i = 0; i < sky2->rx_pending; i++) {
814 struct ring_info *re = sky2->rx_ring + i;
815
816 if (re->skb) {
817 pci_unmap_single(sky2->hw->pdev,
818 re->mapaddr, re->maplen,
819 PCI_DMA_FROMDEVICE);
820 kfree_skb(re->skb);
821 re->skb = NULL;
822 }
823 }
824}
825
826/* Basic MII support */
827static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
828{
829 struct mii_ioctl_data *data = if_mii(ifr);
830 struct sky2_port *sky2 = netdev_priv(dev);
831 struct sky2_hw *hw = sky2->hw;
832 int err = -EOPNOTSUPP;
833
834 if (!netif_running(dev))
835 return -ENODEV; /* Phy still in reset */
836
837 switch(cmd) {
838 case SIOCGMIIPHY:
839 data->phy_id = PHY_ADDR_MARV;
840
841 /* fallthru */
842 case SIOCGMIIREG: {
843 u16 val = 0;
844 spin_lock_bh(&hw->phy_lock);
845 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
846 spin_unlock_bh(&hw->phy_lock);
847 data->val_out = val;
848 break;
849 }
850
851 case SIOCSMIIREG:
852 if (!capable(CAP_NET_ADMIN))
853 return -EPERM;
854
855 spin_lock_bh(&hw->phy_lock);
856 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
857 data->val_in);
858 spin_unlock_bh(&hw->phy_lock);
859 break;
860 }
861 return err;
862}
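/* A minimal user-space sketch (illustrative only; it assumes an interface
 * named "eth0" and the standard <linux/mii.h> ioctl layout) showing how
 * SIOCGMIIPHY/SIOCGMIIREG reach __gm_phy_read() above:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii = (void *) &ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ioctl(fd, SIOCGMIIPHY, &ifr);	 fills mii->phy_id
 *		mii->reg_num = MII_BMSR;	 basic mode status register
 *		ioctl(fd, SIOCGMIIREG, &ifr);
 *		printf("BMSR = 0x%04x\n", mii->val_out);
 *		return 0;
 *	}
 */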
863
864#ifdef SKY2_VLAN_TAG_USED
865static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
866{
867 struct sky2_port *sky2 = netdev_priv(dev);
868 struct sky2_hw *hw = sky2->hw;
869 u16 port = sky2->port;
870 unsigned long flags;
871
872 spin_lock_irqsave(&sky2->tx_lock, flags);
873
874 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
875 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
876 sky2->vlgrp = grp;
877
878 spin_unlock_irqrestore(&sky2->tx_lock, flags);
879}
880
881static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
882{
883 struct sky2_port *sky2 = netdev_priv(dev);
884 struct sky2_hw *hw = sky2->hw;
885 u16 port = sky2->port;
886 unsigned long flags;
887
888 spin_lock_irqsave(&sky2->tx_lock, flags);
889
890 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
891 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
892 if (sky2->vlgrp)
893 sky2->vlgrp->vlan_devices[vid] = NULL;
894
895 spin_unlock_irqrestore(&sky2->tx_lock, flags);
896}
897#endif
898
899#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
900static inline unsigned rx_size(const struct sky2_port *sky2)
901{
902 return roundup(sky2->netdev->mtu + ETH_HLEN + 4, 8);
903}
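/* Worked example: with the default 1500-byte MTU, rx_size() returns
 * roundup(1500 + 14 + 4, 8) = roundup(1518, 8) = 1520 bytes
 * (Ethernet header plus four bytes of extra tag/CRC room, rounded up
 * to an 8-byte multiple).
 */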
904
905/*
906 * Allocate and set up the receiver buffer pool.
907 * In the case of 64-bit DMA, there are twice as many list elements
908 * available as ring entries,
909 * and one list element needs to be reserved so we don't wrap around.
910 *
911 * It appears the hardware has a bug in the FIFO logic that
912 * causes it to hang if the FIFO gets overrun and the receive buffer
913 * is not aligned. This means we can't use skb_reserve to align
914 * the IP header.
915 */
916static int sky2_rx_start(struct sky2_port *sky2)
917{
918 struct sky2_hw *hw = sky2->hw;
919 unsigned size = rx_size(sky2);
920 unsigned rxq = rxqaddr[sky2->port];
921 int i;
922
923 sky2->rx_put = sky2->rx_next = 0;
924 sky2_qset(hw, rxq);
925 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
926
927 rx_set_checksum(sky2);
928 for (i = 0; i < sky2->rx_pending; i++) {
929 struct ring_info *re = sky2->rx_ring + i;
930
931 re->skb = dev_alloc_skb(size);
932 if (!re->skb)
933 goto nomem;
934
935 re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
936 size, PCI_DMA_FROMDEVICE);
937 re->maplen = size;
938 sky2_rx_add(sky2, re);
939 }
940
941 /* Tell chip about available buffers */
942 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
943 sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
944 return 0;
945nomem:
946 sky2_rx_clean(sky2);
947 return -ENOMEM;
948}
949
950/* Bring up network interface. */
951static int sky2_up(struct net_device *dev)
952{
953 struct sky2_port *sky2 = netdev_priv(dev);
954 struct sky2_hw *hw = sky2->hw;
955 unsigned port = sky2->port;
956 u32 ramsize, rxspace;
957 int err = -ENOMEM;
958
959 if (netif_msg_ifup(sky2))
960 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
961
962 /* must be power of 2 */
963 sky2->tx_le = pci_alloc_consistent(hw->pdev,
964 TX_RING_SIZE *
965 sizeof(struct sky2_tx_le),
966 &sky2->tx_le_map);
967 if (!sky2->tx_le)
968 goto err_out;
969
970 sky2->tx_ring = kzalloc(TX_RING_SIZE * sizeof(struct ring_info),
971 GFP_KERNEL);
972 if (!sky2->tx_ring)
973 goto err_out;
974 sky2->tx_prod = sky2->tx_cons = 0;
975
976 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
977 &sky2->rx_le_map);
978 if (!sky2->rx_le)
979 goto err_out;
980 memset(sky2->rx_le, 0, RX_LE_BYTES);
981
982 sky2->rx_ring = kzalloc(sky2->rx_pending * sizeof(struct ring_info),
983 GFP_KERNEL);
984 if (!sky2->rx_ring)
985 goto err_out;
986
987 sky2_mac_init(hw, port);
988
989 /* Configure RAM buffers */
990 if (hw->chip_id == CHIP_ID_YUKON_FE ||
991 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
992 ramsize = 4096;
993 else {
994 u8 e0 = sky2_read8(hw, B2_E_0);
995 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
996 }
997
998 /* 2/3 for Rx */
999 rxspace = (2 * ramsize) / 3;
1000 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1001 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1002
1003 /* Make sure SyncQ is disabled */
1004 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1005 RB_RST_SET);
1006
1007 sky2_qset(hw, txqaddr[port]);
1008 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
1009 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1010
1011
1012 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1013 TX_RING_SIZE - 1);
1014
1015 err = sky2_rx_start(sky2);
1016 if (err)
1017 goto err_out;
1018
1019 /* Enable interrupts from phy/mac for port */
1020 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1021 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1022 return 0;
1023
1024err_out:
1025 if (sky2->rx_le)
1026 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1027 sky2->rx_le, sky2->rx_le_map);
1028 if (sky2->tx_le)
1029 pci_free_consistent(hw->pdev,
1030 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1031 sky2->tx_le, sky2->tx_le_map);
1032 if (sky2->tx_ring)
1033 kfree(sky2->tx_ring);
1034 if (sky2->rx_ring)
1035 kfree(sky2->rx_ring);
1036
1037 return err;
1038}
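/* A worked example of the RAM split above (assuming a hypothetical 48 kB
 * packet RAM, i.e. B2_E_0 reading 12): ramsize = 12 * 4096 = 49152 bytes,
 * so rxspace = 2 * 49152 / 3 = 32768 bytes go to the receive queue and
 * the remaining 16384 bytes to the transmit queue.
 */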
1039
1040/* Modular subtraction in ring */
1041static inline int tx_dist(unsigned tail, unsigned head)
1042{
1043 return (head >= tail ? head : head + TX_RING_SIZE) - tail;
1044}
1045
1046/* Number of list elements available for next tx */
1047static inline int tx_avail(const struct sky2_port *sky2)
1048{
1049 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1050}
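/* Worked example of the modular arithmetic above: with TX_RING_SIZE of 512,
 * tx_cons = 510 and tx_prod = 2, tx_dist(510, 2) = (2 + 512) - 510 = 4
 * elements are in flight; with tx_pending at the TX_DEF_PENDING value of
 * 511, tx_avail() would report 507 free elements.
 */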
1051
1052/* Estimate of number of transmit list elements required */
1053static inline unsigned tx_le_req(const struct sk_buff *skb)
1054{
1055 unsigned count;
1056
1057 count = sizeof(dma_addr_t) / sizeof(u32);
1058 count += skb_shinfo(skb)->nr_frags * count;
1059
1060 if (skb_shinfo(skb)->tso_size)
1061 ++count;
1062
1063 if (skb->ip_summed)
1064 ++count;
1065
1066 return count;
1067}
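/* Worked example of the estimate above (assuming 64-bit DMA, so
 * sizeof(dma_addr_t)/sizeof(u32) == 2): an skb with 3 page fragments that
 * uses TSO and hardware checksumming is estimated at
 * 2 + 3 * 2 + 1 + 1 = 10 list elements.
 */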
1068
1069/*
1070 * Put one packet in ring for transmit.
1071 * A single packet can generate multiple list elements, and
1072 * the number of ring elements will probably be less than the number
1073 * of list elements used.
1074 */
1075static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1076{
1077 struct sky2_port *sky2 = netdev_priv(dev);
1078 struct sky2_hw *hw = sky2->hw;
1079 struct sky2_tx_le *le = NULL;
1080 struct ring_info *re;
1081 unsigned long flags;
1082 unsigned i, len;
1083 dma_addr_t mapping;
1084 u32 addr64;
1085 u16 mss;
1086 u8 ctrl;
1087
1088 local_irq_save(flags);
1089 if (!spin_trylock(&sky2->tx_lock)) {
1090 local_irq_restore(flags);
1091 return NETDEV_TX_LOCKED;
1092 }
1093
1094 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
1095 netif_stop_queue(dev);
1096 spin_unlock_irqrestore(&sky2->tx_lock, flags);
1097
1098 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1099 dev->name);
1100 return NETDEV_TX_BUSY;
1101 }
1102
1103 if (unlikely(netif_msg_tx_queued(sky2)))
1104 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1105 dev->name, sky2->tx_prod, skb->len);
1106
1107 len = skb_headlen(skb);
1108 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1109 addr64 = high32(mapping);
1110
1111 re = sky2->tx_ring + sky2->tx_prod;
1112
1113 /* Send high bits if changed or crosses boundary */
1114 if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
1115 le = get_tx_le(sky2);
1116 le->tx.addr = cpu_to_le32(addr64);
1117 le->ctrl = 0;
1118 le->opcode = OP_ADDR64 | HW_OWNER;
1119 sky2->tx_addr64 = high32(mapping + len);
1120 }
1121
1122 /* Check for TCP Segmentation Offload */
1123 mss = skb_shinfo(skb)->tso_size;
1124 if (mss != 0) {
1125 /* just drop the packet if non-linear expansion fails */
1126 if (skb_header_cloned(skb) &&
1127 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1128 dev_kfree_skb_any(skb);
1129 goto out_unlock;
1130 }
1131
1132 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
1133 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
1134 mss += ETH_HLEN;
1135 }
1136
1137 if (mss != sky2->tx_last_mss) {
1138 le = get_tx_le(sky2);
1139 le->tx.tso.size = cpu_to_le16(mss);
1140 le->tx.tso.rsvd = 0;
1141 le->opcode = OP_LRGLEN | HW_OWNER;
1142 le->ctrl = 0;
1143 sky2->tx_last_mss = mss;
1144 }
1145
1146 ctrl = 0;
1147#ifdef SKY2_VLAN_TAG_USED
1148 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1149 if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1150 if (!le) {
1151 le = get_tx_le(sky2);
1152 le->tx.addr = 0;
1153 le->opcode = OP_VLAN|HW_OWNER;
1154 le->ctrl = 0;
1155 } else
1156 le->opcode |= OP_VLAN;
1157 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1158 ctrl |= INS_VLAN;
1159 }
1160#endif
1161
1162 /* Handle TCP checksum offload */
1163 if (skb->ip_summed == CHECKSUM_HW) {
1164 u16 hdr = skb->h.raw - skb->data;
1165 u16 offset = hdr + skb->csum;
1166
1167 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1168 if (skb->nh.iph->protocol == IPPROTO_UDP)
1169 ctrl |= UDPTCP;
1170
1171 le = get_tx_le(sky2);
1172 le->tx.csum.start = cpu_to_le16(hdr);
1173 le->tx.csum.offset = cpu_to_le16(offset);
1174 le->length = 0; /* initial checksum value */
1175 le->ctrl = 1; /* one packet */
1176 le->opcode = OP_TCPLISW | HW_OWNER;
1177 }
1178
1179 le = get_tx_le(sky2);
1180 le->tx.addr = cpu_to_le32((u32) mapping);
1181 le->length = cpu_to_le16(len);
1182 le->ctrl = ctrl;
1183 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1184
1185 /* Record the transmit mapping info */
1186 re->skb = skb;
1187 re->mapaddr = mapping;
1188 re->maplen = len;
1189
1190 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1191 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1192 struct ring_info *fre;
1193
1194 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1195 frag->size, PCI_DMA_TODEVICE);
1196 addr64 = (mapping >> 16) >> 16;
1197 if (addr64 != sky2->tx_addr64) {
1198 le = get_tx_le(sky2);
1199 le->tx.addr = cpu_to_le32(addr64);
1200 le->ctrl = 0;
1201 le->opcode = OP_ADDR64 | HW_OWNER;
1202 sky2->tx_addr64 = addr64;
1203 }
1204
1205 le = get_tx_le(sky2);
1206 le->tx.addr = cpu_to_le32((u32) mapping);
1207 le->length = cpu_to_le16(frag->size);
1208 le->ctrl = ctrl;
1209 le->opcode = OP_BUFFER | HW_OWNER;
1210
1211 fre = sky2->tx_ring
1212 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
1213 fre->skb = NULL;
1214 fre->mapaddr = mapping;
1215 fre->maplen = frag->size;
1216 }
1217 re->idx = sky2->tx_prod;
1218 le->ctrl |= EOP;
1219
1220 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
1221 &sky2->tx_last_put, TX_RING_SIZE);
1222
1223 if (tx_avail(sky2) < MAX_SKB_TX_LE + 1)
1224 netif_stop_queue(dev);
1225
1226out_unlock:
1227 mmiowb();
1228 spin_unlock_irqrestore(&sky2->tx_lock, flags);
1229
1230 dev->trans_start = jiffies;
1231 return NETDEV_TX_OK;
1232}
1233
1234/*
1235 * Free ring elements starting at tx_cons until "done"
1236 *
1237 * NB: the hardware will tell us about partial completion of multi-part
1238 * buffers; these are deferred until completion.
1239 */
1240static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1241{
1242 struct net_device *dev = sky2->netdev;
1243 unsigned i;
1244
1245 if (done == sky2->tx_cons)
1246 return;
1247
1248 if (unlikely(netif_msg_tx_done(sky2)))
1249 printk(KERN_DEBUG "%s: tx done, up to %u\n",
1250 dev->name, done);
1251
1252 spin_lock(&sky2->tx_lock);
1253
1254 while (sky2->tx_cons != done) {
1255 struct ring_info *re = sky2->tx_ring + sky2->tx_cons;
1256 struct sk_buff *skb;
1257
1258 /* Check for partial status */
1259 if (tx_dist(sky2->tx_cons, done)
1260 < tx_dist(sky2->tx_cons, re->idx))
1261 goto out;
1262
1263 skb = re->skb;
1264 pci_unmap_single(sky2->hw->pdev,
1265 re->mapaddr, re->maplen, PCI_DMA_TODEVICE);
1266
1267 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1268 struct ring_info *fre;
1269 fre =
1270 sky2->tx_ring + (sky2->tx_cons + i +
1271 1) % TX_RING_SIZE;
1272 pci_unmap_page(sky2->hw->pdev, fre->mapaddr,
1273 fre->maplen, PCI_DMA_TODEVICE);
1274 }
1275
1276 dev_kfree_skb_any(skb);
1277
1278 sky2->tx_cons = re->idx;
1279 }
1280out:
1281
1282 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1283 netif_wake_queue(dev);
1284 spin_unlock(&sky2->tx_lock);
1285}
1286
1287/* Cleanup all untransmitted buffers, assume transmitter not running */
1288static inline void sky2_tx_clean(struct sky2_port *sky2)
1289{
1290 sky2_tx_complete(sky2, sky2->tx_prod);
1291}
1292
1293/* Network shutdown */
1294static int sky2_down(struct net_device *dev)
1295{
1296 struct sky2_port *sky2 = netdev_priv(dev);
1297 struct sky2_hw *hw = sky2->hw;
1298 unsigned port = sky2->port;
1299 u16 ctrl;
1300
1301 if (netif_msg_ifdown(sky2))
1302 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1303
1304 /* Stop more packets from being queued */
1305 netif_stop_queue(dev);
1306
1307 /* Disable port IRQ */
1308 local_irq_disable();
1309 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1310 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1311 local_irq_enable();
1312
1313
1314 sky2_phy_reset(hw, port);
1315
1316 /* Stop transmitter */
1317 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1318 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1319
1320 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1321 RB_RST_SET | RB_DIS_OP_MD);
1322
1323 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1324 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1325 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1326
1327 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1328
1329 /* Workaround shared GMAC reset */
1330 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1331 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1332 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1333
1334 /* Disable Force Sync bit and Enable Alloc bit */
1335 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1336 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1337
1338 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1339 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1340 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1341
1342 /* Reset the PCI FIFO of the async Tx queue */
1343 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1344 BMU_RST_SET | BMU_FIFO_RST);
1345
1346 /* Reset the Tx prefetch units */
1347 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1348 PREF_UNIT_RST_SET);
1349
1350 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1351
1352 sky2_rx_stop(sky2);
1353
1354 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1355 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1356
1357	/* turn off LEDs */
1358 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1359
1360 synchronize_irq(hw->pdev->irq);
1361
1362 sky2_tx_clean(sky2);
1363 sky2_rx_clean(sky2);
1364
1365 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1366 sky2->rx_le, sky2->rx_le_map);
1367 kfree(sky2->rx_ring);
1368
1369 pci_free_consistent(hw->pdev,
1370 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1371 sky2->tx_le, sky2->tx_le_map);
1372 kfree(sky2->tx_ring);
1373
1374 return 0;
1375}
1376
1377static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1378{
1379 if (!hw->copper)
1380 return SPEED_1000;
1381
1382 if (hw->chip_id == CHIP_ID_YUKON_FE)
1383 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1384
1385 switch (aux & PHY_M_PS_SPEED_MSK) {
1386 case PHY_M_PS_SPEED_1000:
1387 return SPEED_1000;
1388 case PHY_M_PS_SPEED_100:
1389 return SPEED_100;
1390 default:
1391 return SPEED_10;
1392 }
1393}
1394
1395static void sky2_link_up(struct sky2_port *sky2)
1396{
1397 struct sky2_hw *hw = sky2->hw;
1398 unsigned port = sky2->port;
1399 u16 reg;
1400
1401 /* Enable Transmit FIFO Underrun */
1402 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1403
1404 reg = gma_read16(hw, port, GM_GP_CTRL);
1405 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1406 reg |= GM_GPCR_DUP_FULL;
1407
1408 /* enable Rx/Tx */
1409 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1410 gma_write16(hw, port, GM_GP_CTRL, reg);
1411 gma_read16(hw, port, GM_GP_CTRL);
1412
1413 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1414
1415 netif_carrier_on(sky2->netdev);
1416 netif_wake_queue(sky2->netdev);
1417
1418 /* Turn on link LED */
1419 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1420 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1421
1422 if (hw->chip_id == CHIP_ID_YUKON_XL) {
1423 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1424
1425 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1426 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
1427 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1428 SPEED_10 ? 7 : 0) |
1429 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1430 SPEED_100 ? 7 : 0) |
1431 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1432 SPEED_1000 ? 7 : 0));
1433 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1434 }
1435
1436 if (netif_msg_link(sky2))
1437 printk(KERN_INFO PFX
1438 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1439 sky2->netdev->name, sky2->speed,
1440 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1441 (sky2->tx_pause && sky2->rx_pause) ? "both" :
1442 sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
1443}
1444
1445static void sky2_link_down(struct sky2_port *sky2)
1446{
1447 struct sky2_hw *hw = sky2->hw;
1448 unsigned port = sky2->port;
1449 u16 reg;
1450
1451 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1452
1453 reg = gma_read16(hw, port, GM_GP_CTRL);
1454 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1455 gma_write16(hw, port, GM_GP_CTRL, reg);
1456 gma_read16(hw, port, GM_GP_CTRL); /* PCI post */
1457
1458 if (sky2->rx_pause && !sky2->tx_pause) {
1459 /* restore Asymmetric Pause bit */
1460 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1461 gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
1462 | PHY_M_AN_ASP);
1463 }
1464
1465 sky2_phy_reset(hw, port);
1466
1467 netif_carrier_off(sky2->netdev);
1468 netif_stop_queue(sky2->netdev);
1469
1470	/* Turn off link LED */
1471 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1472
1473 if (netif_msg_link(sky2))
1474 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1475 sky2_phy_init(hw, port);
1476}
1477
1478static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1479{
1480 struct sky2_hw *hw = sky2->hw;
1481 unsigned port = sky2->port;
1482 u16 lpa;
1483
1484 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1485
1486 if (lpa & PHY_M_AN_RF) {
1487		printk(KERN_ERR PFX "%s: remote fault\n", sky2->netdev->name);
1488 return -1;
1489 }
1490
1491 if (hw->chip_id != CHIP_ID_YUKON_FE &&
1492 gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1493		printk(KERN_ERR PFX "%s: master/slave fault\n",
1494 sky2->netdev->name);
1495 return -1;
1496 }
1497
1498 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1499		printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
1500 sky2->netdev->name);
1501 return -1;
1502 }
1503
1504 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1505
1506 sky2->speed = sky2_phy_speed(hw, aux);
1507
1508 /* Pause bits are offset (9..8) */
1509 if (hw->chip_id == CHIP_ID_YUKON_XL)
1510 aux >>= 6;
1511
1512 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1513 sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1514
1515 if ((sky2->tx_pause || sky2->rx_pause)
1516 && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1517 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1518 else
1519 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1520
1521 return 0;
1522}
1523
1524/*
1525 * Interrupts from the PHY are handled in a tasklet (soft irq)
1526 * because accessing PHY registers requires a spin wait, which might
1527 * cause excess interrupt latency.
1528 */
1529static void sky2_phy_task(unsigned long data)
1530{
1531 struct sky2_port *sky2 = (struct sky2_port *)data;
1532 struct sky2_hw *hw = sky2->hw;
1533 u16 istatus, phystat;
1534
1535 spin_lock(&hw->phy_lock);
1536 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
1537 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
1538
1539 if (netif_msg_intr(sky2))
1540 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1541 sky2->netdev->name, istatus, phystat);
1542
1543 if (istatus & PHY_M_IS_AN_COMPL) {
1544 if (sky2_autoneg_done(sky2, phystat) == 0)
1545 sky2_link_up(sky2);
1546 goto out;
1547 }
1548
1549 if (istatus & PHY_M_IS_LSP_CHANGE)
1550 sky2->speed = sky2_phy_speed(hw, phystat);
1551
1552 if (istatus & PHY_M_IS_DUP_CHANGE)
1553 sky2->duplex =
1554 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1555
1556 if (istatus & PHY_M_IS_LST_CHANGE) {
1557 if (phystat & PHY_M_PS_LINK_UP)
1558 sky2_link_up(sky2);
1559 else
1560 sky2_link_down(sky2);
1561 }
1562out:
1563 spin_unlock(&hw->phy_lock);
1564
1565 local_irq_disable();
1566 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1567 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1568 local_irq_enable();
1569}
1570
1571static void sky2_tx_timeout(struct net_device *dev)
1572{
1573 struct sky2_port *sky2 = netdev_priv(dev);
1574
1575 if (netif_msg_timer(sky2))
1576 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1577
1578 sky2_write32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR), BMU_STOP);
1579 sky2_read32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR));
1580
1581 sky2_tx_clean(sky2);
1582}
1583
1584static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1585{
1586 struct sky2_port *sky2 = netdev_priv(dev);
1587 struct sky2_hw *hw = sky2->hw;
1588 int err;
1589 u16 ctl, mode;
1590
1591 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1592 return -EINVAL;
1593
1594 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1595 return -EINVAL;
1596
1597 if (!netif_running(dev)) {
1598 dev->mtu = new_mtu;
1599 return 0;
1600 }
1601
1602 sky2_write32(hw, B0_IMSK, 0);
1603
1604 dev->trans_start = jiffies; /* prevent tx timeout */
1605 netif_stop_queue(dev);
1606 netif_poll_disable(hw->dev[0]);
1607
1608 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1609 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1610 sky2_rx_stop(sky2);
1611 sky2_rx_clean(sky2);
1612
1613 dev->mtu = new_mtu;
1614 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
1615 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1616
1617 if (dev->mtu > ETH_DATA_LEN)
1618 mode |= GM_SMOD_JUMBO_ENA;
1619
1620 gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
1621
1622 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1623
1624 err = sky2_rx_start(sky2);
1625 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
1626
1627	netif_poll_enable(hw->dev[0]);
1628 netif_wake_queue(dev);
1629 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1630
1631 return err;
1632}
1633
1634/*
1635 * Receive one packet.
1636 * For small packets or errors, just reuse existing skb.
1637 * For larger packets, get new buffer.
1638 */
1639static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1640 u16 length, u32 status)
1641{
1642 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1643 struct sk_buff *skb = NULL;
1644 const unsigned int bufsize = rx_size(sky2);
1645
1646 if (unlikely(netif_msg_rx_status(sky2)))
1647 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1648 sky2->netdev->name, sky2->rx_next, status, length);
1649
1650 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1651
1652 if (status & GMR_FS_ANY_ERR)
1653 goto error;
1654
1655 if (!(status & GMR_FS_RX_OK))
1656 goto resubmit;
1657
1658 if (length < RX_COPY_THRESHOLD) {
1659 skb = alloc_skb(length + 2, GFP_ATOMIC);
1660 if (!skb)
1661 goto resubmit;
1662
1663 skb_reserve(skb, 2);
1664 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
1665 length, PCI_DMA_FROMDEVICE);
1666 memcpy(skb->data, re->skb->data, length);
1667 skb->ip_summed = re->skb->ip_summed;
1668 skb->csum = re->skb->csum;
1669 pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
1670 length, PCI_DMA_FROMDEVICE);
1671 } else {
1672 struct sk_buff *nskb;
1673
1674 nskb = dev_alloc_skb(bufsize);
1675 if (!nskb)
1676 goto resubmit;
1677
1678 skb = re->skb;
1679 re->skb = nskb;
1680 pci_unmap_single(sky2->hw->pdev, re->mapaddr,
1681 re->maplen, PCI_DMA_FROMDEVICE);
1682 prefetch(skb->data);
1683
1684 re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
1685 bufsize, PCI_DMA_FROMDEVICE);
1686 re->maplen = bufsize;
1687 }
1688
1689 skb_put(skb, length);
1690resubmit:
1691 re->skb->ip_summed = CHECKSUM_NONE;
1692 sky2_rx_add(sky2, re);
1693
1694 /* Tell receiver about new buffers. */
1695 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
1696 &sky2->rx_last_put, RX_LE_SIZE);
1697
1698 return skb;
1699
1700error:
1701 if (netif_msg_rx_err(sky2))
1702 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1703 sky2->netdev->name, status, length);
1704
1705 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1706 sky2->net_stats.rx_length_errors++;
1707 if (status & GMR_FS_FRAGMENT)
1708 sky2->net_stats.rx_frame_errors++;
1709 if (status & GMR_FS_CRC_ERR)
1710 sky2->net_stats.rx_crc_errors++;
1711 if (status & GMR_FS_RX_FF_OV)
1712 sky2->net_stats.rx_fifo_errors++;
1713
1714 goto resubmit;
1715}
1716
1717/*
1718 * Check for transmit complete
1719 */
1720static inline void sky2_tx_check(struct sky2_hw *hw, int port)
1721{
1722 struct net_device *dev = hw->dev[port];
1723
1724 if (dev && netif_running(dev)) {
1725 sky2_tx_complete(netdev_priv(dev),
1726 sky2_read16(hw, port == 0
1727 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX));
1728 }
1729}
1730
1731/*
1732 * Both ports share the same status interrupt, so there is only
1733 * one poll routine.
1734 */
1735static int sky2_poll(struct net_device *dev0, int *budget)
1736{
1737 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
1738 unsigned int to_do = min(dev0->quota, *budget);
1739 unsigned int work_done = 0;
1740 u16 hwidx;
1741
1742 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1743 BUG_ON(hwidx >= STATUS_RING_SIZE);
1744 rmb();
1745
1746 while (hwidx != hw->st_idx) {
1747 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1748 struct net_device *dev;
1749 struct sky2_port *sky2;
1750 struct sk_buff *skb;
1751 u32 status;
1752 u16 length;
1753 u8 op;
1754
1755 le = hw->st_le + hw->st_idx;
1756 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1757 prefetch(hw->st_le + hw->st_idx);
1758
1759 BUG_ON(le->link >= hw->ports || !hw->dev[le->link]);
1760
1761 BUG_ON(le->link >= 2);
1762 dev = hw->dev[le->link];
1763 if (dev == NULL || !netif_running(dev))
1764 continue;
1765
1766 sky2 = netdev_priv(dev);
1767 status = le32_to_cpu(le->status);
1768 length = le16_to_cpu(le->length);
1769 op = le->opcode & ~HW_OWNER;
1770 le->opcode = 0;
1771
1772 switch (op) {
1773 case OP_RXSTAT:
1774 skb = sky2_receive(sky2, length, status);
1775 if (!skb)
1776 break;
1777
1778 skb->dev = dev;
1779 skb->protocol = eth_type_trans(skb, dev);
1780 dev->last_rx = jiffies;
1781
1782#ifdef SKY2_VLAN_TAG_USED
1783 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
1784 vlan_hwaccel_receive_skb(skb,
1785 sky2->vlgrp,
1786 be16_to_cpu(sky2->rx_tag));
1787 } else
1788#endif
1789 netif_receive_skb(skb);
1790
1791 if (++work_done >= to_do)
1792 goto exit_loop;
1793 break;
1794
1795#ifdef SKY2_VLAN_TAG_USED
1796 case OP_RXVLAN:
1797 sky2->rx_tag = length;
1798 break;
1799
1800 case OP_RXCHKSVLAN:
1801 sky2->rx_tag = length;
1802 /* fall through */
1803#endif
1804 case OP_RXCHKS:
1805 skb = sky2->rx_ring[sky2->rx_next].skb;
1806 skb->ip_summed = CHECKSUM_HW;
1807 skb->csum = le16_to_cpu(status);
1808 break;
1809
1810 case OP_TXINDEXLE:
1811 /* pick up transmit status later */
1812 break;
1813
1814 default:
1815 if (net_ratelimit())
1816 printk(KERN_WARNING PFX
1817 "unknown status opcode 0x%x\n", op);
1818 break;
1819 }
1820 }
1821
1822exit_loop:
1823 sky2_tx_check(hw, 0);
1824 sky2_tx_check(hw, 1);
1825
1826 mmiowb();
1827
1828 if (work_done < to_do) {
1829 /*
1830 * Another chip workaround, need to restart TX timer if status
1831 * LE was handled. WA_DEV_43_418
1832 */
1833 if (is_ec_a1(hw)) {
1834 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1835 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1836 }
1837
1838 netif_rx_complete(dev0);
1839 hw->intr_mask |= Y2_IS_STAT_BMU;
1840 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1841 mmiowb();
1842 return 0;
1843 } else {
1844 *budget -= work_done;
1845 dev0->quota -= work_done;
1846 return 1;
1847 }
1848}
1849
1850static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1851{
1852 struct net_device *dev = hw->dev[port];
1853
1854 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1855 dev->name, status);
1856
1857 if (status & Y2_IS_PAR_RD1) {
1858 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1859 dev->name);
1860 /* Clear IRQ */
1861 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1862 }
1863
1864 if (status & Y2_IS_PAR_WR1) {
1865 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1866 dev->name);
1867
1868 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1869 }
1870
1871 if (status & Y2_IS_PAR_MAC1) {
1872 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1873 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1874 }
1875
1876 if (status & Y2_IS_PAR_RX1) {
1877 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1878 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1879 }
1880
1881 if (status & Y2_IS_TCP_TXA1) {
1882 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
1883 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1884 }
1885}
1886
1887static void sky2_hw_intr(struct sky2_hw *hw)
1888{
1889 u32 status = sky2_read32(hw, B0_HWE_ISRC);
1890
1891 if (status & Y2_IS_TIST_OV)
1892 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1893
1894 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1895 u16 pci_err;
1896
1897 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1898 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1899 pci_name(hw->pdev), pci_err);
1900
1901 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1902 pci_write_config_word(hw->pdev, PCI_STATUS,
1903 pci_err | PCI_STATUS_ERROR_BITS);
1904 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1905 }
1906
1907 if (status & Y2_IS_PCI_EXP) {
1908 /* PCI-Express uncorrectable Error occurred */
1909 u32 pex_err;
1910
1911 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1912
1913 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
1914 pci_name(hw->pdev), pex_err);
1915
1916 /* clear the interrupt */
1917 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1918 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1919 0xffffffffUL);
1920 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1921
1922 if (pex_err & PEX_FATAL_ERRORS) {
1923 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
1924 hwmsk &= ~Y2_IS_PCI_EXP;
1925 sky2_write32(hw, B0_HWE_IMSK, hwmsk);
1926 }
1927 }
1928
1929 if (status & Y2_HWE_L1_MASK)
1930 sky2_hw_error(hw, 0, status);
1931 status >>= 8;
1932 if (status & Y2_HWE_L1_MASK)
1933 sky2_hw_error(hw, 1, status);
1934}
1935
1936static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
1937{
1938 struct net_device *dev = hw->dev[port];
1939 struct sky2_port *sky2 = netdev_priv(dev);
1940 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1941
1942 if (netif_msg_intr(sky2))
1943 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
1944 dev->name, status);
1945
1946 if (status & GM_IS_RX_FF_OR) {
1947 ++sky2->net_stats.rx_fifo_errors;
1948 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
1949 }
1950
1951 if (status & GM_IS_TX_FF_UR) {
1952 ++sky2->net_stats.tx_fifo_errors;
1953 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
1954 }
1955}
1956
1957static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1958{
1959 struct net_device *dev = hw->dev[port];
1960 struct sky2_port *sky2 = netdev_priv(dev);
1961
1962 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1963 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1964 tasklet_schedule(&sky2->phy_task);
1965}
1966
1967static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
1968{
1969 struct sky2_hw *hw = dev_id;
1970 struct net_device *dev0 = hw->dev[0];
1971 u32 status;
1972
1973 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
1974 if (status == 0 || status == ~0)
1975 return IRQ_NONE;
1976
1977 if (status & Y2_IS_HW_ERR)
1978 sky2_hw_intr(hw);
1979
1980 /* Do NAPI for Rx and Tx status */
1981 if (status & Y2_IS_STAT_BMU) {
1982 hw->intr_mask &= ~Y2_IS_STAT_BMU;
1983 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1984
1985 if (likely(__netif_rx_schedule_prep(dev0))) {
1986 prefetch(&hw->st_le[hw->st_idx]);
1987 __netif_rx_schedule(dev0);
1988 }
1989 }
1990
1991 if (status & Y2_IS_IRQ_PHY1)
1992 sky2_phy_intr(hw, 0);
1993
1994 if (status & Y2_IS_IRQ_PHY2)
1995 sky2_phy_intr(hw, 1);
1996
1997 if (status & Y2_IS_IRQ_MAC1)
1998 sky2_mac_intr(hw, 0);
1999
2000 if (status & Y2_IS_IRQ_MAC2)
2001 sky2_mac_intr(hw, 1);
2002
2003 sky2_write32(hw, B0_Y2_SP_ICR, 2);
2004
2005 sky2_read32(hw, B0_IMSK);
2006
2007 return IRQ_HANDLED;
2008}
2009
2010#ifdef CONFIG_NET_POLL_CONTROLLER
2011static void sky2_netpoll(struct net_device *dev)
2012{
2013 struct sky2_port *sky2 = netdev_priv(dev);
2014
2015 sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
2016}
2017#endif
2018
2019/* Chip internal frequency for clock calculations */
2020static inline u32 sky2_khz(const struct sky2_hw *hw)
2021{
2022 switch (hw->chip_id) {
2023 case CHIP_ID_YUKON_EC:
2024 case CHIP_ID_YUKON_EC_U:
2025		return 125000;	/* 125 MHz */
2026	case CHIP_ID_YUKON_FE:
2027		return 100000;	/* 100 MHz */
2028	default:		/* YUKON_XL */
2029		return 156000;	/* 156 MHz */
2030 }
2031}
2032
2033static inline u32 sky2_ms2clk(const struct sky2_hw *hw, u32 ms)
2034{
2035 return sky2_khz(hw) * ms;
2036}
2037
2038static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2039{
2040 return (sky2_khz(hw) * us) / 1000;
2041}
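/* Worked example: on a 125 MHz Yukon-EC, the 75 us descriptor poll interval
 * programmed in sky2_reset() becomes sky2_us2clk(hw, 75) =
 * 125000 * 75 / 1000 = 9375 clock ticks, and the 10 ms status TX timer
 * becomes sky2_ms2clk(hw, 10) = 1250000 ticks.
 */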
2042
2043static int sky2_reset(struct sky2_hw *hw)
2044{
2045 u32 ctst;
2046 u16 status;
2047 u8 t8, pmd_type;
2048 int i;
2049
2050 ctst = sky2_read32(hw, B0_CTST);
2051
2052 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2053 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2054 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2055 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
2056 pci_name(hw->pdev), hw->chip_id);
2057 return -EOPNOTSUPP;
2058 }
2059
2060 /* ring for status responses */
2061 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
2062 &hw->st_dma);
2063 if (!hw->st_le)
2064 return -ENOMEM;
2065
2066 /* disable ASF */
2067 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2068 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2069 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2070 }
2071
2072 /* do a SW reset */
2073 sky2_write8(hw, B0_CTST, CS_RST_SET);
2074 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2075
2076 /* clear PCI errors, if any */
2077 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2078 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2079 pci_write_config_word(hw->pdev, PCI_STATUS,
2080 status | PCI_STATUS_ERROR_BITS);
2081
2082 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2083
2084 /* clear any PEX errors */
2085 if (is_pciex(hw)) {
2086 u16 lstat;
2087 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
2088 0xffffffffUL);
2089 pci_read_config_word(hw->pdev, PEX_LNK_STAT, &lstat);
2090 }
2091
2092 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2093 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
2094
2095 hw->ports = 1;
2096 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2097 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2098 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2099 ++hw->ports;
2100 }
2101 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2102
2103 sky2_set_power_state(hw, PCI_D0);
2104
2105 for (i = 0; i < hw->ports; i++) {
2106 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2107 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2108 }
2109
2110 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2111
2112 /* Clear I2C IRQ noise */
2113 sky2_write32(hw, B2_I2C_IRQ, 1);
2114
2115 /* turn off hardware timer (unused) */
2116 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2117 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2118
2119 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2120
2121 /* Turn on descriptor polling (every 75us) */
2122 sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
2123 sky2_write8(hw, B28_DPT_CTRL, DPT_START);
2124
2125 /* Turn off receive timestamp */
2126 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2127 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2128
2129 /* enable the Tx Arbiters */
2130 for (i = 0; i < hw->ports; i++)
2131 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2132
2133 /* Initialize ram interface */
2134 for (i = 0; i < hw->ports; i++) {
2135 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2136
2137 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2138 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2139 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2140 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2141 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2142 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2143 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2144 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2145 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2146 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2147 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2148 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2149 }
2150
2151 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2152
2153 spin_lock_bh(&hw->phy_lock);
2154 for (i = 0; i < hw->ports; i++)
2155 sky2_phy_reset(hw, i);
2156 spin_unlock_bh(&hw->phy_lock);
2157
2158 memset(hw->st_le, 0, STATUS_LE_BYTES);
2159 hw->st_idx = 0;
2160
2161 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2162 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2163
2164 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2165 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2166
2167 /* Set the list last index */
2168 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2169
2170 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
2171
2172 /* These status setup values are copied from SysKonnect's driver */
2173 if (is_ec_a1(hw)) {
2174 /* WA for dev. #4.3 */
2175 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
2176
2177 /* set Status-FIFO watermark */
2178 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
2179
2180 /* set Status-FIFO ISR watermark */
2181 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
2182
2183 } else {
2184 sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
2185
2186 /* set Status-FIFO watermark */
2187 sky2_write8(hw, STAT_FIFO_WM, 0x10);
2188
2189 /* set Status-FIFO ISR watermark */
2190 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2191 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
2192
2193 else /* WA dev 4.109 */
2194 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
2195
2196 sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
2197 }
2198
2199 /* enable status unit */
2200 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2201
2202 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2203 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2204 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2205
2206 return 0;
2207}
2208
2209static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
2210{
2211 u32 modes;
2212 if (hw->copper) {
2213 modes = SUPPORTED_10baseT_Half
2214 | SUPPORTED_10baseT_Full
2215 | SUPPORTED_100baseT_Half
2216 | SUPPORTED_100baseT_Full
2217 | SUPPORTED_Autoneg | SUPPORTED_TP;
2218
2219 if (hw->chip_id != CHIP_ID_YUKON_FE)
2220 modes |= SUPPORTED_1000baseT_Half
2221 | SUPPORTED_1000baseT_Full;
2222 } else
2223 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
2224 | SUPPORTED_Autoneg;
2225 return modes;
2226}
2227
2228static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2229{
2230 struct sky2_port *sky2 = netdev_priv(dev);
2231 struct sky2_hw *hw = sky2->hw;
2232
2233 ecmd->transceiver = XCVR_INTERNAL;
2234 ecmd->supported = sky2_supported_modes(hw);
2235 ecmd->phy_address = PHY_ADDR_MARV;
2236 if (hw->copper) {
2237 ecmd->supported = SUPPORTED_10baseT_Half
2238 | SUPPORTED_10baseT_Full
2239 | SUPPORTED_100baseT_Half
2240 | SUPPORTED_100baseT_Full
2241 | SUPPORTED_1000baseT_Half
2242 | SUPPORTED_1000baseT_Full
2243 | SUPPORTED_Autoneg | SUPPORTED_TP;
2244 ecmd->port = PORT_TP;
2245 } else
2246 ecmd->port = PORT_FIBRE;
2247
2248 ecmd->advertising = sky2->advertising;
2249 ecmd->autoneg = sky2->autoneg;
2250 ecmd->speed = sky2->speed;
2251 ecmd->duplex = sky2->duplex;
2252 return 0;
2253}
2254
2255static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2256{
2257 struct sky2_port *sky2 = netdev_priv(dev);
2258 const struct sky2_hw *hw = sky2->hw;
2259 u32 supported = sky2_supported_modes(hw);
2260
2261 if (ecmd->autoneg == AUTONEG_ENABLE) {
2262 ecmd->advertising = supported;
2263 sky2->duplex = -1;
2264 sky2->speed = -1;
2265 } else {
2266 u32 setting;
2267
2268 switch (ecmd->speed) {
2269 case SPEED_1000:
2270 if (ecmd->duplex == DUPLEX_FULL)
2271 setting = SUPPORTED_1000baseT_Full;
2272 else if (ecmd->duplex == DUPLEX_HALF)
2273 setting = SUPPORTED_1000baseT_Half;
2274 else
2275 return -EINVAL;
2276 break;
2277 case SPEED_100:
2278 if (ecmd->duplex == DUPLEX_FULL)
2279 setting = SUPPORTED_100baseT_Full;
2280 else if (ecmd->duplex == DUPLEX_HALF)
2281 setting = SUPPORTED_100baseT_Half;
2282 else
2283 return -EINVAL;
2284 break;
2285
2286 case SPEED_10:
2287 if (ecmd->duplex == DUPLEX_FULL)
2288 setting = SUPPORTED_10baseT_Full;
2289 else if (ecmd->duplex == DUPLEX_HALF)
2290 setting = SUPPORTED_10baseT_Half;
2291 else
2292 return -EINVAL;
2293 break;
2294 default:
2295 return -EINVAL;
2296 }
2297
2298 if ((setting & supported) == 0)
2299 return -EINVAL;
2300
2301 sky2->speed = ecmd->speed;
2302 sky2->duplex = ecmd->duplex;
2303 }
2304
2305 sky2->autoneg = ecmd->autoneg;
2306 sky2->advertising = ecmd->advertising;
2307
2308 if (netif_running(dev)) {
2309 sky2_down(dev);
2310 sky2_up(dev);
2311 }
2312
2313 return 0;
2314}
2315
2316static void sky2_get_drvinfo(struct net_device *dev,
2317 struct ethtool_drvinfo *info)
2318{
2319 struct sky2_port *sky2 = netdev_priv(dev);
2320
2321 strcpy(info->driver, DRV_NAME);
2322 strcpy(info->version, DRV_VERSION);
2323 strcpy(info->fw_version, "N/A");
2324 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
2325}
2326
2327static const struct sky2_stat {
2328 char name[ETH_GSTRING_LEN];
2329 u16 offset;
2330} sky2_stats[] = {
2331 { "tx_bytes", GM_TXO_OK_HI },
2332 { "rx_bytes", GM_RXO_OK_HI },
2333 { "tx_broadcast", GM_TXF_BC_OK },
2334 { "rx_broadcast", GM_RXF_BC_OK },
2335 { "tx_multicast", GM_TXF_MC_OK },
2336 { "rx_multicast", GM_RXF_MC_OK },
2337 { "tx_unicast", GM_TXF_UC_OK },
2338 { "rx_unicast", GM_RXF_UC_OK },
2339 { "tx_mac_pause", GM_TXF_MPAUSE },
2340 { "rx_mac_pause", GM_RXF_MPAUSE },
2341 { "collisions", GM_TXF_SNG_COL },
2342 { "late_collision", GM_TXF_LAT_COL },
2343 { "aborted", GM_TXF_ABO_COL },
2344 { "multi_collisions", GM_TXF_MUL_COL },
2345 { "fifo_underrun", GM_TXE_FIFO_UR },
2346 { "fifo_overflow", GM_RXE_FIFO_OV },
2347 { "rx_toolong", GM_RXF_LNG_ERR },
2348 { "rx_jabber", GM_RXF_JAB_PKT },
2349 { "rx_runt", GM_RXE_FRAG },
2350 { "rx_too_long", GM_RXF_LNG_ERR },
2351 { "rx_fcs_error", GM_RXF_FCS_ERR },
2352};
2353
2354static u32 sky2_get_rx_csum(struct net_device *dev)
2355{
2356 struct sky2_port *sky2 = netdev_priv(dev);
2357
2358 return sky2->rx_csum;
2359}
2360
2361static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2362{
2363 struct sky2_port *sky2 = netdev_priv(dev);
2364
2365 sky2->rx_csum = data;
2366
2367 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2368 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2369
2370 return 0;
2371}
2372
2373static u32 sky2_get_msglevel(struct net_device *netdev)
2374{
2375 struct sky2_port *sky2 = netdev_priv(netdev);
2376 return sky2->msg_enable;
2377}
2378
2379static int sky2_nway_reset(struct net_device *dev)
2380{
2381 struct sky2_port *sky2 = netdev_priv(dev);
2382 struct sky2_hw *hw = sky2->hw;
2383
2384 if (sky2->autoneg != AUTONEG_ENABLE)
2385 return -EINVAL;
2386
2387 netif_stop_queue(dev);
2388
2389 spin_lock_irq(&hw->phy_lock);
2390 sky2_phy_reset(hw, sky2->port);
2391 sky2_phy_init(hw, sky2->port);
2392 spin_unlock_irq(&hw->phy_lock);
2393
2394 return 0;
2395}
2396
2397static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2398{
2399 struct sky2_hw *hw = sky2->hw;
2400 unsigned port = sky2->port;
2401 int i;
2402
2403 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2404 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2405 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2406 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2407
2408 for (i = 2; i < count; i++)
2409 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
2410}
2411
2412static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2413{
2414 struct sky2_port *sky2 = netdev_priv(netdev);
2415 sky2->msg_enable = value;
2416}
2417
2418static int sky2_get_stats_count(struct net_device *dev)
2419{
2420 return ARRAY_SIZE(sky2_stats);
2421}
2422
2423static void sky2_get_ethtool_stats(struct net_device *dev,
2424 struct ethtool_stats *stats, u64 * data)
2425{
2426 struct sky2_port *sky2 = netdev_priv(dev);
2427
2428 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2429}
2430
2431static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2432{
2433 int i;
2434
2435 switch (stringset) {
2436 case ETH_SS_STATS:
2437 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
2438 memcpy(data + i * ETH_GSTRING_LEN,
2439 sky2_stats[i].name, ETH_GSTRING_LEN);
2440 break;
2441 }
2442}
2443
2444/* Use hardware MIB variables for critical-path statistics and
2445 * transmit feedback that is not reported at interrupt time.
2446 * Other errors are accounted for in the interrupt handler.
2447 */
2448static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2449{
2450 struct sky2_port *sky2 = netdev_priv(dev);
2451 u64 data[13];
2452
2453 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2454
2455 sky2->net_stats.tx_bytes = data[0];
2456 sky2->net_stats.rx_bytes = data[1];
2457 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2458 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2459 sky2->net_stats.multicast = data[5] + data[7];
2460 sky2->net_stats.collisions = data[10];
2461 sky2->net_stats.tx_aborted_errors = data[12];
2462
2463 return &sky2->net_stats;
2464}
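For reference, the data[] indices consumed above follow the order of the sky2_stats[] table; the annotation below is an editorial summary, not part of the driver:

	/*
	 * data[0]  tx_bytes       data[1]  rx_bytes
	 * data[2]  tx_broadcast   data[3]  rx_broadcast
	 * data[4]  tx_multicast   data[5]  rx_multicast
	 * data[6]  tx_unicast     data[7]  rx_unicast
	 * data[10] collisions     data[12] aborted
	 */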
2465
2466static int sky2_set_mac_address(struct net_device *dev, void *p)
2467{
2468 struct sky2_port *sky2 = netdev_priv(dev);
2469 struct sockaddr *addr = p;
2470 int err = 0;
2471
2472 if (!is_valid_ether_addr(addr->sa_data))
2473 return -EADDRNOTAVAIL;
2474
2475 sky2_down(dev);
2476 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2477 memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
2478 dev->dev_addr, ETH_ALEN);
2479 memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
2480 dev->dev_addr, ETH_ALEN);
2481 if (dev->flags & IFF_UP)
2482 err = sky2_up(dev);
2483 return err;
2484}
2485
2486static void sky2_set_multicast(struct net_device *dev)
2487{
2488 struct sky2_port *sky2 = netdev_priv(dev);
2489 struct sky2_hw *hw = sky2->hw;
2490 unsigned port = sky2->port;
2491 struct dev_mc_list *list = dev->mc_list;
2492 u16 reg;
2493 u8 filter[8];
2494
2495 memset(filter, 0, sizeof(filter));
2496
2497 reg = gma_read16(hw, port, GM_RX_CTRL);
2498 reg |= GM_RXCR_UCF_ENA;
2499
2500 if (dev->flags & IFF_PROMISC) /* promiscuous */
2501 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2502 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
2503 memset(filter, 0xff, sizeof(filter));
2504 else if (dev->mc_count == 0) /* no multicast */
2505 reg &= ~GM_RXCR_MCF_ENA;
2506 else {
2507 int i;
2508 reg |= GM_RXCR_MCF_ENA;
2509
2510 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2511 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2512 filter[bit / 8] |= 1 << (bit % 8);
2513 }
2514 }
2515
2516 gma_write16(hw, port, GM_MC_ADDR_H1,
2517 (u16) filter[0] | ((u16) filter[1] << 8));
2518 gma_write16(hw, port, GM_MC_ADDR_H2,
2519 (u16) filter[2] | ((u16) filter[3] << 8));
2520 gma_write16(hw, port, GM_MC_ADDR_H3,
2521 (u16) filter[4] | ((u16) filter[5] << 8));
2522 gma_write16(hw, port, GM_MC_ADDR_H4,
2523 (u16) filter[6] | ((u16) filter[7] << 8));
2524
2525 gma_write16(hw, port, GM_RX_CTRL, reg);
2526}
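A minimal sketch (hypothetical helper, not part of the driver) of how the loop above hashes one multicast address into the 64-bin filter; ether_crc() is the kernel CRC-32 helper already used in the code:

	static void sky2_mc_hash_bit(u8 filter[8], const u8 *addr)
	{
		/* low 6 bits of the CRC select one of 64 hash bins */
		u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;

		/* set that bin in the 8-byte filter, which is then
		 * written out via GM_MC_ADDR_H1..H4 as above */
		filter[bit / 8] |= 1 << (bit % 8);
	}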
2527
2528/* Can have one global because blinking is controlled by
2529 * ethtool and that is always under RTNL mutex
2530 */
2531static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2532{
2533 u16 pg;
2534
2535 spin_lock_bh(&hw->phy_lock);
2536 switch (hw->chip_id) {
2537 case CHIP_ID_YUKON_XL:
2538 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2539 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2540 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
2541 on ? (PHY_M_LEDC_LOS_CTRL(1) |
2542 PHY_M_LEDC_INIT_CTRL(7) |
2543 PHY_M_LEDC_STA1_CTRL(7) |
2544 PHY_M_LEDC_STA0_CTRL(7))
2545 : 0);
2546
2547 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2548 break;
2549
2550 default:
2551 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2552 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2553 on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
2554 PHY_M_LED_MO_10(MO_LED_ON) |
2555 PHY_M_LED_MO_100(MO_LED_ON) |
2556 PHY_M_LED_MO_1000(MO_LED_ON) |
2557 PHY_M_LED_MO_RX(MO_LED_ON)
2558 : PHY_M_LED_MO_DUP(MO_LED_OFF) |
2559 PHY_M_LED_MO_10(MO_LED_OFF) |
2560 PHY_M_LED_MO_100(MO_LED_OFF) |
2561 PHY_M_LED_MO_1000(MO_LED_OFF) |
2562 PHY_M_LED_MO_RX(MO_LED_OFF));
2563
2564 }
2565 spin_unlock_bh(&hw->phy_lock);
2566}
2567
2568/* blink LEDs to identify the board */
2569static int sky2_phys_id(struct net_device *dev, u32 data)
2570{
2571 struct sky2_port *sky2 = netdev_priv(dev);
2572 struct sky2_hw *hw = sky2->hw;
2573 unsigned port = sky2->port;
2574 u16 ledctrl, ledover = 0;
2575 long ms;
2576 int onoff = 1;
2577
2578 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
2579 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2580 else
2581 ms = data * 1000;
2582
2583 /* save initial values */
2584 spin_lock_bh(&hw->phy_lock);
2585 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2586 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2587 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2588 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2589 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2590 } else {
2591 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2592 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2593 }
2594 spin_unlock_bh(&hw->phy_lock);
2595
2596 while (ms > 0) {
2597 sky2_led(hw, port, onoff);
2598 onoff = !onoff;
2599
2600 if (msleep_interruptible(250))
2601 break; /* interrupted */
2602 ms -= 250;
2603 }
2604
2605 /* resume regularly scheduled programming */
2606 spin_lock_bh(&hw->phy_lock);
2607 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2608 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2609 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2610 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
2611 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2612 } else {
2613 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2614 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2615 }
2616 spin_unlock_bh(&hw->phy_lock);
2617
2618 return 0;
2619}
2620
2621static void sky2_get_pauseparam(struct net_device *dev,
2622 struct ethtool_pauseparam *ecmd)
2623{
2624 struct sky2_port *sky2 = netdev_priv(dev);
2625
2626 ecmd->tx_pause = sky2->tx_pause;
2627 ecmd->rx_pause = sky2->rx_pause;
2628 ecmd->autoneg = sky2->autoneg;
2629}
2630
2631static int sky2_set_pauseparam(struct net_device *dev,
2632 struct ethtool_pauseparam *ecmd)
2633{
2634 struct sky2_port *sky2 = netdev_priv(dev);
2635 int err = 0;
2636
2637 sky2->autoneg = ecmd->autoneg;
2638 sky2->tx_pause = ecmd->tx_pause != 0;
2639 sky2->rx_pause = ecmd->rx_pause != 0;
2640
2641 if (netif_running(dev)) {
2642 sky2_down(dev);
2643 err = sky2_up(dev);
2644 }
2645
2646 return err;
2647}
2648
2649#ifdef CONFIG_PM
2650static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2651{
2652 struct sky2_port *sky2 = netdev_priv(dev);
2653
2654 wol->supported = WAKE_MAGIC;
2655 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2656}
2657
2658static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2659{
2660 struct sky2_port *sky2 = netdev_priv(dev);
2661 struct sky2_hw *hw = sky2->hw;
2662
2663 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2664 return -EOPNOTSUPP;
2665
2666 sky2->wol = wol->wolopts == WAKE_MAGIC;
2667
2668 if (sky2->wol) {
2669 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2670
2671 sky2_write16(hw, WOL_CTRL_STAT,
2672 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2673 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2674 } else
2675 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2676
2677 return 0;
2678}
2679#endif
2680
2681static void sky2_get_ringparam(struct net_device *dev,
2682 struct ethtool_ringparam *ering)
2683{
2684 struct sky2_port *sky2 = netdev_priv(dev);
2685
2686 ering->rx_max_pending = RX_MAX_PENDING;
2687 ering->rx_mini_max_pending = 0;
2688 ering->rx_jumbo_max_pending = 0;
2689 ering->tx_max_pending = TX_RING_SIZE - 1;
2690
2691 ering->rx_pending = sky2->rx_pending;
2692 ering->rx_mini_pending = 0;
2693 ering->rx_jumbo_pending = 0;
2694 ering->tx_pending = sky2->tx_pending;
2695}
2696
2697static int sky2_set_ringparam(struct net_device *dev,
2698 struct ethtool_ringparam *ering)
2699{
2700 struct sky2_port *sky2 = netdev_priv(dev);
2701 int err = 0;
2702
2703 if (ering->rx_pending > RX_MAX_PENDING ||
2704 ering->rx_pending < 8 ||
2705 ering->tx_pending < MAX_SKB_TX_LE ||
2706 ering->tx_pending > TX_RING_SIZE - 1)
2707 return -EINVAL;
2708
2709 if (netif_running(dev))
2710 sky2_down(dev);
2711
2712 sky2->rx_pending = ering->rx_pending;
2713 sky2->tx_pending = ering->tx_pending;
2714
2715 if (netif_running(dev))
2716 err = sky2_up(dev);
2717
2718 return err;
2719}
2720
2721static int sky2_get_regs_len(struct net_device *dev)
2722{
2723 return 0x4000;
2724}
2725
2726/*
2727 * Returns a copy of the control register region.
2728 * Note: accessing the RAM address register set will cause timeouts.
2729 */
2730static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2731 void *p)
2732{
2733 const struct sky2_port *sky2 = netdev_priv(dev);
2734 const void __iomem *io = sky2->hw->regs;
2735
2736 BUG_ON(regs->len < B3_RI_WTO_R1);
2737 regs->version = 1;
2738 memset(p, 0, regs->len);
2739
2740 memcpy_fromio(p, io, B3_RAM_ADDR);
2741
2742 memcpy_fromio(p + B3_RI_WTO_R1,
2743 io + B3_RI_WTO_R1,
2744 regs->len - B3_RI_WTO_R1);
2745}
2746
2747static struct ethtool_ops sky2_ethtool_ops = {
2748 .get_settings = sky2_get_settings,
2749 .set_settings = sky2_set_settings,
2750 .get_drvinfo = sky2_get_drvinfo,
2751 .get_msglevel = sky2_get_msglevel,
2752 .set_msglevel = sky2_set_msglevel,
2753 .nway_reset = sky2_nway_reset,
2754 .get_regs_len = sky2_get_regs_len,
2755 .get_regs = sky2_get_regs,
2756 .get_link = ethtool_op_get_link,
2757 .get_sg = ethtool_op_get_sg,
2758 .set_sg = ethtool_op_set_sg,
2759 .get_tx_csum = ethtool_op_get_tx_csum,
2760 .set_tx_csum = ethtool_op_set_tx_csum,
2761 .get_tso = ethtool_op_get_tso,
2762 .set_tso = ethtool_op_set_tso,
2763 .get_rx_csum = sky2_get_rx_csum,
2764 .set_rx_csum = sky2_set_rx_csum,
2765 .get_strings = sky2_get_strings,
2766 .get_ringparam = sky2_get_ringparam,
2767 .set_ringparam = sky2_set_ringparam,
2768 .get_pauseparam = sky2_get_pauseparam,
2769 .set_pauseparam = sky2_set_pauseparam,
2770#ifdef CONFIG_PM
2771 .get_wol = sky2_get_wol,
2772 .set_wol = sky2_set_wol,
2773#endif
2774 .phys_id = sky2_phys_id,
2775 .get_stats_count = sky2_get_stats_count,
2776 .get_ethtool_stats = sky2_get_ethtool_stats,
2777 .get_perm_addr = ethtool_op_get_perm_addr,
2778};
2779
2780/* Initialize network device */
2781static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2782 unsigned port, int highmem)
2783{
2784 struct sky2_port *sky2;
2785 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
2786
2787 if (!dev) {
2788 printk(KERN_ERR "sky2 etherdev alloc failed\n");
2789 return NULL;
2790 }
2791
2792 SET_MODULE_OWNER(dev);
2793 SET_NETDEV_DEV(dev, &hw->pdev->dev);
2794 dev->irq = hw->pdev->irq;
2795 dev->open = sky2_up;
2796 dev->stop = sky2_down;
2797 dev->do_ioctl = sky2_ioctl;
2798 dev->hard_start_xmit = sky2_xmit_frame;
2799 dev->get_stats = sky2_get_stats;
2800 dev->set_multicast_list = sky2_set_multicast;
2801 dev->set_mac_address = sky2_set_mac_address;
2802 dev->change_mtu = sky2_change_mtu;
2803 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
2804 dev->tx_timeout = sky2_tx_timeout;
2805 dev->watchdog_timeo = TX_WATCHDOG;
2806 if (port == 0)
2807 dev->poll = sky2_poll;
2808 dev->weight = NAPI_WEIGHT;
2809#ifdef CONFIG_NET_POLL_CONTROLLER
2810 dev->poll_controller = sky2_netpoll;
2811#endif
2812
2813 sky2 = netdev_priv(dev);
2814 sky2->netdev = dev;
2815 sky2->hw = hw;
2816 sky2->msg_enable = netif_msg_init(debug, default_msg);
2817
2818 spin_lock_init(&sky2->tx_lock);
2819 /* Auto speed and flow control */
2820 sky2->autoneg = AUTONEG_ENABLE;
2821 sky2->tx_pause = 0;
2822 sky2->rx_pause = 1;
2823 sky2->duplex = -1;
2824 sky2->speed = -1;
2825 sky2->advertising = sky2_supported_modes(hw);
2826 sky2->rx_csum = 1;
2827 tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long)sky2);
2828 sky2->tx_pending = TX_DEF_PENDING;
2829 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
2830
2831 hw->dev[port] = dev;
2832
2833 sky2->port = port;
2834
2835 dev->features |= NETIF_F_LLTX;
2836 if (hw->chip_id != CHIP_ID_YUKON_EC_U)
2837 dev->features |= NETIF_F_TSO;
2838 if (highmem)
2839 dev->features |= NETIF_F_HIGHDMA;
2840 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2841
2842#ifdef SKY2_VLAN_TAG_USED
2843 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2844 dev->vlan_rx_register = sky2_vlan_rx_register;
2845 dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
2846#endif
2847
2848 /* read the mac address */
2849 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
2850 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2851
2852 /* device is off until link detection */
2853 netif_carrier_off(dev);
2854 netif_stop_queue(dev);
2855
2856 return dev;
2857}
2858
2859static inline void sky2_show_addr(struct net_device *dev)
2860{
2861 const struct sky2_port *sky2 = netdev_priv(dev);
2862
2863 if (netif_msg_probe(sky2))
2864 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
2865 dev->name,
2866 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2867 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2868}
2869
2870static int __devinit sky2_probe(struct pci_dev *pdev,
2871 const struct pci_device_id *ent)
2872{
2873 struct net_device *dev, *dev1 = NULL;
2874 struct sky2_hw *hw;
2875 int err, pm_cap, using_dac = 0;
2876
2877 err = pci_enable_device(pdev);
2878 if (err) {
2879 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
2880 pci_name(pdev));
2881 goto err_out;
2882 }
2883
2884 err = pci_request_regions(pdev, DRV_NAME);
2885 if (err) {
2886 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
2887 pci_name(pdev));
2888 goto err_out;
2889 }
2890
2891 pci_set_master(pdev);
2892
2893 /* Find power-management capability. */
2894 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
2895 if (pm_cap == 0) {
2896 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
2897 "aborting.\n");
2898 err = -EIO;
2899 goto err_out_free_regions;
2900 }
2901
2902 if (sizeof(dma_addr_t) > sizeof(u32)) {
2903 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2904 if (!err)
2905 using_dac = 1;
2906 }
2907
2908 if (!using_dac) {
2909 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2910 if (err) {
2911 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
2912 pci_name(pdev));
2913 goto err_out_free_regions;
2914 }
2915 }
2916#ifdef __BIG_ENDIAN
2917 /* byte swap descriptors in hardware */
2918 {
2919 u32 reg;
2920
2921 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
2922 reg |= PCI_REV_DESC;
2923 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
2924 }
2925#endif
2926
2927 err = -ENOMEM;
2928 hw = kmalloc(sizeof(*hw), GFP_KERNEL);
2929 if (!hw) {
2930 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
2931 pci_name(pdev));
2932 goto err_out_free_regions;
2933 }
2934
2935 memset(hw, 0, sizeof(*hw));
2936 hw->pdev = pdev;
2937 spin_lock_init(&hw->phy_lock);
2938
2939 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
2940 if (!hw->regs) {
2941 printk(KERN_ERR PFX "%s: cannot map device registers\n",
2942 pci_name(pdev));
2943 goto err_out_free_hw;
2944 }
2945 hw->pm_cap = pm_cap;
2946
2947 err = sky2_reset(hw);
2948 if (err)
2949 goto err_out_iounmap;
2950
2951 printk(KERN_INFO PFX "v%s addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
2952 DRV_VERSION, pci_resource_start(pdev, 0), pdev->irq,
2953 yukon_name[hw->chip_id - CHIP_ID_YUKON],
2954 hw->chip_id, hw->chip_rev);
2955
2956 dev = sky2_init_netdev(hw, 0, using_dac);
2957 if (!dev)
2958 goto err_out_free_pci;
2959
2960 err = register_netdev(dev);
2961 if (err) {
2962 printk(KERN_ERR PFX "%s: cannot register net device\n",
2963 pci_name(pdev));
2964 goto err_out_free_netdev;
2965 }
2966
2967 sky2_show_addr(dev);
2968
2969 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
2970 if (register_netdev(dev1) == 0)
2971 sky2_show_addr(dev1);
2972 else {
2973 /* Failure to register second port need not be fatal */
2974 printk(KERN_WARNING PFX
2975 "register of second port failed\n");
2976 hw->dev[1] = NULL;
2977 free_netdev(dev1);
2978 }
2979 }
2980
2981 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
2982 if (err) {
2983 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
2984 pci_name(pdev), pdev->irq);
2985 goto err_out_unregister;
2986 }
2987
2988 hw->intr_mask = Y2_IS_BASE;
2989 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2990
2991 pci_set_drvdata(pdev, hw);
2992
2993 return 0;
2994
2995err_out_unregister:
2996 if (dev1) {
2997 unregister_netdev(dev1);
2998 free_netdev(dev1);
2999 }
3000 unregister_netdev(dev);
3001err_out_free_netdev:
3002 free_netdev(dev);
3003err_out_free_pci:
3004 sky2_write8(hw, B0_CTST, CS_RST_SET);
3005 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3006err_out_iounmap:
3007 iounmap(hw->regs);
3008err_out_free_hw:
3009 kfree(hw);
3010err_out_free_regions:
3011 pci_release_regions(pdev);
3012 pci_disable_device(pdev);
3013err_out:
3014 return err;
3015}
3016
3017static void __devexit sky2_remove(struct pci_dev *pdev)
3018{
3019 struct sky2_hw *hw = pci_get_drvdata(pdev);
3020 struct net_device *dev0, *dev1;
3021
3022 if (!hw)
3023 return;
3024
3025 dev0 = hw->dev[0];
3026 dev1 = hw->dev[1];
3027 if (dev1)
3028 unregister_netdev(dev1);
3029 unregister_netdev(dev0);
3030
3031 sky2_write32(hw, B0_IMSK, 0);
3032 sky2_set_power_state(hw, PCI_D3hot);
3033 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3034 sky2_write8(hw, B0_CTST, CS_RST_SET);
3035 sky2_read8(hw, B0_CTST);
3036
3037 free_irq(pdev->irq, hw);
3038 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3039 pci_release_regions(pdev);
3040 pci_disable_device(pdev);
3041
3042 if (dev1)
3043 free_netdev(dev1);
3044 free_netdev(dev0);
3045 iounmap(hw->regs);
3046 kfree(hw);
3047
3048 pci_set_drvdata(pdev, NULL);
3049}
3050
3051#ifdef CONFIG_PM
3052static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3053{
3054 struct sky2_hw *hw = pci_get_drvdata(pdev);
3055 int i;
3056
3057 for (i = 0; i < 2; i++) {
3058 struct net_device *dev = hw->dev[i];
3059
3060 if (dev) {
3061 if (!netif_running(dev))
3062 continue;
3063
3064 sky2_down(dev);
3065 netif_device_detach(dev);
3066 }
3067 }
3068
3069 return sky2_set_power_state(hw, pci_choose_state(pdev, state));
3070}
3071
3072static int sky2_resume(struct pci_dev *pdev)
3073{
3074 struct sky2_hw *hw = pci_get_drvdata(pdev);
3075 int i;
3076
3077 pci_restore_state(pdev);
3078 pci_enable_wake(pdev, PCI_D0, 0);
3079 sky2_set_power_state(hw, PCI_D0);
3080
3081 sky2_reset(hw);
3082
3083 for (i = 0; i < 2; i++) {
3084 struct net_device *dev = hw->dev[i];
3085 if (dev) {
3086 if (netif_running(dev)) {
3087 netif_device_attach(dev);
3088 sky2_up(dev);
3089 }
3090 }
3091 }
3092 return 0;
3093}
3094#endif
3095
3096static struct pci_driver sky2_driver = {
3097 .name = DRV_NAME,
3098 .id_table = sky2_id_table,
3099 .probe = sky2_probe,
3100 .remove = __devexit_p(sky2_remove),
3101#ifdef CONFIG_PM
3102 .suspend = sky2_suspend,
3103 .resume = sky2_resume,
3104#endif
3105};
3106
3107static int __init sky2_init_module(void)
3108{
3109 return pci_register_driver(&sky2_driver);
3110}
3111
3112static void __exit sky2_cleanup_module(void)
3113{
3114 pci_unregister_driver(&sky2_driver);
3115}
3116
3117module_init(sky2_init_module);
3118module_exit(sky2_cleanup_module);
3119
3120MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
3121MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
3122MODULE_LICENSE("GPL");
3123MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
new file mode 100644
index 000000000000..930680f50fca
--- /dev/null
+++ b/drivers/net/sky2.h
@@ -0,0 +1,1917 @@
1/*
2 * Definitions for the new Marvell Yukon 2 driver.
3 */
4#ifndef _SKY2_H
5#define _SKY2_H
6
7/* PCI config registers */
8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44
10#define PCI_DEV_STATUS 0x7c
11#define PCI_OS_PCI_X (1<<26)
12
13#define PEX_LNK_STAT 0xf2
14#define PEX_UNC_ERR_STAT 0x104
15#define PEX_DEV_CTRL 0xe8
16
17/* Yukon-2 */
18enum pci_dev_reg_1 {
19 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
20 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
21 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
22 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
23 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
24 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
25};
26
27enum pci_dev_reg_2 {
28 PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
29 PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
30 PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
31
32 PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
33 PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
34 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
35 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
36
37 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
38};
39
40
41#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
42 PCI_STATUS_SIG_SYSTEM_ERROR | \
43 PCI_STATUS_REC_MASTER_ABORT | \
44 PCI_STATUS_REC_TARGET_ABORT | \
45 PCI_STATUS_PARITY)
46
47enum pex_dev_ctrl {
48 PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */
49 PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */
50 PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */
51 PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */
52 PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */
53 PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */
54 PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */
55 PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */
56 PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */
57 PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */
58 PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */
59};
60#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
61
62/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */
63enum pex_err {
64 PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */
65
66 PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */
67
68 PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */
69
70 PEX_COMP_TO = 1<<14, /* Completion Timeout */
71 PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
72 PEX_POIS_TLP = 1<<12, /* Poisoned TLP */
73
74 PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
75 PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
76};
77
78
79enum csr_regs {
80 B0_RAP = 0x0000,
81 B0_CTST = 0x0004,
82 B0_Y2LED = 0x0005,
83 B0_POWER_CTRL = 0x0007,
84 B0_ISRC = 0x0008,
85 B0_IMSK = 0x000c,
86 B0_HWE_ISRC = 0x0010,
87 B0_HWE_IMSK = 0x0014,
88
89 /* Special ISR registers (Yukon-2 only) */
90 B0_Y2_SP_ISRC2 = 0x001c,
91 B0_Y2_SP_ISRC3 = 0x0020,
92 B0_Y2_SP_EISR = 0x0024,
93 B0_Y2_SP_LISR = 0x0028,
94 B0_Y2_SP_ICR = 0x002c,
95
96 B2_MAC_1 = 0x0100,
97 B2_MAC_2 = 0x0108,
98 B2_MAC_3 = 0x0110,
99 B2_CONN_TYP = 0x0118,
100 B2_PMD_TYP = 0x0119,
101 B2_MAC_CFG = 0x011a,
102 B2_CHIP_ID = 0x011b,
103 B2_E_0 = 0x011c,
104
105 B2_Y2_CLK_GATE = 0x011d,
106 B2_Y2_HW_RES = 0x011e,
107 B2_E_3 = 0x011f,
108 B2_Y2_CLK_CTRL = 0x0120,
109
110 B2_TI_INI = 0x0130,
111 B2_TI_VAL = 0x0134,
112 B2_TI_CTRL = 0x0138,
113 B2_TI_TEST = 0x0139,
114
115 B2_TST_CTRL1 = 0x0158,
116 B2_TST_CTRL2 = 0x0159,
117 B2_GP_IO = 0x015c,
118
119 B2_I2C_CTRL = 0x0160,
120 B2_I2C_DATA = 0x0164,
121 B2_I2C_IRQ = 0x0168,
122 B2_I2C_SW = 0x016c,
123
124 B3_RAM_ADDR = 0x0180,
125 B3_RAM_DATA_LO = 0x0184,
126 B3_RAM_DATA_HI = 0x0188,
127
128/* RAM Interface Registers */
129/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
130/*
131 * The HW spec calls these registers Timeout Value 0..11, but those names are
132 * not usable in SW. Note that these are NOT real timeouts; they are the number
133 * of qWords transferred continuously. (A worked example follows this enum.)
134 */
135#define RAM_BUFFER(port, reg) (reg | (port <<6))
136
137 B3_RI_WTO_R1 = 0x0190,
138 B3_RI_WTO_XA1 = 0x0191,
139 B3_RI_WTO_XS1 = 0x0192,
140 B3_RI_RTO_R1 = 0x0193,
141 B3_RI_RTO_XA1 = 0x0194,
142 B3_RI_RTO_XS1 = 0x0195,
143 B3_RI_WTO_R2 = 0x0196,
144 B3_RI_WTO_XA2 = 0x0197,
145 B3_RI_WTO_XS2 = 0x0198,
146 B3_RI_RTO_R2 = 0x0199,
147 B3_RI_RTO_XA2 = 0x019a,
148 B3_RI_RTO_XS2 = 0x019b,
149 B3_RI_TO_VAL = 0x019c,
150 B3_RI_CTRL = 0x01a0,
151 B3_RI_TEST = 0x01a2,
152 B3_MA_TOINI_RX1 = 0x01b0,
153 B3_MA_TOINI_RX2 = 0x01b1,
154 B3_MA_TOINI_TX1 = 0x01b2,
155 B3_MA_TOINI_TX2 = 0x01b3,
156 B3_MA_TOVAL_RX1 = 0x01b4,
157 B3_MA_TOVAL_RX2 = 0x01b5,
158 B3_MA_TOVAL_TX1 = 0x01b6,
159 B3_MA_TOVAL_TX2 = 0x01b7,
160 B3_MA_TO_CTRL = 0x01b8,
161 B3_MA_TO_TEST = 0x01ba,
162 B3_MA_RCINI_RX1 = 0x01c0,
163 B3_MA_RCINI_RX2 = 0x01c1,
164 B3_MA_RCINI_TX1 = 0x01c2,
165 B3_MA_RCINI_TX2 = 0x01c3,
166 B3_MA_RCVAL_RX1 = 0x01c4,
167 B3_MA_RCVAL_RX2 = 0x01c5,
168 B3_MA_RCVAL_TX1 = 0x01c6,
169 B3_MA_RCVAL_TX2 = 0x01c7,
170 B3_MA_RC_CTRL = 0x01c8,
171 B3_MA_RC_TEST = 0x01ca,
172 B3_PA_TOINI_RX1 = 0x01d0,
173 B3_PA_TOINI_RX2 = 0x01d4,
174 B3_PA_TOINI_TX1 = 0x01d8,
175 B3_PA_TOINI_TX2 = 0x01dc,
176 B3_PA_TOVAL_RX1 = 0x01e0,
177 B3_PA_TOVAL_RX2 = 0x01e4,
178 B3_PA_TOVAL_TX1 = 0x01e8,
179 B3_PA_TOVAL_TX2 = 0x01ec,
180 B3_PA_CTRL = 0x01f0,
181 B3_PA_TEST = 0x01f2,
182
183 Y2_CFG_SPC = 0x1c00,
184};
185
186/* B0_CTST 16 bit Control/Status register */
187enum {
188 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
189 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
190 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
191 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
192 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
193 Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
194 Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
195 Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
196
197 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
198 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
199 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
200 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
201 CS_MRST_CLR = 1<<3, /* Clear Master reset */
202 CS_MRST_SET = 1<<2, /* Set Master reset */
203 CS_RST_CLR = 1<<1, /* Clear Software reset */
204 CS_RST_SET = 1, /* Set Software reset */
205};
206
207/* B0_LED 8 Bit LED register */
208enum {
209/* Bit 7.. 2: reserved */
210 LED_STAT_ON = 1<<1, /* Status LED on */
211 LED_STAT_OFF = 1, /* Status LED off */
212};
213
214/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
215enum {
216 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
217 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
218 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
219 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
220 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
221 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
222 PC_VCC_ON = 1<<1, /* Switch VCC On */
223 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
224};
225
226/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
227
228/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
229/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
230/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
231/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
232enum {
233 Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
234 Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
235 Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
236
237 Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
238 Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
239 Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
240 Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
241
242 Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
243 Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
244 Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
245 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
246 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
247
248 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
249 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
250 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
251 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
252 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
253
254 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
255 Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
256 Y2_IS_IRQ_SW | Y2_IS_TIMINT,
257 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
258 Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
259 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
260 Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
261};
262
263/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
264enum {
265 IS_ERR_MSK = 0x00003fff,/* All Error bits */
266
267 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
268 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
269 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
270 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
271 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
272 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
273 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
274 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
275 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
276 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
277 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
278 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
279 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
280 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
281};
282
283/* Hardware error interrupt mask for Yukon 2 */
284enum {
285 Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
286 Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
287 Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
288 Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
289 Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
290 Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
291 /* Link 2 */
292 Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
293 Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
294 Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
295 Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
296 Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
297 Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
298 /* Link 1 */
299 Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
300 Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
301 Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
302 Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
303 Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
304 Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
305
306 Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
307 Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
308 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
309 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
310
311 Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
312 Y2_IS_PCI_EXP |
313 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
314};
315
316/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
317enum {
318 DPT_START = 1<<1,
319 DPT_STOP = 1<<0,
320};
321
322/* B2_TST_CTRL1 8 bit Test Control Register 1 */
323enum {
324 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
325 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
326 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
327 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
328 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
329 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
330 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
331 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
332};
333
334/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
335enum {
336 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
337 /* Bit 3.. 2: reserved */
338 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
339 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
340};
341
342/* B2_CHIP_ID 8 bit Chip Identification Number */
343enum {
344 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
345 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
346 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
347 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
348 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
349 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
350 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
351 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
352
353 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
354 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
355 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
356};
357
358/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
359enum {
360 Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
361 Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
362 Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
363 Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
364 Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
365 Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
366 Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
367 Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
368};
369
370/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
371enum {
372 CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
373 CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
374 CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
375};
376#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
377#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
378
379
380/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
381enum {
382 Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
383#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
384 Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
385 Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
386#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
387#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
388 Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
389 Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
390};
391
392/* B2_TI_CTRL 8 bit Timer control */
393/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
394enum {
395 TIM_START = 1<<2, /* Start Timer */
396 TIM_STOP = 1<<1, /* Stop Timer */
397 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
398};
399
400/* B2_TI_TEST 8 Bit Timer Test */
401/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
402/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
403enum {
404 TIM_T_ON = 1<<2, /* Test mode on */
405 TIM_T_OFF = 1<<1, /* Test mode off */
406 TIM_T_STEP = 1<<0, /* Test step */
407};
408
409/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
410 /* Bit 31..19: reserved */
411#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
412/* RAM Interface Registers */
413
414/* B3_RI_CTRL 16 bit RAM Interface Control Register */
415enum {
416 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
417 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
418
419 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
420 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
421};
422
423#define SK_RI_TO_53 36 /* RAM interface timeout */
424
425
426/* Port related registers FIFO, and Arbiter */
427#define SK_REG(port,reg) (((port)<<7)+(reg))
428
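Each port's register block is spaced 0x80 bytes apart, so as a small illustration (values taken from the FIFO enum further below):

	/* SK_REG(0, RX_GMF_CTRL_T) = 0x0c48                  Rx GMAC FIFO ctrl/test, port 0
	 * SK_REG(1, RX_GMF_CTRL_T) = 0x0c48 + 0x80 = 0x0cc8  same register, port 1 */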
429/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
430/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
431/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
432/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
433/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
434
435#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
436
437/* TXA_CTRL 8 bit Tx Arbiter Control Register */
438enum {
439 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
440 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
441 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
442 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
443 TXA_START_RC = 1<<3, /* Start sync Rate Control */
444 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
445 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
446 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
447};
448
449/*
450 * Bank 4 - 5
451 */
452/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
453enum {
454 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
455 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
456 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
457 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
458 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
459 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
460 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
461};
462
463
464enum {
465 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
466 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
467 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
468 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
469 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
470 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
471 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
472 B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
473 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
474};
475
476/* Queue Register Offsets, use Q_ADDR() to access */
477enum {
478 B8_Q_REGS = 0x0400, /* base of Queue registers */
479 Q_D = 0x00, /* 8*32 bit Current Descriptor */
480 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
481 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
482 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
483 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
484 Q_BC = 0x30, /* 32 bit Current Byte Counter */
485 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
486 Q_F = 0x38, /* 32 bit Flag Register */
487 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
488 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
489 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
490 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
491 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
492 Q_T2 = 0x40, /* 32 bit Test Register 2 */
493 Q_T3 = 0x44, /* 32 bit Test Register 3 */
494
495/* Yukon-2 */
496 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
497 Q_WM = 0x40, /* 16 bit FIFO Watermark */
498 Q_AL = 0x42, /* 8 bit FIFO Alignment */
499 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
500 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
501 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
502 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
503 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
504 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
505 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
506 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
507};
508#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
509
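For instance, the Rx checksum toggle in sky2_set_rx_csum() above writes the Q_CSR of the port's receive queue; assuming rxqaddr[] maps port 0/1 to Q_R1/Q_R2 (defined below), the addresses work out to

	/* Q_ADDR(Q_R1, Q_CSR) = 0x0400 + 0x0000 + 0x34 = 0x0434   (port 0)
	 * Q_ADDR(Q_R2, Q_CSR) = 0x0400 + 0x0080 + 0x34 = 0x04b4   (port 1) */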
510
511/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
512enum {
513 Y2_B8_PREF_REGS = 0x0450,
514
515 PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
516 PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
517 PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
518 PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
519 PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
520 PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
521 PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
522 PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
523 PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
524 PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
525
526 PREF_UNIT_MASK_IDX = 0x0fff,
527};
528#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
529
530/* RAM Buffer Register Offsets */
531enum {
532
533 RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
534 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
535 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
536 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
537 RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
538 RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
539 RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
540 RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
541 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
542 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
543 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
544 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
545 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
546 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
547};
548
549/* Receive and Transmit Queues */
550enum {
551 Q_R1 = 0x0000, /* Receive Queue 1 */
552 Q_R2 = 0x0080, /* Receive Queue 2 */
553 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
554 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
555 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
556 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
557};
558
559/* Different PHY Types */
560enum {
561 PHY_ADDR_MARV = 0,
562};
563
564#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
565
566
567enum {
568 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
569 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
570 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
571 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
572
573 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
574
575/* Receive GMAC FIFO (YUKON and Yukon-2) */
576
577 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
578 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
579 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
580 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
581 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
582 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
583 RX_GMF_UP_THR = 0x0c58,/* 8 bit Rx Upper Pause Thr (Yukon-EC_U) */
584 RX_GMF_LP_THR = 0x0c5a,/* 8 bit Rx Lower Pause Thr (Yukon-EC_U) */
585 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
586 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
587
588 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
589
590 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
591
592 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
593};
594
595
596/* Q_BC 32 bit Current Byte Counter */
597
598/* BMU Control Status Registers */
599/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
600/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
601/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
602/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
603/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
604/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
605/* Q_CSR 32 bit BMU Control/Status Register */
606
607/* Rx BMU Control / Status Registers (Yukon-2) */
608enum {
609 BMU_IDLE = 1<<31, /* BMU Idle State */
610 BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
611 BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
612
613 BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
614 BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
615 BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
616 BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
617 BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
618 BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. error (Tx) */
619 BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
620 BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
621 BMU_START = 1<<8, /* Start Rx/Tx Queue */
622 BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
623 BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
624 BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
625 BMU_FIFO_RST = 1<<4, /* Reset FIFO */
626 BMU_OP_ON = 1<<3, /* BMU Operational On */
627 BMU_OP_OFF = 1<<2, /* BMU Operational Off */
628 BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
629 BMU_RST_SET = 1<<0, /* Set BMU Reset */
630
631 BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
632 BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
633 BMU_FIFO_ENA | BMU_OP_ON,
634
635 BMU_WM_DEFAULT = 0x600,
636};
637
638/* Tx BMU Control / Status Registers (Yukon-2) */
639 /* Bit 31: same as for Rx */
640enum {
641 BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
642 BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
643 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
644};
645
646/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
647/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
648enum {
649 PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
650 PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
651 PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
652 PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
653};
654
655/* RAM Buffer Register Offsets, use RB_ADDR(Offs, Queue) to access */
656/* RB_START 32 bit RAM Buffer Start Address */
657/* RB_END 32 bit RAM Buffer End Address */
658/* RB_WP 32 bit RAM Buffer Write Pointer */
659/* RB_RP 32 bit RAM Buffer Read Pointer */
660/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
661/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
662/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
663/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
664/* RB_PC 32 bit RAM Buffer Packet Counter */
665/* RB_LEV 32 bit RAM Buffer Level Register */
666
667#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
668/* RB_TST2 8 bit RAM Buffer Test Register 2 */
669/* RB_TST1 8 bit RAM Buffer Test Register 1 */
670
671/* RB_CTRL 8 bit RAM Buffer Control Register */
672enum {
673 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
674 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
675 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
676 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
677 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
678 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
679};
680
681
682/* Transmit GMAC FIFO (YUKON only) */
683enum {
684 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
685 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
686 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
687
688 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
689 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
690 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
691
692 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
693 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
694 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
695};
696
697/* Descriptor Poll Timer Registers */
698enum {
699 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
700 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
701 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
702
703 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
704};
705
706/* Time Stamp Timer Registers (YUKON only) */
707enum {
708 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
709 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
710 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
711};
712
713/* Polling Unit Registers (Yukon-2 only) */
714enum {
715 POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
716 POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
717
718 POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
719 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
720};
721
722/* ASF Subsystem Registers (Yukon-2 only) */
723enum {
724 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
725 B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
726 B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
727
728 B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
729 B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
730 B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
731 B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
732 B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
733 B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
734};
735
736/* Status BMU Registers (Yukon-2 only)*/
737enum {
738 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
739 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
740
741 STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
742 STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
743 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
744 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
745 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
746 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
747 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
748 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
749
750/* FIFO Control/Status Registers (Yukon-2 only)*/
751 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
752 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
753 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
754 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
755 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
756 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
757 STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
758
759/* Level and ISR Timer Registers (Yukon-2 only)*/
760 STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
761 STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
762 STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
763 STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
764 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
765 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
766 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
767 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
768 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
769 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
770 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
771 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
772};
773
774enum {
775 LINKLED_OFF = 0x01,
776 LINKLED_ON = 0x02,
777 LINKLED_LINKSYNC_OFF = 0x04,
778 LINKLED_LINKSYNC_ON = 0x08,
779 LINKLED_BLINK_OFF = 0x10,
780 LINKLED_BLINK_ON = 0x20,
781};
782
783/* GMAC and GPHY Control Registers (YUKON only) */
784enum {
785 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
786 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
787 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
788 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
789 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
790
791/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
792
793 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
794
795 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
796 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
797 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
798 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
799 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
800 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
801 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
802
803/* WOL Pattern Length Registers (YUKON only) */
804
805 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
806 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
807
808/* WOL Pattern Counter Registers (YUKON only) */
809
810
811 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
812 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
813};
814
815enum {
816 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
817 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
818};
819
820enum {
821 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
822 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
823};
824
825/*
826 * Marvell PHY Registers, indirectly addressed over GMAC
827 */
828enum {
829 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
830 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
831 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
832 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
833 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
834 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
835 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
836 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
837 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
838 /* Marvell-specific registers */
839 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
840 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
841 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
842 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
843 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
844 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
845 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
846 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
847 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
848 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
849 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
850 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
851 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
852 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
853 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
854 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
855 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
856 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
857
858/* for 10/100 Fast Ethernet PHY (88E3082 only) */
859 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
860 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
861 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
862 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
863 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
864};
865
866enum {
867 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
868 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
869 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
870 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
871 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
872 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
873 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
874 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
875 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
876 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
877};
878
879enum {
880 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
881 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
882 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
883};
884
885enum {
886 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
887
888 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
889 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
890 PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
891 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
892 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
893 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
894 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
895};
896
897enum {
898 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
899 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
900 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
901};
902
903/* different Marvell PHY Ids */
904enum {
905 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
906
907 PHY_BCOM_ID1_A1 = 0x6041,
908 PHY_BCOM_ID1_B2 = 0x6043,
909 PHY_BCOM_ID1_C0 = 0x6044,
910 PHY_BCOM_ID1_C5 = 0x6047,
911
912 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
913 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
914 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
915 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
916};
917
918/* Advertisement register bits */
919enum {
920 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
921 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
922 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
923
924 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
925 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
926 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
927 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
928 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
929 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
930 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
931 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
932 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
933 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
934 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
935 PHY_AN_100HALF | PHY_AN_100FULL,
936};
937
938/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
939/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
940enum {
941 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
942 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
943 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
944 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
945 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
946 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
947 /* Bit 9..8: reserved */
948 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
949};
950
951/** Marvell-Specific */
952enum {
953 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
954 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
955 PHY_M_AN_RF = 1<<13, /* Remote Fault */
956
957 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
958 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
959 PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
960 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
961 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
962 PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-T Full Duplex */
963 PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-T Half Duplex */
964 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
965};
966
967/* special defines for FIBER (88E1011S only) */
968enum {
969 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
970 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
971 PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
972 PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
973};
974
975/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
976enum {
977 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
978 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
979 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
980 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
981};
982
983/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
984enum {
985 PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
986 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
987 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
988 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
989 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
990 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
991};
992
993/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
994enum {
995 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
996 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
997 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
998 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
999 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1000 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1001 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1002 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1003 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1004 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1005 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1006 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1007};
1008
1009enum {
1010 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1011 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1012};
1013
1014#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1015
1016enum {
1017 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1018 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1019 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1020};
1021
1022/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1023enum {
1024 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1025 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1026 PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1027 PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1028 PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
1029
1030 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1031 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1032
1033 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1034 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1035};
1036
1037/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1038enum {
1039 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1040 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1041 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1042 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1043 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1044 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1045 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1046 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1047 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1048 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1049 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1050 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1051 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1052 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1053 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1054 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1055};
1056
1057#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1058
1059/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1060enum {
1061 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1062 PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1063};
1064
1065enum {
1066 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1067 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1068 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1069 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1070 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1071 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1072 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1073 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1074 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1075 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1076 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1077 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1078
1079 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1080 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1081 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1082
1083 PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
1084 | PHY_M_IS_FIFO_ERROR,
1085 PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1086};
1087
1088
1089/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1090enum {
1091 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1092 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1093
1094 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1095 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1096 /* (88E1011 only) */
1097 PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1098 /* (88E1011 only) */
1099 PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1100 /* (88E1111 only) */
1101 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1102 /* !!! Errata in spec. (1 = disable) */
1103 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1104 PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1105 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1106 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1107 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1108 PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
};
1109
1110#define PHY_M_EC_M_DSC(x) ((x)<<10 & PHY_M_EC_M_DSC_MSK)
1111 /* 00=1x; 01=2x; 10=3x; 11=4x */
1112#define PHY_M_EC_S_DSC(x) ((x)<<8 & PHY_M_EC_S_DSC_MSK)
1113 /* 00=dis; 01=1x; 10=2x; 11=3x */
1114#define PHY_M_EC_DSC_2(x) ((x)<<9 & PHY_M_EC_M_DSC_MSK2)
1115 /* 000=1x; 001=2x; 010=3x; 011=4x */
1116#define PHY_M_EC_MAC_S(x) ((x)<<4 & PHY_M_EC_MAC_S_MSK)
1117 /* 01X=0; 110=2.5; 111=25 (MHz) */
1118
1119/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1120enum {
1121 PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
1122 PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
1123 PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
1124};
1125/* !!! Errata in spec. (1 = disable) */
1126
1127#define PHY_M_PC_DSC(x) (((x)<<12) & PHY_M_PC_DSC_MSK)
1128 /* 100=5x; 101=6x; 110=7x; 111=8x */
1129enum {
1130 MAC_TX_CLK_0_MHZ = 2,
1131 MAC_TX_CLK_2_5_MHZ = 6,
1132 MAC_TX_CLK_25_MHZ = 7,
1133};
1134
1135/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1136enum {
1137 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1138 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1139 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1140 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1141 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1142 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1143 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1144 /* (88E1111 only) */
1145};
1146
1147enum {
1148 PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1149 /* (88E1011 only) */
1150 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1151 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1152 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1153 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1154 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1155};
1156
1157#define PHY_M_LED_PULS_DUR(x) (((x)<<12) & PHY_M_LEDC_PULS_MSK)
1158
1159 /***** PHY_MARV_PHY_STAT (page 3) 16 bit r/w Polarity Control Reg. *****/
1160enum {
1161 PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
1162 PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
1163 PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
1164 PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
1165 PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
1166 PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
1167};
1168
1169#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
1170#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
1171#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
1172#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
1173#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
1174#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
1175
1176enum {
1177 PULS_NO_STR = 0,/* no pulse stretching */
1178 PULS_21MS = 1,/* 21 ms to 42 ms */
1179 PULS_42MS = 2,/* 42 ms to 84 ms */
1180 PULS_84MS = 3,/* 84 ms to 170 ms */
1181 PULS_170MS = 4,/* 170 ms to 340 ms */
1182 PULS_340MS = 5,/* 340 ms to 670 ms */
1183 PULS_670MS = 6,/* 670 ms to 1.3 s */
1184 PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1185};
1186
1187#define PHY_M_LED_BLINK_RT(x) (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
1188
1189enum {
1190 BLINK_42MS = 0,/* 42 ms */
1191 BLINK_84MS = 1,/* 84 ms */
1192 BLINK_170MS = 2,/* 170 ms */
1193 BLINK_340MS = 3,/* 340 ms */
1194 BLINK_670MS = 4,/* 670 ms */
1195};
1196
1197/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1198#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1199 /* Bit 13..12: reserved */
1200#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1201#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1202#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1203#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1204#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1205#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1206
1207enum {
1208 MO_LED_NORM = 0,
1209 MO_LED_BLINK = 1,
1210 MO_LED_OFF = 2,
1211 MO_LED_ON = 3,
1212};
1213
1214/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1215enum {
1216 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1217 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1218 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1219 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1220 PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1221};
1222
1223/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1224enum {
1225 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1226 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1227 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1228 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1229 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1230 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1231 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1232 /* (88E1111 only) */
1233
1234 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1235 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1236 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1237};
1238
1239/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1240/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1241 /* Bit 15..12: reserved (used internally) */
1242enum {
1243 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1244 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1245 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1246};
1247
1248#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
1249#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
1250#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
1251
1252enum {
1253 LED_PAR_CTRL_COLX = 0x00,
1254 LED_PAR_CTRL_ERROR = 0x01,
1255 LED_PAR_CTRL_DUPLEX = 0x02,
1256 LED_PAR_CTRL_DP_COL = 0x03,
1257 LED_PAR_CTRL_SPEED = 0x04,
1258 LED_PAR_CTRL_LINK = 0x05,
1259 LED_PAR_CTRL_TX = 0x06,
1260 LED_PAR_CTRL_RX = 0x07,
1261 LED_PAR_CTRL_ACT = 0x08,
1262 LED_PAR_CTRL_LNK_RX = 0x09,
1263 LED_PAR_CTRL_LNK_AC = 0x0a,
1264 LED_PAR_CTRL_ACT_BL = 0x0b,
1265 LED_PAR_CTRL_TX_BL = 0x0c,
1266 LED_PAR_CTRL_RX_BL = 0x0d,
1267 LED_PAR_CTRL_COL_BL = 0x0e,
1268 LED_PAR_CTRL_INACT = 0x0f
1269};
1270
1271 /***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1272enum {
1273 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1274 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1275 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1276};
1277
1278/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1279/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1280enum {
1281 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1282 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1283 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1284 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1285};
1286#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
1287
1288/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1289enum {
1290 PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1291 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1292 PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1293 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1294};
1295
1296#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1297#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1298#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1299#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1300
1301/* GMAC registers */
1302/* Port Registers */
1303enum {
1304 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1305 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1306 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1307 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1308 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1309 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1310 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1311/* Source Address Registers */
1312 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1313 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1314 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1315 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1316 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1317 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1318
1319/* Multicast Address Hash Registers */
1320 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1321 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1322 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1323 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1324
1325/* Interrupt Source Registers */
1326 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1327 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1328 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1329
1330/* Interrupt Mask Registers */
1331 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1332 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1333 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1334
1335/* Serial Management Interface (SMI) Registers */
1336 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1337 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1338 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1339};
1340
1341/* MIB Counters */
1342#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1343#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1344
1345/*
1346 * MIB Counters base address definitions (low word) -
1347 * use offset 4 for access to high word (32 bit r/o)
1348 */
1349enum {
1350 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1351 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1352 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1353 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1354 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1355 /* GM_MIB_CNT_BASE + 40: reserved */
1356 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1357 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1358 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1359 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1360 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1361 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1362 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1363 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
1364 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
1365 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
1366 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
1367 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
1368 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
1369 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
1370 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
1371 /* GM_MIB_CNT_BASE + 168: reserved */
1372 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
1373 /* GM_MIB_CNT_BASE + 184: reserved */
1374 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
1375 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
1376 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
1377 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
1378 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
1379 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
1380 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
1381 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
1382 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
1383 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
1384 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
1385 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
1386 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
1387
1388 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
1389 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
1390 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
1391 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
1392 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
1393 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1394};
1395
1396/* GMAC Bit Definitions */
1397/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1398enum {
1399 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1400 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1401 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1402 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1403 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1404 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1405 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1406 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1407
1408 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1409 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1410 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1411 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1412 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1413};
1414
1415/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1416enum {
1417 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1418 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1419 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1420 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1421 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1422 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1423 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1424 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1425 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1426 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1427 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1428 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1429 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1430 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1431 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1432};
1433
1434#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1435#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
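As a sketch (not necessarily how sky2.c programs the MAC), a forced 1000 Mbps full-duplex setting with the auto-update functions disabled could be composed from the bits above and written through the gma_write16() helper defined near the end of this file:

	/* sketch: force 1000FD and disable speed/duplex/flow-control auto-update */
	u16 gpcr = GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL | GM_GPCR_AU_ALL_DIS;
	gma_write16(hw, port, GM_GP_CTRL, gpcr);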
1436
1437/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1438enum {
1439 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1440 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1441 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1442 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
1443};
1444
1445#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1446#define TX_COL_DEF 0x04
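For example (illustrative only), the default collision threshold is shifted into its bit field by the helper above:

	/* sketch: (0x04 << 10) masked to the collision-threshold field */
	u16 txcr = TX_COL_THR(TX_COL_DEF);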
1447
1448/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1449enum {
1450 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1451 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1452 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1453 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1454};
1455
1456/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1457enum {
1458 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1459 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1460 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1461 GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
1462
1463 TX_JAM_LEN_DEF = 0x03,
1464 TX_JAM_IPG_DEF = 0x0b,
1465 TX_IPG_JAM_DEF = 0x1c,
1466 TX_BOF_LIM_DEF = 0x04,
1467};
1468
1469#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1470#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1471#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
1472#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
1473
1474
1475/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1476enum {
1477 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1478 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
1479 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
1480 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
1481 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1482};
1483
1484#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1485#define DATA_BLIND_DEF 0x04
1486
1487 #define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
1488#define IPG_DATA_DEF 0x1e
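A sketch of composing a GM_SERIAL_MODE value from the fields above (the particular bit set chosen here is illustrative, not prescriptive):

	/* sketch: default data blinder and IPG, VLAN-sized frames allowed */
	u16 smod = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA |
		   IPG_DATA_VAL(IPG_DATA_DEF);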
1489
1490/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1491enum {
1492 GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
1493 GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
1494 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1495 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1496 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1497};
1498
1499#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
1500#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
1501
1502/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1503enum {
1504 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1505 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1506};
1507
1508/* Receive Frame Status Encoding */
1509enum {
1510 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1511 GMR_FS_VLAN = 1<<13, /* VLAN Packet */
1512 GMR_FS_JABBER = 1<<12, /* Jabber Packet */
1513 GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
1514 GMR_FS_MC = 1<<10, /* Multicast Packet */
1515 GMR_FS_BC = 1<<9, /* Broadcast Packet */
1516 GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
1517 GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
1518 GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
1519 GMR_FS_MII_ERR = 1<<5, /* MII Error */
1520 GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
1521 GMR_FS_FRAGMENT = 1<<3, /* Fragment */
1522
1523 GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
1524 GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
1525
1526 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1527 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1528 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1529 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1530};
1531
1532/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1533enum {
1534 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1535 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1536 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1537 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1538
1539 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1540 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1541 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1542
1543 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1544 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1545 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1546 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1547 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1548 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1549 GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
1550
1551 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1552 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1553 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1554 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1555
1556 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1557
1558 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1559};
1560
1561
1562/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1563enum {
1564 TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
1565 TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
1566
1567 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1568 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1569
1570 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1571 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1572 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
1573
1574 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1575 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1576 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1577};
1578
1579/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1580enum {
1581 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1582 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1583 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1584};
1585
1586/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
1587enum {
1588 Y2_ASF_OS_PRES = 1<<4, /* ASF operating system present */
1589 Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
1590 Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
1591 Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
1592 Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
1593
1594 Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
1595 Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
1596};
1597
1598/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
1599enum {
1600 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1601 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1602};
1603
1604/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1605enum {
1606 SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
1607 SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
1608 SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
1609 SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
1610 SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
1611};
1612
1613/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1614enum {
1615 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1616 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1617 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
1618 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
1619 GMC_PAUSE_ON = 1<<3, /* Pause On */
1620 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
1621 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
1622 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
1623};
1624
1625/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1626enum {
1627 GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
1628 GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
1629 GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
1630 GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
1631 GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
1632 GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
1633 GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
1634 GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
1635 GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
1636 GPC_ANEG_0 = 1<<19, /* ANEG[0] */
1637 GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
1638 GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
1639 GPC_ANEG_3 = 1<<16, /* ANEG[3] */
1640 GPC_ANEG_2 = 1<<15, /* ANEG[2] */
1641 GPC_ANEG_1 = 1<<14, /* ANEG[1] */
1642 GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
1643 GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
1644 GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
1645 GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
1646 GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
1647 GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
1648 /* Bits 7..2: reserved */
1649 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
1650 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
1651};
1652
1653/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1654/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1655enum {
1656 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
1657 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
1658 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
1659 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
1660 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1661 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1662
1663#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1664
1665/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1666 /* Bits 15.. 2: reserved */
1667 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1668 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1669
1670
1671/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1672 WOL_CTL_LINK_CHG_OCC = 1<<15,
1673 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1674 WOL_CTL_PATTERN_OCC = 1<<13,
1675 WOL_CTL_CLEAR_RESULT = 1<<12,
1676 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
1677 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
1678 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
1679 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
1680 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
1681 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
1682 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
1683 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
1684 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
1685 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
1686 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
1687 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1688};
1689
1690#define WOL_CTL_DEFAULT \
1691 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1692 WOL_CTL_DIS_PME_ON_PATTERN | \
1693 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1694 WOL_CTL_DIS_LINK_CHG_UNIT | \
1695 WOL_CTL_DIS_PATTERN_UNIT | \
1696 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1697
1698/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1699#define WOL_CTL_PATT_ENA(x) (1 << (x))
1700
1701
1702/* Control flags */
1703enum {
1704 UDPTCP = 1<<0,
1705 CALSUM = 1<<1,
1706 WR_SUM = 1<<2,
1707 INIT_SUM= 1<<3,
1708 LOCK_SUM= 1<<4,
1709 INS_VLAN= 1<<5,
1710 FRC_STAT= 1<<6,
1711 EOP = 1<<7,
1712};
1713
1714enum {
1715 HW_OWNER = 1<<7,
1716 OP_TCPWRITE = 0x11,
1717 OP_TCPSTART = 0x12,
1718 OP_TCPINIT = 0x14,
1719 OP_TCPLCK = 0x18,
1720 OP_TCPCHKSUM = OP_TCPSTART,
1721 OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
1722 OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
1723 OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
1724 OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
1725
1726 OP_ADDR64 = 0x21,
1727 OP_VLAN = 0x22,
1728 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
1729 OP_LRGLEN = 0x24,
1730 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
1731 OP_BUFFER = 0x40,
1732 OP_PACKET = 0x41,
1733 OP_LARGESEND = 0x43,
1734
1735/* YUKON-2 STATUS opcodes defines */
1736 OP_RXSTAT = 0x60,
1737 OP_RXTIMESTAMP = 0x61,
1738 OP_RXVLAN = 0x62,
1739 OP_RXCHKS = 0x64,
1740 OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
1741 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
1742 OP_RSS_HASH = 0x65,
1743 OP_TXINDEXLE = 0x68,
1744};
1745
1746/* Yukon 2 hardware interface
1747 * Not tested on big endian
1748 */
1749struct sky2_tx_le {
1750 union {
1751 __le32 addr;
1752 struct {
1753 __le16 offset;
1754 __le16 start;
1755 } csum __attribute((packed));
1756 struct {
1757 __le16 size;
1758 __le16 rsvd;
1759 } tso __attribute((packed));
1760 } tx;
1761 __le16 length; /* also vlan tag or checksum start */
1762 u8 ctrl;
1763 u8 opcode;
1764} __attribute((packed));
1765
1766struct sky2_rx_le {
1767 __le32 addr;
1768 __le16 length;
1769 u8 ctrl;
1770 u8 opcode;
1771} __attribute((packed));
1772
1773struct sky2_status_le {
1774 __le32 status; /* also checksum */
1775 __le16 length; /* also vlan tag */
1776 u8 link;
1777 u8 opcode;
1778} __attribute((packed));
1779
1780struct ring_info {
1781 struct sk_buff *skb;
1782 dma_addr_t mapaddr;
1783 u16 maplen;
1784 u16 idx;
1785};
1786
1787struct sky2_port {
1788 struct sky2_hw *hw;
1789 struct net_device *netdev;
1790 unsigned port;
1791 u32 msg_enable;
1792
1793 struct ring_info *tx_ring;
1794 struct sky2_tx_le *tx_le;
1795 spinlock_t tx_lock;
1796 u32 tx_addr64;
1797 u16 tx_cons; /* next le to check */
1798 u16 tx_prod; /* next le to use */
1799 u16 tx_pending;
1800 u16 tx_last_put;
1801 u16 tx_last_mss;
1802
1803 struct ring_info *rx_ring;
1804 struct sky2_rx_le *rx_le;
1805 u32 rx_addr64;
1806 u16 rx_next; /* next re to check */
1807 u16 rx_put; /* next le index to use */
1808 u16 rx_pending;
1809 u16 rx_last_put;
1810#ifdef SKY2_VLAN_TAG_USED
1811 u16 rx_tag;
1812 struct vlan_group *vlgrp;
1813#endif
1814
1815 dma_addr_t rx_le_map;
1816 dma_addr_t tx_le_map;
1817 u32 advertising; /* ADVERTISED_ bits */
1818 u16 speed; /* SPEED_1000, SPEED_100, ... */
1819 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1820 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1821 u8 rx_pause;
1822 u8 tx_pause;
1823 u8 rx_csum;
1824 u8 wol;
1825
1826 struct tasklet_struct phy_task;
1827 struct net_device_stats net_stats;
1828};
1829
1830struct sky2_hw {
1831 void __iomem *regs;
1832 struct pci_dev *pdev;
1833 u32 intr_mask;
1834 struct net_device *dev[2];
1835
1836 int pm_cap;
1837 u8 chip_id;
1838 u8 chip_rev;
1839 u8 copper;
1840 u8 ports;
1841
1842 struct sky2_status_le *st_le;
1843 u32 st_idx;
1844 dma_addr_t st_dma;
1845
1846 spinlock_t phy_lock;
1847};
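A minimal sketch of how a status list element might be examined, using the HW_OWNER bit and the Yukon-2 status opcodes defined earlier (an illustration, not the driver's actual status loop):

	/* sketch: peek at the next status LE and dispatch on its opcode */
	struct sky2_status_le *le = hw->st_le + hw->st_idx;

	if (le->opcode & HW_OWNER) {		/* entry filled in by hardware */
		switch (le->opcode & ~HW_OWNER) {
		case OP_RXSTAT:		/* receive status for a frame */
			break;
		case OP_TXINDEXLE:	/* transmit completion indexes */
			break;
		}
	}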
1848
1849/* Register accessor for memory mapped device */
1850static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
1851{
1852 return readl(hw->regs + reg);
1853}
1854
1855static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
1856{
1857 return readw(hw->regs + reg);
1858}
1859
1860static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
1861{
1862 return readb(hw->regs + reg);
1863}
1864
1865/* This should probably go away, bus-based tweaks suck */
1866static inline int is_pciex(const struct sky2_hw *hw)
1867{
1868 u32 status;
1869 pci_read_config_dword(hw->pdev, PCI_DEV_STATUS, &status);
1870 return (status & PCI_OS_PCI_X) == 0;
1871}
1872
1873static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
1874{
1875 writel(val, hw->regs + reg);
1876}
1877
1878static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
1879{
1880 writew(val, hw->regs + reg);
1881}
1882
1883static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
1884{
1885 writeb(val, hw->regs + reg);
1886}
1887
1888/* Yukon PHY related registers */
1889#define SK_GMAC_REG(port,reg) \
1890 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
1891#define GM_PHY_RETRIES 100
1892
1893static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
1894{
1895 return sky2_read16(hw, SK_GMAC_REG(port,reg));
1896}
1897
1898static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
1899{
1900 unsigned base = SK_GMAC_REG(port, reg);
1901 return (u32) sky2_read16(hw, base)
1902 | (u32) sky2_read16(hw, base+4) << 16;
1903}
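For instance (a usage sketch), a 32-bit MIB counter such as the good unicast receive count can be fetched through this helper:

	u32 rx_unicast = gma_read32(hw, port, GM_RXF_UC_OK);	/* sketch */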
1904
1905static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
1906{
1907 sky2_write16(hw, SK_GMAC_REG(port,r), v);
1908}
1909
1910static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
1911 const u8 *addr)
1912{
1913 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
1914 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
1915 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
1916}
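Usage sketch (assuming addr points at a six-byte station address): program the first source-address register set of a port:

	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);	/* sketch */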
1917#endif
diff --git a/drivers/net/wan/lmc/lmc_prot.h b/drivers/net/wan/lmc/lmc_prot.h
deleted file mode 100644
index f3b1df9e2cdb..000000000000
--- a/drivers/net/wan/lmc/lmc_prot.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_
3
4void lmc_proto_init(lmc_softc_t * const)
5void lmc_proto_attach(lmc_softc_t *sc const)
6void lmc_proto_detach(lmc_softc *sc const)
7void lmc_proto_reopen(lmc_softc_t *sc const)
8int lmc_proto_ioctl(lmc_softc_t *sc const, struct ifreq *ifr, int cmd)
9void lmc_proto_open(lmc_softc_t *sc const)
10void lmc_proto_close(lmc_softc_t *sc const)
11unsigned short lmc_proto_type(lmc_softc_t *sc const, struct skbuff *skb)
12
13
14#endif
15
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 5e53c5258a33..e4729ddf29fd 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -5,9 +5,9 @@
5 Copyright 2000-2001 ATMEL Corporation. 5 Copyright 2000-2001 ATMEL Corporation.
6 Copyright 2003-2004 Simon Kelley. 6 Copyright 2003-2004 Simon Kelley.
7 7
8 This code was developed from version 2.1.1 of the Atmel drivers, 8 This code was developed from version 2.1.1 of the Atmel drivers,
9 released by Atmel corp. under the GPL in December 2002. It also 9 released by Atmel corp. under the GPL in December 2002. It also
10 includes code from the Linux aironet drivers (C) Benjamin Reed, 10 includes code from the Linux aironet drivers (C) Benjamin Reed,
11 and the Linux PCMCIA package, (C) David Hinds and the Linux wireless 11 and the Linux PCMCIA package, (C) David Hinds and the Linux wireless
12 extensions, (C) Jean Tourrilhes. 12 extensions, (C) Jean Tourrilhes.
13 13
@@ -31,7 +31,7 @@
31 along with Atmel wireless lan drivers; if not, write to the Free Software 31 along with Atmel wireless lan drivers; if not, write to the Free Software
32 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 32 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 33
34 For all queries about this code, please contact the current author, 34 For all queries about this code, please contact the current author,
35 Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. 35 Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
36 36
37 Credit is due to HP UK and Cambridge Online Systems Ltd for supplying 37 Credit is due to HP UK and Cambridge Online Systems Ltd for supplying
@@ -79,13 +79,13 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards"); 80MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards");
81 81
82/* The name of the firmware file to be loaded 82/* The name of the firmware file to be loaded
83 over-rides any automatic selection */ 83 over-rides any automatic selection */
84static char *firmware = NULL; 84static char *firmware = NULL;
85module_param(firmware, charp, 0); 85module_param(firmware, charp, 0);
86 86
87/* table of firmware file names */ 87/* table of firmware file names */
88static struct { 88static struct {
89 AtmelFWType fw_type; 89 AtmelFWType fw_type;
90 const char *fw_file; 90 const char *fw_file;
91 const char *fw_file_ext; 91 const char *fw_file_ext;
@@ -104,17 +104,17 @@ static struct {
104#define MAX_SSID_LENGTH 32 104#define MAX_SSID_LENGTH 32
105#define MGMT_JIFFIES (256 * HZ / 100) 105#define MGMT_JIFFIES (256 * HZ / 100)
106 106
107#define MAX_BSS_ENTRIES 64 107#define MAX_BSS_ENTRIES 64
108 108
109/* registers */ 109/* registers */
110#define GCR 0x00 // (SIR0) General Configuration Register 110#define GCR 0x00 // (SIR0) General Configuration Register
111#define BSR 0x02 // (SIR1) Bank Switching Select Register 111#define BSR 0x02 // (SIR1) Bank Switching Select Register
112#define AR 0x04 112#define AR 0x04
113#define DR 0x08 113#define DR 0x08
114#define MR1 0x12 // Mirror Register 1 114#define MR1 0x12 // Mirror Register 1
115#define MR2 0x14 // Mirror Register 2 115#define MR2 0x14 // Mirror Register 2
116#define MR3 0x16 // Mirror Register 3 116#define MR3 0x16 // Mirror Register 3
117#define MR4 0x18 // Mirror Register 4 117#define MR4 0x18 // Mirror Register 4
118 118
119#define GPR1 0x0c 119#define GPR1 0x0c
120#define GPR2 0x0e 120#define GPR2 0x0e
@@ -123,9 +123,9 @@ static struct {
123// Constants for the GCR register. 123// Constants for the GCR register.
124// 124//
125#define GCR_REMAP 0x0400 // Remap internal SRAM to 0 125#define GCR_REMAP 0x0400 // Remap internal SRAM to 0
126#define GCR_SWRES 0x0080 // BIU reset (ARM and PAI are NOT reset) 126#define GCR_SWRES 0x0080 // BIU reset (ARM and PAI are NOT reset)
127#define GCR_CORES 0x0060 // Core Reset (ARM and PAI are reset) 127#define GCR_CORES 0x0060 // Core Reset (ARM and PAI are reset)
128#define GCR_ENINT 0x0002 // Enable Interrupts 128#define GCR_ENINT 0x0002 // Enable Interrupts
129#define GCR_ACKINT 0x0008 // Acknowledge Interrupts 129#define GCR_ACKINT 0x0008 // Acknowledge Interrupts
130 130
131#define BSS_SRAM 0x0200 // AMBA module selection --> SRAM 131#define BSS_SRAM 0x0200 // AMBA module selection --> SRAM
@@ -190,7 +190,7 @@ struct rx_desc {
190 u32 Next; 190 u32 Next;
191 u16 MsduPos; 191 u16 MsduPos;
192 u16 MsduSize; 192 u16 MsduSize;
193 193
194 u8 State; 194 u8 State;
195 u8 Status; 195 u8 Status;
196 u8 Rate; 196 u8 Rate;
@@ -199,7 +199,6 @@ struct rx_desc {
199 u8 PreambleType; 199 u8 PreambleType;
200 u16 Duration; 200 u16 Duration;
201 u32 RxTime; 201 u32 RxTime;
202
203}; 202};
204 203
205#define RX_DESC_FLAG_VALID 0x80 204#define RX_DESC_FLAG_VALID 0x80
@@ -218,16 +217,15 @@ struct rx_desc {
218#define RX_DESC_DURATION_OFFSET 14 217#define RX_DESC_DURATION_OFFSET 14
219#define RX_DESC_RX_TIME_OFFSET 16 218#define RX_DESC_RX_TIME_OFFSET 16
220 219
221
222struct tx_desc { 220struct tx_desc {
223 u32 NextDescriptor; 221 u32 NextDescriptor;
224 u16 TxStartOfFrame; 222 u16 TxStartOfFrame;
225 u16 TxLength; 223 u16 TxLength;
226 224
227 u8 TxState; 225 u8 TxState;
228 u8 TxStatus; 226 u8 TxStatus;
229 u8 RetryCount; 227 u8 RetryCount;
230 228
231 u8 TxRate; 229 u8 TxRate;
232 230
233 u8 KeyIndex; 231 u8 KeyIndex;
@@ -238,10 +236,8 @@ struct tx_desc {
238 u8 Reserved; 236 u8 Reserved;
239 u8 PacketType; 237 u8 PacketType;
240 u16 HostTxLength; 238 u16 HostTxLength;
241
242}; 239};
243 240
244
245#define TX_DESC_NEXT_OFFSET 0 241#define TX_DESC_NEXT_OFFSET 0
246#define TX_DESC_POS_OFFSET 4 242#define TX_DESC_POS_OFFSET 4
247#define TX_DESC_SIZE_OFFSET 6 243#define TX_DESC_SIZE_OFFSET 6
@@ -255,8 +251,6 @@ struct tx_desc {
255#define TX_DESC_PACKET_TYPE_OFFSET 17 251#define TX_DESC_PACKET_TYPE_OFFSET 17
256#define TX_DESC_HOST_LENGTH_OFFSET 18 252#define TX_DESC_HOST_LENGTH_OFFSET 18
257 253
258
259
260/////////////////////////////////////////////////////// 254///////////////////////////////////////////////////////
261// Host-MAC interface 255// Host-MAC interface
262/////////////////////////////////////////////////////// 256///////////////////////////////////////////////////////
@@ -266,7 +260,6 @@ struct tx_desc {
266#define TX_FIRM_OWN 0x80 260#define TX_FIRM_OWN 0x80
267#define TX_DONE 0x40 261#define TX_DONE 0x40
268 262
269
270#define TX_ERROR 0x01 263#define TX_ERROR 0x01
271 264
272#define TX_PACKET_TYPE_DATA 0x01 265#define TX_PACKET_TYPE_DATA 0x01
@@ -280,8 +273,7 @@ struct tx_desc {
280#define ISR_COMMAND_COMPLETE 0x10 // command completed 273#define ISR_COMMAND_COMPLETE 0x10 // command completed
281#define ISR_OUT_OF_RANGE 0x20 // command completed 274#define ISR_OUT_OF_RANGE 0x20 // command completed
282#define ISR_IBSS_MERGE 0x40 // (4.1.2.30): IBSS merge 275#define ISR_IBSS_MERGE 0x40 // (4.1.2.30): IBSS merge
283#define ISR_GENERIC_IRQ 0x80 276#define ISR_GENERIC_IRQ 0x80
284
285 277
286#define Local_Mib_Type 0x01 278#define Local_Mib_Type 0x01
287#define Mac_Address_Mib_Type 0x02 279#define Mac_Address_Mib_Type 0x02
@@ -317,7 +309,6 @@ struct tx_desc {
317#define LOCAL_MIB_PREAMBLE_TYPE 9 309#define LOCAL_MIB_PREAMBLE_TYPE 9
318#define MAC_ADDR_MIB_MAC_ADDR_POS 0 310#define MAC_ADDR_MIB_MAC_ADDR_POS 0
319 311
320
321#define CMD_Set_MIB_Vars 0x01 312#define CMD_Set_MIB_Vars 0x01
322#define CMD_Get_MIB_Vars 0x02 313#define CMD_Get_MIB_Vars 0x02
323#define CMD_Scan 0x03 314#define CMD_Scan 0x03
@@ -338,7 +329,6 @@ struct tx_desc {
338#define CMD_STATUS_HOST_ERROR 0xFF 329#define CMD_STATUS_HOST_ERROR 0xFF
339#define CMD_STATUS_BUSY 0xFE 330#define CMD_STATUS_BUSY 0xFE
340 331
341
342#define CMD_BLOCK_COMMAND_OFFSET 0 332#define CMD_BLOCK_COMMAND_OFFSET 0
343#define CMD_BLOCK_STATUS_OFFSET 1 333#define CMD_BLOCK_STATUS_OFFSET 1
344#define CMD_BLOCK_PARAMETERS_OFFSET 4 334#define CMD_BLOCK_PARAMETERS_OFFSET 4
@@ -347,15 +337,15 @@ struct tx_desc {
347 337
348#define MGMT_FRAME_BODY_OFFSET 24 338#define MGMT_FRAME_BODY_OFFSET 24
349#define MAX_AUTHENTICATION_RETRIES 3 339#define MAX_AUTHENTICATION_RETRIES 3
350#define MAX_ASSOCIATION_RETRIES 3 340#define MAX_ASSOCIATION_RETRIES 3
351 341
352#define AUTHENTICATION_RESPONSE_TIME_OUT 1000 342#define AUTHENTICATION_RESPONSE_TIME_OUT 1000
353 343
354#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */ 344#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */
355#define LOOP_RETRY_LIMIT 500000 345#define LOOP_RETRY_LIMIT 500000
356 346
357#define ACTIVE_MODE 1 347#define ACTIVE_MODE 1
358#define PS_MODE 2 348#define PS_MODE 2
359 349
360#define MAX_ENCRYPTION_KEYS 4 350#define MAX_ENCRYPTION_KEYS 4
361#define MAX_ENCRYPTION_KEY_SIZE 40 351#define MAX_ENCRYPTION_KEY_SIZE 40
@@ -377,7 +367,7 @@ struct tx_desc {
377#define REG_DOMAIN_MKK1 0x41 //Channel 1-14 Japan(MKK1) 367#define REG_DOMAIN_MKK1 0x41 //Channel 1-14 Japan(MKK1)
378#define REG_DOMAIN_ISRAEL 0x50 //Channel 3-9 ISRAEL 368#define REG_DOMAIN_ISRAEL 0x50 //Channel 3-9 ISRAEL
379 369
380#define BSS_TYPE_AD_HOC 1 370#define BSS_TYPE_AD_HOC 1
381#define BSS_TYPE_INFRASTRUCTURE 2 371#define BSS_TYPE_INFRASTRUCTURE 2
382 372
383#define SCAN_TYPE_ACTIVE 0 373#define SCAN_TYPE_ACTIVE 0
@@ -389,7 +379,7 @@ struct tx_desc {
389 379
390#define DATA_FRAME_WS_HEADER_SIZE 30 380#define DATA_FRAME_WS_HEADER_SIZE 30
391 381
392/* promiscuous mode control */ 382/* promiscuous mode control */
393#define PROM_MODE_OFF 0x0 383#define PROM_MODE_OFF 0x0
394#define PROM_MODE_UNKNOWN 0x1 384#define PROM_MODE_UNKNOWN 0x1
395#define PROM_MODE_CRC_FAILED 0x2 385#define PROM_MODE_CRC_FAILED 0x2
@@ -398,8 +388,7 @@ struct tx_desc {
398#define PROM_MODE_CTRL 0x10 388#define PROM_MODE_CTRL 0x10
399#define PROM_MODE_BAD_PROTOCOL 0x20 389#define PROM_MODE_BAD_PROTOCOL 0x20
400 390
401 391#define IFACE_INT_STATUS_OFFSET 0
402#define IFACE_INT_STATUS_OFFSET 0
403#define IFACE_INT_MASK_OFFSET 1 392#define IFACE_INT_MASK_OFFSET 1
404#define IFACE_LOCKOUT_HOST_OFFSET 2 393#define IFACE_LOCKOUT_HOST_OFFSET 2
405#define IFACE_LOCKOUT_MAC_OFFSET 3 394#define IFACE_LOCKOUT_MAC_OFFSET 3
@@ -407,7 +396,7 @@ struct tx_desc {
407#define IFACE_MAC_STAT_OFFSET 30 396#define IFACE_MAC_STAT_OFFSET 30
408#define IFACE_GENERIC_INT_TYPE_OFFSET 32 397#define IFACE_GENERIC_INT_TYPE_OFFSET 32
409 398
410#define CIPHER_SUITE_NONE 0 399#define CIPHER_SUITE_NONE 0
411#define CIPHER_SUITE_WEP_64 1 400#define CIPHER_SUITE_WEP_64 1
412#define CIPHER_SUITE_TKIP 2 401#define CIPHER_SUITE_TKIP 2
413#define CIPHER_SUITE_AES 3 402#define CIPHER_SUITE_AES 3
@@ -419,11 +408,11 @@ struct tx_desc {
419// 408//
420// 409//
421 410
422// FuncCtrl field: 411// FuncCtrl field:
423// 412//
424#define FUNC_CTRL_TxENABLE 0x10 413#define FUNC_CTRL_TxENABLE 0x10
425#define FUNC_CTRL_RxENABLE 0x20 414#define FUNC_CTRL_RxENABLE 0x20
426#define FUNC_CTRL_INIT_COMPLETE 0x01 415#define FUNC_CTRL_INIT_COMPLETE 0x01
427 416
428/* A stub firmware image which reads the MAC address from NVRAM on the card. 417/* A stub firmware image which reads the MAC address from NVRAM on the card.
429 For copyright information and source see the end of this file. */ 418 For copyright information and source see the end of this file. */
@@ -486,10 +475,10 @@ struct atmel_private {
486 struct net_device_stats stats; // device stats 475 struct net_device_stats stats; // device stats
487 spinlock_t irqlock, timerlock; // spinlocks 476 spinlock_t irqlock, timerlock; // spinlocks
488 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type; 477 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
489 enum { 478 enum {
490 CARD_TYPE_PARALLEL_FLASH, 479 CARD_TYPE_PARALLEL_FLASH,
491 CARD_TYPE_SPI_FLASH, 480 CARD_TYPE_SPI_FLASH,
492 CARD_TYPE_EEPROM 481 CARD_TYPE_EEPROM
493 } card_type; 482 } card_type;
494 int do_rx_crc; /* If we need to CRC incoming packets */ 483 int do_rx_crc; /* If we need to CRC incoming packets */
495 int probe_crc; /* set if we don't yet know */ 484 int probe_crc; /* set if we don't yet know */
@@ -497,18 +486,18 @@ struct atmel_private {
497 u16 rx_desc_head; 486 u16 rx_desc_head;
498 u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous; 487 u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous;
499 u16 tx_free_mem, tx_buff_head, tx_buff_tail; 488 u16 tx_free_mem, tx_buff_head, tx_buff_tail;
500 489
501 u16 frag_seq, frag_len, frag_no; 490 u16 frag_seq, frag_len, frag_no;
502 u8 frag_source[6]; 491 u8 frag_source[6];
503 492
504 u8 wep_is_on, default_key, exclude_unencrypted, encryption_level; 493 u8 wep_is_on, default_key, exclude_unencrypted, encryption_level;
505 u8 group_cipher_suite, pairwise_cipher_suite; 494 u8 group_cipher_suite, pairwise_cipher_suite;
506 u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE]; 495 u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
507 int wep_key_len[MAX_ENCRYPTION_KEYS]; 496 int wep_key_len[MAX_ENCRYPTION_KEYS];
508 int use_wpa, radio_on_broken; /* firmware dependent stuff. */ 497 int use_wpa, radio_on_broken; /* firmware dependent stuff. */
509 498
510 u16 host_info_base; 499 u16 host_info_base;
511 struct host_info_struct { 500 struct host_info_struct {
512 /* NB this is matched to the hardware, don't change. */ 501 /* NB this is matched to the hardware, don't change. */
513 u8 volatile int_status; 502 u8 volatile int_status;
514 u8 volatile int_mask; 503 u8 volatile int_mask;
@@ -524,20 +513,20 @@ struct atmel_private {
524 u16 rx_buff_size; 513 u16 rx_buff_size;
525 u16 rx_desc_pos; 514 u16 rx_desc_pos;
526 u16 rx_desc_count; 515 u16 rx_desc_count;
527 516
528 u16 build_version; 517 u16 build_version;
529 u16 command_pos; 518 u16 command_pos;
530 519
531 u16 major_version; 520 u16 major_version;
532 u16 minor_version; 521 u16 minor_version;
533 522
534 u16 func_ctrl; 523 u16 func_ctrl;
535 u16 mac_status; 524 u16 mac_status;
536 u16 generic_IRQ_type; 525 u16 generic_IRQ_type;
537 u8 reserved[2]; 526 u8 reserved[2];
538 } host_info; 527 } host_info;
539 528
540 enum { 529 enum {
541 STATION_STATE_SCANNING, 530 STATION_STATE_SCANNING,
542 STATION_STATE_JOINNING, 531 STATION_STATE_JOINNING,
543 STATION_STATE_AUTHENTICATING, 532 STATION_STATE_AUTHENTICATING,
@@ -547,7 +536,7 @@ struct atmel_private {
547 STATION_STATE_DOWN, 536 STATION_STATE_DOWN,
548 STATION_STATE_MGMT_ERROR 537 STATION_STATE_MGMT_ERROR
549 } station_state; 538 } station_state;
550 539
551 int operating_mode, power_mode; 540 int operating_mode, power_mode;
552 time_t last_qual; 541 time_t last_qual;
553 int beacons_this_sec; 542 int beacons_this_sec;
@@ -560,18 +549,18 @@ struct atmel_private {
560 int long_retry, short_retry; 549 int long_retry, short_retry;
561 int preamble; 550 int preamble;
562 int default_beacon_period, beacon_period, listen_interval; 551 int default_beacon_period, beacon_period, listen_interval;
563 int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum; 552 int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum;
564 int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt; 553 int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt;
565 enum { 554 enum {
566 SITE_SURVEY_IDLE, 555 SITE_SURVEY_IDLE,
567 SITE_SURVEY_IN_PROGRESS, 556 SITE_SURVEY_IN_PROGRESS,
568 SITE_SURVEY_COMPLETED 557 SITE_SURVEY_COMPLETED
569 } site_survey_state; 558 } site_survey_state;
570 time_t last_survey; 559 time_t last_survey;
571 560
572 int station_was_associated, station_is_associated; 561 int station_was_associated, station_is_associated;
573 int fast_scan; 562 int fast_scan;
574 563
575 struct bss_info { 564 struct bss_info {
576 int channel; 565 int channel;
577 int SSIDsize; 566 int SSIDsize;
@@ -584,13 +573,12 @@ struct atmel_private {
584 u8 SSID[MAX_SSID_LENGTH]; 573 u8 SSID[MAX_SSID_LENGTH];
585 } BSSinfo[MAX_BSS_ENTRIES]; 574 } BSSinfo[MAX_BSS_ENTRIES];
586 int BSS_list_entries, current_BSS; 575 int BSS_list_entries, current_BSS;
587 int connect_to_any_BSS; 576 int connect_to_any_BSS;
588 int SSID_size, new_SSID_size; 577 int SSID_size, new_SSID_size;
589 u8 CurrentBSSID[6], BSSID[6]; 578 u8 CurrentBSSID[6], BSSID[6];
590 u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH]; 579 u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH];
591 u64 last_beacon_timestamp; 580 u64 last_beacon_timestamp;
592 u8 rx_buf[MAX_WIRELESS_BODY]; 581 u8 rx_buf[MAX_WIRELESS_BODY];
593
594}; 582};
595 583
596static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16}; 584static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16};
@@ -598,39 +586,49 @@ static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16};
598static const struct { 586static const struct {
599 int reg_domain; 587 int reg_domain;
600 int min, max; 588 int min, max;
601 char *name; 589 char *name;
602} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" }, 590} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" },
603 { REG_DOMAIN_DOC, 1, 11, "Canada" }, 591 { REG_DOMAIN_DOC, 1, 11, "Canada" },
604 { REG_DOMAIN_ETSI, 1, 13, "Europe" }, 592 { REG_DOMAIN_ETSI, 1, 13, "Europe" },
605 { REG_DOMAIN_SPAIN, 10, 11, "Spain" }, 593 { REG_DOMAIN_SPAIN, 10, 11, "Spain" },
606 { REG_DOMAIN_FRANCE, 10, 13, "France" }, 594 { REG_DOMAIN_FRANCE, 10, 13, "France" },
607 { REG_DOMAIN_MKK, 14, 14, "MKK" }, 595 { REG_DOMAIN_MKK, 14, 14, "MKK" },
608 { REG_DOMAIN_MKK1, 1, 14, "MKK1" }, 596 { REG_DOMAIN_MKK1, 1, 14, "MKK1" },
609 { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} }; 597 { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} };
610 598
611static void build_wpa_mib(struct atmel_private *priv); 599static void build_wpa_mib(struct atmel_private *priv);
612static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 600static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
613static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len); 601static void atmel_copy_to_card(struct net_device *dev, u16 dest,
614static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len); 602 unsigned char *src, u16 len);
603static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
604 u16 src, u16 len);
615static void atmel_set_gcr(struct net_device *dev, u16 mask); 605static void atmel_set_gcr(struct net_device *dev, u16 mask);
616static void atmel_clear_gcr(struct net_device *dev, u16 mask); 606static void atmel_clear_gcr(struct net_device *dev, u16 mask);
617static int atmel_lock_mac(struct atmel_private *priv); 607static int atmel_lock_mac(struct atmel_private *priv);
618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); 608static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
619static void atmel_command_irq(struct atmel_private *priv); 609static void atmel_command_irq(struct atmel_private *priv);
620static int atmel_validate_channel(struct atmel_private *priv, int channel); 610static int atmel_validate_channel(struct atmel_private *priv, int channel);
621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 611static void atmel_management_frame(struct atmel_private *priv,
612 struct ieee80211_hdr_4addr *header,
622 u16 frame_len, u8 rssi); 613 u16 frame_len, u8 rssi);
623static void atmel_management_timer(u_long a); 614static void atmel_management_timer(u_long a);
624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); 615static void atmel_send_command(struct atmel_private *priv, int command,
625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); 616 void *cmd, int cmd_size);
626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 617static int atmel_send_command_wait(struct atmel_private *priv, int command,
618 void *cmd, int cmd_size);
619static void atmel_transmit_management_frame(struct atmel_private *priv,
620 struct ieee80211_hdr_4addr *header,
627 u8 *body, int body_len); 621 u8 *body, int body_len);
628 622
629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 623static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
630static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data); 624static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index,
631static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data); 625 u8 data);
632static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len); 626static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
633static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len); 627 u16 data);
628static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
629 u8 *data, int data_len);
630static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
631 u8 *data, int data_len);
634static void atmel_scan(struct atmel_private *priv, int specific_ssid); 632static void atmel_scan(struct atmel_private *priv, int specific_ssid);
635static void atmel_join_bss(struct atmel_private *priv, int bss_index); 633static void atmel_join_bss(struct atmel_private *priv, int bss_index);
636static void atmel_smooth_qual(struct atmel_private *priv); 634static void atmel_smooth_qual(struct atmel_private *priv);
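The channel_table[] above pairs each regulatory-domain code with its lowest and highest legal channel, and atmel_validate_channel() (declared here, body outside this hunk) checks a requested channel against that range. Below is a standalone sketch of such a lookup, limited to the two domains whose codes are visible in this section; the clamping policy is an assumption for illustration, not necessarily what the driver's own function returns.

#include <stdio.h>

/* the two entries whose codes and ranges are visible in this hunk */
static const struct { int reg_domain, min, max; const char *name; } table[] = {
    { 0x41, 1, 14, "MKK1"   },   /* REG_DOMAIN_MKK1   */
    { 0x50, 3,  9, "Israel" },   /* REG_DOMAIN_ISRAEL */
};

/* Illustrative policy: return the channel if it is legal for the domain,
   the domain's lowest channel if it is not, and 0 for an unknown domain. */
static int clamp_channel(int reg_domain, int channel)
{
    unsigned i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (table[i].reg_domain != reg_domain)
            continue;
        if (channel >= table[i].min && channel <= table[i].max)
            return channel;
        return table[i].min;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", clamp_channel(0x50, 7));   /* 7: legal in Israel       */
    printf("%d\n", clamp_channel(0x50, 13));  /* 3: clamped to domain min */
    return 0;
}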
@@ -650,12 +648,12 @@ static inline u16 atmel_co(struct atmel_private *priv, u16 offset)
650 return priv->host_info.command_pos + offset; 648 return priv->host_info.command_pos + offset;
651} 649}
652 650
653static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc) 651static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc)
654{ 652{
655 return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset; 653 return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset;
656} 654}
657 655
658static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc) 656static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc)
659{ 657{
660 return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset; 658 return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset;
661} 659}
@@ -682,25 +680,25 @@ static inline void atmel_write16(struct net_device *dev, u16 offset, u16 data)
682 680
683static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos) 681static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos)
684{ 682{
685 atmel_writeAR(priv->dev, pos); 683 atmel_writeAR(priv->dev, pos);
686 return atmel_read8(priv->dev, DR); 684 return atmel_read8(priv->dev, DR);
687} 685}
688 686
689static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data) 687static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data)
690{ 688{
691 atmel_writeAR(priv->dev, pos); 689 atmel_writeAR(priv->dev, pos);
692 atmel_write8(priv->dev, DR, data); 690 atmel_write8(priv->dev, DR, data);
693} 691}
694 692
695static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos) 693static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos)
696{ 694{
697 atmel_writeAR(priv->dev, pos); 695 atmel_writeAR(priv->dev, pos);
698 return atmel_read16(priv->dev, DR); 696 return atmel_read16(priv->dev, DR);
699} 697}
700 698
701static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data) 699static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data)
702{ 700{
703 atmel_writeAR(priv->dev, pos); 701 atmel_writeAR(priv->dev, pos);
704 atmel_write16(priv->dev, DR, data); 702 atmel_write16(priv->dev, DR, data);
705} 703}
706 704
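The atmel_rmem*/atmel_wmem* helpers above capture the card's indirect access scheme: the host writes a shared-SRAM address into the AR register, then reads or writes the DR register to transfer the data at that address (probe_crc() further down relies on DR auto-incrementing through a buffer). A self-contained simulation of the address/data window over a plain array, with the auto-increment and access-width details of the real hardware left out:

#include <stdint.h>
#include <stdio.h>

static uint8_t  sram[0x1000];   /* stand-in for the card's shared memory */
static uint16_t ar;             /* address register (AR)                  */

static void    write_ar(uint16_t pos) { ar = pos; }
static void    write_dr8(uint8_t v)   { sram[ar] = v; }
static uint8_t read_dr8(void)         { return sram[ar]; }

static void    wmem8(uint16_t pos, uint8_t v) { write_ar(pos); write_dr8(v); }
static uint8_t rmem8(uint16_t pos)            { write_ar(pos); return read_dr8(); }

int main(void)
{
    wmem8(0x0123, 0xAB);
    printf("0x%02X\n", (unsigned)rmem8(0x0123));   /* prints 0xAB */
    return 0;
}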
@@ -710,11 +708,10 @@ static void tx_done_irq(struct atmel_private *priv)
710{ 708{
711 int i; 709 int i;
712 710
713 for (i = 0; 711 for (i = 0;
714 atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE && 712 atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE &&
715 i < priv->host_info.tx_desc_count; 713 i < priv->host_info.tx_desc_count;
716 i++) { 714 i++) {
717
718 u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head)); 715 u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head));
719 u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head)); 716 u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head));
720 u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head)); 717 u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head));
@@ -728,16 +725,16 @@ static void tx_done_irq(struct atmel_private *priv)
728 priv->tx_buff_head = 0; 725 priv->tx_buff_head = 0;
729 else 726 else
730 priv->tx_buff_head += msdu_size; 727 priv->tx_buff_head += msdu_size;
731 728
732 if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1)) 729 if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1))
733 priv->tx_desc_head++ ; 730 priv->tx_desc_head++ ;
734 else 731 else
735 priv->tx_desc_head = 0; 732 priv->tx_desc_head = 0;
736 733
737 if (type == TX_PACKET_TYPE_DATA) { 734 if (type == TX_PACKET_TYPE_DATA) {
738 if (status == TX_STATUS_SUCCESS) 735 if (status == TX_STATUS_SUCCESS)
739 priv->stats.tx_packets++; 736 priv->stats.tx_packets++;
740 else 737 else
741 priv->stats.tx_errors++; 738 priv->stats.tx_errors++;
742 netif_wake_queue(priv->dev); 739 netif_wake_queue(priv->dev);
743 } 740 }
@@ -748,21 +745,22 @@ static u16 find_tx_buff(struct atmel_private *priv, u16 len)
748{ 745{
749 u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail; 746 u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail;
750 747
751 if (priv->tx_desc_free == 3 || priv->tx_free_mem < len) 748 if (priv->tx_desc_free == 3 || priv->tx_free_mem < len)
752 return 0; 749 return 0;
753 750
754 if (bottom_free >= len) 751 if (bottom_free >= len)
755 return priv->host_info.tx_buff_pos + priv->tx_buff_tail; 752 return priv->host_info.tx_buff_pos + priv->tx_buff_tail;
756 753
757 if (priv->tx_free_mem - bottom_free >= len) { 754 if (priv->tx_free_mem - bottom_free >= len) {
758 priv->tx_buff_tail = 0; 755 priv->tx_buff_tail = 0;
759 return priv->host_info.tx_buff_pos; 756 return priv->host_info.tx_buff_pos;
760 } 757 }
761 758
762 return 0; 759 return 0;
763} 760}
764 761
765static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 len, u16 buff, u8 type) 762static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
763 u16 len, u16 buff, u8 type)
766{ 764{
767 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff); 765 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff);
768 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len); 766 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len);
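find_tx_buff() above is a small circular allocator over the card's transmit buffer: a request goes at the current tail when it fits before the end of the buffer, otherwise the tail wraps to the start, provided enough already-freed space sits there. A standalone version of just that placement logic (the driver's additional descriptor-count check is omitted, and the sizes in main() are invented):

#include <stdint.h>
#include <stdio.h>

struct txbuf {
    uint16_t base;      /* card address where the tx buffer starts        */
    uint16_t size;      /* total size of the tx buffer                    */
    uint16_t tail;      /* next free position, relative to base           */
    uint16_t free_mem;  /* bytes not currently owned by pending frames    */
};

/* Returns the card address to copy a frame of 'len' bytes to, or 0. */
static uint16_t alloc_tx(struct txbuf *b, uint16_t len)
{
    uint16_t bottom_free = b->size - b->tail;

    if (b->free_mem < len)
        return 0;                      /* not enough space at all          */
    if (bottom_free >= len)
        return b->base + b->tail;      /* fits before the wrap point       */
    if (b->free_mem - bottom_free >= len) {
        b->tail = 0;                   /* skip the too-small end region    */
        return b->base;                /* and restart at the buffer top    */
    }
    return 0;
}

int main(void)
{
    struct txbuf b = { 0x2000, 4096, 4000, 3000 };
    printf("0x%04x\n", (unsigned)alloc_tx(&b, 500));   /* wraps: prints 0x2000 */
    return 0;
}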
@@ -775,8 +773,8 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
775 int cipher_type, cipher_length; 773 int cipher_type, cipher_length;
776 if (is_bcast) { 774 if (is_bcast) {
777 cipher_type = priv->group_cipher_suite; 775 cipher_type = priv->group_cipher_suite;
778 if (cipher_type == CIPHER_SUITE_WEP_64 || 776 if (cipher_type == CIPHER_SUITE_WEP_64 ||
779 cipher_type == CIPHER_SUITE_WEP_128 ) 777 cipher_type == CIPHER_SUITE_WEP_128)
780 cipher_length = 8; 778 cipher_length = 8;
781 else if (cipher_type == CIPHER_SUITE_TKIP) 779 else if (cipher_type == CIPHER_SUITE_TKIP)
782 cipher_length = 12; 780 cipher_length = 12;
@@ -790,8 +788,8 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
790 } 788 }
791 } else { 789 } else {
792 cipher_type = priv->pairwise_cipher_suite; 790 cipher_type = priv->pairwise_cipher_suite;
793 if (cipher_type == CIPHER_SUITE_WEP_64 || 791 if (cipher_type == CIPHER_SUITE_WEP_64 ||
794 cipher_type == CIPHER_SUITE_WEP_128 ) 792 cipher_type == CIPHER_SUITE_WEP_128)
795 cipher_length = 8; 793 cipher_length = 8;
796 else if (cipher_type == CIPHER_SUITE_TKIP) 794 else if (cipher_type == CIPHER_SUITE_TKIP)
797 cipher_length = 12; 795 cipher_length = 12;
@@ -804,9 +802,9 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
804 cipher_length = 0; 802 cipher_length = 0;
805 } 803 }
806 } 804 }
807 805
808 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail), 806 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail),
809 cipher_type); 807 cipher_type);
810 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail), 808 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail),
811 cipher_length); 809 cipher_length);
812 } 810 }
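The branches above size the per-frame encryption overhead that tx_update_descriptor() reports to the firmware: WEP-64/WEP-128 frames get a cipher_length of 8 and TKIP frames 12, with the remaining suites handled outside this diff. A small sketch of that mapping; the numeric value of CIPHER_SUITE_WEP_128 is not visible in this hunk, so the one used below is an assumption.

#include <stdio.h>

/* values for NONE/WEP_64/TKIP/AES appear in the defines earlier in this
   diff; WEP_128's value is assumed here for illustration */
enum cipher_suite {
    CIPHER_SUITE_NONE    = 0,
    CIPHER_SUITE_WEP_64  = 1,
    CIPHER_SUITE_TKIP    = 2,
    CIPHER_SUITE_AES     = 3,
    CIPHER_SUITE_WEP_128 = 5    /* assumed, not shown in this hunk */
};

/* Per-frame bytes reserved for IV/ICV-style fields, mirroring the
   branches visible in tx_update_descriptor(). */
static int cipher_length(enum cipher_suite s)
{
    switch (s) {
    case CIPHER_SUITE_WEP_64:
    case CIPHER_SUITE_WEP_128:
        return 8;       /* 4-byte IV + 4-byte ICV           */
    case CIPHER_SUITE_TKIP:
        return 12;      /* extended IV adds another 4 bytes */
    default:
        return 0;       /* other suites: outside this diff  */
    }
}

int main(void)
{
    printf("%d %d\n", cipher_length(CIPHER_SUITE_WEP_64),
                      cipher_length(CIPHER_SUITE_TKIP));   /* 8 12 */
    return 0;
}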
@@ -815,46 +813,46 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
815 if (priv->tx_desc_previous != priv->tx_desc_tail) 813 if (priv->tx_desc_previous != priv->tx_desc_tail)
816 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0); 814 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0);
817 priv->tx_desc_previous = priv->tx_desc_tail; 815 priv->tx_desc_previous = priv->tx_desc_tail;
818 if (priv->tx_desc_tail < (priv->host_info.tx_desc_count -1 )) 816 if (priv->tx_desc_tail < (priv->host_info.tx_desc_count - 1))
819 priv->tx_desc_tail++; 817 priv->tx_desc_tail++;
820 else 818 else
821 priv->tx_desc_tail = 0; 819 priv->tx_desc_tail = 0;
822 priv->tx_desc_free--; 820 priv->tx_desc_free--;
823 priv->tx_free_mem -= len; 821 priv->tx_free_mem -= len;
824
825} 822}
826 823
827static int start_tx (struct sk_buff *skb, struct net_device *dev) 824static int start_tx(struct sk_buff *skb, struct net_device *dev)
828{ 825{
829 struct atmel_private *priv = netdev_priv(dev); 826 struct atmel_private *priv = netdev_priv(dev);
830 struct ieee80211_hdr_4addr header; 827 struct ieee80211_hdr_4addr header;
831 unsigned long flags; 828 unsigned long flags;
832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 829 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; 830 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
834 831
835 if (priv->card && priv->present_callback && 832 if (priv->card && priv->present_callback &&
836 !(*priv->present_callback)(priv->card)) { 833 !(*priv->present_callback)(priv->card)) {
837 priv->stats.tx_errors++; 834 priv->stats.tx_errors++;
838 dev_kfree_skb(skb); 835 dev_kfree_skb(skb);
839 return 0; 836 return 0;
840 } 837 }
841 838
842 if (priv->station_state != STATION_STATE_READY) { 839 if (priv->station_state != STATION_STATE_READY) {
843 priv->stats.tx_errors++; 840 priv->stats.tx_errors++;
844 dev_kfree_skb(skb); 841 dev_kfree_skb(skb);
845 return 0; 842 return 0;
846 } 843 }
847 844
848 /* first ensure the timer func cannot run */ 845 /* first ensure the timer func cannot run */
849 spin_lock_bh(&priv->timerlock); 846 spin_lock_bh(&priv->timerlock);
850 /* then stop the hardware ISR */ 847 /* then stop the hardware ISR */
851 spin_lock_irqsave(&priv->irqlock, flags); 848 spin_lock_irqsave(&priv->irqlock, flags);
852 /* nb doing the above in the opposite order will deadlock */ 849 /* nb doing the above in the opposite order will deadlock */
853 850
854 /* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the 851 /* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the
855 12 first bytes (containing DA/SA) and put them in the appropriate fields of 852 12 first bytes (containing DA/SA) and put them in the appropriate
856 the Wireless Header. Thus the packet length is then the initial + 18 (+30-12) */ 853 fields of the Wireless Header. Thus the packet length is then the
857 854 initial + 18 (+30-12) */
855
858 if (!(buff = find_tx_buff(priv, len + 18))) { 856 if (!(buff = find_tx_buff(priv, len + 18))) {
859 priv->stats.tx_dropped++; 857 priv->stats.tx_dropped++;
860 spin_unlock_irqrestore(&priv->irqlock, flags); 858 spin_unlock_irqrestore(&priv->irqlock, flags);
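The comment in start_tx() above is plain accounting: the outgoing 802.3 frame loses its 12 bytes of DA/SA (they move into the 30-byte wireless header), so the card needs len - 12 + 30 = len + 18 bytes, which is exactly what is requested from find_tx_buff(); the driver also pads short frames up to ETH_ZLEN first, and a little further down uses the I/G bit of the destination address to decide whether the frame is broadcast/multicast. A worked check of both calculations:

#include <stdint.h>
#include <stdio.h>

#define ETH_ADDRS_LEN   12   /* DA + SA stripped from the 802.3 frame */
#define WS_HEADER_SIZE  30   /* DATA_FRAME_WS_HEADER_SIZE             */

static uint16_t card_frame_len(uint16_t skb_len)
{
    /* bytes copied to the card: wireless header + payload minus DA/SA */
    return WS_HEADER_SIZE + (skb_len - ETH_ADDRS_LEN);   /* == skb_len + 18 */
}

static int is_broadcast_or_multicast(const uint8_t *dest)
{
    return dest[0] & 0x01;   /* I/G bit of the first address octet */
}

int main(void)
{
    uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    printf("%u\n", (unsigned)card_frame_len(1500));    /* 1518 */
    printf("%d\n", is_broadcast_or_multicast(bcast));  /* 1    */
    return 0;
}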
@@ -862,7 +860,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
862 netif_stop_queue(dev); 860 netif_stop_queue(dev);
863 return 1; 861 return 1;
864 } 862 }
865 863
866 frame_ctl = IEEE80211_FTYPE_DATA; 864 frame_ctl = IEEE80211_FTYPE_DATA;
867 header.duration_id = 0; 865 header.duration_id = 0;
868 header.seq_ctl = 0; 866 header.seq_ctl = 0;
@@ -878,7 +876,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
878 memcpy(&header.addr2, dev->dev_addr, 6); 876 memcpy(&header.addr2, dev->dev_addr, 6);
879 memcpy(&header.addr3, skb->data, 6); 877 memcpy(&header.addr3, skb->data, 6);
880 } 878 }
881 879
882 if (priv->use_wpa) 880 if (priv->use_wpa)
883 memcpy(&header.addr4, SNAP_RFC1024, 6); 881 memcpy(&header.addr4, SNAP_RFC1024, 6);
884 882
@@ -888,27 +886,27 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
888 /* Copy the packet sans its 802.3 header addresses which have been replaced */ 886 /* Copy the packet sans its 802.3 header addresses which have been replaced */
889 atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12); 887 atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
890 priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE; 888 priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE;
891 889
892 /* low bit of first byte of destination tells us if broadcast */ 890 /* low bit of first byte of destination tells us if broadcast */
893 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA); 891 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
894 dev->trans_start = jiffies; 892 dev->trans_start = jiffies;
895 priv->stats.tx_bytes += len; 893 priv->stats.tx_bytes += len;
896 894
897 spin_unlock_irqrestore(&priv->irqlock, flags); 895 spin_unlock_irqrestore(&priv->irqlock, flags);
898 spin_unlock_bh(&priv->timerlock); 896 spin_unlock_bh(&priv->timerlock);
899 dev_kfree_skb(skb); 897 dev_kfree_skb(skb);
900 898
901 return 0; 899 return 0;
902} 900}
903 901
904static void atmel_transmit_management_frame(struct atmel_private *priv, 902static void atmel_transmit_management_frame(struct atmel_private *priv,
905 struct ieee80211_hdr_4addr *header, 903 struct ieee80211_hdr_4addr *header,
906 u8 *body, int body_len) 904 u8 *body, int body_len)
907{ 905{
908 u16 buff; 906 u16 buff;
909 int len = MGMT_FRAME_BODY_OFFSET + body_len; 907 int len = MGMT_FRAME_BODY_OFFSET + body_len;
910 908
911 if (!(buff = find_tx_buff(priv, len))) 909 if (!(buff = find_tx_buff(priv, len)))
912 return; 910 return;
913 911
914 atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET); 912 atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET);
@@ -916,24 +914,25 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
916 priv->tx_buff_tail += len; 914 priv->tx_buff_tail += len;
917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT); 915 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
918} 916}
919 917
920static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 918static void fast_rx_path(struct atmel_private *priv,
919 struct ieee80211_hdr_4addr *header,
921 u16 msdu_size, u16 rx_packet_loc, u32 crc) 920 u16 msdu_size, u16 rx_packet_loc, u32 crc)
922{ 921{
923 /* fast path: unfragmented packet copy directly into skbuf */ 922 /* fast path: unfragmented packet copy directly into skbuf */
924 u8 mac4[6]; 923 u8 mac4[6];
925 struct sk_buff *skb; 924 struct sk_buff *skb;
926 unsigned char *skbp; 925 unsigned char *skbp;
927 926
928 /* get the final, mac 4 header field, this tells us encapsulation */ 927 /* get the final, mac 4 header field, this tells us encapsulation */
929 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6); 928 atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6);
930 msdu_size -= 6; 929 msdu_size -= 6;
931 930
932 if (priv->do_rx_crc) { 931 if (priv->do_rx_crc) {
933 crc = crc32_le(crc, mac4, 6); 932 crc = crc32_le(crc, mac4, 6);
934 msdu_size -= 4; 933 msdu_size -= 4;
935 } 934 }
936 935
937 if (!(skb = dev_alloc_skb(msdu_size + 14))) { 936 if (!(skb = dev_alloc_skb(msdu_size + 14))) {
938 priv->stats.rx_dropped++; 937 priv->stats.rx_dropped++;
939 return; 938 return;
@@ -942,7 +941,7 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
942 skb_reserve(skb, 2); 941 skb_reserve(skb, 2);
943 skbp = skb_put(skb, msdu_size + 12); 942 skbp = skb_put(skb, msdu_size + 12);
944 atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size); 943 atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size);
945 944
946 if (priv->do_rx_crc) { 945 if (priv->do_rx_crc) {
947 u32 netcrc; 946 u32 netcrc;
948 crc = crc32_le(crc, skbp + 12, msdu_size); 947 crc = crc32_le(crc, skbp + 12, msdu_size);
@@ -953,24 +952,25 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
953 return; 952 return;
954 } 953 }
955 } 954 }
956 955
957 memcpy(skbp, header->addr1, 6); /* destination address */ 956 memcpy(skbp, header->addr1, 6); /* destination address */
958 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 957 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
959 memcpy(&skbp[6], header->addr3, 6); 958 memcpy(&skbp[6], header->addr3, 6);
960 else 959 else
961 memcpy(&skbp[6], header->addr2, 6); /* source address */ 960 memcpy(&skbp[6], header->addr2, 6); /* source address */
962 961
963 priv->dev->last_rx=jiffies; 962 priv->dev->last_rx = jiffies;
964 skb->dev = priv->dev; 963 skb->dev = priv->dev;
965 skb->protocol = eth_type_trans(skb, priv->dev); 964 skb->protocol = eth_type_trans(skb, priv->dev);
966 skb->ip_summed = CHECKSUM_NONE; 965 skb->ip_summed = CHECKSUM_NONE;
967 netif_rx(skb); 966 netif_rx(skb);
968 priv->stats.rx_bytes += 12 + msdu_size; 967 priv->stats.rx_bytes += 12 + msdu_size;
969 priv->stats.rx_packets++; 968 priv->stats.rx_packets++;
970} 969}
971 970
972/* Test to see if the packet in card memory at packet_loc has a valid CRC 971/* Test to see if the packet in card memory at packet_loc has a valid CRC
973 It doesn't matter that this is slow: it is only used to proble the first few packets. */ 972 It doesn't matter that this is slow: it is only used to proble the first few
973 packets. */
974static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size) 974static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
975{ 975{
976 int i = msdu_size - 4; 976 int i = msdu_size - 4;
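fast_rx_path() above rebuilds the 802.3 header in place: addr1 of the received frame becomes the Ethernet destination, and the source is taken from addr3 when the frame arrived from the distribution system (FROMDS set) or from addr2 otherwise. A minimal sketch of that header rebuild, with the 802.11 header reduced to the fields used here (0x0200 is the usual value of IEEE80211_FCTL_FROMDS):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCTL_FROMDS 0x0200   /* the bit tested via IEEE80211_FCTL_FROMDS */

struct wifi_hdr {            /* only the fields this sketch needs */
    uint16_t frame_ctl;
    uint8_t  addr1[6], addr2[6], addr3[6];
};

/* Writes DA then SA into the first 12 bytes of the Ethernet buffer. */
static void rebuild_eth_addrs(const struct wifi_hdr *h, uint8_t *eth)
{
    memcpy(eth, h->addr1, 6);                 /* destination address     */
    if (h->frame_ctl & FCTL_FROMDS)
        memcpy(eth + 6, h->addr3, 6);         /* original SA behind the AP */
    else
        memcpy(eth + 6, h->addr2, 6);         /* transmitter is the SA   */
}

int main(void)
{
    struct wifi_hdr h = { FCTL_FROMDS,
        { 1, 2, 3, 4, 5, 6 }, { 7, 8, 9, 10, 11, 12 }, { 13, 14, 15, 16, 17, 18 } };
    uint8_t eth[14] = { 0 };

    rebuild_eth_addrs(&h, eth);
    printf("%02x %02x\n", eth[0], eth[6]);    /* 01 0d */
    return 0;
}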
@@ -980,7 +980,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
980 return 0; 980 return 0;
981 981
982 atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4); 982 atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4);
983 983
984 atmel_writeAR(priv->dev, packet_loc); 984 atmel_writeAR(priv->dev, packet_loc);
985 while (i--) { 985 while (i--) {
986 u8 octet = atmel_read8(priv->dev, DR); 986 u8 octet = atmel_read8(priv->dev, DR);
@@ -990,20 +990,22 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
990 return (crc ^ 0xffffffff) == netcrc; 990 return (crc ^ 0xffffffff) == netcrc;
991} 991}
992 992
993static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 993static void frag_rx_path(struct atmel_private *priv,
994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags) 994 struct ieee80211_hdr_4addr *header,
995 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
996 u8 frag_no, int more_frags)
995{ 997{
996 u8 mac4[6]; 998 u8 mac4[6];
997 u8 source[6]; 999 u8 source[6];
998 struct sk_buff *skb; 1000 struct sk_buff *skb;
999 1001
1000 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 1002 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
1001 memcpy(source, header->addr3, 6); 1003 memcpy(source, header->addr3, 6);
1002 else 1004 else
1003 memcpy(source, header->addr2, 6); 1005 memcpy(source, header->addr2, 6);
1004 1006
1005 rx_packet_loc += 24; /* skip header */ 1007 rx_packet_loc += 24; /* skip header */
1006 1008
1007 if (priv->do_rx_crc) 1009 if (priv->do_rx_crc)
1008 msdu_size -= 4; 1010 msdu_size -= 4;
1009 1011
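probe_crc() and both receive paths share one convention: the CRC-32 is accumulated with crc32_le() starting from 0xffffffff, and the frame is good when the complement of the running value equals the FCS stored after the payload. The standalone check below reproduces that test; its bitwise loop computes the same reflected CRC-32 (polynomial 0xEDB88320) that the kernel's crc32_le() yields for this use.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        int k;
        crc ^= *p++;
        for (k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
    }
    return crc;
}

/* Mirrors "return (crc ^ 0xffffffff) == netcrc" in probe_crc(). */
static int fcs_ok(const uint8_t *frame, size_t len_with_fcs)
{
    size_t   body = len_with_fcs - 4;
    uint32_t netcrc;
    uint32_t crc = crc32_le_bitwise(0xffffffffu, frame, body);

    memcpy(&netcrc, frame + body, 4);   /* FCS as stored; little-endian host assumed */
    return (crc ^ 0xffffffffu) == netcrc;
}

int main(void)
{
    /* "123456789" has the well-known CRC-32 value 0xCBF43926 */
    uint8_t  frame[13] = "123456789";
    uint32_t fcs = 0xCBF43926u;

    memcpy(frame + 9, &fcs, 4);
    printf("%d\n", fcs_ok(frame, 13));   /* 1 on little-endian hosts */
    return 0;
}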
@@ -1012,16 +1014,16 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1012 msdu_size -= 6; 1014 msdu_size -= 6;
1013 rx_packet_loc += 6; 1015 rx_packet_loc += 6;
1014 1016
1015 if (priv->do_rx_crc) 1017 if (priv->do_rx_crc)
1016 crc = crc32_le(crc, mac4, 6); 1018 crc = crc32_le(crc, mac4, 6);
1017 1019
1018 priv->frag_seq = seq_no; 1020 priv->frag_seq = seq_no;
1019 priv->frag_no = 1; 1021 priv->frag_no = 1;
1020 priv->frag_len = msdu_size; 1022 priv->frag_len = msdu_size;
1021 memcpy(priv->frag_source, source, 6); 1023 memcpy(priv->frag_source, source, 6);
1022 memcpy(&priv->rx_buf[6], source, 6); 1024 memcpy(&priv->rx_buf[6], source, 6);
1023 memcpy(priv->rx_buf, header->addr1, 6); 1025 memcpy(priv->rx_buf, header->addr1, 6);
1024 1026
1025 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size); 1027 atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
1026 1028
1027 if (priv->do_rx_crc) { 1029 if (priv->do_rx_crc) {
@@ -1033,17 +1035,17 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1033 memset(priv->frag_source, 0xff, 6); 1035 memset(priv->frag_source, 0xff, 6);
1034 } 1036 }
1035 } 1037 }
1036 1038
1037 } else if (priv->frag_no == frag_no && 1039 } else if (priv->frag_no == frag_no &&
1038 priv->frag_seq == seq_no && 1040 priv->frag_seq == seq_no &&
1039 memcmp(priv->frag_source, source, 6) == 0) { 1041 memcmp(priv->frag_source, source, 6) == 0) {
1040 1042
1041 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len], 1043 atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
1042 rx_packet_loc, msdu_size); 1044 rx_packet_loc, msdu_size);
1043 if (priv->do_rx_crc) { 1045 if (priv->do_rx_crc) {
1044 u32 netcrc; 1046 u32 netcrc;
1045 crc = crc32_le(crc, 1047 crc = crc32_le(crc,
1046 &priv->rx_buf[12 + priv->frag_len], 1048 &priv->rx_buf[12 + priv->frag_len],
1047 msdu_size); 1049 msdu_size);
1048 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 1050 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
1049 if ((crc ^ 0xffffffff) != netcrc) { 1051 if ((crc ^ 0xffffffff) != netcrc) {
@@ -1052,7 +1054,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1052 more_frags = 1; /* don't send broken assembly */ 1054 more_frags = 1; /* don't send broken assembly */
1053 } 1055 }
1054 } 1056 }
1055 1057
1056 priv->frag_len += msdu_size; 1058 priv->frag_len += msdu_size;
1057 priv->frag_no++; 1059 priv->frag_no++;
1058 1060
@@ -1062,60 +1064,60 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr
1062 priv->stats.rx_dropped++; 1064 priv->stats.rx_dropped++;
1063 } else { 1065 } else {
1064 skb_reserve(skb, 2); 1066 skb_reserve(skb, 2);
1065 memcpy(skb_put(skb, priv->frag_len + 12), 1067 memcpy(skb_put(skb, priv->frag_len + 12),
1066 priv->rx_buf, 1068 priv->rx_buf,
1067 priv->frag_len + 12); 1069 priv->frag_len + 12);
1068 priv->dev->last_rx = jiffies; 1070 priv->dev->last_rx = jiffies;
1069 skb->dev = priv->dev; 1071 skb->dev = priv->dev;
1070 skb->protocol = eth_type_trans(skb, priv->dev); 1072 skb->protocol = eth_type_trans(skb, priv->dev);
1071 skb->ip_summed = CHECKSUM_NONE; 1073 skb->ip_summed = CHECKSUM_NONE;
1072 netif_rx(skb); 1074 netif_rx(skb);
1073 priv->stats.rx_bytes += priv->frag_len + 12; 1075 priv->stats.rx_bytes += priv->frag_len + 12;
1074 priv->stats.rx_packets++; 1076 priv->stats.rx_packets++;
1075 } 1077 }
1076 } 1078 }
1077
1078 } else 1079 } else
1079 priv->wstats.discard.fragment++; 1080 priv->wstats.discard.fragment++;
1080} 1081}
1081 1082
1082static void rx_done_irq(struct atmel_private *priv) 1083static void rx_done_irq(struct atmel_private *priv)
1083{ 1084{
1084 int i; 1085 int i;
1085 struct ieee80211_hdr_4addr header; 1086 struct ieee80211_hdr_4addr header;
1086 1087
1087 for (i = 0; 1088 for (i = 0;
1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && 1089 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
1089 i < priv->host_info.rx_desc_count; 1090 i < priv->host_info.rx_desc_count;
1090 i++) { 1091 i++) {
1091 1092
1092 u16 msdu_size, rx_packet_loc, frame_ctl, seq_control; 1093 u16 msdu_size, rx_packet_loc, frame_ctl, seq_control;
1093 u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head)); 1094 u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head));
1094 u32 crc = 0xffffffff; 1095 u32 crc = 0xffffffff;
1095 1096
1096 if (status != RX_STATUS_SUCCESS) { 1097 if (status != RX_STATUS_SUCCESS) {
1097 if (status == 0xc1) /* determined by experiment */ 1098 if (status == 0xc1) /* determined by experiment */
1098 priv->wstats.discard.nwid++; 1099 priv->wstats.discard.nwid++;
1099 else 1100 else
1100 priv->stats.rx_errors++; 1101 priv->stats.rx_errors++;
1101 goto next; 1102 goto next;
1102 } 1103 }
1103 1104
1104 msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head)); 1105 msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head));
1105 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head)); 1106 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
1106 1107
1107 if (msdu_size < 30) { 1108 if (msdu_size < 30) {
1108 priv->stats.rx_errors++; 1109 priv->stats.rx_errors++;
1109 goto next; 1110 goto next;
1110 } 1111 }
1111 1112
1112 /* Get header as far as end of seq_ctl */ 1113 /* Get header as far as end of seq_ctl */
1113 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24); 1114 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
1114 frame_ctl = le16_to_cpu(header.frame_ctl); 1115 frame_ctl = le16_to_cpu(header.frame_ctl);
1115 seq_control = le16_to_cpu(header.seq_ctl); 1116 seq_control = le16_to_cpu(header.seq_ctl);
1116 1117
1117 /* probe for CRC use here if needed once five packets have arrived with 1118 /* probe for CRC use here if needed once five packets have
1118 the same crc status, we assume we know what's happening and stop probing */ 1119 arrived with the same crc status, we assume we know what's
1120 happening and stop probing */
1119 if (priv->probe_crc) { 1121 if (priv->probe_crc) {
1120 if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) { 1122 if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) {
1121 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size); 1123 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
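frag_rx_path() above keeps a single reassembly in flight: fragment 0 records the sequence number, source address and running length; each later fragment is accepted only when its fragment number, sequence number and source all match; and the buffer goes up the stack once the more-fragments bit clears, after which the stored source is invalidated with 0xff. A compact sketch of just that bookkeeping, with payload copying and CRC handling left out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reasm {
    uint16_t seq;         /* sequence number being reassembled */
    uint8_t  next_frag;   /* fragment number expected next     */
    uint8_t  src[6];      /* source address of the reassembly  */
    uint16_t len;         /* bytes collected so far            */
};

/* Returns 1 when a complete frame is ready, 0 otherwise. */
static int frag_rx(struct reasm *r, uint16_t seq, uint8_t frag,
                   int more_frags, const uint8_t *src, uint16_t frag_len)
{
    if (frag == 0) {                       /* start a new reassembly */
        r->seq = seq;
        r->next_frag = 1;
        r->len = frag_len;
        memcpy(r->src, src, 6);
        /* unfragmented frames never get here in the driver
           (the fast path handles them); kept for completeness */
        return !more_frags;
    }
    if (frag == r->next_frag && seq == r->seq &&
        memcmp(r->src, src, 6) == 0) {     /* the fragment we expect */
        r->len += frag_len;
        r->next_frag++;
        if (!more_frags) {
            memset(r->src, 0xff, 6);       /* invalidate, like the driver */
            return 1;
        }
        return 0;
    }
    return 0;                              /* mismatch: fragment dropped */
}

int main(void)
{
    struct reasm r = { 0 };
    uint8_t src[6] = { 0, 1, 2, 3, 4, 5 };
    int done;

    frag_rx(&r, 42, 0, 1, src, 100);
    done = frag_rx(&r, 42, 1, 0, src, 60);
    printf("%d len=%u\n", done, (unsigned)r.len);   /* 1 len=160 */
    return 0;
}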
@@ -1130,34 +1132,33 @@ static void rx_done_irq(struct atmel_private *priv)
1130 priv->probe_crc = 0; 1132 priv->probe_crc = 0;
1131 } 1133 }
1132 } 1134 }
1133 1135
1134 /* don't CRC header when WEP in use */ 1136 /* don't CRC header when WEP in use */
1135 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) { 1137 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) {
1136 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24); 1138 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
1137 } 1139 }
1138 msdu_size -= 24; /* header */ 1140 msdu_size -= 24; /* header */
1139 1141
1140 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { 1142 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
1141
1142 int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS; 1143 int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
1143 u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG; 1144 u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
1144 u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4; 1145 u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
1145 1146
1146 if (!more_fragments && packet_fragment_no == 0 ) { 1147 if (!more_fragments && packet_fragment_no == 0) {
1147 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc); 1148 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
1148 } else { 1149 } else {
1149 frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc, 1150 frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc,
1150 packet_sequence_no, packet_fragment_no, more_fragments); 1151 packet_sequence_no, packet_fragment_no, more_fragments);
1151 } 1152 }
1152 } 1153 }
1153 1154
1154 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 1155 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
1155 /* copy rest of packet into buffer */ 1156 /* copy rest of packet into buffer */
1156 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); 1157 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
1157 1158
1158 /* we use the same buffer for frag reassembly and control packets */ 1159 /* we use the same buffer for frag reassembly and control packets */
1159 memset(priv->frag_source, 0xff, 6); 1160 memset(priv->frag_source, 0xff, 6);
1160 1161
1161 if (priv->do_rx_crc) { 1162 if (priv->do_rx_crc) {
1162 /* last 4 octets is crc */ 1163 /* last 4 octets is crc */
1163 msdu_size -= 4; 1164 msdu_size -= 4;
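rx_done_irq() above splits the 16-bit sequence-control field exactly as the 802.11 header defines it: the low four bits are the fragment number and the upper twelve (hence the >> 4) are the sequence number; the masks below match the usual kernel definitions of IEEE80211_SCTL_FRAG and IEEE80211_SCTL_SEQ. A two-line worked example:

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F   /* low 4 bits: fragment number    */
#define SCTL_SEQ  0xFFF0   /* upper 12 bits: sequence number */

int main(void)
{
    uint16_t seq_control = 0x02A3;                     /* example field value */
    unsigned frag = seq_control & SCTL_FRAG;           /* 0x3  -> fragment 3  */
    unsigned seq  = (seq_control & SCTL_SEQ) >> 4;     /* 0x2A -> sequence 42 */

    printf("frag=%u seq=%u\n", frag, seq);
    return 0;
}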
@@ -1170,18 +1171,18 @@ static void rx_done_irq(struct atmel_private *priv)
1170 1171
1171 atmel_management_frame(priv, &header, msdu_size, 1172 atmel_management_frame(priv, &header, msdu_size,
1172 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head))); 1173 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head)));
1173 } 1174 }
1174 1175
1175 next: 1176next:
1176 /* release descriptor */ 1177 /* release descriptor */
1177 atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED); 1178 atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED);
1178 1179
1179 if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1)) 1180 if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1))
1180 priv->rx_desc_head++; 1181 priv->rx_desc_head++;
1181 else 1182 else
1182 priv->rx_desc_head = 0; 1183 priv->rx_desc_head = 0;
1183 } 1184 }
1184} 1185}
1185 1186
1186static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs) 1187static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1187{ 1188{
@@ -1189,7 +1190,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1189 struct atmel_private *priv = netdev_priv(dev); 1190 struct atmel_private *priv = netdev_priv(dev);
1190 u8 isr; 1191 u8 isr;
1191 int i = -1; 1192 int i = -1;
1192 static u8 irq_order[] = { 1193 static u8 irq_order[] = {
1193 ISR_OUT_OF_RANGE, 1194 ISR_OUT_OF_RANGE,
1194 ISR_RxCOMPLETE, 1195 ISR_RxCOMPLETE,
1195 ISR_TxCOMPLETE, 1196 ISR_TxCOMPLETE,
@@ -1199,20 +1200,19 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1199 ISR_IBSS_MERGE, 1200 ISR_IBSS_MERGE,
1200 ISR_GENERIC_IRQ 1201 ISR_GENERIC_IRQ
1201 }; 1202 };
1202
1203 1203
1204 if (priv->card && priv->present_callback && 1204 if (priv->card && priv->present_callback &&
1205 !(*priv->present_callback)(priv->card)) 1205 !(*priv->present_callback)(priv->card))
1206 return IRQ_HANDLED; 1206 return IRQ_HANDLED;
1207 1207
1208 /* In this state upper-level code assumes it can mess with 1208 /* In this state upper-level code assumes it can mess with
1209 the card unhampered by interrupts which may change register state. 1209 the card unhampered by interrupts which may change register state.
1210 Note that even though the card shouldn't generate interrupts 1210 Note that even though the card shouldn't generate interrupts
1211 the interrupt line may be shared. This allows card setup 1211 the interrupt line may be shared. This allows card setup
1212 to go on without disabling interrupts for a long time. */ 1212 to go on without disabling interrupts for a long time. */
1213 if (priv->station_state == STATION_STATE_DOWN) 1213 if (priv->station_state == STATION_STATE_DOWN)
1214 return IRQ_NONE; 1214 return IRQ_NONE;
1215 1215
1216 atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */ 1216 atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */
1217 1217
1218 while (1) { 1218 while (1) {
@@ -1221,36 +1221,36 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1221 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name); 1221 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
1222 return IRQ_HANDLED; 1222 return IRQ_HANDLED;
1223 } 1223 }
1224 1224
1225 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET)); 1225 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
1226 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0); 1226 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
1227 1227
1228 if (!isr) { 1228 if (!isr) {
1229 atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */ 1229 atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */
1230 return i == -1 ? IRQ_NONE : IRQ_HANDLED; 1230 return i == -1 ? IRQ_NONE : IRQ_HANDLED;
1231 } 1231 }
1232 1232
1233 atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */ 1233 atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
1234 1234
1235 for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++) 1235 for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++)
1236 if (isr & irq_order[i]) 1236 if (isr & irq_order[i])
1237 break; 1237 break;
1238 1238
1239 if (!atmel_lock_mac(priv)) { 1239 if (!atmel_lock_mac(priv)) {
1240 /* failed to contact card */ 1240 /* failed to contact card */
1241 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name); 1241 printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
1242 return IRQ_HANDLED; 1242 return IRQ_HANDLED;
1243 } 1243 }
1244 1244
1245 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET)); 1245 isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
1246 isr ^= irq_order[i]; 1246 isr ^= irq_order[i];
1247 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr); 1247 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr);
1248 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0); 1248 atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
1249 1249
1250 switch (irq_order[i]) { 1250 switch (irq_order[i]) {
1251 1251
1252 case ISR_OUT_OF_RANGE: 1252 case ISR_OUT_OF_RANGE:
1253 if (priv->operating_mode == IW_MODE_INFRA && 1253 if (priv->operating_mode == IW_MODE_INFRA &&
1254 priv->station_state == STATION_STATE_READY) { 1254 priv->station_state == STATION_STATE_READY) {
1255 priv->station_is_associated = 0; 1255 priv->station_is_associated = 0;
1256 atmel_scan(priv, 1); 1256 atmel_scan(priv, 1);
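service_interrupt() above handles one pending source per pass: it reads the interrupt-status byte through the shared-memory interface, acknowledges the card, walks irq_order[] for the first bit that is set, and clears only that bit before looping. A standalone sketch of that priority dispatch, using the subset of ISR_* values visible in this diff and keeping their relative priority:

#include <stdint.h>
#include <stdio.h>

/* bit values copied from the ISR_* defines earlier in this diff */
#define ISR_COMMAND_COMPLETE 0x10
#define ISR_OUT_OF_RANGE     0x20
#define ISR_IBSS_MERGE       0x40
#define ISR_GENERIC_IRQ      0x80

/* highest-priority source first, as in the driver's irq_order[]
   (subset: the Rx/Tx and error entries fall outside this hunk) */
static const uint8_t irq_order[] = {
    ISR_OUT_OF_RANGE,
    ISR_COMMAND_COMPLETE,
    ISR_IBSS_MERGE,
    ISR_GENERIC_IRQ,
};

/* Service one pending source and clear its bit; returns the bit handled
   (0 when nothing is pending).  The real handler re-reads the status byte
   from card memory on every pass and holds the MAC lockout meanwhile. */
static uint8_t service_one(uint8_t *isr)
{
    size_t i;

    for (i = 0; i < sizeof(irq_order) / sizeof(irq_order[0]); i++) {
        if (*isr & irq_order[i]) {
            *isr ^= irq_order[i];
            return irq_order[i];
        }
    }
    return 0;
}

int main(void)
{
    uint8_t isr = ISR_GENERIC_IRQ | ISR_OUT_OF_RANGE, bit;

    while ((bit = service_one(&isr)) != 0)
        printf("handled 0x%02x\n", (unsigned)bit);   /* 0x20 first, then 0x80 */
    return 0;
}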
@@ -1261,24 +1261,24 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1261 priv->wstats.discard.misc++; 1261 priv->wstats.discard.misc++;
1262 /* fall through */ 1262 /* fall through */
1263 case ISR_RxCOMPLETE: 1263 case ISR_RxCOMPLETE:
1264 rx_done_irq(priv); 1264 rx_done_irq(priv);
1265 break; 1265 break;
1266 1266
1267 case ISR_TxCOMPLETE: 1267 case ISR_TxCOMPLETE:
1268 tx_done_irq(priv); 1268 tx_done_irq(priv);
1269 break; 1269 break;
1270 1270
1271 case ISR_FATAL_ERROR: 1271 case ISR_FATAL_ERROR:
1272 printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name); 1272 printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name);
1273 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 1273 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
1274 break; 1274 break;
1275 1275
1276 case ISR_COMMAND_COMPLETE: 1276 case ISR_COMMAND_COMPLETE:
1277 atmel_command_irq(priv); 1277 atmel_command_irq(priv);
1278 break; 1278 break;
1279 1279
1280 case ISR_IBSS_MERGE: 1280 case ISR_IBSS_MERGE:
1281 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS, 1281 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
1282 priv->CurrentBSSID, 6); 1282 priv->CurrentBSSID, 6);
1283 /* The WPA stuff cares about the current AP address */ 1283 /* The WPA stuff cares about the current AP address */
1284 if (priv->use_wpa) 1284 if (priv->use_wpa)
@@ -1288,24 +1288,23 @@ static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs
1288 printk(KERN_INFO "%s: Generic_irq received.\n", dev->name); 1288 printk(KERN_INFO "%s: Generic_irq received.\n", dev->name);
1289 break; 1289 break;
1290 } 1290 }
1291 } 1291 }
1292} 1292}
1293 1293
1294 1294static struct net_device_stats *atmel_get_stats(struct net_device *dev)
1295static struct net_device_stats *atmel_get_stats (struct net_device *dev)
1296{ 1295{
1297 struct atmel_private *priv = netdev_priv(dev); 1296 struct atmel_private *priv = netdev_priv(dev);
1298 return &priv->stats; 1297 return &priv->stats;
1299} 1298}
1300 1299
1301static struct iw_statistics *atmel_get_wireless_stats (struct net_device *dev) 1300static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
1302{ 1301{
1303 struct atmel_private *priv = netdev_priv(dev); 1302 struct atmel_private *priv = netdev_priv(dev);
1304 1303
1305 /* update the link quality here in case we are seeing no beacons 1304 /* update the link quality here in case we are seeing no beacons
1306 at all to drive the process */ 1305 at all to drive the process */
1307 atmel_smooth_qual(priv); 1306 atmel_smooth_qual(priv);
1308 1307
1309 priv->wstats.status = priv->station_state; 1308 priv->wstats.status = priv->station_state;
1310 1309
1311 if (priv->operating_mode == IW_MODE_INFRA) { 1310 if (priv->operating_mode == IW_MODE_INFRA) {
@@ -1328,8 +1327,8 @@ static struct iw_statistics *atmel_get_wireless_stats (struct net_device *dev)
1328 | IW_QUAL_NOISE_INVALID; 1327 | IW_QUAL_NOISE_INVALID;
1329 priv->wstats.miss.beacon = 0; 1328 priv->wstats.miss.beacon = 0;
1330 } 1329 }
1331 1330
1332 return (&priv->wstats); 1331 return &priv->wstats;
1333} 1332}
1334 1333
1335static int atmel_change_mtu(struct net_device *dev, int new_mtu) 1334static int atmel_change_mtu(struct net_device *dev, int new_mtu)
@@ -1343,21 +1342,21 @@ static int atmel_change_mtu(struct net_device *dev, int new_mtu)
1343static int atmel_set_mac_address(struct net_device *dev, void *p) 1342static int atmel_set_mac_address(struct net_device *dev, void *p)
1344{ 1343{
1345 struct sockaddr *addr = p; 1344 struct sockaddr *addr = p;
1346 1345
1347 memcpy (dev->dev_addr, addr->sa_data, dev->addr_len); 1346 memcpy (dev->dev_addr, addr->sa_data, dev->addr_len);
1348 return atmel_open(dev); 1347 return atmel_open(dev);
1349} 1348}
1350 1349
1351EXPORT_SYMBOL(atmel_open); 1350EXPORT_SYMBOL(atmel_open);
1352 1351
1353int atmel_open (struct net_device *dev) 1352int atmel_open(struct net_device *dev)
1354{ 1353{
1355 struct atmel_private *priv = netdev_priv(dev); 1354 struct atmel_private *priv = netdev_priv(dev);
1356 int i, channel; 1355 int i, channel;
1357 1356
1358 /* any scheduled timer is no longer needed and might screw things up.. */ 1357 /* any scheduled timer is no longer needed and might screw things up.. */
1359 del_timer_sync(&priv->management_timer); 1358 del_timer_sync(&priv->management_timer);
1360 1359
1361 /* Interrupts will not touch the card once in this state... */ 1360 /* Interrupts will not touch the card once in this state... */
1362 priv->station_state = STATION_STATE_DOWN; 1361 priv->station_state = STATION_STATE_DOWN;
1363 1362
@@ -1377,7 +1376,7 @@ int atmel_open (struct net_device *dev)
1377 priv->site_survey_state = SITE_SURVEY_IDLE; 1376 priv->site_survey_state = SITE_SURVEY_IDLE;
1378 priv->station_is_associated = 0; 1377 priv->station_is_associated = 0;
1379 1378
1380 if (!reset_atmel_card(dev)) 1379 if (!reset_atmel_card(dev))
1381 return -EAGAIN; 1380 return -EAGAIN;
1382 1381
1383 if (priv->config_reg_domain) { 1382 if (priv->config_reg_domain) {
@@ -1391,26 +1390,26 @@ int atmel_open (struct net_device *dev)
1391 if (i == sizeof(channel_table)/sizeof(channel_table[0])) { 1390 if (i == sizeof(channel_table)/sizeof(channel_table[0])) {
1392 priv->reg_domain = REG_DOMAIN_MKK1; 1391 priv->reg_domain = REG_DOMAIN_MKK1;
1393 printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name); 1392 printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
1394 } 1393 }
1395 } 1394 }
1396 1395
1397 if ((channel = atmel_validate_channel(priv, priv->channel))) 1396 if ((channel = atmel_validate_channel(priv, priv->channel)))
1398 priv->channel = channel; 1397 priv->channel = channel;
1399 1398
1400 /* this moves station_state on.... */ 1399 /* this moves station_state on.... */
1401 atmel_scan(priv, 1); 1400 atmel_scan(priv, 1);
1402 1401
1403 atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */ 1402 atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */
1404 return 0; 1403 return 0;
1405} 1404}
1406 1405
1407static int atmel_close (struct net_device *dev) 1406static int atmel_close(struct net_device *dev)
1408{ 1407{
1409 struct atmel_private *priv = netdev_priv(dev); 1408 struct atmel_private *priv = netdev_priv(dev);
1410 1409
1411 atmel_enter_state(priv, STATION_STATE_DOWN); 1410 atmel_enter_state(priv, STATION_STATE_DOWN);
1412 1411
1413 if (priv->bus_type == BUS_TYPE_PCCARD) 1412 if (priv->bus_type == BUS_TYPE_PCCARD)
1414 atmel_write16(dev, GCR, 0x0060); 1413 atmel_write16(dev, GCR, 0x0060);
1415 atmel_write16(dev, GCR, 0x0040); 1414 atmel_write16(dev, GCR, 0x0040);
1416 return 0; 1415 return 0;
@@ -1438,43 +1437,46 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
1438 int i; 1437 int i;
1439 char *p = buf; 1438 char *p = buf;
1440 char *s, *r, *c; 1439 char *s, *r, *c;
1441 1440
1442 p += sprintf(p, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR); 1441 p += sprintf(p, "Driver version:\t\t%d.%d\n",
1443 1442 DRIVER_MAJOR, DRIVER_MINOR);
1443
1444 if (priv->station_state != STATION_STATE_DOWN) { 1444 if (priv->station_state != STATION_STATE_DOWN) {
1445 p += sprintf(p, "Firmware version:\t%d.%d build %d\nFirmware location:\t", 1445 p += sprintf(p, "Firmware version:\t%d.%d build %d\n"
1446 "Firmware location:\t",
1446 priv->host_info.major_version, 1447 priv->host_info.major_version,
1447 priv->host_info.minor_version, 1448 priv->host_info.minor_version,
1448 priv->host_info.build_version); 1449 priv->host_info.build_version);
1449 1450
1450 if (priv->card_type != CARD_TYPE_EEPROM) 1451 if (priv->card_type != CARD_TYPE_EEPROM)
1451 p += sprintf(p, "on card\n"); 1452 p += sprintf(p, "on card\n");
1452 else if (priv->firmware) 1453 else if (priv->firmware)
1453 p += sprintf(p, "%s loaded by host\n", priv->firmware_id); 1454 p += sprintf(p, "%s loaded by host\n",
1455 priv->firmware_id);
1454 else 1456 else
1455 p += sprintf(p, "%s loaded by hotplug\n", priv->firmware_id); 1457 p += sprintf(p, "%s loaded by hotplug\n",
1456 1458 priv->firmware_id);
1457 switch(priv->card_type) { 1459
1460 switch (priv->card_type) {
1458 case CARD_TYPE_PARALLEL_FLASH: c = "Parallel flash"; break; 1461 case CARD_TYPE_PARALLEL_FLASH: c = "Parallel flash"; break;
1459 case CARD_TYPE_SPI_FLASH: c = "SPI flash\n"; break; 1462 case CARD_TYPE_SPI_FLASH: c = "SPI flash\n"; break;
1460 case CARD_TYPE_EEPROM: c = "EEPROM"; break; 1463 case CARD_TYPE_EEPROM: c = "EEPROM"; break;
1461 default: c = "<unknown>"; 1464 default: c = "<unknown>";
1462 } 1465 }
1463 1466
1464
1465 r = "<unknown>"; 1467 r = "<unknown>";
1466 for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) 1468 for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
1467 if (priv->reg_domain == channel_table[i].reg_domain) 1469 if (priv->reg_domain == channel_table[i].reg_domain)
1468 r = channel_table[i].name; 1470 r = channel_table[i].name;
1469 1471
1470 p += sprintf(p, "MAC memory type:\t%s\n", c); 1472 p += sprintf(p, "MAC memory type:\t%s\n", c);
1471 p += sprintf(p, "Regulatory domain:\t%s\n", r); 1473 p += sprintf(p, "Regulatory domain:\t%s\n", r);
1472 p += sprintf(p, "Host CRC checking:\t%s\n", 1474 p += sprintf(p, "Host CRC checking:\t%s\n",
1473 priv->do_rx_crc ? "On" : "Off"); 1475 priv->do_rx_crc ? "On" : "Off");
1474 p += sprintf(p, "WPA-capable firmware:\t%s\n", 1476 p += sprintf(p, "WPA-capable firmware:\t%s\n",
1475 priv->use_wpa ? "Yes" : "No"); 1477 priv->use_wpa ? "Yes" : "No");
1476 } 1478 }
1477 1479
1478 switch(priv->station_state) { 1480 switch(priv->station_state) {
1479 case STATION_STATE_SCANNING: s = "Scanning"; break; 1481 case STATION_STATE_SCANNING: s = "Scanning"; break;
1480 case STATION_STATE_JOINNING: s = "Joining"; break; 1482 case STATION_STATE_JOINNING: s = "Joining"; break;
@@ -1486,9 +1488,9 @@ static int atmel_proc_output (char *buf, struct atmel_private *priv)
1486 case STATION_STATE_DOWN: s = "Down"; break; 1488 case STATION_STATE_DOWN: s = "Down"; break;
1487 default: s = "<unknown>"; 1489 default: s = "<unknown>";
1488 } 1490 }
1489 1491
1490 p += sprintf(p, "Current state:\t\t%s\n", s); 1492 p += sprintf(p, "Current state:\t\t%s\n", s);
1491 return p - buf; 1493 return p - buf;
1492} 1494}
1493 1495
1494static int atmel_read_proc(char *page, char **start, off_t off, 1496static int atmel_read_proc(char *page, char **start, off_t off,
@@ -1504,9 +1506,12 @@ static int atmel_read_proc(char *page, char **start, off_t off,
1504 return len; 1506 return len;
1505} 1507}
1506 1508
1507struct net_device *init_atmel_card( unsigned short irq, unsigned long port, const AtmelFWType fw_type, 1509struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1508 struct device *sys_dev, int (*card_present)(void *), void *card) 1510 const AtmelFWType fw_type,
1511 struct device *sys_dev,
1512 int (*card_present)(void *), void *card)
1509{ 1513{
1514 struct proc_dir_entry *ent;
1510 struct net_device *dev; 1515 struct net_device *dev;
1511 struct atmel_private *priv; 1516 struct atmel_private *priv;
1512 int rc; 1517 int rc;
@@ -1514,11 +1519,11 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1514 /* Create the network device object. */ 1519 /* Create the network device object. */
1515 dev = alloc_etherdev(sizeof(*priv)); 1520 dev = alloc_etherdev(sizeof(*priv));
1516 if (!dev) { 1521 if (!dev) {
1517 printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n"); 1522 printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n");
1518 return NULL; 1523 return NULL;
1519 } 1524 }
1520 if (dev_alloc_name(dev, dev->name) < 0) { 1525 if (dev_alloc_name(dev, dev->name) < 0) {
1521 printk(KERN_ERR "atmel: Couldn't get name!\n"); 1526 printk(KERN_ERR "atmel: Couldn't get name!\n");
1522 goto err_out_free; 1527 goto err_out_free;
1523 } 1528 }
1524 1529
@@ -1550,7 +1555,7 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1550 memset(priv->BSSID, 0, 6); 1555 memset(priv->BSSID, 0, 6);
1551 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */ 1556 priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
1552 priv->station_was_associated = 0; 1557 priv->station_was_associated = 0;
1553 1558
1554 priv->last_survey = jiffies; 1559 priv->last_survey = jiffies;
1555 priv->preamble = LONG_PREAMBLE; 1560 priv->preamble = LONG_PREAMBLE;
1556 priv->operating_mode = IW_MODE_INFRA; 1561 priv->operating_mode = IW_MODE_INFRA;
@@ -1586,7 +1591,7 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1586 spin_lock_init(&priv->timerlock); 1591 spin_lock_init(&priv->timerlock);
1587 priv->management_timer.function = atmel_management_timer; 1592 priv->management_timer.function = atmel_management_timer;
1588 priv->management_timer.data = (unsigned long) dev; 1593 priv->management_timer.data = (unsigned long) dev;
1589 1594
1590 dev->open = atmel_open; 1595 dev->open = atmel_open;
1591 dev->stop = atmel_close; 1596 dev->stop = atmel_close;
1592 dev->change_mtu = atmel_change_mtu; 1597 dev->change_mtu = atmel_change_mtu;
@@ -1597,44 +1602,46 @@ struct net_device *init_atmel_card( unsigned short irq, unsigned long port, cons
1597 dev->do_ioctl = atmel_ioctl; 1602 dev->do_ioctl = atmel_ioctl;
1598 dev->irq = irq; 1603 dev->irq = irq;
1599 dev->base_addr = port; 1604 dev->base_addr = port;
1600 1605
1601 SET_NETDEV_DEV(dev, sys_dev); 1606 SET_NETDEV_DEV(dev, sys_dev);
1602 1607
1603 if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) { 1608 if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) {
1604 printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc ); 1609 printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc);
1605 goto err_out_free; 1610 goto err_out_free;
1606 } 1611 }
1607 1612
1608 if (!request_region(dev->base_addr, 32, 1613 if (!request_region(dev->base_addr, 32,
1609 priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) { 1614 priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) {
1610 goto err_out_irq; 1615 goto err_out_irq;
1611 } 1616 }
1612 1617
1613 if (register_netdev(dev)) 1618 if (register_netdev(dev))
1614 goto err_out_res; 1619 goto err_out_res;
1615 1620
1616 if (!probe_atmel_card(dev)){ 1621 if (!probe_atmel_card(dev)){
1617 unregister_netdev(dev); 1622 unregister_netdev(dev);
1618 goto err_out_res; 1623 goto err_out_res;
1619 } 1624 }
1620 1625
1621 netif_carrier_off(dev); 1626 netif_carrier_off(dev);
1622 1627
1623 create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv); 1628 ent = create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv);
1624 1629 if (!ent)
1630 printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
1631
1625 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", 1632 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1626 dev->name, DRIVER_MAJOR, DRIVER_MINOR, 1633 dev->name, DRIVER_MAJOR, DRIVER_MINOR,
1627 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 1634 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1628 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); 1635 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] );
1629 1636
1630 SET_MODULE_OWNER(dev); 1637 SET_MODULE_OWNER(dev);
1631 return dev; 1638 return dev;
1632 1639
1633 err_out_res: 1640err_out_res:
1634 release_region( dev->base_addr, 32); 1641 release_region( dev->base_addr, 32);
1635 err_out_irq: 1642err_out_irq:
1636 free_irq(dev->irq, dev); 1643 free_irq(dev->irq, dev);
1637 err_out_free: 1644err_out_free:
1638 free_netdev(dev); 1645 free_netdev(dev);
1639 return NULL; 1646 return NULL;
1640} 1647}
@@ -1644,12 +1651,12 @@ EXPORT_SYMBOL(init_atmel_card);
1644void stop_atmel_card(struct net_device *dev) 1651void stop_atmel_card(struct net_device *dev)
1645{ 1652{
1646 struct atmel_private *priv = netdev_priv(dev); 1653 struct atmel_private *priv = netdev_priv(dev);
1647 1654
1648 /* put a brick on it... */ 1655 /* put a brick on it... */
1649 if (priv->bus_type == BUS_TYPE_PCCARD) 1656 if (priv->bus_type == BUS_TYPE_PCCARD)
1650 atmel_write16(dev, GCR, 0x0060); 1657 atmel_write16(dev, GCR, 0x0060);
1651 atmel_write16(dev, GCR, 0x0040); 1658 atmel_write16(dev, GCR, 0x0040);
1652 1659
1653 del_timer_sync(&priv->management_timer); 1660 del_timer_sync(&priv->management_timer);
1654 unregister_netdev(dev); 1661 unregister_netdev(dev);
1655 remove_proc_entry("driver/atmel", NULL); 1662 remove_proc_entry("driver/atmel", NULL);
@@ -1675,13 +1682,13 @@ static int atmel_set_essid(struct net_device *dev,
1675 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1682 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1676 1683
1677 priv->connect_to_any_BSS = 0; 1684 priv->connect_to_any_BSS = 0;
1678 1685
1679 /* Check the size of the string */ 1686 /* Check the size of the string */
1680 if (dwrq->length > MAX_SSID_LENGTH + 1) 1687 if (dwrq->length > MAX_SSID_LENGTH + 1)
1681 return -E2BIG ; 1688 return -E2BIG;
1682 if (index != 0) 1689 if (index != 0)
1683 return -EINVAL; 1690 return -EINVAL;
1684 1691
1685 memcpy(priv->new_SSID, extra, dwrq->length - 1); 1692 memcpy(priv->new_SSID, extra, dwrq->length - 1);
1686 priv->new_SSID_size = dwrq->length - 1; 1693 priv->new_SSID_size = dwrq->length - 1;
1687 } 1694 }
@@ -1706,7 +1713,7 @@ static int atmel_get_essid(struct net_device *dev,
1706 extra[priv->SSID_size] = '\0'; 1713 extra[priv->SSID_size] = '\0';
1707 dwrq->length = priv->SSID_size + 1; 1714 dwrq->length = priv->SSID_size + 1;
1708 } 1715 }
1709 1716
1710 dwrq->flags = !priv->connect_to_any_BSS; /* active */ 1717 dwrq->flags = !priv->connect_to_any_BSS; /* active */
1711 1718
1712 return 0; 1719 return 0;
@@ -1768,7 +1775,7 @@ static int atmel_set_encode(struct net_device *dev,
1768 /* WE specify that if a valid key is set, encryption 1775 /* WE specify that if a valid key is set, encryption
1769 * should be enabled (user may turn it off later) 1776 * should be enabled (user may turn it off later)
1770 * This is also how "iwconfig ethX key on" works */ 1777 * This is also how "iwconfig ethX key on" works */
1771 if (index == current_index && 1778 if (index == current_index &&
1772 priv->wep_key_len[index] > 0) { 1779 priv->wep_key_len[index] > 0) {
1773 priv->wep_is_on = 1; 1780 priv->wep_is_on = 1;
1774 priv->exclude_unencrypted = 1; 1781 priv->exclude_unencrypted = 1;
@@ -1783,18 +1790,18 @@ static int atmel_set_encode(struct net_device *dev,
1783 } else { 1790 } else {
1784 /* Do we want to just set the transmit key index ? */ 1791 /* Do we want to just set the transmit key index ? */
1785 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1792 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1786 if ( index>=0 && index < 4 ) { 1793 if (index >= 0 && index < 4) {
1787 priv->default_key = index; 1794 priv->default_key = index;
1788 } else 1795 } else
1789 /* Don't complain if only change the mode */ 1796 /* Don't complain if only change the mode */
1790 if(!dwrq->flags & IW_ENCODE_MODE) { 1797 if (!dwrq->flags & IW_ENCODE_MODE) {
1791 return -EINVAL; 1798 return -EINVAL;
1792 } 1799 }
1793 } 1800 }
1794 /* Read the flags */ 1801 /* Read the flags */
1795 if(dwrq->flags & IW_ENCODE_DISABLED) { 1802 if (dwrq->flags & IW_ENCODE_DISABLED) {
1796 priv->wep_is_on = 0; 1803 priv->wep_is_on = 0;
1797 priv->encryption_level = 0; 1804 priv->encryption_level = 0;
1798 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; 1805 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
1799 } else { 1806 } else {
1800 priv->wep_is_on = 1; 1807 priv->wep_is_on = 1;
@@ -1806,15 +1813,14 @@ static int atmel_set_encode(struct net_device *dev,
1806 priv->encryption_level = 1; 1813 priv->encryption_level = 1;
1807 } 1814 }
1808 } 1815 }
1809 if(dwrq->flags & IW_ENCODE_RESTRICTED) 1816 if (dwrq->flags & IW_ENCODE_RESTRICTED)
1810 priv->exclude_unencrypted = 1; 1817 priv->exclude_unencrypted = 1;
1811 if(dwrq->flags & IW_ENCODE_OPEN) 1818 if(dwrq->flags & IW_ENCODE_OPEN)
1812 priv->exclude_unencrypted = 0; 1819 priv->exclude_unencrypted = 0;
1813 1820
1814 return -EINPROGRESS; /* Call commit handler */ 1821 return -EINPROGRESS; /* Call commit handler */
1815} 1822}
1816 1823
1817
1818static int atmel_get_encode(struct net_device *dev, 1824static int atmel_get_encode(struct net_device *dev,
1819 struct iw_request_info *info, 1825 struct iw_request_info *info,
1820 struct iw_point *dwrq, 1826 struct iw_point *dwrq,
@@ -1822,7 +1828,7 @@ static int atmel_get_encode(struct net_device *dev,
1822{ 1828{
1823 struct atmel_private *priv = netdev_priv(dev); 1829 struct atmel_private *priv = netdev_priv(dev);
1824 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1830 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1825 1831
1826 if (!priv->wep_is_on) 1832 if (!priv->wep_is_on)
1827 dwrq->flags = IW_ENCODE_DISABLED; 1833 dwrq->flags = IW_ENCODE_DISABLED;
1828 else { 1834 else {
@@ -1843,7 +1849,7 @@ static int atmel_get_encode(struct net_device *dev,
1843 memset(extra, 0, 16); 1849 memset(extra, 0, 16);
1844 memcpy(extra, priv->wep_keys[index], dwrq->length); 1850 memcpy(extra, priv->wep_keys[index], dwrq->length);
1845 } 1851 }
1846 1852
1847 return 0; 1853 return 0;
1848} 1854}
1849 1855
@@ -1862,17 +1868,17 @@ static int atmel_set_rate(struct net_device *dev,
1862 char *extra) 1868 char *extra)
1863{ 1869{
1864 struct atmel_private *priv = netdev_priv(dev); 1870 struct atmel_private *priv = netdev_priv(dev);
1865 1871
1866 if (vwrq->fixed == 0) { 1872 if (vwrq->fixed == 0) {
1867 priv->tx_rate = 3; 1873 priv->tx_rate = 3;
1868 priv->auto_tx_rate = 1; 1874 priv->auto_tx_rate = 1;
1869 } else { 1875 } else {
1870 priv->auto_tx_rate = 0; 1876 priv->auto_tx_rate = 0;
1871 1877
1872 /* Which type of value ? */ 1878 /* Which type of value ? */
1873 if((vwrq->value < 4) && (vwrq->value >= 0)) { 1879 if ((vwrq->value < 4) && (vwrq->value >= 0)) {
1874 /* Setting by rate index */ 1880 /* Setting by rate index */
1875 priv->tx_rate = vwrq->value; 1881 priv->tx_rate = vwrq->value;
1876 } else { 1882 } else {
1877 /* Setting by frequency value */ 1883 /* Setting by frequency value */
1878 switch (vwrq->value) { 1884 switch (vwrq->value) {
@@ -1899,7 +1905,7 @@ static int atmel_set_mode(struct net_device *dev,
1899 return -EINVAL; 1905 return -EINVAL;
1900 1906
1901 priv->operating_mode = *uwrq; 1907 priv->operating_mode = *uwrq;
1902 return -EINPROGRESS; 1908 return -EINPROGRESS;
1903} 1909}
1904 1910
1905static int atmel_get_mode(struct net_device *dev, 1911static int atmel_get_mode(struct net_device *dev,
@@ -1908,7 +1914,7 @@ static int atmel_get_mode(struct net_device *dev,
1908 char *extra) 1914 char *extra)
1909{ 1915{
1910 struct atmel_private *priv = netdev_priv(dev); 1916 struct atmel_private *priv = netdev_priv(dev);
1911 1917
1912 *uwrq = priv->operating_mode; 1918 *uwrq = priv->operating_mode;
1913 return 0; 1919 return 0;
1914} 1920}
@@ -1962,9 +1968,9 @@ static int atmel_set_retry(struct net_device *dev,
1962 char *extra) 1968 char *extra)
1963{ 1969{
1964 struct atmel_private *priv = netdev_priv(dev); 1970 struct atmel_private *priv = netdev_priv(dev);
1965 1971
1966 if(!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) { 1972 if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
1967 if(vwrq->flags & IW_RETRY_MAX) 1973 if (vwrq->flags & IW_RETRY_MAX)
1968 priv->long_retry = vwrq->value; 1974 priv->long_retry = vwrq->value;
1969 else if (vwrq->flags & IW_RETRY_MIN) 1975 else if (vwrq->flags & IW_RETRY_MIN)
1970 priv->short_retry = vwrq->value; 1976 priv->short_retry = vwrq->value;
@@ -1973,9 +1979,9 @@ static int atmel_set_retry(struct net_device *dev,
1973 priv->long_retry = vwrq->value; 1979 priv->long_retry = vwrq->value;
1974 priv->short_retry = vwrq->value; 1980 priv->short_retry = vwrq->value;
1975 } 1981 }
1976 return -EINPROGRESS; 1982 return -EINPROGRESS;
1977 } 1983 }
1978 1984
1979 return -EINVAL; 1985 return -EINVAL;
1980} 1986}
1981 1987
@@ -1989,13 +1995,13 @@ static int atmel_get_retry(struct net_device *dev,
1989 vwrq->disabled = 0; /* Can't be disabled */ 1995 vwrq->disabled = 0; /* Can't be disabled */
1990 1996
1991 /* Note : by default, display the min retry number */ 1997 /* Note : by default, display the min retry number */
1992 if((vwrq->flags & IW_RETRY_MAX)) { 1998 if (vwrq->flags & IW_RETRY_MAX) {
1993 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; 1999 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
1994 vwrq->value = priv->long_retry; 2000 vwrq->value = priv->long_retry;
1995 } else { 2001 } else {
1996 vwrq->flags = IW_RETRY_LIMIT; 2002 vwrq->flags = IW_RETRY_LIMIT;
1997 vwrq->value = priv->short_retry; 2003 vwrq->value = priv->short_retry;
1998 if(priv->long_retry != priv->short_retry) 2004 if (priv->long_retry != priv->short_retry)
1999 vwrq->flags |= IW_RETRY_MIN; 2005 vwrq->flags |= IW_RETRY_MIN;
2000 } 2006 }
2001 2007
@@ -2010,13 +2016,13 @@ static int atmel_set_rts(struct net_device *dev,
2010 struct atmel_private *priv = netdev_priv(dev); 2016 struct atmel_private *priv = netdev_priv(dev);
2011 int rthr = vwrq->value; 2017 int rthr = vwrq->value;
2012 2018
2013 if(vwrq->disabled) 2019 if (vwrq->disabled)
2014 rthr = 2347; 2020 rthr = 2347;
2015 if((rthr < 0) || (rthr > 2347)) { 2021 if ((rthr < 0) || (rthr > 2347)) {
2016 return -EINVAL; 2022 return -EINVAL;
2017 } 2023 }
2018 priv->rts_threshold = rthr; 2024 priv->rts_threshold = rthr;
2019 2025
2020 return -EINPROGRESS; /* Call commit handler */ 2026 return -EINPROGRESS; /* Call commit handler */
2021} 2027}
2022 2028
@@ -2026,7 +2032,7 @@ static int atmel_get_rts(struct net_device *dev,
2026 char *extra) 2032 char *extra)
2027{ 2033{
2028 struct atmel_private *priv = netdev_priv(dev); 2034 struct atmel_private *priv = netdev_priv(dev);
2029 2035
2030 vwrq->value = priv->rts_threshold; 2036 vwrq->value = priv->rts_threshold;
2031 vwrq->disabled = (vwrq->value >= 2347); 2037 vwrq->disabled = (vwrq->value >= 2347);
2032 vwrq->fixed = 1; 2038 vwrq->fixed = 1;
@@ -2042,14 +2048,14 @@ static int atmel_set_frag(struct net_device *dev,
2042 struct atmel_private *priv = netdev_priv(dev); 2048 struct atmel_private *priv = netdev_priv(dev);
2043 int fthr = vwrq->value; 2049 int fthr = vwrq->value;
2044 2050
2045 if(vwrq->disabled) 2051 if (vwrq->disabled)
2046 fthr = 2346; 2052 fthr = 2346;
2047 if((fthr < 256) || (fthr > 2346)) { 2053 if ((fthr < 256) || (fthr > 2346)) {
2048 return -EINVAL; 2054 return -EINVAL;
2049 } 2055 }
2050 fthr &= ~0x1; /* Get an even value - is it really needed ??? */ 2056 fthr &= ~0x1; /* Get an even value - is it really needed ??? */
2051 priv->frag_threshold = fthr; 2057 priv->frag_threshold = fthr;
2052 2058
2053 return -EINPROGRESS; /* Call commit handler */ 2059 return -EINPROGRESS; /* Call commit handler */
2054} 2060}
2055 2061
@@ -2077,21 +2083,21 @@ static int atmel_set_freq(struct net_device *dev,
2077{ 2083{
2078 struct atmel_private *priv = netdev_priv(dev); 2084 struct atmel_private *priv = netdev_priv(dev);
2079 int rc = -EINPROGRESS; /* Call commit handler */ 2085 int rc = -EINPROGRESS; /* Call commit handler */
2080 2086
2081 /* If setting by frequency, convert to a channel */ 2087 /* If setting by frequency, convert to a channel */
2082 if((fwrq->e == 1) && 2088 if ((fwrq->e == 1) &&
2083 (fwrq->m >= (int) 241200000) && 2089 (fwrq->m >= (int) 241200000) &&
2084 (fwrq->m <= (int) 248700000)) { 2090 (fwrq->m <= (int) 248700000)) {
2085 int f = fwrq->m / 100000; 2091 int f = fwrq->m / 100000;
2086 int c = 0; 2092 int c = 0;
2087 while((c < 14) && (f != frequency_list[c])) 2093 while ((c < 14) && (f != frequency_list[c]))
2088 c++; 2094 c++;
2089 /* Hack to fall through... */ 2095 /* Hack to fall through... */
2090 fwrq->e = 0; 2096 fwrq->e = 0;
2091 fwrq->m = c + 1; 2097 fwrq->m = c + 1;
2092 } 2098 }
2093 /* Setting by channel number */ 2099 /* Setting by channel number */
2094 if((fwrq->m > 1000) || (fwrq->e > 0)) 2100 if ((fwrq->m > 1000) || (fwrq->e > 0))
2095 rc = -EOPNOTSUPP; 2101 rc = -EOPNOTSUPP;
2096 else { 2102 else {
2097 int channel = fwrq->m; 2103 int channel = fwrq->m;
@@ -2099,7 +2105,7 @@ static int atmel_set_freq(struct net_device *dev,
2099 priv->channel = channel; 2105 priv->channel = channel;
2100 } else { 2106 } else {
2101 rc = -EINVAL; 2107 rc = -EINVAL;
2102 } 2108 }
2103 } 2109 }
2104 return rc; 2110 return rc;
2105} 2111}
@@ -2130,7 +2136,7 @@ static int atmel_set_scan(struct net_device *dev,
2130 * This is not an error, while the device perform scanning, 2136 * This is not an error, while the device perform scanning,
2131 * traffic doesn't flow, so it's a perfect DoS... 2137 * traffic doesn't flow, so it's a perfect DoS...
2132 * Jean II */ 2138 * Jean II */
2133 2139
2134 if (priv->station_state == STATION_STATE_DOWN) 2140 if (priv->station_state == STATION_STATE_DOWN)
2135 return -EAGAIN; 2141 return -EAGAIN;
2136 2142
@@ -2142,15 +2148,15 @@ static int atmel_set_scan(struct net_device *dev,
2142 /* Initiate a scan command */ 2148 /* Initiate a scan command */
2143 if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS) 2149 if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS)
2144 return -EBUSY; 2150 return -EBUSY;
2145 2151
2146 del_timer_sync(&priv->management_timer); 2152 del_timer_sync(&priv->management_timer);
2147 spin_lock_irqsave(&priv->irqlock, flags); 2153 spin_lock_irqsave(&priv->irqlock, flags);
2148 2154
2149 priv->site_survey_state = SITE_SURVEY_IN_PROGRESS; 2155 priv->site_survey_state = SITE_SURVEY_IN_PROGRESS;
2150 priv->fast_scan = 0; 2156 priv->fast_scan = 0;
2151 atmel_scan(priv, 0); 2157 atmel_scan(priv, 0);
2152 spin_unlock_irqrestore(&priv->irqlock, flags); 2158 spin_unlock_irqrestore(&priv->irqlock, flags);
2153 2159
2154 return 0; 2160 return 0;
2155} 2161}
2156 2162
@@ -2163,11 +2169,11 @@ static int atmel_get_scan(struct net_device *dev,
2163 int i; 2169 int i;
2164 char *current_ev = extra; 2170 char *current_ev = extra;
2165 struct iw_event iwe; 2171 struct iw_event iwe;
2166 2172
2167 if (priv->site_survey_state != SITE_SURVEY_COMPLETED) 2173 if (priv->site_survey_state != SITE_SURVEY_COMPLETED)
2168 return -EAGAIN; 2174 return -EAGAIN;
2169 2175
2170 for(i=0; i<priv->BSS_list_entries; i++) { 2176 for (i = 0; i < priv->BSS_list_entries; i++) {
2171 iwe.cmd = SIOCGIWAP; 2177 iwe.cmd = SIOCGIWAP;
2172 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 2178 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
2173 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6); 2179 memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
@@ -2179,16 +2185,16 @@ static int atmel_get_scan(struct net_device *dev,
2179 iwe.cmd = SIOCGIWESSID; 2185 iwe.cmd = SIOCGIWESSID;
2180 iwe.u.data.flags = 1; 2186 iwe.u.data.flags = 1;
2181 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, priv->BSSinfo[i].SSID); 2187 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, priv->BSSinfo[i].SSID);
2182 2188
2183 iwe.cmd = SIOCGIWMODE; 2189 iwe.cmd = SIOCGIWMODE;
2184 iwe.u.mode = priv->BSSinfo[i].BSStype; 2190 iwe.u.mode = priv->BSSinfo[i].BSStype;
2185 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN); 2191 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN);
2186 2192
2187 iwe.cmd = SIOCGIWFREQ; 2193 iwe.cmd = SIOCGIWFREQ;
2188 iwe.u.freq.m = priv->BSSinfo[i].channel; 2194 iwe.u.freq.m = priv->BSSinfo[i].channel;
2189 iwe.u.freq.e = 0; 2195 iwe.u.freq.e = 0;
2190 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); 2196 current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN);
2191 2197
2192 iwe.cmd = SIOCGIWENCODE; 2198 iwe.cmd = SIOCGIWENCODE;
2193 if (priv->BSSinfo[i].UsingWEP) 2199 if (priv->BSSinfo[i].UsingWEP)
2194 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 2200 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -2196,13 +2202,12 @@ static int atmel_get_scan(struct net_device *dev,
2196 iwe.u.data.flags = IW_ENCODE_DISABLED; 2202 iwe.u.data.flags = IW_ENCODE_DISABLED;
2197 iwe.u.data.length = 0; 2203 iwe.u.data.length = 0;
2198 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL); 2204 current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL);
2199
2200 } 2205 }
2201 2206
2202 /* Length of data */ 2207 /* Length of data */
2203 dwrq->length = (current_ev - extra); 2208 dwrq->length = (current_ev - extra);
2204 dwrq->flags = 0; 2209 dwrq->flags = 0;
2205 2210
2206 return 0; 2211 return 0;
2207} 2212}
2208 2213
@@ -2213,7 +2218,7 @@ static int atmel_get_range(struct net_device *dev,
2213{ 2218{
2214 struct atmel_private *priv = netdev_priv(dev); 2219 struct atmel_private *priv = netdev_priv(dev);
2215 struct iw_range *range = (struct iw_range *) extra; 2220 struct iw_range *range = (struct iw_range *) extra;
2216 int k,i,j; 2221 int k, i, j;
2217 2222
2218 dwrq->length = sizeof(struct iw_range); 2223 dwrq->length = sizeof(struct iw_range);
2219 memset(range, 0, sizeof(struct iw_range)); 2224 memset(range, 0, sizeof(struct iw_range));
@@ -2226,14 +2231,14 @@ static int atmel_get_range(struct net_device *dev,
2226 break; 2231 break;
2227 } 2232 }
2228 if (range->num_channels != 0) { 2233 if (range->num_channels != 0) {
2229 for(k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) { 2234 for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
2230 range->freq[k].i = i; /* List index */ 2235 range->freq[k].i = i; /* List index */
2231 range->freq[k].m = frequency_list[i-1] * 100000; 2236 range->freq[k].m = frequency_list[i - 1] * 100000;
2232 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ 2237 range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
2233 } 2238 }
2234 range->num_frequency = k; 2239 range->num_frequency = k;
2235 } 2240 }
2236 2241
2237 range->max_qual.qual = 100; 2242 range->max_qual.qual = 100;
2238 range->max_qual.level = 100; 2243 range->max_qual.level = 100;
2239 range->max_qual.noise = 0; 2244 range->max_qual.noise = 0;
@@ -2261,11 +2266,11 @@ static int atmel_get_range(struct net_device *dev,
2261 range->encoding_size[1] = 13; 2266 range->encoding_size[1] = 13;
2262 range->num_encoding_sizes = 2; 2267 range->num_encoding_sizes = 2;
2263 range->max_encoding_tokens = 4; 2268 range->max_encoding_tokens = 4;
2264 2269
2265 range->pmp_flags = IW_POWER_ON; 2270 range->pmp_flags = IW_POWER_ON;
2266 range->pmt_flags = IW_POWER_ON; 2271 range->pmt_flags = IW_POWER_ON;
2267 range->pm_capa = 0; 2272 range->pm_capa = 0;
2268 2273
2269 range->we_version_source = WIRELESS_EXT; 2274 range->we_version_source = WIRELESS_EXT;
2270 range->we_version_compiled = WIRELESS_EXT; 2275 range->we_version_compiled = WIRELESS_EXT;
2271 range->retry_capa = IW_RETRY_LIMIT ; 2276 range->retry_capa = IW_RETRY_LIMIT ;
@@ -2289,7 +2294,7 @@ static int atmel_set_wap(struct net_device *dev,
2289 2294
2290 if (awrq->sa_family != ARPHRD_ETHER) 2295 if (awrq->sa_family != ARPHRD_ETHER)
2291 return -EINVAL; 2296 return -EINVAL;
2292 2297
2293 if (memcmp(bcast, awrq->sa_data, 6) == 0) { 2298 if (memcmp(bcast, awrq->sa_data, 6) == 0) {
2294 del_timer_sync(&priv->management_timer); 2299 del_timer_sync(&priv->management_timer);
2295 spin_lock_irqsave(&priv->irqlock, flags); 2300 spin_lock_irqsave(&priv->irqlock, flags);
@@ -2297,8 +2302,8 @@ static int atmel_set_wap(struct net_device *dev,
2297 spin_unlock_irqrestore(&priv->irqlock, flags); 2302 spin_unlock_irqrestore(&priv->irqlock, flags);
2298 return 0; 2303 return 0;
2299 } 2304 }
2300 2305
2301 for(i=0; i<priv->BSS_list_entries; i++) { 2306 for (i = 0; i < priv->BSS_list_entries; i++) {
2302 if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) { 2307 if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) {
2303 if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) { 2308 if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) {
2304 return -EINVAL; 2309 return -EINVAL;
@@ -2313,10 +2318,10 @@ static int atmel_set_wap(struct net_device *dev,
2313 } 2318 }
2314 } 2319 }
2315 } 2320 }
2316 2321
2317 return -EINVAL; 2322 return -EINVAL;
2318} 2323}
2319 2324
2320static int atmel_config_commit(struct net_device *dev, 2325static int atmel_config_commit(struct net_device *dev,
2321 struct iw_request_info *info, /* NULL */ 2326 struct iw_request_info *info, /* NULL */
2322 void *zwrq, /* NULL */ 2327 void *zwrq, /* NULL */
@@ -2325,18 +2330,18 @@ static int atmel_config_commit(struct net_device *dev,
2325 return atmel_open(dev); 2330 return atmel_open(dev);
2326} 2331}
2327 2332
2328static const iw_handler atmel_handler[] = 2333static const iw_handler atmel_handler[] =
2329{ 2334{
2330 (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */ 2335 (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */
2331 (iw_handler) atmel_get_name, /* SIOCGIWNAME */ 2336 (iw_handler) atmel_get_name, /* SIOCGIWNAME */
2332 (iw_handler) NULL, /* SIOCSIWNWID */ 2337 (iw_handler) NULL, /* SIOCSIWNWID */
2333 (iw_handler) NULL, /* SIOCGIWNWID */ 2338 (iw_handler) NULL, /* SIOCGIWNWID */
2334 (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */ 2339 (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */
2335 (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */ 2340 (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */
2336 (iw_handler) atmel_set_mode, /* SIOCSIWMODE */ 2341 (iw_handler) atmel_set_mode, /* SIOCSIWMODE */
2337 (iw_handler) atmel_get_mode, /* SIOCGIWMODE */ 2342 (iw_handler) atmel_get_mode, /* SIOCGIWMODE */
2338 (iw_handler) NULL, /* SIOCSIWSENS */ 2343 (iw_handler) NULL, /* SIOCSIWSENS */
2339 (iw_handler) NULL, /* SIOCGIWSENS */ 2344 (iw_handler) NULL, /* SIOCGIWSENS */
2340 (iw_handler) NULL, /* SIOCSIWRANGE */ 2345 (iw_handler) NULL, /* SIOCSIWRANGE */
2341 (iw_handler) atmel_get_range, /* SIOCGIWRANGE */ 2346 (iw_handler) atmel_get_range, /* SIOCGIWRANGE */
2342 (iw_handler) NULL, /* SIOCSIWPRIV */ 2347 (iw_handler) NULL, /* SIOCSIWPRIV */
@@ -2350,13 +2355,13 @@ static const iw_handler atmel_handler[] =
2350 (iw_handler) atmel_set_wap, /* SIOCSIWAP */ 2355 (iw_handler) atmel_set_wap, /* SIOCSIWAP */
2351 (iw_handler) atmel_get_wap, /* SIOCGIWAP */ 2356 (iw_handler) atmel_get_wap, /* SIOCGIWAP */
2352 (iw_handler) NULL, /* -- hole -- */ 2357 (iw_handler) NULL, /* -- hole -- */
2353 (iw_handler) NULL, /* SIOCGIWAPLIST */ 2358 (iw_handler) NULL, /* SIOCGIWAPLIST */
2354 (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */ 2359 (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */
2355 (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */ 2360 (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */
2356 (iw_handler) atmel_set_essid, /* SIOCSIWESSID */ 2361 (iw_handler) atmel_set_essid, /* SIOCSIWESSID */
2357 (iw_handler) atmel_get_essid, /* SIOCGIWESSID */ 2362 (iw_handler) atmel_get_essid, /* SIOCGIWESSID */
2358 (iw_handler) NULL, /* SIOCSIWNICKN */ 2363 (iw_handler) NULL, /* SIOCSIWNICKN */
2359 (iw_handler) NULL, /* SIOCGIWNICKN */ 2364 (iw_handler) NULL, /* SIOCGIWNICKN */
2360 (iw_handler) NULL, /* -- hole -- */ 2365 (iw_handler) NULL, /* -- hole -- */
2361 (iw_handler) NULL, /* -- hole -- */ 2366 (iw_handler) NULL, /* -- hole -- */
2362 (iw_handler) atmel_set_rate, /* SIOCSIWRATE */ 2367 (iw_handler) atmel_set_rate, /* SIOCSIWRATE */
@@ -2365,8 +2370,8 @@ static const iw_handler atmel_handler[] =
2365 (iw_handler) atmel_get_rts, /* SIOCGIWRTS */ 2370 (iw_handler) atmel_get_rts, /* SIOCGIWRTS */
2366 (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */ 2371 (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */
2367 (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */ 2372 (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */
2368 (iw_handler) NULL, /* SIOCSIWTXPOW */ 2373 (iw_handler) NULL, /* SIOCSIWTXPOW */
2369 (iw_handler) NULL, /* SIOCGIWTXPOW */ 2374 (iw_handler) NULL, /* SIOCGIWTXPOW */
2370 (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */ 2375 (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */
2371 (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */ 2376 (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */
2372 (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */ 2377 (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */
@@ -2375,39 +2380,51 @@ static const iw_handler atmel_handler[] =
2375 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */ 2380 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */
2376}; 2381};
2377 2382
2378 2383static const iw_handler atmel_private_handler[] =
2379static const iw_handler atmel_private_handler[] =
2380{ 2384{
2381 NULL, /* SIOCIWFIRSTPRIV */ 2385 NULL, /* SIOCIWFIRSTPRIV */
2382}; 2386};
2383 2387
2384typedef struct atmel_priv_ioctl { 2388typedef struct atmel_priv_ioctl {
2385 char id[32]; 2389 char id[32];
2386 unsigned char __user *data; 2390 unsigned char __user *data;
2387 unsigned short len; 2391 unsigned short len;
2388} atmel_priv_ioctl; 2392} atmel_priv_ioctl;
2389 2393
2390 2394#define ATMELFWL SIOCIWFIRSTPRIV
2391#define ATMELFWL SIOCIWFIRSTPRIV 2395#define ATMELIDIFC ATMELFWL + 1
2392#define ATMELIDIFC ATMELFWL + 1 2396#define ATMELRD ATMELFWL + 2
2393#define ATMELRD ATMELFWL + 2 2397#define ATMELMAGIC 0x51807
2394#define ATMELMAGIC 0x51807
2395#define REGDOMAINSZ 20 2398#define REGDOMAINSZ 20
2396 2399
2397static const struct iw_priv_args atmel_private_args[] = { 2400static const struct iw_priv_args atmel_private_args[] = {
2398/*{ cmd, set_args, get_args, name } */ 2401 {
2399 { ATMELFWL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (atmel_priv_ioctl), IW_PRIV_TYPE_NONE, "atmelfwl" }, 2402 .cmd = ATMELFWL,
2400 { ATMELIDIFC, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "atmelidifc" }, 2403 .set_args = IW_PRIV_TYPE_BYTE
2401 { ATMELRD, IW_PRIV_TYPE_CHAR | REGDOMAINSZ, IW_PRIV_TYPE_NONE, "regdomain" }, 2404 | IW_PRIV_SIZE_FIXED
2405 | sizeof (atmel_priv_ioctl),
2406 .get_args = IW_PRIV_TYPE_NONE,
2407 .name = "atmelfwl"
2408 }, {
2409 .cmd = ATMELIDIFC,
2410 .set_args = IW_PRIV_TYPE_NONE,
2411 .get_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2412 .name = "atmelidifc"
2413 }, {
2414 .cmd = ATMELRD,
2415 .set_args = IW_PRIV_TYPE_CHAR | REGDOMAINSZ,
2416 .get_args = IW_PRIV_TYPE_NONE,
2417 .name = "regdomain"
2418 },
2402}; 2419};
2403 2420
2404static const struct iw_handler_def atmel_handler_def = 2421static const struct iw_handler_def atmel_handler_def =
2405{ 2422{
2406 .num_standard = sizeof(atmel_handler)/sizeof(iw_handler), 2423 .num_standard = sizeof(atmel_handler)/sizeof(iw_handler),
2407 .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler), 2424 .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler),
2408 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args), 2425 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
2409 .standard = (iw_handler *) atmel_handler, 2426 .standard = (iw_handler *) atmel_handler,
2410 .private = (iw_handler *) atmel_private_handler, 2427 .private = (iw_handler *) atmel_private_handler,
2411 .private_args = (struct iw_priv_args *) atmel_private_args, 2428 .private_args = (struct iw_priv_args *) atmel_private_args,
2412 .get_wireless_stats = atmel_get_wireless_stats 2429 .get_wireless_stats = atmel_get_wireless_stats
2413}; 2430};
@@ -2419,13 +2436,13 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2419 atmel_priv_ioctl com; 2436 atmel_priv_ioctl com;
2420 struct iwreq *wrq = (struct iwreq *) rq; 2437 struct iwreq *wrq = (struct iwreq *) rq;
2421 unsigned char *new_firmware; 2438 unsigned char *new_firmware;
2422 char domain[REGDOMAINSZ+1]; 2439 char domain[REGDOMAINSZ + 1];
2423 2440
2424 switch (cmd) { 2441 switch (cmd) {
2425 case ATMELIDIFC: 2442 case ATMELIDIFC:
2426 wrq->u.param.value = ATMELMAGIC; 2443 wrq->u.param.value = ATMELMAGIC;
2427 break; 2444 break;
2428 2445
2429 case ATMELFWL: 2446 case ATMELFWL:
2430 if (copy_from_user(&com, rq->ifr_data, sizeof(com))) { 2447 if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
2431 rc = -EFAULT; 2448 rc = -EFAULT;
@@ -2449,7 +2466,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2449 } 2466 }
2450 2467
2451 kfree(priv->firmware); 2468 kfree(priv->firmware);
2452 2469
2453 priv->firmware = new_firmware; 2470 priv->firmware = new_firmware;
2454 priv->firmware_length = com.len; 2471 priv->firmware_length = com.len;
2455 strncpy(priv->firmware_id, com.id, 31); 2472 strncpy(priv->firmware_id, com.id, 31);
@@ -2461,7 +2478,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2461 rc = -EFAULT; 2478 rc = -EFAULT;
2462 break; 2479 break;
2463 } 2480 }
2464 2481
2465 if (!capable(CAP_NET_ADMIN)) { 2482 if (!capable(CAP_NET_ADMIN)) {
2466 rc = -EPERM; 2483 rc = -EPERM;
2467 break; 2484 break;
@@ -2484,15 +2501,15 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2484 rc = 0; 2501 rc = 0;
2485 } 2502 }
2486 } 2503 }
2487 2504
2488 if (rc == 0 && priv->station_state != STATION_STATE_DOWN) 2505 if (rc == 0 && priv->station_state != STATION_STATE_DOWN)
2489 rc = atmel_open(dev); 2506 rc = atmel_open(dev);
2490 break; 2507 break;
2491 2508
2492 default: 2509 default:
2493 rc = -EOPNOTSUPP; 2510 rc = -EOPNOTSUPP;
2494 } 2511 }
2495 2512
2496 return rc; 2513 return rc;
2497} 2514}
2498 2515
@@ -2503,17 +2520,17 @@ struct auth_body {
2503 u8 el_id; 2520 u8 el_id;
2504 u8 chall_text_len; 2521 u8 chall_text_len;
2505 u8 chall_text[253]; 2522 u8 chall_text[253];
2506}; 2523};
2507 2524
2508static void atmel_enter_state(struct atmel_private *priv, int new_state) 2525static void atmel_enter_state(struct atmel_private *priv, int new_state)
2509{ 2526{
2510 int old_state = priv->station_state; 2527 int old_state = priv->station_state;
2511 2528
2512 if (new_state == old_state) 2529 if (new_state == old_state)
2513 return; 2530 return;
2514 2531
2515 priv->station_state = new_state; 2532 priv->station_state = new_state;
2516 2533
2517 if (new_state == STATION_STATE_READY) { 2534 if (new_state == STATION_STATE_READY) {
2518 netif_start_queue(priv->dev); 2535 netif_start_queue(priv->dev);
2519 netif_carrier_on(priv->dev); 2536 netif_carrier_on(priv->dev);
@@ -2540,7 +2557,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2540 u8 options; 2557 u8 options;
2541 u8 SSID_size; 2558 u8 SSID_size;
2542 } cmd; 2559 } cmd;
2543 2560
2544 memset(cmd.BSSID, 0xff, 6); 2561 memset(cmd.BSSID, 0xff, 6);
2545 2562
2546 if (priv->fast_scan) { 2563 if (priv->fast_scan) {
@@ -2554,17 +2571,17 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
2554 cmd.min_channel_time = cpu_to_le16(10); 2571 cmd.min_channel_time = cpu_to_le16(10);
2555 cmd.max_channel_time = cpu_to_le16(120); 2572 cmd.max_channel_time = cpu_to_le16(120);
2556 } 2573 }
2557 2574
2558 cmd.options = 0; 2575 cmd.options = 0;
2559 2576
2560 if (!specific_ssid) 2577 if (!specific_ssid)
2561 cmd.options |= SCAN_OPTIONS_SITE_SURVEY; 2578 cmd.options |= SCAN_OPTIONS_SITE_SURVEY;
2562 2579
2563 cmd.channel = (priv->channel & 0x7f); 2580 cmd.channel = (priv->channel & 0x7f);
2564 cmd.scan_type = SCAN_TYPE_ACTIVE; 2581 cmd.scan_type = SCAN_TYPE_ACTIVE;
2565 cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ? 2582 cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ?
2566 BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE); 2583 BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE);
2567 2584
2568 atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd)); 2585 atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd));
2569 2586
2570 /* This must come after all hardware access to avoid being messed up 2587 /* This must come after all hardware access to avoid being messed up
@@ -2591,16 +2608,15 @@ static void join(struct atmel_private *priv, int type)
2591 cmd.BSS_type = type; 2608 cmd.BSS_type = type;
2592 cmd.timeout = cpu_to_le16(2000); 2609 cmd.timeout = cpu_to_le16(2000);
2593 2610
2594 atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd)); 2611 atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd));
2595} 2612}
2596 2613
2597
2598static void start(struct atmel_private *priv, int type) 2614static void start(struct atmel_private *priv, int type)
2599{ 2615{
2600 struct { 2616 struct {
2601 u8 BSSID[6]; 2617 u8 BSSID[6];
2602 u8 SSID[MAX_SSID_LENGTH]; 2618 u8 SSID[MAX_SSID_LENGTH];
2603 u8 BSS_type; 2619 u8 BSS_type;
2604 u8 channel; 2620 u8 channel;
2605 u8 SSID_size; 2621 u8 SSID_size;
2606 u8 reserved[3]; 2622 u8 reserved[3];
@@ -2612,13 +2628,14 @@ static void start(struct atmel_private *priv, int type)
2612 cmd.BSS_type = type; 2628 cmd.BSS_type = type;
2613 cmd.channel = (priv->channel & 0x7f); 2629 cmd.channel = (priv->channel & 0x7f);
2614 2630
2615 atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd)); 2631 atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd));
2616} 2632}
2617 2633
2618static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 channel) 2634static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
2635 u8 channel)
2619{ 2636{
2620 int rejoin = 0; 2637 int rejoin = 0;
2621 int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 2638 int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
2622 SHORT_PREAMBLE : LONG_PREAMBLE; 2639 SHORT_PREAMBLE : LONG_PREAMBLE;
2623 2640
2624 if (priv->preamble != new) { 2641 if (priv->preamble != new) {
@@ -2626,48 +2643,48 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
2626 rejoin = 1; 2643 rejoin = 1;
2627 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new); 2644 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new);
2628 } 2645 }
2629 2646
2630 if (priv->channel != channel) { 2647 if (priv->channel != channel) {
2631 priv->channel = channel; 2648 priv->channel = channel;
2632 rejoin = 1; 2649 rejoin = 1;
2633 atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel); 2650 atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel);
2634 } 2651 }
2635 2652
2636 if (rejoin) { 2653 if (rejoin) {
2637 priv->station_is_associated = 0; 2654 priv->station_is_associated = 0;
2638 atmel_enter_state(priv, STATION_STATE_JOINNING); 2655 atmel_enter_state(priv, STATION_STATE_JOINNING);
2639 2656
2640 if (priv->operating_mode == IW_MODE_INFRA) 2657 if (priv->operating_mode == IW_MODE_INFRA)
2641 join(priv, BSS_TYPE_INFRASTRUCTURE); 2658 join(priv, BSS_TYPE_INFRASTRUCTURE);
2642 else 2659 else
2643 join(priv, BSS_TYPE_AD_HOC); 2660 join(priv, BSS_TYPE_AD_HOC);
2644 } 2661 }
2645} 2662}
2646 2663
2647 2664static void send_authentication_request(struct atmel_private *priv, u16 system,
2648static void send_authentication_request(struct atmel_private *priv, u16 system, u8 *challenge, int challenge_len) 2665 u8 *challenge, int challenge_len)
2649{ 2666{
2650 struct ieee80211_hdr_4addr header; 2667 struct ieee80211_hdr_4addr header;
2651 struct auth_body auth; 2668 struct auth_body auth;
2652 2669
2653 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 2670 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
2654 header.duration_id = cpu_to_le16(0x8000); 2671 header.duration_id = cpu_to_le16(0x8000);
2655 header.seq_ctl = 0; 2672 header.seq_ctl = 0;
2656 memcpy(header.addr1, priv->CurrentBSSID, 6); 2673 memcpy(header.addr1, priv->CurrentBSSID, 6);
2657 memcpy(header.addr2, priv->dev->dev_addr, 6); 2674 memcpy(header.addr2, priv->dev->dev_addr, 6);
2658 memcpy(header.addr3, priv->CurrentBSSID, 6); 2675 memcpy(header.addr3, priv->CurrentBSSID, 6);
2659 2676
2660 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1) 2677 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
2661 /* no WEP for authentication frames with TrSeqNo 1 */ 2678 /* no WEP for authentication frames with TrSeqNo 1 */
2662 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2679 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2663 2680
2664 auth.alg = cpu_to_le16(system); 2681 auth.alg = cpu_to_le16(system);
2665 2682
2666 auth.status = 0; 2683 auth.status = 0;
2667 auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum); 2684 auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum);
2668 priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1; 2685 priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1;
2669 priv->CurrentAuthentTransactionSeqNum += 2; 2686 priv->CurrentAuthentTransactionSeqNum += 2;
2670 2687
2671 if (challenge_len != 0) { 2688 if (challenge_len != 0) {
2672 auth.el_id = 16; /* challenge_text */ 2689 auth.el_id = 16; /* challenge_text */
2673 auth.chall_text_len = challenge_len; 2690 auth.chall_text_len = challenge_len;
@@ -2685,7 +2702,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2685 struct ieee80211_hdr_4addr header; 2702 struct ieee80211_hdr_4addr header;
2686 struct ass_req_format { 2703 struct ass_req_format {
2687 u16 capability; 2704 u16 capability;
2688 u16 listen_interval; 2705 u16 listen_interval;
2689 u8 ap[6]; /* nothing after here directly accessible */ 2706 u8 ap[6]; /* nothing after here directly accessible */
2690 u8 ssid_el_id; 2707 u8 ssid_el_id;
2691 u8 ssid_len; 2708 u8 ssid_len;
@@ -2694,15 +2711,15 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2694 u8 sup_rates_len; 2711 u8 sup_rates_len;
2695 u8 rates[4]; 2712 u8 rates[4];
2696 } body; 2713 } body;
2697 2714
2698 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2715 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2699 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ)); 2716 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
2700 header.duration_id = cpu_to_le16(0x8000); 2717 header.duration_id = cpu_to_le16(0x8000);
2701 header.seq_ctl = 0; 2718 header.seq_ctl = 0;
2702 2719
2703 memcpy(header.addr1, priv->CurrentBSSID, 6); 2720 memcpy(header.addr1, priv->CurrentBSSID, 6);
2704 memcpy(header.addr2, priv->dev->dev_addr, 6); 2721 memcpy(header.addr2, priv->dev->dev_addr, 6);
2705 memcpy(header.addr3, priv->CurrentBSSID, 6); 2722 memcpy(header.addr3, priv->CurrentBSSID, 6);
2706 2723
2707 body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS); 2724 body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS);
2708 if (priv->wep_is_on) 2725 if (priv->wep_is_on)
@@ -2711,18 +2728,18 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2711 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble); 2728 body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble);
2712 2729
2713 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); 2730 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
2714 2731
2715 /* current AP address - only in reassoc frame */ 2732 /* current AP address - only in reassoc frame */
2716 if (is_reassoc) { 2733 if (is_reassoc) {
2717 memcpy(body.ap, priv->CurrentBSSID, 6); 2734 memcpy(body.ap, priv->CurrentBSSID, 6);
2718 ssid_el_p = (u8 *)&body.ssid_el_id; 2735 ssid_el_p = (u8 *)&body.ssid_el_id;
2719 bodysize = 18 + priv->SSID_size; 2736 bodysize = 18 + priv->SSID_size;
2720 } else { 2737 } else {
2721 ssid_el_p = (u8 *)&body.ap[0]; 2738 ssid_el_p = (u8 *)&body.ap[0];
2722 bodysize = 12 + priv->SSID_size; 2739 bodysize = 12 + priv->SSID_size;
2723 } 2740 }
2724 2741
2725 ssid_el_p[0]= C80211_MGMT_ElementID_SSID; 2742 ssid_el_p[0] = C80211_MGMT_ElementID_SSID;
2726 ssid_el_p[1] = priv->SSID_size; 2743 ssid_el_p[1] = priv->SSID_size;
2727 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); 2744 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
2728 ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates; 2745 ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates;
@@ -2732,7 +2749,8 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2732 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize); 2749 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
2733} 2750}
2734 2751
2735static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header) 2752static int is_frame_from_current_bss(struct atmel_private *priv,
2753 struct ieee80211_hdr_4addr *header)
2736{ 2754{
2737 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 2755 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
2738 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; 2756 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
@@ -2745,29 +2763,29 @@ static int retrieve_bss(struct atmel_private *priv)
2745 int i; 2763 int i;
2746 int max_rssi = -128; 2764 int max_rssi = -128;
2747 int max_index = -1; 2765 int max_index = -1;
2748 2766
2749 if (priv->BSS_list_entries == 0) 2767 if (priv->BSS_list_entries == 0)
2750 return -1; 2768 return -1;
2751 2769
2752 if (priv->connect_to_any_BSS) { 2770 if (priv->connect_to_any_BSS) {
2753 /* Select a BSS with the max-RSSI but of the same type and of the same WEP mode 2771 /* Select a BSS with the max-RSSI but of the same type and of
2754 and that it is not marked as 'bad' (i.e. we had previously failed to connect to 2772 the same WEP mode and that it is not marked as 'bad' (i.e.
2755 this BSS with the settings that we currently use) */ 2773 we had previously failed to connect to this BSS with the
2774 settings that we currently use) */
2756 priv->current_BSS = 0; 2775 priv->current_BSS = 0;
2757 for(i=0; i<priv->BSS_list_entries; i++) { 2776 for (i = 0; i < priv->BSS_list_entries; i++) {
2758 if (priv->operating_mode == priv->BSSinfo[i].BSStype && 2777 if (priv->operating_mode == priv->BSSinfo[i].BSStype &&
2759 ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) || 2778 ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) ||
2760 (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) && 2779 (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) &&
2761 !(priv->BSSinfo[i].channel & 0x80)) { 2780 !(priv->BSSinfo[i].channel & 0x80)) {
2762 max_rssi = priv->BSSinfo[i].RSSI; 2781 max_rssi = priv->BSSinfo[i].RSSI;
2763 priv->current_BSS = max_index = i; 2782 priv->current_BSS = max_index = i;
2764 } 2783 }
2765
2766 } 2784 }
2767 return max_index; 2785 return max_index;
2768 } 2786 }
2769 2787
2770 for(i=0; i<priv->BSS_list_entries; i++) { 2788 for (i = 0; i < priv->BSS_list_entries; i++) {
2771 if (priv->SSID_size == priv->BSSinfo[i].SSIDsize && 2789 if (priv->SSID_size == priv->BSSinfo[i].SSIDsize &&
2772 memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 && 2790 memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 &&
2773 priv->operating_mode == priv->BSSinfo[i].BSStype && 2791 priv->operating_mode == priv->BSSinfo[i].BSStype &&
@@ -2781,19 +2799,19 @@ static int retrieve_bss(struct atmel_private *priv)
2781 return max_index; 2799 return max_index;
2782} 2800}
2783 2801
2784 2802static void store_bss_info(struct atmel_private *priv,
2785static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 2803 struct ieee80211_hdr_4addr *header, u16 capability,
2786 u16 capability, u16 beacon_period, u8 channel, u8 rssi, 2804 u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
2787 u8 ssid_len, u8 *ssid, int is_beacon) 2805 u8 *ssid, int is_beacon)
2788{ 2806{
2789 u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3; 2807 u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3;
2790 int i, index; 2808 int i, index;
2791 2809
2792 for (index = -1, i = 0; i < priv->BSS_list_entries; i++) 2810 for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
2793 if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0) 2811 if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
2794 index = i; 2812 index = i;
2795 2813
2796 /* If we process a probe and an entry from this BSS exists 2814 /* If we process a probe and an entry from this BSS exists
2797 we will update the BSS entry with the info from this BSS. 2815 we will update the BSS entry with the info from this BSS.
2798 If we process a beacon we will only update RSSI */ 2816 If we process a beacon we will only update RSSI */
2799 2817
@@ -2820,8 +2838,8 @@ static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4add
2820 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC; 2838 priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
2821 else if (capability & C80211_MGMT_CAPABILITY_ESS) 2839 else if (capability & C80211_MGMT_CAPABILITY_ESS)
2822 priv->BSSinfo[index].BSStype =IW_MODE_INFRA; 2840 priv->BSSinfo[index].BSStype =IW_MODE_INFRA;
2823 2841
2824 priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ? 2842 priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
2825 SHORT_PREAMBLE : LONG_PREAMBLE; 2843 SHORT_PREAMBLE : LONG_PREAMBLE;
2826} 2844}
2827 2845
@@ -2831,8 +2849,8 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2831 u16 status = le16_to_cpu(auth->status); 2849 u16 status = le16_to_cpu(auth->status);
2832 u16 trans_seq_no = le16_to_cpu(auth->trans_seq); 2850 u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
2833 u16 system = le16_to_cpu(auth->alg); 2851 u16 system = le16_to_cpu(auth->alg);
2834 2852
2835 if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) { 2853 if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) {
2836 /* no WEP */ 2854 /* no WEP */
2837 if (priv->station_was_associated) { 2855 if (priv->station_was_associated) {
2838 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 2856 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
@@ -2842,20 +2860,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2842 atmel_enter_state(priv, STATION_STATE_ASSOCIATING); 2860 atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
2843 send_association_request(priv, 0); 2861 send_association_request(priv, 0);
2844 return; 2862 return;
2845 } 2863 }
2846 } 2864 }
2847 2865
2848 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { 2866 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) {
2849 /* WEP */ 2867 /* WEP */
2850 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) 2868 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
2851 return; 2869 return;
2852 2870
2853 if (trans_seq_no == 0x0002 && 2871 if (trans_seq_no == 0x0002 &&
2854 auth->el_id == C80211_MGMT_ElementID_ChallengeText) { 2872 auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
2855 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); 2873 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
2856 return; 2874 return;
2857 } 2875 }
2858 2876
2859 if (trans_seq_no == 0x0004) { 2877 if (trans_seq_no == 0x0004) {
2860 if(priv->station_was_associated) { 2878 if(priv->station_was_associated) {
2861 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 2879 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
@@ -2865,10 +2883,10 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2865 atmel_enter_state(priv, STATION_STATE_ASSOCIATING); 2883 atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
2866 send_association_request(priv, 0); 2884 send_association_request(priv, 0);
2867 return; 2885 return;
2868 } 2886 }
2869 } 2887 }
2870 } 2888 }
2871 2889
2872 if (status == C80211_MGMT_SC_AuthAlgNotSupported) { 2890 if (status == C80211_MGMT_SC_AuthAlgNotSupported) {
2873 /* Do opensystem first, then try sharedkey */ 2891 /* Do opensystem first, then try sharedkey */
2874 if (system == C80211_MGMT_AAN_OPENSYSTEM) { 2892 if (system == C80211_MGMT_AAN_OPENSYSTEM) {
@@ -2876,17 +2894,16 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
2876 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 2894 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
2877 } else if (priv->connect_to_any_BSS) { 2895 } else if (priv->connect_to_any_BSS) {
2878 int bss_index; 2896 int bss_index;
2879 2897
2880 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 2898 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
2881 2899
2882 if ((bss_index = retrieve_bss(priv)) != -1) { 2900 if ((bss_index = retrieve_bss(priv)) != -1) {
2883 atmel_join_bss(priv, bss_index); 2901 atmel_join_bss(priv, bss_index);
2884 return; 2902 return;
2885 } 2903 }
2886 } 2904 }
2887 } 2905 }
2888 2906
2889
2890 priv->AuthenticationRequestRetryCnt = 0; 2907 priv->AuthenticationRequestRetryCnt = 0;
2891 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 2908 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
2892 priv->station_is_associated = 0; 2909 priv->station_is_associated = 0;
@@ -2902,38 +2919,44 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2902 u8 length; 2919 u8 length;
2903 u8 rates[4]; 2920 u8 rates[4];
2904 } *ass_resp = (struct ass_resp_format *)priv->rx_buf; 2921 } *ass_resp = (struct ass_resp_format *)priv->rx_buf;
2905 2922
2906 u16 status = le16_to_cpu(ass_resp->status); 2923 u16 status = le16_to_cpu(ass_resp->status);
2907 u16 ass_id = le16_to_cpu(ass_resp->ass_id); 2924 u16 ass_id = le16_to_cpu(ass_resp->ass_id);
2908 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length; 2925 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
2909 2926
2910 if (frame_len < 8 + rates_len) 2927 if (frame_len < 8 + rates_len)
2911 return; 2928 return;
2912 2929
2913 if (status == C80211_MGMT_SC_Success) { 2930 if (status == C80211_MGMT_SC_Success) {
2914 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE) 2931 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE)
2915 priv->AssociationRequestRetryCnt = 0; 2932 priv->AssociationRequestRetryCnt = 0;
2916 else 2933 else
2917 priv->ReAssociationRequestRetryCnt = 0; 2934 priv->ReAssociationRequestRetryCnt = 0;
2918 2935
2919 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff); 2936 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
2920 atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len); 2937 MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
2938 atmel_set_mib(priv, Phy_Mib_Type,
2939 PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
2921 if (priv->power_mode == 0) { 2940 if (priv->power_mode == 0) {
2922 priv->listen_interval = 1; 2941 priv->listen_interval = 1;
2923 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE); 2942 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
2924 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1); 2943 MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
2944 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
2945 MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
2925 } else { 2946 } else {
2926 priv->listen_interval = 2; 2947 priv->listen_interval = 2;
2927 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, PS_MODE); 2948 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
2928 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2); 2949 MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
2950 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
2951 MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
2929 } 2952 }
2930 2953
2931 priv->station_is_associated = 1; 2954 priv->station_is_associated = 1;
2932 priv->station_was_associated = 1; 2955 priv->station_was_associated = 1;
2933 atmel_enter_state(priv, STATION_STATE_READY); 2956 atmel_enter_state(priv, STATION_STATE_READY);
2934 return; 2957 return;
2935 } 2958 }
2936 2959
2937 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE && 2960 if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE &&
2938 status != C80211_MGMT_SC_AssDeniedBSSRate && 2961 status != C80211_MGMT_SC_AssDeniedBSSRate &&
2939 status != C80211_MGMT_SC_SupportCapabilities && 2962 status != C80211_MGMT_SC_SupportCapabilities &&
@@ -2943,7 +2966,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2943 send_association_request(priv, 0); 2966 send_association_request(priv, 0);
2944 return; 2967 return;
2945 } 2968 }
2946 2969
2947 if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE && 2970 if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE &&
2948 status != C80211_MGMT_SC_AssDeniedBSSRate && 2971 status != C80211_MGMT_SC_AssDeniedBSSRate &&
2949 status != C80211_MGMT_SC_SupportCapabilities && 2972 status != C80211_MGMT_SC_SupportCapabilities &&
@@ -2953,17 +2976,16 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2953 send_association_request(priv, 1); 2976 send_association_request(priv, 1);
2954 return; 2977 return;
2955 } 2978 }
2956 2979
2957 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 2980 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
2958 priv->station_is_associated = 0; 2981 priv->station_is_associated = 0;
2959 2982
2960 if(priv->connect_to_any_BSS) { 2983 if (priv->connect_to_any_BSS) {
2961 int bss_index; 2984 int bss_index;
2962 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 2985 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
2963 2986
2964 if ((bss_index = retrieve_bss(priv)) != -1) 2987 if ((bss_index = retrieve_bss(priv)) != -1)
2965 atmel_join_bss(priv, bss_index); 2988 atmel_join_bss(priv, bss_index);
2966
2967 } 2989 }
2968} 2990}
2969 2991
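When association fails and connect_to_any_BSS is set, the code above marks the current entry as failed by setting the top bit of its stored channel and asks retrieve_bss() for another candidate. A small sketch of that mark-and-fall-back pattern, with an invented table and a plain best-RSSI pick standing in for retrieve_bss():

#include <stdio.h>

struct bss_entry {
	unsigned char channel;		/* top bit doubles as a "failed" marker */
	int rssi;
};

/* Return the index of the best entry not yet marked as failed, -1 to rescan. */
static int pick_next_bss(const struct bss_entry *tab, int n)
{
	int i, best = -1;

	for (i = 0; i < n; i++) {
		if (tab[i].channel & 0x80)
			continue;
		if (best == -1 || tab[i].rssi > tab[best].rssi)
			best = i;
	}
	return best;
}

int main(void)
{
	struct bss_entry tab[] = { { 1, 40 }, { 6, 70 }, { 11, 55 } };
	int current = 1;

	tab[current].channel |= 0x80;	/* association on entry 1 just failed */
	printf("next candidate: %d\n", pick_next_bss(tab, 3));
	return 0;
}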
@@ -2977,7 +2999,7 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
2977 /* The WPA stuff cares about the current AP address */ 2999 /* The WPA stuff cares about the current AP address */
2978 if (priv->use_wpa) 3000 if (priv->use_wpa)
2979 build_wpa_mib(priv); 3001 build_wpa_mib(priv);
2980 3002
2981 /* When switching to AdHoc turn OFF Power Save if needed */ 3003 /* When switching to AdHoc turn OFF Power Save if needed */
2982 3004
2983 if (bss->BSStype == IW_MODE_ADHOC && 3005 if (bss->BSStype == IW_MODE_ADHOC &&
@@ -2985,25 +3007,28 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
2985 priv->power_mode) { 3007 priv->power_mode) {
2986 priv->power_mode = 0; 3008 priv->power_mode = 0;
2987 priv->listen_interval = 1; 3009 priv->listen_interval = 1;
2988 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE); 3010 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
2989 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1); 3011 MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
3012 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
3013 MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
2990 } 3014 }
2991 3015
2992 priv->operating_mode = bss->BSStype; 3016 priv->operating_mode = bss->BSStype;
2993 priv->channel = bss->channel & 0x7f; 3017 priv->channel = bss->channel & 0x7f;
2994 priv->beacon_period = bss->beacon_period; 3018 priv->beacon_period = bss->beacon_period;
2995 3019
2996 if (priv->preamble != bss->preamble) { 3020 if (priv->preamble != bss->preamble) {
2997 priv->preamble = bss->preamble; 3021 priv->preamble = bss->preamble;
2998 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, bss->preamble); 3022 atmel_set_mib8(priv, Local_Mib_Type,
3023 LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
2999 } 3024 }
3000 3025
3001 if (!priv->wep_is_on && bss->UsingWEP) { 3026 if (!priv->wep_is_on && bss->UsingWEP) {
3002 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 3027 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3003 priv->station_is_associated = 0; 3028 priv->station_is_associated = 0;
3004 return; 3029 return;
3005 } 3030 }
3006 3031
3007 if (priv->wep_is_on && !bss->UsingWEP) { 3032 if (priv->wep_is_on && !bss->UsingWEP) {
3008 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 3033 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3009 priv->station_is_associated = 0; 3034 priv->station_is_associated = 0;
@@ -3011,30 +3036,28 @@ void atmel_join_bss(struct atmel_private *priv, int bss_index)
3011 } 3036 }
3012 3037
3013 atmel_enter_state(priv, STATION_STATE_JOINNING); 3038 atmel_enter_state(priv, STATION_STATE_JOINNING);
3014 3039
3015 if (priv->operating_mode == IW_MODE_INFRA) 3040 if (priv->operating_mode == IW_MODE_INFRA)
3016 join(priv, BSS_TYPE_INFRASTRUCTURE); 3041 join(priv, BSS_TYPE_INFRASTRUCTURE);
3017 else 3042 else
3018 join(priv, BSS_TYPE_AD_HOC); 3043 join(priv, BSS_TYPE_AD_HOC);
3019} 3044}
3020 3045
3021
3022static void restart_search(struct atmel_private *priv) 3046static void restart_search(struct atmel_private *priv)
3023{ 3047{
3024 int bss_index; 3048 int bss_index;
3025 3049
3026 if (!priv->connect_to_any_BSS) { 3050 if (!priv->connect_to_any_BSS) {
3027 atmel_scan(priv, 1); 3051 atmel_scan(priv, 1);
3028 } else { 3052 } else {
3029 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80; 3053 priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
3030 3054
3031 if ((bss_index = retrieve_bss(priv)) != -1) 3055 if ((bss_index = retrieve_bss(priv)) != -1)
3032 atmel_join_bss(priv, bss_index); 3056 atmel_join_bss(priv, bss_index);
3033 else 3057 else
3034 atmel_scan(priv, 0); 3058 atmel_scan(priv, 0);
3035 3059 }
3036 } 3060}
3037}
3038 3061
3039static void smooth_rssi(struct atmel_private *priv, u8 rssi) 3062static void smooth_rssi(struct atmel_private *priv, u8 rssi)
3040{ 3063{
@@ -3050,21 +3073,21 @@ static void smooth_rssi(struct atmel_private *priv, u8 rssi)
3050 } 3073 }
3051 3074
3052 rssi = rssi * 100 / max_rssi; 3075 rssi = rssi * 100 / max_rssi;
3053 if((rssi + old) % 2) 3076 if ((rssi + old) % 2)
3054 priv->wstats.qual.level = ((rssi + old)/2) + 1; 3077 priv->wstats.qual.level = (rssi + old) / 2 + 1;
3055 else 3078 else
3056 priv->wstats.qual.level = ((rssi + old)/2); 3079 priv->wstats.qual.level = (rssi + old) / 2;
3057 priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; 3080 priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
3058 priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID; 3081 priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID;
3059} 3082}
3060 3083
3061static void atmel_smooth_qual(struct atmel_private *priv) 3084static void atmel_smooth_qual(struct atmel_private *priv)
3062{ 3085{
3063 unsigned long time_diff = (jiffies - priv->last_qual)/HZ; 3086 unsigned long time_diff = (jiffies - priv->last_qual) / HZ;
3064 while (time_diff--) { 3087 while (time_diff--) {
3065 priv->last_qual += HZ; 3088 priv->last_qual += HZ;
3066 priv->wstats.qual.qual = priv->wstats.qual.qual/2; 3089 priv->wstats.qual.qual = priv->wstats.qual.qual / 2;
3067 priv->wstats.qual.qual += 3090 priv->wstats.qual.qual +=
3068 priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000; 3091 priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000;
3069 priv->beacons_this_sec = 0; 3092 priv->beacons_this_sec = 0;
3070 } 3093 }
@@ -3073,15 +3096,17 @@ static void atmel_smooth_qual(struct atmel_private *priv)
3073} 3096}
3074 3097
3075/* deals with incoming management frames. */ 3098/* deals with incoming management frames. */
3076static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, 3099static void atmel_management_frame(struct atmel_private *priv,
3077 u16 frame_len, u8 rssi) 3100 struct ieee80211_hdr_4addr *header,
3101 u16 frame_len, u8 rssi)
3078{ 3102{
3079 u16 subtype; 3103 u16 subtype;
3080 3104
3081 switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE) { 3105 subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE;
3082 case C80211_SUBTYPE_MGMT_BEACON : 3106 switch (subtype) {
3107 case C80211_SUBTYPE_MGMT_BEACON:
3083 case C80211_SUBTYPE_MGMT_ProbeResponse: 3108 case C80211_SUBTYPE_MGMT_ProbeResponse:
3084 3109
3085 /* beacon frame has multiple variable-length fields - 3110 /* beacon frame has multiple variable-length fields -
3086 never let an engineer loose with a data structure design. */ 3111 never let an engineer loose with a data structure design. */
3087 { 3112 {
@@ -3099,7 +3124,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3099 u8 ds_length; 3124 u8 ds_length;
3100 /* ds here */ 3125 /* ds here */
3101 } *beacon = (struct beacon_format *)priv->rx_buf; 3126 } *beacon = (struct beacon_format *)priv->rx_buf;
3102 3127
3103 u8 channel, rates_length, ssid_length; 3128 u8 channel, rates_length, ssid_length;
3104 u64 timestamp = le64_to_cpu(beacon->timestamp); 3129 u64 timestamp = le64_to_cpu(beacon->timestamp);
3105 u16 beacon_interval = le16_to_cpu(beacon->interval); 3130 u16 beacon_interval = le16_to_cpu(beacon->interval);
@@ -3107,7 +3132,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3107 u8 *beaconp = priv->rx_buf; 3132 u8 *beaconp = priv->rx_buf;
3108 ssid_length = beacon->ssid_length; 3133 ssid_length = beacon->ssid_length;
3109 /* this blows chunks. */ 3134 /* this blows chunks. */
3110 if (frame_len < 14 || frame_len < ssid_length + 15) 3135 if (frame_len < 14 || frame_len < ssid_length + 15)
3111 return; 3136 return;
3112 rates_length = beaconp[beacon->ssid_length + 15]; 3137 rates_length = beaconp[beacon->ssid_length + 15];
3113 if (frame_len < ssid_length + rates_length + 18) 3138 if (frame_len < ssid_length + rates_length + 18)
@@ -3115,10 +3140,10 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3115 if (ssid_length > MAX_SSID_LENGTH) 3140 if (ssid_length > MAX_SSID_LENGTH)
3116 return; 3141 return;
3117 channel = beaconp[ssid_length + rates_length + 18]; 3142 channel = beaconp[ssid_length + rates_length + 18];
3118 3143
3119 if (priv->station_state == STATION_STATE_READY) { 3144 if (priv->station_state == STATION_STATE_READY) {
3120 smooth_rssi(priv, rssi); 3145 smooth_rssi(priv, rssi);
3121 if (is_frame_from_current_bss(priv, header)) { 3146 if (is_frame_from_current_bss(priv, header)) {
3122 priv->beacons_this_sec++; 3147 priv->beacons_this_sec++;
3123 atmel_smooth_qual(priv); 3148 atmel_smooth_qual(priv);
3124 if (priv->last_beacon_timestamp) { 3149 if (priv->last_beacon_timestamp) {
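The beacon/probe-response handling above digs the SSID, supported-rates length and DS channel out of the frame purely by offset arithmetic: the fixed header ends at byte 13, and each later field is located relative to the length bytes in front of it. A hedged standalone sketch of that walk; it follows the driver's offsets but uses slightly stricter bounds checks, and the sample frame in main() is invented:

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LENGTH 32

/* Extract the SSID and DS channel from a beacon body laid out as above:
   8 bytes timestamp, 2 interval, 2 capability, then length-prefixed
   SSID, rates and DS elements. */
static int parse_beacon(const unsigned char *buf, int frame_len,
			char *ssid, unsigned char *channel)
{
	unsigned char ssid_len, rates_len;

	if (frame_len < 14)
		return -1;
	ssid_len = buf[13];
	if (ssid_len > MAX_SSID_LENGTH || frame_len < ssid_len + 16)
		return -1;
	rates_len = buf[ssid_len + 15];
	if (frame_len < ssid_len + rates_len + 19)
		return -1;

	memcpy(ssid, buf + 14, ssid_len);
	ssid[ssid_len] = '\0';
	*channel = buf[ssid_len + rates_len + 18];
	return 0;
}

int main(void)
{
	unsigned char frame[25] = { 0 };
	char ssid[MAX_SSID_LENGTH + 1];
	unsigned char channel;

	frame[13] = 4;			/* SSID length */
	memcpy(frame + 14, "test", 4);
	frame[19] = 2;			/* two supported rates */
	frame[24] = 6;			/* DS channel */

	if (parse_beacon(frame, sizeof(frame), ssid, &channel) == 0)
		printf("ssid=%s channel=%u\n", ssid, channel);
	return 0;
}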
@@ -3132,41 +3157,43 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3132 handle_beacon_probe(priv, capability, channel); 3157 handle_beacon_probe(priv, capability, channel);
3133 } 3158 }
3134 } 3159 }
3135 3160
3136 if (priv->station_state == STATION_STATE_SCANNING ) 3161 if (priv->station_state == STATION_STATE_SCANNING)
3137 store_bss_info(priv, header, capability, beacon_interval, channel, 3162 store_bss_info(priv, header, capability,
3138 rssi, ssid_length, &beacon->rates_el_id, 3163 beacon_interval, channel, rssi,
3139 subtype == C80211_SUBTYPE_MGMT_BEACON) ; 3164 ssid_length,
3165 &beacon->rates_el_id,
3166 subtype == C80211_SUBTYPE_MGMT_BEACON);
3140 } 3167 }
3141 break; 3168 break;
3142 3169
3143 case C80211_SUBTYPE_MGMT_Authentication: 3170 case C80211_SUBTYPE_MGMT_Authentication:
3144 3171
3145 if (priv->station_state == STATION_STATE_AUTHENTICATING) 3172 if (priv->station_state == STATION_STATE_AUTHENTICATING)
3146 authenticate(priv, frame_len); 3173 authenticate(priv, frame_len);
3147 3174
3148 break; 3175 break;
3149 3176
3150 case C80211_SUBTYPE_MGMT_ASS_RESPONSE: 3177 case C80211_SUBTYPE_MGMT_ASS_RESPONSE:
3151 case C80211_SUBTYPE_MGMT_REASS_RESPONSE: 3178 case C80211_SUBTYPE_MGMT_REASS_RESPONSE:
3152 3179
3153 if (priv->station_state == STATION_STATE_ASSOCIATING || 3180 if (priv->station_state == STATION_STATE_ASSOCIATING ||
3154 priv->station_state == STATION_STATE_REASSOCIATING) 3181 priv->station_state == STATION_STATE_REASSOCIATING)
3155 associate(priv, frame_len, subtype); 3182 associate(priv, frame_len, subtype);
3156 3183
3157 break; 3184 break;
3158 3185
3159 case C80211_SUBTYPE_MGMT_DISASSOSIATION: 3186 case C80211_SUBTYPE_MGMT_DISASSOSIATION:
3160 if (priv->station_is_associated && 3187 if (priv->station_is_associated &&
3161 priv->operating_mode == IW_MODE_INFRA && 3188 priv->operating_mode == IW_MODE_INFRA &&
3162 is_frame_from_current_bss(priv, header)) { 3189 is_frame_from_current_bss(priv, header)) {
3163 priv->station_was_associated = 0; 3190 priv->station_was_associated = 0;
3164 priv->station_is_associated = 0; 3191 priv->station_is_associated = 0;
3165 3192
3166 atmel_enter_state(priv, STATION_STATE_JOINNING); 3193 atmel_enter_state(priv, STATION_STATE_JOINNING);
3167 join(priv, BSS_TYPE_INFRASTRUCTURE); 3194 join(priv, BSS_TYPE_INFRASTRUCTURE);
3168 } 3195 }
3169 3196
3170 break; 3197 break;
3171 3198
3172 case C80211_SUBTYPE_MGMT_Deauthentication: 3199 case C80211_SUBTYPE_MGMT_Deauthentication:
@@ -3177,7 +3204,7 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3177 atmel_enter_state(priv, STATION_STATE_JOINNING); 3204 atmel_enter_state(priv, STATION_STATE_JOINNING);
3178 join(priv, BSS_TYPE_INFRASTRUCTURE); 3205 join(priv, BSS_TYPE_INFRASTRUCTURE);
3179 } 3206 }
3180 3207
3181 break; 3208 break;
3182 } 3209 }
3183} 3210}
@@ -3185,76 +3212,73 @@ static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_
3185/* run when timer expires */ 3212/* run when timer expires */
3186static void atmel_management_timer(u_long a) 3213static void atmel_management_timer(u_long a)
3187{ 3214{
3188 struct net_device *dev = (struct net_device *) a; 3215 struct net_device *dev = (struct net_device *) a;
3189 struct atmel_private *priv = netdev_priv(dev); 3216 struct atmel_private *priv = netdev_priv(dev);
3190 unsigned long flags; 3217 unsigned long flags;
3191
3192 /* Check if the card has been yanked. */
3193 if (priv->card && priv->present_callback &&
3194 !(*priv->present_callback)(priv->card))
3195 return;
3196
3197 spin_lock_irqsave(&priv->irqlock, flags);
3198
3199 switch (priv->station_state) {
3200
3201 case STATION_STATE_AUTHENTICATING:
3202 if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
3203 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3204 priv->station_is_associated = 0;
3205 priv->AuthenticationRequestRetryCnt = 0;
3206 restart_search(priv);
3207 } else {
3208 priv->AuthenticationRequestRetryCnt++;
3209 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3210 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3211 send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0);
3212 }
3213
3214 break;
3215 3218
3216 case STATION_STATE_ASSOCIATING: 3219 /* Check if the card has been yanked. */
3217 if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) { 3220 if (priv->card && priv->present_callback &&
3218 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR); 3221 !(*priv->present_callback)(priv->card))
3219 priv->station_is_associated = 0; 3222 return;
3220 priv->AssociationRequestRetryCnt = 0;
3221 restart_search(priv);
3222 } else {
3223 priv->AssociationRequestRetryCnt++;
3224 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3225 send_association_request(priv, 0);
3226 }
3227 3223
3228 break; 3224 spin_lock_irqsave(&priv->irqlock, flags);
3229 3225
3230 case STATION_STATE_REASSOCIATING: 3226 switch (priv->station_state) {
3231 if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
3232 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3233 priv->station_is_associated = 0;
3234 priv->ReAssociationRequestRetryCnt = 0;
3235 restart_search(priv);
3236 } else {
3237 priv->ReAssociationRequestRetryCnt++;
3238 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3239 send_association_request(priv, 1);
3240 }
3241 3227
3228 case STATION_STATE_AUTHENTICATING:
3229 if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
3230 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3231 priv->station_is_associated = 0;
3232 priv->AuthenticationRequestRetryCnt = 0;
3233 restart_search(priv);
3234 } else {
3235 priv->AuthenticationRequestRetryCnt++;
3236 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3237 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3238 send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0);
3239 }
3242 break; 3240 break;
3243 3241
3244 default: 3242 case STATION_STATE_ASSOCIATING:
3243 if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
3244 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3245 priv->station_is_associated = 0;
3246 priv->AssociationRequestRetryCnt = 0;
3247 restart_search(priv);
3248 } else {
3249 priv->AssociationRequestRetryCnt++;
3250 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3251 send_association_request(priv, 0);
3252 }
3245 break; 3253 break;
3246 } 3254
3247 3255 case STATION_STATE_REASSOCIATING:
3248 spin_unlock_irqrestore(&priv->irqlock, flags); 3256 if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
3257 atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
3258 priv->station_is_associated = 0;
3259 priv->ReAssociationRequestRetryCnt = 0;
3260 restart_search(priv);
3261 } else {
3262 priv->ReAssociationRequestRetryCnt++;
3263 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3264 send_association_request(priv, 1);
3265 }
3266 break;
3267
3268 default:
3269 break;
3270 }
3271
3272 spin_unlock_irqrestore(&priv->irqlock, flags);
3249} 3273}
3250 3274
3251static void atmel_command_irq(struct atmel_private *priv) 3275static void atmel_command_irq(struct atmel_private *priv)
3252{ 3276{
3253 u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET)); 3277 u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
3254 u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET)); 3278 u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
3255 int fast_scan; 3279 int fast_scan;
3256 3280
3257 if (status == CMD_STATUS_IDLE || 3281 if (status == CMD_STATUS_IDLE ||
3258 status == CMD_STATUS_IN_PROGRESS) 3282 status == CMD_STATUS_IN_PROGRESS)
3259 return; 3283 return;
3260 3284
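The reflowed management timer above is a bounded-retry loop driven by mod_timer(): each expiry either re-sends the pending authentication/association request and re-arms, or, once the per-state counter reaches its limit, drops to the error state and restarts the search. A small user-space sketch of that pattern; the retry limit and the always-silent peer are illustrative:

#include <stdio.h>

#define MAX_RETRIES 3			/* stands in for MAX_AUTHENTICATION_RETRIES */

struct retry_ctx {
	int attempts;
};

static void send_request(const struct retry_ctx *ctx)
{
	printf("attempt %d (no reply)\n", ctx->attempts);
}

/* Called on each timer expiry; returns 0 to re-arm the timer, -1 to give up. */
static int on_timeout(struct retry_ctx *ctx)
{
	if (ctx->attempts >= MAX_RETRIES) {
		printf("retry limit hit: give up and restart the search\n");
		return -1;
	}
	ctx->attempts++;
	send_request(ctx);
	return 0;			/* the driver would mod_timer() here */
}

int main(void)
{
	struct retry_ctx ctx = { 0 };

	while (on_timeout(&ctx) == 0)
		;			/* a real timer fires asynchronously */
	return 0;
}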
@@ -3266,20 +3290,20 @@ static void atmel_command_irq(struct atmel_private *priv)
3266 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS, 3290 atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
3267 (u8 *)priv->CurrentBSSID, 6); 3291 (u8 *)priv->CurrentBSSID, 6);
3268 atmel_enter_state(priv, STATION_STATE_READY); 3292 atmel_enter_state(priv, STATION_STATE_READY);
3269 } 3293 }
3270 break; 3294 break;
3271 3295
3272 case CMD_Scan: 3296 case CMD_Scan:
3273 fast_scan = priv->fast_scan; 3297 fast_scan = priv->fast_scan;
3274 priv->fast_scan = 0; 3298 priv->fast_scan = 0;
3275 3299
3276 if (status != CMD_STATUS_COMPLETE) { 3300 if (status != CMD_STATUS_COMPLETE) {
3277 atmel_scan(priv, 1); 3301 atmel_scan(priv, 1);
3278 } else { 3302 } else {
3279 int bss_index = retrieve_bss(priv); 3303 int bss_index = retrieve_bss(priv);
3280 if (bss_index != -1) { 3304 if (bss_index != -1) {
3281 atmel_join_bss(priv, bss_index); 3305 atmel_join_bss(priv, bss_index);
3282 } else if (priv->operating_mode == IW_MODE_ADHOC && 3306 } else if (priv->operating_mode == IW_MODE_ADHOC &&
3283 priv->SSID_size != 0) { 3307 priv->SSID_size != 0) {
3284 start(priv, BSS_TYPE_AD_HOC); 3308 start(priv, BSS_TYPE_AD_HOC);
3285 } else { 3309 } else {
@@ -3289,16 +3313,16 @@ static void atmel_command_irq(struct atmel_private *priv)
3289 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3313 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3290 } 3314 }
3291 break; 3315 break;
3292 3316
3293 case CMD_SiteSurvey: 3317 case CMD_SiteSurvey:
3294 priv->fast_scan = 0; 3318 priv->fast_scan = 0;
3295 3319
3296 if (status != CMD_STATUS_COMPLETE) 3320 if (status != CMD_STATUS_COMPLETE)
3297 return; 3321 return;
3298 3322
3299 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3323 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3300 if (priv->station_is_associated) { 3324 if (priv->station_is_associated) {
3301 atmel_enter_state(priv, STATION_STATE_READY); 3325 atmel_enter_state(priv, STATION_STATE_READY);
3302 } else { 3326 } else {
3303 atmel_scan(priv, 1); 3327 atmel_scan(priv, 1);
3304 } 3328 }
@@ -3312,16 +3336,15 @@ static void atmel_command_irq(struct atmel_private *priv)
3312 } else { 3336 } else {
3313 priv->AuthenticationRequestRetryCnt = 0; 3337 priv->AuthenticationRequestRetryCnt = 0;
3314 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); 3338 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
3315 3339
3316 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3340 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3317 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3341 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3318 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 3342 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
3319 } 3343 }
3320 return; 3344 return;
3321 } 3345 }
3322 3346
3323 atmel_scan(priv, 1); 3347 atmel_scan(priv, 1);
3324
3325 } 3348 }
3326} 3349}
3327 3350
@@ -3333,20 +3356,20 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3333 3356
3334 if (priv->card_type == CARD_TYPE_SPI_FLASH) 3357 if (priv->card_type == CARD_TYPE_SPI_FLASH)
3335 atmel_set_gcr(priv->dev, GCR_REMAP); 3358 atmel_set_gcr(priv->dev, GCR_REMAP);
3336 3359
3337 /* wake up on-board processor */ 3360 /* wake up on-board processor */
3338 atmel_clear_gcr(priv->dev, 0x0040); 3361 atmel_clear_gcr(priv->dev, 0x0040);
3339 atmel_write16(priv->dev, BSR, BSS_SRAM); 3362 atmel_write16(priv->dev, BSR, BSS_SRAM);
3340 3363
3341 if (priv->card_type == CARD_TYPE_SPI_FLASH) 3364 if (priv->card_type == CARD_TYPE_SPI_FLASH)
3342 mdelay(100); 3365 mdelay(100);
3343 3366
3344 /* and wait for it */ 3367 /* and wait for it */
3345 for (i = LOOP_RETRY_LIMIT; i; i--) { 3368 for (i = LOOP_RETRY_LIMIT; i; i--) {
3346 mr1 = atmel_read16(priv->dev, MR1); 3369 mr1 = atmel_read16(priv->dev, MR1);
3347 mr3 = atmel_read16(priv->dev, MR3); 3370 mr3 = atmel_read16(priv->dev, MR3);
3348 3371
3349 if (mr3 & MAC_BOOT_COMPLETE) 3372 if (mr3 & MAC_BOOT_COMPLETE)
3350 break; 3373 break;
3351 if (mr1 & MAC_BOOT_COMPLETE && 3374 if (mr1 & MAC_BOOT_COMPLETE &&
3352 priv->bus_type == BUS_TYPE_PCCARD) 3375 priv->bus_type == BUS_TYPE_PCCARD)
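atmel_wakeup_firmware() above kicks the on-board processor and then polls MR3 (and, on PC Card, MR1) up to LOOP_RETRY_LIMIT times for the boot-complete bit, treating exhaustion of the counter as a fatal boot failure. A sketch of that bounded poll with the register read stubbed out; the bit value and the tick at which the fake hardware becomes ready are invented:

#include <stdio.h>

#define LOOP_RETRY_LIMIT 10
#define BOOT_COMPLETE    0x0001		/* illustrative bit, not the real MR3 layout */

/* Stand-in for reading MR3; the "hardware" becomes ready on tick 4. */
static unsigned int fake_read_mr3(int tick)
{
	return tick >= 4 ? BOOT_COMPLETE : 0;
}

static int wait_for_boot(void)
{
	int i;

	for (i = LOOP_RETRY_LIMIT; i; i--)
		if (fake_read_mr3(LOOP_RETRY_LIMIT - i) & BOOT_COMPLETE)
			return 0;
	return -1;			/* counter exhausted: boot failure */
}

int main(void)
{
	printf("%s\n", wait_for_boot() ? "MAC failed to boot" : "MAC booted");
	return 0;
}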
@@ -3357,35 +3380,36 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3357 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); 3380 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
3358 return 0; 3381 return 0;
3359 } 3382 }
3360 3383
3361 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { 3384 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
3362 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); 3385 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
3363 return 0; 3386 return 0;
3364 } 3387 }
3365 3388
3366 /* now check for completion of MAC initialization through 3389 /* now check for completion of MAC initialization through
3367 the FunCtrl field of the IFACE, poll MR1 to detect completion of 3390 the FunCtrl field of the IFACE, poll MR1 to detect completion of
3368 MAC initialization, check completion status, set interrupt mask, 3391 MAC initialization, check completion status, set interrupt mask,
3369 enables interrupts and calls Tx and Rx initialization functions */ 3392 enables interrupts and calls Tx and Rx initialization functions */
3370 3393
3371 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE); 3394 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE);
3372 3395
3373 for (i = LOOP_RETRY_LIMIT; i; i--) { 3396 for (i = LOOP_RETRY_LIMIT; i; i--) {
3374 mr1 = atmel_read16(priv->dev, MR1); 3397 mr1 = atmel_read16(priv->dev, MR1);
3375 mr3 = atmel_read16(priv->dev, MR3); 3398 mr3 = atmel_read16(priv->dev, MR3);
3376 3399
3377 if (mr3 & MAC_INIT_COMPLETE) 3400 if (mr3 & MAC_INIT_COMPLETE)
3378 break; 3401 break;
3379 if (mr1 & MAC_INIT_COMPLETE && 3402 if (mr1 & MAC_INIT_COMPLETE &&
3380 priv->bus_type == BUS_TYPE_PCCARD) 3403 priv->bus_type == BUS_TYPE_PCCARD)
3381 break; 3404 break;
3382 } 3405 }
3383 3406
3384 if (i == 0) { 3407 if (i == 0) {
3385 printk(KERN_ALERT "%s: MAC failed to initialise.\n", priv->dev->name); 3408 printk(KERN_ALERT "%s: MAC failed to initialise.\n",
3409 priv->dev->name);
3386 return 0; 3410 return 0;
3387 } 3411 }
3388 3412
3389 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ 3413 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
3390 if ((mr3 & MAC_INIT_COMPLETE) && 3414 if ((mr3 & MAC_INIT_COMPLETE) &&
3391 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { 3415 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
@@ -3398,9 +3422,9 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3398 return 0; 3422 return 0;
3399 } 3423 }
3400 3424
3401 atmel_copy_to_host(priv->dev, (unsigned char *)iface, 3425 atmel_copy_to_host(priv->dev, (unsigned char *)iface,
3402 priv->host_info_base, sizeof(*iface)); 3426 priv->host_info_base, sizeof(*iface));
3403 3427
3404 iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos); 3428 iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos);
3405 iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size); 3429 iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size);
3406 iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos); 3430 iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos);
@@ -3424,16 +3448,16 @@ static int probe_atmel_card(struct net_device *dev)
3424{ 3448{
3425 int rc = 0; 3449 int rc = 0;
3426 struct atmel_private *priv = netdev_priv(dev); 3450 struct atmel_private *priv = netdev_priv(dev);
3427 3451
3428 /* reset pccard */ 3452 /* reset pccard */
3429 if (priv->bus_type == BUS_TYPE_PCCARD) 3453 if (priv->bus_type == BUS_TYPE_PCCARD)
3430 atmel_write16(dev, GCR, 0x0060); 3454 atmel_write16(dev, GCR, 0x0060);
3431 3455
3432 atmel_write16(dev, GCR, 0x0040); 3456 atmel_write16(dev, GCR, 0x0040);
3433 mdelay(500); 3457 mdelay(500);
3434 3458
3435 if (atmel_read16(dev, MR2) == 0) { 3459 if (atmel_read16(dev, MR2) == 0) {
3436 /* No stored firmware so load a small stub which just 3460 /* No stored firmware so load a small stub which just
3437 tells us the MAC address */ 3461 tells us the MAC address */
3438 int i; 3462 int i;
3439 priv->card_type = CARD_TYPE_EEPROM; 3463 priv->card_type = CARD_TYPE_EEPROM;
@@ -3442,7 +3466,7 @@ static int probe_atmel_card(struct net_device *dev)
3442 atmel_set_gcr(dev, GCR_REMAP); 3466 atmel_set_gcr(dev, GCR_REMAP);
3443 atmel_clear_gcr(priv->dev, 0x0040); 3467 atmel_clear_gcr(priv->dev, 0x0040);
3444 atmel_write16(dev, BSR, BSS_SRAM); 3468 atmel_write16(dev, BSR, BSS_SRAM);
3445 for (i = LOOP_RETRY_LIMIT; i; i--) 3469 for (i = LOOP_RETRY_LIMIT; i; i--)
3446 if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE) 3470 if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE)
3447 break; 3471 break;
3448 if (i == 0) { 3472 if (i == 0) {
@@ -3451,7 +3475,7 @@ static int probe_atmel_card(struct net_device *dev)
3451 atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6); 3475 atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6);
3452 /* got address, now squash it again until the network 3476 /* got address, now squash it again until the network
3453 interface is opened */ 3477 interface is opened */
3454 if (priv->bus_type == BUS_TYPE_PCCARD) 3478 if (priv->bus_type == BUS_TYPE_PCCARD)
3455 atmel_write16(dev, GCR, 0x0060); 3479 atmel_write16(dev, GCR, 0x0060);
3456 atmel_write16(dev, GCR, 0x0040); 3480 atmel_write16(dev, GCR, 0x0040);
3457 rc = 1; 3481 rc = 1;
@@ -3459,7 +3483,7 @@ static int probe_atmel_card(struct net_device *dev)
3459 } else if (atmel_read16(dev, MR4) == 0) { 3483 } else if (atmel_read16(dev, MR4) == 0) {
3460 /* Mac address easy in this case. */ 3484 /* Mac address easy in this case. */
3461 priv->card_type = CARD_TYPE_PARALLEL_FLASH; 3485 priv->card_type = CARD_TYPE_PARALLEL_FLASH;
3462 atmel_write16(dev, BSR, 1); 3486 atmel_write16(dev, BSR, 1);
3463 atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6); 3487 atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6);
3464 atmel_write16(dev, BSR, 0x200); 3488 atmel_write16(dev, BSR, 0x200);
3465 rc = 1; 3489 rc = 1;
@@ -3469,16 +3493,16 @@ static int probe_atmel_card(struct net_device *dev)
3469 priv->card_type = CARD_TYPE_SPI_FLASH; 3493 priv->card_type = CARD_TYPE_SPI_FLASH;
3470 if (atmel_wakeup_firmware(priv)) { 3494 if (atmel_wakeup_firmware(priv)) {
3471 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); 3495 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
3472 3496
3473 /* got address, now squash it again until the network 3497 /* got address, now squash it again until the network
3474 interface is opened */ 3498 interface is opened */
3475 if (priv->bus_type == BUS_TYPE_PCCARD) 3499 if (priv->bus_type == BUS_TYPE_PCCARD)
3476 atmel_write16(dev, GCR, 0x0060); 3500 atmel_write16(dev, GCR, 0x0060);
3477 atmel_write16(dev, GCR, 0x0040); 3501 atmel_write16(dev, GCR, 0x0040);
3478 rc = 1; 3502 rc = 1;
3479 } 3503 }
3480 } 3504 }
3481 3505
3482 if (rc) { 3506 if (rc) {
3483 if (dev->dev_addr[0] == 0xFF) { 3507 if (dev->dev_addr[0] == 0xFF) {
3484 u8 default_mac[] = {0x00,0x04, 0x25, 0x00, 0x00, 0x00}; 3508 u8 default_mac[] = {0x00,0x04, 0x25, 0x00, 0x00, 0x00};
@@ -3486,27 +3510,27 @@ static int probe_atmel_card(struct net_device *dev)
3486 memcpy(dev->dev_addr, default_mac, 6); 3510 memcpy(dev->dev_addr, default_mac, 6);
3487 } 3511 }
3488 } 3512 }
3489 3513
3490 return rc; 3514 return rc;
3491} 3515}
3492 3516
3493static void build_wep_mib(struct atmel_private *priv)
3494/* Move the encryption information on the MIB structure. 3517/* Move the encryption information on the MIB structure.
3495 This routine is for the pre-WPA firmware: later firmware has 3518 This routine is for the pre-WPA firmware: later firmware has
3496 a different format MIB and a different routine. */ 3519 a different format MIB and a different routine. */
3520static void build_wep_mib(struct atmel_private *priv)
3497{ 3521{
3498 struct { /* NB this is matched to the hardware, don't change. */ 3522 struct { /* NB this is matched to the hardware, don't change. */
3499 u8 wep_is_on; 3523 u8 wep_is_on;
3500 u8 default_key; /* 0..3 */ 3524 u8 default_key; /* 0..3 */
3501 u8 reserved; 3525 u8 reserved;
3502 u8 exclude_unencrypted; 3526 u8 exclude_unencrypted;
3503 3527
3504 u32 WEPICV_error_count; 3528 u32 WEPICV_error_count;
3505 u32 WEP_excluded_count; 3529 u32 WEP_excluded_count;
3506 3530
3507 u8 wep_keys[MAX_ENCRYPTION_KEYS][13]; 3531 u8 wep_keys[MAX_ENCRYPTION_KEYS][13];
3508 u8 encryption_level; /* 0, 1, 2 */ 3532 u8 encryption_level; /* 0, 1, 2 */
3509 u8 reserved2[3]; 3533 u8 reserved2[3];
3510 } mib; 3534 } mib;
3511 int i; 3535 int i;
3512 3536
@@ -3515,54 +3539,55 @@ static void build_wep_mib(struct atmel_private *priv)
3515 if (priv->wep_key_len[priv->default_key] > 5) 3539 if (priv->wep_key_len[priv->default_key] > 5)
3516 mib.encryption_level = 2; 3540 mib.encryption_level = 2;
3517 else 3541 else
3518 mib.encryption_level = 1; 3542 mib.encryption_level = 1;
3519 } else { 3543 } else {
3520 mib.encryption_level = 0; 3544 mib.encryption_level = 0;
3521 } 3545 }
3522 3546
3523 mib.default_key = priv->default_key; 3547 mib.default_key = priv->default_key;
3524 mib.exclude_unencrypted = priv->exclude_unencrypted; 3548 mib.exclude_unencrypted = priv->exclude_unencrypted;
3525 3549
3526 for(i = 0; i < MAX_ENCRYPTION_KEYS; i++) 3550 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++)
3527 memcpy(mib.wep_keys[i], priv->wep_keys[i], 13); 3551 memcpy(mib.wep_keys[i], priv->wep_keys[i], 13);
3528 3552
3529 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib)); 3553 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
3530} 3554}
3531 3555
3532static void build_wpa_mib(struct atmel_private *priv) 3556static void build_wpa_mib(struct atmel_private *priv)
3533{ 3557{
3534 /* This is for the later (WPA enabled) firmware. */ 3558 /* This is for the later (WPA enabled) firmware. */
3535 3559
3536 struct { /* NB this is matched to the hardware, don't change. */ 3560 struct { /* NB this is matched to the hardware, don't change. */
3537 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE]; 3561 u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
3538 u8 receiver_address[6]; 3562 u8 receiver_address[6];
3539 u8 wep_is_on; 3563 u8 wep_is_on;
3540 u8 default_key; /* 0..3 */ 3564 u8 default_key; /* 0..3 */
3541 u8 group_key; 3565 u8 group_key;
3542 u8 exclude_unencrypted; 3566 u8 exclude_unencrypted;
3543 u8 encryption_type; 3567 u8 encryption_type;
3544 u8 reserved; 3568 u8 reserved;
3545 3569
3546 u32 WEPICV_error_count; 3570 u32 WEPICV_error_count;
3547 u32 WEP_excluded_count; 3571 u32 WEP_excluded_count;
3548 3572
3549 u8 key_RSC[4][8]; 3573 u8 key_RSC[4][8];
3550 } mib; 3574 } mib;
3551 3575
3552 int i; 3576 int i;
3553 3577
3554 mib.wep_is_on = priv->wep_is_on; 3578 mib.wep_is_on = priv->wep_is_on;
3555 mib.exclude_unencrypted = priv->exclude_unencrypted; 3579 mib.exclude_unencrypted = priv->exclude_unencrypted;
3556 memcpy(mib.receiver_address, priv->CurrentBSSID, 6); 3580 memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
3557 3581
3558 /* zero all the keys before adding in valid ones. */ 3582 /* zero all the keys before adding in valid ones. */
3559 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value)); 3583 memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
3560 3584
3561 if (priv->wep_is_on) { 3585 if (priv->wep_is_on) {
3562 /* There's a comment in the Atmel code to the effect that this is only valid 3586 /* There's a comment in the Atmel code to the effect that this
3563 when still using WEP, it may need to be set to something to use WPA */ 3587 is only valid when still using WEP, it may need to be set to
3588 something to use WPA */
3564 memset(mib.key_RSC, 0, sizeof(mib.key_RSC)); 3589 memset(mib.key_RSC, 0, sizeof(mib.key_RSC));
3565 3590
3566 mib.default_key = mib.group_key = 255; 3591 mib.default_key = mib.group_key = 255;
3567 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) { 3592 for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) {
3568 if (priv->wep_key_len[i] > 0) { 3593 if (priv->wep_key_len[i] > 0) {
@@ -3570,12 +3595,12 @@ static void build_wpa_mib(struct atmel_private *priv)
3570 if (i == priv->default_key) { 3595 if (i == priv->default_key) {
3571 mib.default_key = i; 3596 mib.default_key = i;
3572 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7; 3597 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7;
3573 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite; 3598 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite;
3574 } else { 3599 } else {
3575 mib.group_key = i; 3600 mib.group_key = i;
3576 priv->group_cipher_suite = priv->pairwise_cipher_suite; 3601 priv->group_cipher_suite = priv->pairwise_cipher_suite;
3577 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1; 3602 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1;
3578 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite; 3603 mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite;
3579 } 3604 }
3580 } 3605 }
3581 } 3606 }
@@ -3583,47 +3608,47 @@ static void build_wpa_mib(struct atmel_private *priv)
3583 mib.default_key = mib.group_key != 255 ? mib.group_key : 0; 3608 mib.default_key = mib.group_key != 255 ? mib.group_key : 0;
3584 if (mib.group_key == 255) 3609 if (mib.group_key == 255)
3585 mib.group_key = mib.default_key; 3610 mib.group_key = mib.default_key;
3586 3611
3587 } 3612 }
3588 3613
3589 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib)); 3614 atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
3590} 3615}
3591 3616
3592static int reset_atmel_card(struct net_device *dev) 3617static int reset_atmel_card(struct net_device *dev)
3593{ 3618{
3594 /* do everything necessary to wake up the hardware, including 3619 /* do everything necessary to wake up the hardware, including
3595 waiting for the lightning strike and throwing the knife switch.... 3620 waiting for the lightning strike and throwing the knife switch....
3596 3621
3597 set all the Mib values which matter in the card to match 3622 set all the Mib values which matter in the card to match
3598 their settings in the atmel_private structure. Some of these 3623 their settings in the atmel_private structure. Some of these
3599 can be altered on the fly, but many (WEP, infrastructure or ad-hoc) 3624 can be altered on the fly, but many (WEP, infrastructure or ad-hoc)
3600 can only be changed by tearing down the world and coming back through 3625 can only be changed by tearing down the world and coming back through
3601 here. 3626 here.
3602 3627
3603 This routine is also responsible for initialising some 3628 This routine is also responsible for initialising some
3604 hardware-specific fields in the atmel_private structure, 3629 hardware-specific fields in the atmel_private structure,
3605 including a copy of the firmware's hostinfo structure 3630 including a copy of the firmware's hostinfo structure
3606 which is the route into the rest of the firmware datastructures. */ 3631 which is the route into the rest of the firmware datastructures. */
3607 3632
3608 struct atmel_private *priv = netdev_priv(dev); 3633 struct atmel_private *priv = netdev_priv(dev);
3609 u8 configuration; 3634 u8 configuration;
3610 3635
3611 /* data to add to the firmware names, in priority order 3636 /* data to add to the firmware names, in priority order
3612 this implements firmware versioning */ 3637 this implements firmware versioning */
3613 3638
3614 static char *firmware_modifier[] = { 3639 static char *firmware_modifier[] = {
3615 "-wpa", 3640 "-wpa",
3616 "", 3641 "",
3617 NULL 3642 NULL
3618 }; 3643 };
3619 3644
3620 /* reset pccard */ 3645 /* reset pccard */
3621 if (priv->bus_type == BUS_TYPE_PCCARD) 3646 if (priv->bus_type == BUS_TYPE_PCCARD)
3622 atmel_write16(priv->dev, GCR, 0x0060); 3647 atmel_write16(priv->dev, GCR, 0x0060);
3623 3648
3624 /* stop card, disable interrupts */ 3649 /* stop card, disable interrupts */
3625 atmel_write16(priv->dev, GCR, 0x0040); 3650 atmel_write16(priv->dev, GCR, 0x0040);
3626 3651
3627 if (priv->card_type == CARD_TYPE_EEPROM) { 3652 if (priv->card_type == CARD_TYPE_EEPROM) {
3628 /* copy in firmware if needed */ 3653 /* copy in firmware if needed */
3629 const struct firmware *fw_entry = NULL; 3654 const struct firmware *fw_entry = NULL;
@@ -3636,13 +3661,13 @@ static int reset_atmel_card(struct net_device *dev)
3636 "%s: card type is unknown: assuming at76c502 firmware is OK.\n", 3661 "%s: card type is unknown: assuming at76c502 firmware is OK.\n",
3637 dev->name); 3662 dev->name);
3638 printk(KERN_INFO 3663 printk(KERN_INFO
3639 "%s: if not, use the firmware= module parameter.\n", 3664 "%s: if not, use the firmware= module parameter.\n",
3640 dev->name); 3665 dev->name);
3641 strcpy(priv->firmware_id, "atmel_at76c502.bin"); 3666 strcpy(priv->firmware_id, "atmel_at76c502.bin");
3642 } 3667 }
3643 if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) { 3668 if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) {
3644 printk(KERN_ALERT 3669 printk(KERN_ALERT
3645 "%s: firmware %s is missing, cannot continue.\n", 3670 "%s: firmware %s is missing, cannot continue.\n",
3646 dev->name, priv->firmware_id); 3671 dev->name, priv->firmware_id);
3647 return 0; 3672 return 0;
3648 } 3673 }
@@ -3654,7 +3679,7 @@ static int reset_atmel_card(struct net_device *dev)
3654 while (fw_table[fw_index].fw_type != priv->firmware_type 3679 while (fw_table[fw_index].fw_type != priv->firmware_type
3655 && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) 3680 && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE)
3656 fw_index++; 3681 fw_index++;
3657 3682
3658 /* construct the actual firmware file name */ 3683 /* construct the actual firmware file name */
3659 if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) { 3684 if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) {
3660 int i; 3685 int i;
@@ -3669,24 +3694,24 @@ static int reset_atmel_card(struct net_device *dev)
3669 } 3694 }
3670 } 3695 }
3671 if (!success) { 3696 if (!success) {
3672 printk(KERN_ALERT 3697 printk(KERN_ALERT
3673 "%s: firmware %s is missing, cannot start.\n", 3698 "%s: firmware %s is missing, cannot start.\n",
3674 dev->name, priv->firmware_id); 3699 dev->name, priv->firmware_id);
3675 priv->firmware_id[0] = '\0'; 3700 priv->firmware_id[0] = '\0';
3676 return 0; 3701 return 0;
3677 } 3702 }
3678 } 3703 }
3679 3704
3680 fw = fw_entry->data; 3705 fw = fw_entry->data;
3681 len = fw_entry->size; 3706 len = fw_entry->size;
3682 } 3707 }
3683 3708
3684 if (len <= 0x6000) { 3709 if (len <= 0x6000) {
3685 atmel_write16(priv->dev, BSR, BSS_IRAM); 3710 atmel_write16(priv->dev, BSR, BSS_IRAM);
3686 atmel_copy_to_card(priv->dev, 0, fw, len); 3711 atmel_copy_to_card(priv->dev, 0, fw, len);
3687 atmel_set_gcr(priv->dev, GCR_REMAP); 3712 atmel_set_gcr(priv->dev, GCR_REMAP);
3688 } else { 3713 } else {
3689 /* Remap */ 3714 /* Remap */
3690 atmel_set_gcr(priv->dev, GCR_REMAP); 3715 atmel_set_gcr(priv->dev, GCR_REMAP);
3691 atmel_write16(priv->dev, BSR, BSS_IRAM); 3716 atmel_write16(priv->dev, BSR, BSS_IRAM);
3692 atmel_copy_to_card(priv->dev, 0, fw, 0x6000); 3717 atmel_copy_to_card(priv->dev, 0, fw, 0x6000);
@@ -3708,45 +3733,45 @@ static int reset_atmel_card(struct net_device *dev)
3708 the 3com broken-ness filter. */ 3733 the 3com broken-ness filter. */
3709 priv->use_wpa = (priv->host_info.major_version == 4); 3734 priv->use_wpa = (priv->host_info.major_version == 4);
3710 priv->radio_on_broken = (priv->host_info.major_version == 5); 3735 priv->radio_on_broken = (priv->host_info.major_version == 5);
3711 3736
3712 /* unmask all irq sources */ 3737 /* unmask all irq sources */
3713 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff); 3738 atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff);
3714 3739
3715 /* init Tx system and enable Tx */ 3740 /* init Tx system and enable Tx */
3716 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0); 3741 atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0);
3717 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L); 3742 atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L);
3718 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0); 3743 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0);
3719 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0); 3744 atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0);
3720 3745
3721 priv->tx_desc_free = priv->host_info.tx_desc_count; 3746 priv->tx_desc_free = priv->host_info.tx_desc_count;
3722 priv->tx_desc_head = 0; 3747 priv->tx_desc_head = 0;
3723 priv->tx_desc_tail = 0; 3748 priv->tx_desc_tail = 0;
3724 priv->tx_desc_previous = 0; 3749 priv->tx_desc_previous = 0;
3725 priv->tx_free_mem = priv->host_info.tx_buff_size; 3750 priv->tx_free_mem = priv->host_info.tx_buff_size;
3726 priv->tx_buff_head = 0; 3751 priv->tx_buff_head = 0;
3727 priv->tx_buff_tail = 0; 3752 priv->tx_buff_tail = 0;
3728 3753
3729 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET)); 3754 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
3730 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), 3755 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
3731 configuration | FUNC_CTRL_TxENABLE); 3756 configuration | FUNC_CTRL_TxENABLE);
3732 3757
3733 /* init Rx system and enable */ 3758 /* init Rx system and enable */
3734 priv->rx_desc_head = 0; 3759 priv->rx_desc_head = 0;
3735 3760
3736 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET)); 3761 configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
3737 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), 3762 atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
3738 configuration | FUNC_CTRL_RxENABLE); 3763 configuration | FUNC_CTRL_RxENABLE);
3739 3764
3740 if (!priv->radio_on_broken) { 3765 if (!priv->radio_on_broken) {
3741 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == 3766 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
3742 CMD_STATUS_REJECTED_RADIO_OFF) { 3767 CMD_STATUS_REJECTED_RADIO_OFF) {
3743 printk(KERN_INFO 3768 printk(KERN_INFO
3744 "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n", 3769 "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
3745 dev->name); 3770 dev->name);
3746 return 0; 3771 return 0;
3747 } 3772 }
3748 } 3773 }
3749 3774
3750 /* set up enough MIB values to run. */ 3775 /* set up enough MIB values to run. */
3751 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate); 3776 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate);
3752 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF); 3777 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF);
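Part of the reset path above re-initialises the Tx bookkeeping: a free-descriptor count, head/tail indices, and a free-bytes counter for the on-card buffer region. A minimal sketch of that style of ring accounting; the sizes, the wrap-around and ring_queue() are illustrative rather than the driver's exact logic:

#include <stdio.h>

#define DESC_COUNT 8			/* arbitrary; the real count comes from hostinfo */
#define BUFF_SIZE  4096

struct tx_ring {
	int desc_free, desc_head, desc_tail;
	int free_mem, buff_head, buff_tail;
};

static void ring_reset(struct tx_ring *r)
{
	r->desc_free = DESC_COUNT;
	r->desc_head = r->desc_tail = 0;
	r->free_mem = BUFF_SIZE;
	r->buff_head = r->buff_tail = 0;
}

/* Claim one descriptor and len bytes of buffer, or fail if either is short. */
static int ring_queue(struct tx_ring *r, int len)
{
	if (!r->desc_free || r->free_mem < len)
		return -1;		/* the driver would stop the net queue */
	r->desc_free--;
	r->free_mem -= len;
	r->desc_head = (r->desc_head + 1) % DESC_COUNT;
	r->buff_head = (r->buff_head + len) % BUFF_SIZE;
	return 0;
}

int main(void)
{
	struct tx_ring ring;

	ring_reset(&ring);
	printf("queued: %d, free descriptors left: %d\n",
	       ring_queue(&ring, 1500), ring.desc_free);
	return 0;
}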
@@ -3755,7 +3780,7 @@ static int reset_atmel_card(struct net_device *dev)
3755 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry); 3780 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry);
3756 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry); 3781 atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry);
3757 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble); 3782 atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble);
3758 atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS, 3783 atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS,
3759 priv->dev->dev_addr, 6); 3784 priv->dev->dev_addr, 6);
3760 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE); 3785 atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
3761 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1); 3786 atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
@@ -3766,42 +3791,44 @@ static int reset_atmel_card(struct net_device *dev)
3766 build_wpa_mib(priv); 3791 build_wpa_mib(priv);
3767 else 3792 else
3768 build_wep_mib(priv); 3793 build_wep_mib(priv);
3769 3794
3770 return 1; 3795 return 1;
3771} 3796}
3772 3797
3773static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size) 3798static void atmel_send_command(struct atmel_private *priv, int command,
3799 void *cmd, int cmd_size)
3774{ 3800{
3775 if (cmd) 3801 if (cmd)
3776 atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET), 3802 atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET),
3777 cmd, cmd_size); 3803 cmd, cmd_size);
3778 3804
3779 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command); 3805 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command);
3780 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0); 3806 atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0);
3781} 3807}
3782 3808
3783static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size) 3809static int atmel_send_command_wait(struct atmel_private *priv, int command,
3810 void *cmd, int cmd_size)
3784{ 3811{
3785 int i, status; 3812 int i, status;
3786 3813
3787 atmel_send_command(priv, command, cmd, cmd_size); 3814 atmel_send_command(priv, command, cmd, cmd_size);
3788 3815
3789 for (i = 5000; i; i--) { 3816 for (i = 5000; i; i--) {
3790 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET)); 3817 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
3791 if (status != CMD_STATUS_IDLE && 3818 if (status != CMD_STATUS_IDLE &&
3792 status != CMD_STATUS_IN_PROGRESS) 3819 status != CMD_STATUS_IN_PROGRESS)
3793 break; 3820 break;
3794 udelay(20); 3821 udelay(20);
3795 } 3822 }
3796 3823
3797 if (i == 0) { 3824 if (i == 0) {
3798 printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name); 3825 printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
3799 status = CMD_STATUS_HOST_ERROR; 3826 status = CMD_STATUS_HOST_ERROR;
3800 } else { 3827 } else {
3801 if (command != CMD_EnableRadio) 3828 if (command != CMD_EnableRadio)
3802 status = CMD_STATUS_COMPLETE; 3829 status = CMD_STATUS_COMPLETE;
3803 } 3830 }
3804 3831
3805 return status; 3832 return status;
3806} 3833}
3807 3834
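atmel_send_command_wait() above is a simple mailbox handshake: write the parameters and command byte, clear the status byte, then poll it with a bounded udelay loop until the MAC reports something other than idle or in-progress. A user-space sketch of the same handshake with the shared command block faked as a struct and the MAC simulated by fake_mac():

#include <stdio.h>

enum {
	CMD_STATUS_IDLE,
	CMD_STATUS_IN_PROGRESS,
	CMD_STATUS_COMPLETE,
	CMD_STATUS_HOST_ERROR
};

struct mailbox {
	int command;
	int status;
};

/* Stand-in for the MAC firmware: finishes the command on tick 3. */
static void fake_mac(struct mailbox *mb, int tick)
{
	if (tick == 3)
		mb->status = CMD_STATUS_COMPLETE;
}

static int send_command_wait(struct mailbox *mb, int command)
{
	int i;

	mb->command = command;
	mb->status = CMD_STATUS_IDLE;		/* clear before polling */

	for (i = 5000; i; i--) {
		fake_mac(mb, 5000 - i);
		if (mb->status != CMD_STATUS_IDLE &&
		    mb->status != CMD_STATUS_IN_PROGRESS)
			return mb->status;
		/* the driver udelay(20)s between reads */
	}
	return CMD_STATUS_HOST_ERROR;		/* the MAC never answered */
}

int main(void)
{
	struct mailbox mb = { 0, CMD_STATUS_IDLE };

	printf("status=%d\n", send_command_wait(&mb, 42));
	return 0;
}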
@@ -3827,7 +3854,8 @@ static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 dat
3827 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1); 3854 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
3828} 3855}
3829 3856
3830static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data) 3857static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
3858 u16 data)
3831{ 3859{
3832 struct get_set_mib m; 3860 struct get_set_mib m;
3833 m.type = type; 3861 m.type = type;
@@ -3839,7 +3867,8 @@ static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 d
3839 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2); 3867 atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2);
3840} 3868}
3841 3869
3842static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len) 3870static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
3871 u8 *data, int data_len)
3843{ 3872{
3844 struct get_set_mib m; 3873 struct get_set_mib m;
3845 m.type = type; 3874 m.type = type;
@@ -3848,23 +3877,24 @@ static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *dat
 
 	if (data_len > MIB_MAX_DATA_BYTES)
 		printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
 
 	memcpy(m.data, data, data_len);
 	atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
 }
 
-static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len)
+static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
+			  u8 *data, int data_len)
 {
 	struct get_set_mib m;
 	m.type = type;
 	m.size = data_len;
 	m.index = index;
 
 	if (data_len > MIB_MAX_DATA_BYTES)
 		printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
 
 	atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
 	atmel_copy_to_host(priv->dev, data,
 			   atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len);
 }
 
@@ -3873,11 +3903,12 @@ static void atmel_writeAR(struct net_device *dev, u16 data)
 	int i;
 	outw(data, dev->base_addr + AR);
 	/* Address register appears to need some convincing..... */
-	for (i = 0; data != inw(dev->base_addr + AR) && i<10; i++)
+	for (i = 0; data != inw(dev->base_addr + AR) && i < 10; i++)
 		outw(data, dev->base_addr + AR);
 }
 
-static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len)
+static void atmel_copy_to_card(struct net_device *dev, u16 dest,
+			       unsigned char *src, u16 len)
 {
 	int i;
 	atmel_writeAR(dev, dest);
@@ -3894,7 +3925,8 @@ static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *
 		atmel_write8(dev, DR, *src);
 }
 
-static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len)
+static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
+			       u16 src, u16 len)
 {
 	int i;
 	atmel_writeAR(dev, src);
@@ -3930,22 +3962,24 @@ static int atmel_lock_mac(struct atmel_private *priv)
 			break;
 		udelay(20);
 	}
 
-	if (!i) return 0; /* timed out */
-
+	if (!i)
+		return 0; /* timed out */
+
 	atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1);
 	if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) {
 		atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
-		if (!j--) return 0; /* timed out */
+		if (!j--)
+			return 0; /* timed out */
 		goto retry;
 	}
 
 	return 1;
 }
 
 static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
 {
 	atmel_writeAR(priv->dev, pos);
 	atmel_write16(priv->dev, DR, data); /* card is little-endian */
 	atmel_write16(priv->dev, DR, data >> 16);
 }
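atmel_wmem32() in the hunk above feeds a 32-bit word to the 16-bit data register low half first, which is what produces the little-endian layout the comment mentions. A self-contained sketch of that split, writing into a byte buffer instead of a device register (write16_le() and buf are illustrative names, not driver code):

/* Illustration of the low-half-first split used by atmel_wmem32();
 * nothing here comes from atmel.c itself. */
#include <stdint.h>
#include <stdio.h>

static void write16_le(uint8_t *dst, uint16_t v)
{
	dst[0] = v & 0xff;	/* low byte first: little-endian */
	dst[1] = v >> 8;
}

int main(void)
{
	uint8_t buf[4];
	uint32_t data = 0x12345678;

	write16_le(buf, data & 0xffff);	/* low 16 bits first... */
	write16_le(buf + 2, data >> 16);	/* ...then the high 16 bits */

	/* prints "78 56 34 12": the card sees the word in little-endian order */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}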
@@ -4017,9 +4051,9 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
 	   serial output, since SO is normally high.  But it
 	   does cause 8 clock cycles and thus 8 bits to be
 	   clocked in to the chip.  See Atmel's SPI
 	   controller (e.g. AT91M55800) timing and 4K
 	   SPI EEPROM manuals */
 
 .set NVRAM_SCRATCH, 0x02000100	/* arbitrary area for scratchpad memory */
 .set NVRAM_IMAGE, 0x02000200
 .set NVRAM_LENGTH, 0x0200
@@ -4032,24 +4066,24 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
 .set MR4, 0xC
 RESET_VECTOR:
 	b RESET_HANDLER
 UNDEF_VECTOR:
 	b HALT1
 SWI_VECTOR:
 	b HALT1
 IABORT_VECTOR:
 	b HALT1
 DABORT_VECTOR:
 RESERVED_VECTOR:
 	b HALT1
 IRQ_VECTOR:
 	b HALT1
 FIQ_VECTOR:
 	b HALT1
 HALT1:	b HALT1
 RESET_HANDLER:
 	mov r0, #CPSR_INITIAL
 	msr CPSR_c, r0	/* This is probably unnecessary */
 
 /* I'm guessing this is initializing clock generator electronics for SPI */
 	ldr r0, =SPI_CGEN_BASE
 	mov r1, #0
@@ -4061,7 +4095,7 @@ RESET_HANDLER:
 	str r1, [r0, #28]
 	mov r1, #1
 	str r1, [r0, #8]
 
 	ldr r0, =MRBASE
 	mov r1, #0
 	strh r1, [r0, #MR1]
@@ -4094,7 +4128,7 @@ GET_WHOLE_NVRAM:
 	ldmia sp!, {lr}
 	bx lr
 .endfunc
 
 .func Get_MAC_Addr, GET_MAC_ADDR
 GET_MAC_ADDR:
 	stmdb sp!, {lr}
@@ -4110,13 +4144,13 @@ GET_MAC_ADDR:
 .func Delay9, DELAY9
 DELAY9:
 	adds r0, r0, r0, LSL #3	/* r0 = r0 * 9 */
 DELAYLOOP:
 	beq DELAY9_done
 	subs r0, r0, #1
 	b DELAYLOOP
 DELAY9_done:
 	bx lr
 .endfunc
 
 .func SP_Init, SP_INIT
 SP_INIT:
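The Delay9 routine in the firmware listing above scales its argument by nine with a shift-and-add (r0 + (r0 << 3)) and then counts down in a busy loop. The same trick expressed as a rough C sketch; it demonstrates the iteration count only, not the real hardware timing:

/* Sketch of Delay9's multiply-by-9 via shift-add; not a calibrated delay. */
#include <stdio.h>

static unsigned long delay9(unsigned long n)
{
	unsigned long iters = n + (n << 3);	/* n * 9 without a multiply */
	volatile unsigned long count = iters;	/* volatile keeps the busy loop */

	while (count)
		count--;
	return iters;
}

int main(void)
{
	printf("delay9(7) spins %lu times\n", delay9(7));	/* 63 */
	return 0;
}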
@@ -4145,26 +4179,26 @@ SP_INIT:
 	ldr r0, [r0, #SP_RDR]
 	bx lr
 .endfunc
 .func NVRAM_Init, NVRAM_INIT
 NVRAM_INIT:
 	ldr r1, =SP_BASE
 	ldr r0, [r1, #SP_RDR]
 	mov r0, #NVRAM_CMD_RDSR
 	str r0, [r1, #SP_TDR]
 SP_loop1:
 	ldr r0, [r1, #SP_SR]
 	tst r0, #SP_TDRE
 	beq SP_loop1
 
 	mov r0, #SPI_8CLOCKS
 	str r0, [r1, #SP_TDR]
 SP_loop2:
 	ldr r0, [r1, #SP_SR]
 	tst r0, #SP_TDRE
 	beq SP_loop2
 
 	ldr r0, [r1, #SP_RDR]
 SP_loop3:
 	ldr r0, [r1, #SP_SR]
 	tst r0, #SP_RDRF
 	beq SP_loop3
@@ -4173,7 +4207,7 @@ SP_loop3:
 	and r0, r0, #255
 	bx lr
 .endfunc
 
 .func NVRAM_Xfer, NVRAM_XFER
 	/* r0 = dest address */
 	/* r1 = not used */
@@ -4185,11 +4219,11 @@ NVRAM_XFER:
 	mov r4, r3	/* save r3 (length) */
 	mov r0, r2, LSR #5	/* SPI memories put A8 in the command field */
 	and r0, r0, #8
 	add r0, r0, #NVRAM_CMD_READ
 	ldr r1, =NVRAM_SCRATCH
 	strb r0, [r1, #0]	/* save command in NVRAM_SCRATCH[0] */
 	strb r2, [r1, #1]	/* save low byte of source address in NVRAM_SCRATCH[1] */
 _local1:
 	bl NVRAM_INIT
 	tst r0, #NVRAM_SR_RDY
 	bne _local1
@@ -4211,7 +4245,7 @@ NVRAM_XFER2:
 	cmp r0, #0
 	bls _local2
 	ldr r5, =NVRAM_SCRATCH
 _local4:
 	ldrb r6, [r5, r3]
 	str r6, [r4, #SP_TDR]
 _local3:
@@ -4225,7 +4259,7 @@ _local2:
 	mov r3, #SPI_8CLOCKS
 	str r3, [r4, #SP_TDR]
 	ldr r0, [r4, #SP_RDR]
 _local5:
 	ldr r0, [r4, #SP_SR]
 	tst r0, #SP_RDRF
 	beq _local5
@@ -4233,12 +4267,12 @@ _local5:
 	mov r0, #0
 	cmp r2, #0	/* r2 is # of bytes to copy in */
 	bls _local6
 _local7:
 	ldr r5, [r4, #SP_SR]
 	tst r5, #SP_TDRE
 	beq _local7
 	str r3, [r4, #SP_TDR]	/* r3 has SPI_8CLOCKS */
 _local8:
 	ldr r5, [r4, #SP_SR]
 	tst r5, #SP_RDRF
 	beq _local8
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index fc62235bfc24..353ccb93134b 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,3 +1,4 @@
+hostap-y := hostap_main.o
 obj-$(CONFIG_HOSTAP) += hostap.o
 
 obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..3d2ea61033be 100644
--- a/drivers/net/wireless/hostap/hostap.c
+++ b/drivers/net/wireless/hostap/hostap_main.c