Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c                 13
-rw-r--r--  drivers/net/8139too.c               86
-rw-r--r--  drivers/net/Kconfig                 20
-rw-r--r--  drivers/net/Makefile                 1
-rw-r--r--  drivers/net/bonding/Makefile         2
-rw-r--r--  drivers/net/bonding/bond_3ad.c      74
-rw-r--r--  drivers/net/bonding/bond_alb.c      56
-rw-r--r--  drivers/net/bonding/bond_main.c    343
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  1399
-rw-r--r--  drivers/net/bonding/bonding.h       37
-rw-r--r--  drivers/net/gianfar.c                2
-rw-r--r--  drivers/net/gianfar.h                2
-rw-r--r--  drivers/net/gianfar_ethtool.c        2
-rw-r--r--  drivers/net/gianfar_mii.c            2
-rw-r--r--  drivers/net/gianfar_mii.h            2
-rw-r--r--  drivers/net/irda/ali-ircc.c          1
-rw-r--r--  drivers/net/irda/nsc-ircc.c          1
-rw-r--r--  drivers/net/sis900.c                73
-rw-r--r--  drivers/net/sis900.h                45
-rw-r--r--  drivers/net/sky2.c                3039
-rw-r--r--  drivers/net/sky2.h                1910
-rw-r--r--  drivers/net/smc91x.h                16
-rw-r--r--  drivers/net/sungem.c                 2
-rw-r--r--  drivers/net/wan/sdladrv.c            2
-rw-r--r--  drivers/net/wireless/ipw2200.c       7
25 files changed, 6878 insertions, 259 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 977935a3d898..824e430486c2 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -84,6 +84,7 @@ static int max_interrupt_work = 10;
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/pm.h>
+#include <linux/pm_legacy.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>	/* for udelay() */
 #include <linux/spinlock.h>
@@ -173,7 +174,7 @@ struct el3_private {
 	/* skb send-queue */
 	int head, size;
 	struct sk_buff *queue[SKB_QUEUE_SIZE];
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_LEGACY
 	struct pm_dev *pmdev;
 #endif
 	enum {
@@ -200,7 +201,7 @@ static void el3_tx_timeout (struct net_device *dev);
 static void el3_down(struct net_device *dev);
 static void el3_up(struct net_device *dev);
 static struct ethtool_ops ethtool_ops;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_LEGACY
 static int el3_suspend(struct pm_dev *pdev);
 static int el3_resume(struct pm_dev *pdev);
 static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data);
@@ -361,7 +362,7 @@ static void el3_common_remove (struct net_device *dev)
 	struct el3_private *lp = netdev_priv(dev);
 
 	(void) lp;	/* Keep gcc quiet... */
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_LEGACY
 	if (lp->pmdev)
 		pm_unregister(lp->pmdev);
 #endif
@@ -571,7 +572,7 @@ no_pnp:
 	if (err)
 		goto out1;
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_LEGACY
 	/* register power management */
 	lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
 	if (lp->pmdev) {
@@ -1479,7 +1480,7 @@ el3_up(struct net_device *dev)
 }
 
 /* Power Management support functions */
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_LEGACY
 
 static int
 el3_suspend(struct pm_dev *pdev)
@@ -1548,7 +1549,7 @@ el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
 	return 0;
 }
 
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_LEGACY */
 
 /* Parameters that may be passed into the module. */
 static int debug = -1;
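
The 3c509 hunks above only swap the CONFIG_PM guard for CONFIG_PM_LEGACY around the old-style pm_register()/pm_unregister() hooks. As a rough sketch of that legacy callback pattern (names below are invented for illustration, not taken from the driver):

#include <linux/pm.h>
#include <linux/pm_legacy.h>

#ifdef CONFIG_PM_LEGACY
static struct pm_dev *example_pmdev;

/* Legacy-style callback: one entry point, multiplexed on the request type. */
static int example_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
{
	switch (rqst) {
	case PM_SUSPEND:
		/* quiesce the device here */
		break;
	case PM_RESUME:
		/* bring the device back up here */
		break;
	}
	return 0;
}

static void example_register_pm(int card_idx)
{
	/* mirrors the pm_register(PM_ISA_DEV, card_idx, ...) call above */
	example_pmdev = pm_register(PM_ISA_DEV, card_idx, example_pm_callback);
}

static void example_unregister_pm(void)
{
	if (example_pmdev)
		pm_unregister(example_pmdev);
}
#endif /* CONFIG_PM_LEGACY */
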
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 30bee11c48bd..d2102a27d307 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -586,16 +586,16 @@ struct rtl8139_private {
 	dma_addr_t tx_bufs_dma;
 	signed char phys[4];	/* MII device addresses. */
 	char twistie, twist_row, twist_col;	/* Twister tune state. */
-	unsigned int default_port:4;	/* Last dev->if_port value. */
+	unsigned int default_port : 4;	/* Last dev->if_port value. */
+	unsigned int have_thread : 1;
 	spinlock_t lock;
 	spinlock_t rx_lock;
 	chip_t chipset;
-	pid_t thr_pid;
-	wait_queue_head_t thr_wait;
-	struct completion thr_exited;
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
-	int time_to_die;
+
+	struct work_struct thread;
+
 	struct mii_if_info mii;
 	unsigned int regs_len;
 	unsigned long fifo_copy_timeout;
@@ -620,7 +620,7 @@ static int rtl8139_open (struct net_device *dev);
 static int mdio_read (struct net_device *dev, int phy_id, int location);
 static void mdio_write (struct net_device *dev, int phy_id, int location,
 			int val);
-static void rtl8139_start_thread(struct net_device *dev);
+static void rtl8139_start_thread(struct rtl8139_private *tp);
 static void rtl8139_tx_timeout (struct net_device *dev);
 static void rtl8139_init_ring (struct net_device *dev);
 static int rtl8139_start_xmit (struct sk_buff *skb,
@@ -637,6 +637,7 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
+static void rtl8139_thread (void *_data);
 static struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1007,8 +1008,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	init_waitqueue_head (&tp->thr_wait);
-	init_completion (&tp->thr_exited);
+	INIT_WORK(&tp->thread, rtl8139_thread, dev);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1345,7 +1345,7 @@ static int rtl8139_open (struct net_device *dev)
 			dev->irq, RTL_R8 (MediaStatus),
 			tp->mii.full_duplex ? "full" : "half");
 
-	rtl8139_start_thread(dev);
+	rtl8139_start_thread(tp);
 
 	return 0;
 }
@@ -1594,55 +1594,43 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
 		 RTL_R8 (Config1));
 }
 
-static int rtl8139_thread (void *data)
+static void rtl8139_thread (void *_data)
 {
-	struct net_device *dev = data;
+	struct net_device *dev = _data;
 	struct rtl8139_private *tp = netdev_priv(dev);
-	unsigned long timeout;
-
-	daemonize("%s", dev->name);
-	allow_signal(SIGTERM);
-
-	while (1) {
-		timeout = next_tick;
-		do {
-			timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
-			/* make swsusp happy with our thread */
-			try_to_freeze();
-		} while (!signal_pending (current) && (timeout > 0));
-
-		if (signal_pending (current)) {
-			flush_signals(current);
-		}
+	unsigned long thr_delay;
 
-		if (tp->time_to_die)
-			break;
-
-		if (rtnl_lock_interruptible ())
-			break;
+	if (rtnl_shlock_nowait() == 0) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
+
+		thr_delay = next_tick;
+	} else {
+		/* unlikely race. mitigate with fast poll. */
+		thr_delay = HZ / 2;
 	}
 
-	complete_and_exit (&tp->thr_exited, 0);
+	schedule_delayed_work(&tp->thread, thr_delay);
 }
 
-static void rtl8139_start_thread(struct net_device *dev)
+static void rtl8139_start_thread(struct rtl8139_private *tp)
 {
-	struct rtl8139_private *tp = netdev_priv(dev);
-
-	tp->thr_pid = -1;
 	tp->twistie = 0;
-	tp->time_to_die = 0;
 	if (tp->chipset == CH_8139_K)
 		tp->twistie = 1;
 	else if (tp->drv_flags & HAS_LNK_CHNG)
 		return;
 
-	tp->thr_pid = kernel_thread(rtl8139_thread, dev, CLONE_FS|CLONE_FILES);
-	if (tp->thr_pid < 0) {
-		printk (KERN_WARNING "%s: unable to start kernel thread\n",
-			dev->name);
+	tp->have_thread = 1;
+
+	schedule_delayed_work(&tp->thread, next_tick);
+}
+
+static void rtl8139_stop_thread(struct rtl8139_private *tp)
+{
+	if (tp->have_thread) {
+		cancel_rearming_delayed_work(&tp->thread);
+		tp->have_thread = 0;
 	}
 }
 
@@ -2224,22 +2212,12 @@ static int rtl8139_close (struct net_device *dev)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	int ret = 0;
 	unsigned long flags;
 
 	netif_stop_queue (dev);
 
-	if (tp->thr_pid >= 0) {
-		tp->time_to_die = 1;
-		wmb();
-		ret = kill_proc (tp->thr_pid, SIGTERM, 1);
-		if (ret) {
-			printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
-			return ret;
-		}
-		wait_for_completion (&tp->thr_exited);
-	}
-
+	rtl8139_stop_thread(tp);
+
 	if (netif_msg_ifdown(tp))
 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n",
 			dev->name, RTL_R16 (IntrStatus));
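
The 8139too change above drops the driver's kernel thread (kernel_thread() plus signal and completion handshaking) in favour of a self-rearming work item. A minimal sketch of that pattern, using the same 2.6-era workqueue calls the patch relies on (the struct and function names here are invented):

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct thread;	/* old-style work_struct: (func, data) set by INIT_WORK */
	unsigned int have_thread : 1;
};

static void example_work_fn(void *_data)
{
	struct example_priv *priv = _data;

	/* ... periodic link maintenance would go here ... */

	schedule_delayed_work(&priv->thread, HZ);	/* re-arm ourselves */
}

static void example_start(struct example_priv *priv)
{
	INIT_WORK(&priv->thread, example_work_fn, priv);
	priv->have_thread = 1;
	schedule_delayed_work(&priv->thread, HZ);
}

static void example_stop(struct example_priv *priv)
{
	if (priv->have_thread) {
		/* stops a work item that keeps rescheduling itself */
		cancel_rearming_delayed_work(&priv->thread);
		priv->have_thread = 0;
	}
}
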
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ebd7313d7fc1..39415b5c4569 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2008,7 +2008,25 @@ config SKGE
 
 	  It does not support the link failover and network management
 	  features that "portable" vendor supplied sk98lin driver does.
 
+
+config SKY2
+	tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2.  This is recommended.
+
+config SKY2_EC_A1
+	bool "Support old Yukon-EC A1 chipset"
+	depends on SKY2
+	---help---
+	  Include support for early revisions of the Yukon EC chipset
+	  that required extra workarounds. If in doubt, say N.
+
 config SK98LIN
 	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4cffd34442aa..27822a2f0683 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -59,6 +59,7 @@ spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index cf50384b469e..5cdae2bc055a 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d2f34d5a8083..04705233ca0b 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1198,10 +1198,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 		// detect loopback situation
 		if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
 			// INFO_RECEIVED_LOOPBACK_FRAMES
-			printk(KERN_ERR DRV_NAME ": An illegal loopback occurred on adapter (%s)\n",
-			       port->slave->dev->name);
-			printk(KERN_ERR "Check the configuration to verify that all Adapters "
-			       "are connected to 802.3ad compliant switch ports\n");
+			printk(KERN_ERR DRV_NAME ": %s: An illegal loopback occurred on "
+			       "adapter (%s). Check the configuration to verify that all "
+			       "Adapters are connected to 802.3ad compliant switch ports\n",
+			       port->slave->dev->master->name, port->slave->dev->name);
 			__release_rx_machine_lock(port);
 			return;
 		}
@@ -1378,8 +1378,9 @@ static void ad_port_selection_logic(struct port *port)
 		}
 	}
 	if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
-		printk(KERN_WARNING DRV_NAME ": Warning: Port %d (on %s) was "
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: Port %d (on %s) was "
 		       "related to aggregator %d but was not on its port list\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name,
 		       port->aggregator->aggregator_identifier);
 	}
@@ -1450,7 +1451,8 @@ static void ad_port_selection_logic(struct port *port)
 
 		dprintk("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
 	} else {
-		printk(KERN_ERR DRV_NAME ": Port %d (on %s) did not find a suitable aggregator\n",
+		printk(KERN_ERR DRV_NAME ": %s: Port %d (on %s) did not find a suitable aggregator\n",
+		       port->slave->dev->master->name,
 		       port->actor_port_number, port->slave->dev->name);
 	}
 }
@@ -1582,8 +1584,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator)
 
 	// check if any partner replys
 	if (best_aggregator->is_individual) {
-		printk(KERN_WARNING DRV_NAME ": Warning: No 802.3ad response from the link partner "
-		       "for any adapters in the bond\n");
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: No 802.3ad response from "
+		       "the link partner for any adapters in the bond\n",
+		       best_aggregator->slave->dev->master->name);
 	}
 
 	// check if there are more than one aggregator
@@ -1915,7 +1918,8 @@ int bond_3ad_bind_slave(struct slave *slave)
 	struct aggregator *aggregator;
 
 	if (bond == NULL) {
-		printk(KERN_ERR "The slave %s is not attached to its bond\n", slave->dev->name);
+		printk(KERN_ERR DRV_NAME ": %s: The slave %s is not attached to its bond\n",
+		       slave->dev->master->name, slave->dev->name);
 		return -1;
 	}
 
@@ -1990,7 +1994,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Trying to unbind an uninitialized port on %s\n", slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: Trying to "
+		       "unbind an uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2021,7 +2027,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 		dprintk("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
 
 		if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
-			printk(KERN_INFO DRV_NAME ": Removing an active aggregator\n");
+			printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+			       aggregator->slave->dev->master->name);
 			// select new active aggregator
 			select_new_active_agg = 1;
 		}
@@ -2051,15 +2058,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				ad_agg_selection_logic(__get_first_agg(port));
 			}
 		} else {
-			printk(KERN_WARNING DRV_NAME ": Warning: unbinding aggregator, "
-			       "and could not find a new aggregator for its ports\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: unbinding aggregator, "
+			       "and could not find a new aggregator for its ports\n",
+			       slave->dev->master->name);
 		}
 	} else { // in case that the only port related to this aggregator is the one we want to remove
 		select_new_active_agg = aggregator->is_active;
 		// clear the aggregator
 		ad_clear_agg(aggregator);
 		if (select_new_active_agg) {
-			printk(KERN_INFO "Removing an active aggregator\n");
+			printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+			       slave->dev->master->name);
 			// select new active aggregator
 			ad_agg_selection_logic(__get_first_agg(port));
 		}
@@ -2085,7 +2094,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				// clear the aggregator
 				ad_clear_agg(temp_aggregator);
 				if (select_new_active_agg) {
-					printk(KERN_INFO "Removing an active aggregator\n");
+					printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
+					       slave->dev->master->name);
 					// select new active aggregator
 					ad_agg_selection_logic(__get_first_agg(port));
 				}
@@ -2131,7 +2141,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// select the active aggregator for the bond
 	if ((port = __get_first_port(bond))) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: bond's first port is uninitialized\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: bond's first port is "
+			       "uninitialized\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2143,7 +2154,8 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	// for each port run the state machines
 	for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: Found an uninitialized port\n");
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: Found an uninitialized "
+			       "port\n", bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2184,7 +2196,8 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 		port = &(SLAVE_AD_INFO(slave).port);
 
 		if (!port->slave) {
-			printk(KERN_WARNING DRV_NAME ": Warning: port of slave %s is uninitialized\n", slave->dev->name);
+			printk(KERN_WARNING DRV_NAME ": %s: Warning: port of slave %s is "
+			       "uninitialized\n", slave->dev->name, slave->dev->master->name);
 			return;
 		}
 
@@ -2230,8 +2243,9 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: speed changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: speed "
+		       "changed for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2257,8 +2271,9 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: duplex changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": %s: Warning: duplex changed "
+		       "for uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2285,8 +2300,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	// if slave is null, the whole port is not initialized
 	if (!port->slave) {
-		printk(KERN_WARNING DRV_NAME ": Warning: link status changed for uninitialized port on %s\n",
-		       slave->dev->name);
+		printk(KERN_WARNING DRV_NAME ": Warning: %s: link status changed for "
+		       "uninitialized port on %s\n",
+		       slave->dev->master->name, slave->dev->name);
 		return;
 	}
 
@@ -2363,7 +2379,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-		printk(KERN_DEBUG "ERROR: bond_3ad_get_active_agg_info failed\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: "
+		       "bond_3ad_get_active_agg_info failed\n", dev->name);
 		goto out;
 	}
 
@@ -2372,7 +2389,9 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 
 	if (slaves_in_agg == 0) {
 		/*the aggregator is empty*/
-		printk(KERN_DEBUG "ERROR: active aggregator is empty\n");
+		printk(KERN_DEBUG DRV_NAME ": %s: Error: active "
+		       "aggregator is empty\n",
+		       dev->name);
 		goto out;
 	}
 
@@ -2390,7 +2409,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (slave_agg_no >= 0) {
-		printk(KERN_ERR DRV_NAME ": Error: Couldn't find a slave to tx on for aggregator ID %d\n", agg_id);
+		printk(KERN_ERR DRV_NAME ": %s: Error: Couldn't find a slave to tx on "
+		       "for aggregator ID %d\n", dev->name, agg_id);
 		goto out;
 	}
 
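
Every bond_3ad hunk above makes the same kind of change: log messages gain a DRV_NAME plus bond (master) device name prefix, so output reads "bonding: bond0: ...". An illustrative helper in that style (made up for this note, not part of the patch):

/* Illustrative only -- mirrors the message convention used in the hunks above. */
static void example_warn_uninitialized_port(struct slave *slave)
{
	printk(KERN_WARNING DRV_NAME ": %s: Warning: port of slave %s is "
	       "uninitialized\n",
	       slave->dev->master->name, slave->dev->name);
}
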
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f8fce3961197..9bd1e104554a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -198,20 +198,21 @@ static int tlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
+	struct tlb_client_info *new_hashtbl;
 	int i;
 
 	spin_lock_init(&(bond_info->tx_hashtbl_lock));
 
-	_lock_tx_hashtbl(bond);
-
-	bond_info->tx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->tx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate TLB hash table\n",
+		       ": %s: Error: Failed to allocate TLB hash table\n",
 		       bond->dev->name);
-		_unlock_tx_hashtbl(bond);
 		return -1;
 	}
+	_lock_tx_hashtbl(bond);
+
+	bond_info->tx_hashtbl = new_hashtbl;
 
 	memset(bond_info->tx_hashtbl, 0, size);
 
@@ -513,7 +514,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 				 client_info->mac_dst);
 		if (!skb) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: failed to create an ARP packet\n");
+			       ": %s: Error: failed to create an ARP packet\n",
+			       client_info->slave->dev->master->name);
 			continue;
 		}
 
@@ -523,7 +525,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 			skb = vlan_put_tag(skb, client_info->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       client_info->slave->dev->master->name);
 				continue;
 			}
 		}
@@ -606,8 +609,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
 
 		if (!client_info->slave) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: found a client with no channel in "
-			       "the client's hash table\n");
+			       ": %s: Error: found a client with no channel in "
+			       "the client's hash table\n",
+			       bond->dev->name);
 			continue;
 		}
 		/*update all clients using this src_ip, that are not assigned
@@ -797,21 +801,22 @@ static int rlb_initialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
+	struct rlb_client_info *new_hashtbl;
 	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
 	int i;
 
 	spin_lock_init(&(bond_info->rx_hashtbl_lock));
 
-	_lock_rx_hashtbl(bond);
-
-	bond_info->rx_hashtbl = kmalloc(size, GFP_KERNEL);
-	if (!bond_info->rx_hashtbl) {
+	new_hashtbl = kmalloc(size, GFP_KERNEL);
+	if (!new_hashtbl) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: Failed to allocate RLB hash table\n",
+		       ": %s: Error: Failed to allocate RLB hash table\n",
 		       bond->dev->name);
-		_unlock_rx_hashtbl(bond);
 		return -1;
 	}
+	_lock_rx_hashtbl(bond);
+
+	bond_info->rx_hashtbl = new_hashtbl;
 
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
@@ -927,7 +932,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 			skb = vlan_put_tag(skb, vlan->vlan_id);
 			if (!skb) {
 				printk(KERN_ERR DRV_NAME
-				       ": Error: failed to insert VLAN tag\n");
+				       ": %s: Error: failed to insert VLAN tag\n",
+				       bond->dev->name);
 				continue;
 			}
 		}
@@ -956,11 +962,11 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
 	s_addr.sa_family = dev->type;
 	if (dev_set_mac_address(dev, &s_addr)) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: dev_set_mac_address of dev %s failed! ALB "
+		       ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
 		       "mode requires that the base driver support setting "
 		       "the hw address also when the network device's "
 		       "interface is open\n",
-		       dev->name);
+		       dev->master->name, dev->name);
 		return -EOPNOTSUPP;
 	}
 	return 0;
@@ -1153,16 +1159,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 						 bond->alb_info.rlb_enabled);
 
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: the hw address of slave %s is in use by "
+		       ": %s: Warning: the hw address of slave %s is in use by "
 		       "the bond; giving it the hw address of %s\n",
-		       slave->dev->name, free_mac_slave->dev->name);
+		       bond->dev->name, slave->dev->name, free_mac_slave->dev->name);
 
 	} else if (has_bond_addr) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: the hw address of slave %s is in use by the "
+		       ": %s: Error: the hw address of slave %s is in use by the "
 		       "bond; couldn't find a slave with a free hw address to "
 		       "give it (this should not have happened)\n",
-		       slave->dev->name);
+		       bond->dev->name, slave->dev->name);
 		return -EFAULT;
 	}
 
@@ -1250,6 +1256,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
 			tlb_deinitialize(bond);
 			return res;
 		}
+	} else {
+		bond->alb_info.rlb_enabled = 0;
 	}
 
 	return 0;
@@ -1409,7 +1417,7 @@ void bond_alb_monitor(struct bonding *bond)
 		read_lock(&bond->curr_slave_lock);
 
 		bond_for_each_slave(bond, slave, i) {
-			alb_send_learning_packets(slave,slave->dev->dev_addr);
+			alb_send_learning_packets(slave, slave->dev->dev_addr);
 		}
 
 		read_unlock(&bond->curr_slave_lock);
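
The tlb_initialize()/rlb_initialize() hunks above fix the ordering problem noted in the bond_main.c changelog below ("kmalloc is called inside a held lock"): the GFP_KERNEL allocation, which may sleep, now happens before the hash-table spinlock is taken, and the pointer is published afterwards under the lock. A stripped-down sketch of that shape (types and names are invented):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct example_table {
	spinlock_t lock;
	void *hashtbl;
};

static int example_table_init(struct example_table *t, size_t size)
{
	void *new_hashtbl;

	/* GFP_KERNEL may sleep, so allocate before taking the spinlock. */
	new_hashtbl = kmalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -1;

	spin_lock_init(&t->lock);

	spin_lock_bh(&t->lock);
	t->hashtbl = new_hashtbl;
	memset(t->hashtbl, 0, size);
	spin_unlock_bh(&t->lock);

	return 0;
}
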
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94cec3cf2a13..40ff79175c4a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -489,6 +489,28 @@
  *	  Set version to 2.6.3.
  * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
  *	- Removed backwards compatibility for old ifenslaves. Version 2.6.4.
+ * 2005/09/27 - Mitch Williams <mitch.a.williams at intel dot com>
+ *	- Radheka Godse <radheka.godse at intel dot com>
+ *	- Split out bond creation code to allow for sysfs interface.
+ *	- Removed static declaration on some functions and data items.
+ *	- Added sysfs support, including capability to add/remove/change
+ *	  any bond at runtime.
+ *
+ *	- Miscellaneous:
+ *	  - Added bonding: <bondname>: prefix to sysfs log messages
+ *	  - Added arp_ip_targets to /proc entry
+ *	  - Allow ARP target table to have empty entries
+ *	  - trivial fix: added missing modes description to modinfo
+ *	  - Corrected bug in ALB init where kmalloc is called inside
+ *	    a held lock
+ *	  - Corrected behavior to maintain bond link when changing
+ *	    from arp monitor to miimon and vice versa
+ *	  - Added missing bonding: <bondname>: prefix to alb, ad log messages
+ *	  - Fixed stack dump warnings seen if changing between miimon
+ *	    and arp monitoring when the bond interface is down.
+ *	  - Fixed stack dump warnings seen when enslaving an e100
+ *	    driver
+ *	  - Set version to 3.0.0
  */
 
 //#define BONDING_DEBUG 1
@@ -557,6 +579,7 @@ static char *lacp_rate = NULL;
 static char *xmit_hash_policy = NULL;
 static int arp_interval = BOND_LINK_ARP_INTERV;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
+struct bond_params bonding_defaults;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -565,17 +588,24 @@ MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 module_param(updelay, int, 0);
 MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
 module_param(downdelay, int, 0);
-MODULE_PARM_DESC(downdelay, "Delay before considering link down, in milliseconds");
+MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
+			    "in milliseconds");
 module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; 0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
+			      "0 for off, 1 for on (default)");
 module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Mode of operation : 0 for round robin, 1 for active-backup, 2 for xor");
+MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
+		       "1 for active-backup, 2 for balance-xor, "
+		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
+		       "6 for balance-alb");
 module_param(primary, charp, 0);
 MODULE_PARM_DESC(primary, "Primary network device to use");
 module_param(lacp_rate, charp, 0);
-MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner (slow/fast)");
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
+			    "(slow/fast)");
 module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method : 0 for layer 2 (default), 1 for layer 3+4");
+MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
+				   ", 1 for layer 3+4");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
@@ -586,30 +616,27 @@ MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 static const char *version =
 	DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
-static LIST_HEAD(bond_dev_list);
+LIST_HEAD(bond_dev_list);
 
 #ifdef CONFIG_PROC_FS
 static struct proc_dir_entry *bond_proc_dir = NULL;
 #endif
 
+extern struct rw_semaphore bonding_rwsem;
 static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
 static int arp_ip_count = 0;
 static int bond_mode = BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast = 0;
 
-struct bond_parm_tbl {
-	char *modename;
-	int mode;
-};
 
-static struct bond_parm_tbl bond_lacp_tbl[] = {
+struct bond_parm_tbl bond_lacp_tbl[] = {
 { "slow", AD_LACP_SLOW},
 { "fast", AD_LACP_FAST},
 { NULL, -1},
 };
 
-static struct bond_parm_tbl bond_mode_tbl[] = {
+struct bond_parm_tbl bond_mode_tbl[] = {
 { "balance-rr", BOND_MODE_ROUNDROBIN},
 { "active-backup", BOND_MODE_ACTIVEBACKUP},
 { "balance-xor", BOND_MODE_XOR},
@@ -620,7 +647,7 @@ static struct bond_parm_tbl bond_mode_tbl[] = {
 { NULL, -1},
 };
 
-static struct bond_parm_tbl xmit_hashtype_tbl[] = {
+struct bond_parm_tbl xmit_hashtype_tbl[] = {
 { "layer2", BOND_XMIT_POLICY_LAYER2},
 { "layer3+4", BOND_XMIT_POLICY_LAYER34},
 { NULL, -1},
@@ -628,12 +655,11 @@ static struct bond_parm_tbl xmit_hashtype_tbl[] = {
 
 /*-------------------------- Forward declarations ---------------------------*/
 
-static inline void bond_set_mode_ops(struct bonding *bond, int mode);
 static void bond_send_gratuitous_arp(struct bonding *bond);
 
 /*---------------------------- General routines -----------------------------*/
 
-static const char *bond_mode_name(int mode)
+const char *bond_mode_name(int mode)
 {
 	switch (mode) {
 	case BOND_MODE_ROUNDROBIN :
@@ -910,7 +936,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 	res = bond_add_vlan(bond, vid);
 	if (res) {
 		printk(KERN_ERR DRV_NAME
-		       ": %s: Failed to add vlan id %d\n",
+		       ": %s: Error: Failed to add vlan id %d\n",
 		       bond_dev->name, vid);
 	}
 }
@@ -944,7 +970,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 	res = bond_del_vlan(bond, vid);
 	if (res) {
 		printk(KERN_ERR DRV_NAME
-		       ": %s: Failed to remove vlan id %d\n",
+		       ": %s: Error: Failed to remove vlan id %d\n",
 		       bond_dev->name, vid);
 	}
 }
@@ -1449,7 +1475,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
  *
  * Warning: Caller must hold curr_slave_lock for writing.
  */
-static void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
+void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
 	struct slave *old_active = bond->curr_active_slave;
 
@@ -1523,7 +1549,7 @@ static void bond_change_active_slave(struct bonding *bond, struct slave *new_act
  *
 * Warning: Caller must hold curr_slave_lock for writing.
 */
-static void bond_select_active_slave(struct bonding *bond)
+void bond_select_active_slave(struct bonding *bond)
 {
 	struct slave *best_slave;
 
@@ -1591,7 +1617,7 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 
 /*---------------------------------- IOCTL ----------------------------------*/
 
-static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
+int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
 {
 	dprintk("bond_dev=%p\n", bond_dev);
 	dprintk("slave_dev=%p\n", slave_dev);
@@ -1631,7 +1657,7 @@ static int bond_compute_features(struct bonding *bond)
 }
 
 /* enslave device <slave> to bond device <master> */
-static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *new_slave = NULL;
@@ -1644,8 +1670,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
 	    slave_dev->do_ioctl == NULL) {
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning : no link monitoring support for %s\n",
-		       slave_dev->name);
+		       ": %s: Warning: no link monitoring support for %s\n",
+		       bond_dev->name, slave_dev->name);
 	}
 
 	/* bond must be initialized by bond_open() before enslaving */
@@ -1666,17 +1692,17 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 		dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
 		if (!list_empty(&bond->vlan_list)) {
 			printk(KERN_ERR DRV_NAME
-			       ": Error: cannot enslave VLAN "
+			       ": %s: Error: cannot enslave VLAN "
 			       "challenged slave %s on VLAN enabled "
-			       "bond %s\n", slave_dev->name,
+			       "bond %s\n", bond_dev->name, slave_dev->name,
 			       bond_dev->name);
 			return -EPERM;
 		} else {
 			printk(KERN_WARNING DRV_NAME
-			       ": Warning: enslaved VLAN challenged "
+			       ": %s: Warning: enslaved VLAN challenged "
 			       "slave %s. Adding VLANs will be blocked as "
 			       "long as %s is part of bond %s\n",
-			       slave_dev->name, slave_dev->name,
+			       bond_dev->name, slave_dev->name, slave_dev->name,
 			       bond_dev->name);
 			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		}
@@ -1706,12 +1732,11 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 
 	if (slave_dev->set_mac_address == NULL) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: The slave device you specified does "
-		       "not support setting the MAC address.\n");
-		printk(KERN_ERR
-		       "Your kernel likely does not support slave devices.\n");
-
-		res = -EOPNOTSUPP;
+		       ": %s: Error: The slave device you specified does "
+		       "not support setting the MAC address. "
+		       "Your kernel likely does not support slave "
+		       "devices.\n", bond_dev->name);
+		res = -EOPNOTSUPP;
 		goto err_undo_flags;
 	}
 
@@ -1827,21 +1852,21 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 			 * the messages for netif_carrier.
 			 */
 			printk(KERN_WARNING DRV_NAME
-			       ": Warning: MII and ETHTOOL support not "
+			       ": %s: Warning: MII and ETHTOOL support not "
 			       "available for interface %s, and "
 			       "arp_interval/arp_ip_target module parameters "
 			       "not specified, thus bonding will not detect "
 			       "link failures! see bonding.txt for details.\n",
-			       slave_dev->name);
+			       bond_dev->name, slave_dev->name);
 		} else if (link_reporting == -1) {
 			/* unable get link status using mii/ethtool */
 			printk(KERN_WARNING DRV_NAME
-			       ": Warning: can't get link status from "
+			       ": %s: Warning: can't get link status from "
 			       "interface %s; the network driver associated "
 			       "with this interface does not support MII or "
 			       "ETHTOOL link status reporting, thus miimon "
 			       "has no effect on this interface.\n",
-			       slave_dev->name);
+			       bond_dev->name, slave_dev->name);
 		}
 	}
 
@@ -1868,15 +1893,15 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	if (bond_update_speed_duplex(new_slave) &&
 	    (new_slave->link != BOND_LINK_DOWN)) {
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: failed to get speed and duplex from %s, "
+		       ": %s: Warning: failed to get speed and duplex from %s, "
 		       "assumed to be 100Mb/sec and Full.\n",
-		       new_slave->dev->name);
+		       bond_dev->name, new_slave->dev->name);
 
 		if (bond->params.mode == BOND_MODE_8023AD) {
-			printk(KERN_WARNING
-			       "Operation of 802.3ad mode requires ETHTOOL "
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
 			       "support in base driver for proper aggregator "
-			       "selection.\n");
+			       "selection.\n", bond_dev->name);
 		}
 	}
 
@@ -1958,6 +1983,10 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 
 	write_unlock_bh(&bond->lock);
 
+	res = bond_create_slave_symlinks(bond_dev, slave_dev);
+	if (res)
+		goto err_unset_master;
+
 	printk(KERN_INFO DRV_NAME
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
 	       bond_dev->name, slave_dev->name,
@@ -1999,7 +2028,7 @@ err_undo_flags:
  * for Bonded connections:
  * The first up interface should be left on and all others downed.
  */
-static int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *slave, *oldcurrent;
@@ -2010,7 +2039,7 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 	if (!(slave_dev->flags & IFF_SLAVE) ||
 	    (slave_dev->master != bond_dev)) {
 		printk(KERN_ERR DRV_NAME
-		       ": Error: %s: cannot release %s.\n",
+		       ": %s: Error: cannot release %s.\n",
 		       bond_dev->name, slave_dev->name);
 		return -EINVAL;
 	}
@@ -2031,11 +2060,12 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 			  ETH_ALEN);
 	if (!mac_addr_differ && (bond->slave_cnt > 1)) {
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: the permanent HWaddr of %s "
+		       ": %s: Warning: the permanent HWaddr of %s "
 		       "- %02X:%02X:%02X:%02X:%02X:%02X - is "
 		       "still in use by %s. Set the HWaddr of "
 		       "%s to a different address to avoid "
 		       "conflicts.\n",
+		       bond_dev->name,
 		       slave_dev->name,
 		       slave->perm_hwaddr[0],
 		       slave->perm_hwaddr[1],
@@ -2111,24 +2141,28 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		} else {
 			printk(KERN_WARNING DRV_NAME
-			       ": Warning: clearing HW address of %s while it "
+			       ": %s: Warning: clearing HW address of %s while it "
 			       "still has VLANs.\n",
-			       bond_dev->name);
+			       bond_dev->name, bond_dev->name);
 			printk(KERN_WARNING DRV_NAME
-			       ": When re-adding slaves, make sure the bond's "
-			       "HW address matches its VLANs'.\n");
+			       ": %s: When re-adding slaves, make sure the bond's "
+			       "HW address matches its VLANs'.\n",
+			       bond_dev->name);
 		}
 	} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
 		   !bond_has_challenged_slaves(bond)) {
 		printk(KERN_INFO DRV_NAME
-		       ": last VLAN challenged slave %s "
+		       ": %s: last VLAN challenged slave %s "
 		       "left bond %s. VLAN blocking is removed\n",
-		       slave_dev->name, bond_dev->name);
+		       bond_dev->name, slave_dev->name, bond_dev->name);
 		bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
 	}
 
 	write_unlock_bh(&bond->lock);
 
+	/* must do this from outside any spinlocks */
+	bond_destroy_slave_symlinks(bond_dev, slave_dev);
+
 	bond_del_vlans_from_slave(bond, slave_dev);
 
 	/* If the mode USES_PRIMARY, then we should only remove its
@@ -2220,6 +2254,7 @@ static int bond_release_all(struct net_device *bond_dev)
 		 */
 		write_unlock_bh(&bond->lock);
 
+		bond_destroy_slave_symlinks(bond_dev, slave_dev);
 		bond_del_vlans_from_slave(bond, slave_dev);
 
 		/* If the mode USES_PRIMARY, then we should only remove its
@@ -2274,12 +2309,13 @@ static int bond_release_all(struct net_device *bond_dev)
 		bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 	} else {
 		printk(KERN_WARNING DRV_NAME
-		       ": Warning: clearing HW address of %s while it "
+		       ": %s: Warning: clearing HW address of %s while it "
 		       "still has VLANs.\n",
-		       bond_dev->name);
+		       bond_dev->name, bond_dev->name);
 		printk(KERN_WARNING DRV_NAME
-		       ": When re-adding slaves, make sure the bond's "
-		       "HW address matches its VLANs'.\n");
+		       ": %s: When re-adding slaves, make sure the bond's "
+		       "HW address matches its VLANs'.\n",
+		       bond_dev->name);
 	}
 
 	printk(KERN_INFO DRV_NAME
@@ -2397,7 +2433,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 /*-------------------------------- Monitoring -------------------------------*/
 
 /* this function is called regularly to monitor each slave's link. */
-static void bond_mii_monitor(struct net_device *bond_dev)
+void bond_mii_monitor(struct net_device *bond_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *slave, *oldcurrent;
@@ -2596,8 +2632,11 @@ static void bond_mii_monitor(struct net_device *bond_dev)
 			break;
 		default:
 			/* Should not happen */
-			printk(KERN_ERR "bonding: Error: %s Illegal value (link=%d)\n",
-			       slave->dev->name, slave->link);
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Error: %s Illegal value (link=%d)\n",
+			       bond_dev->name,
+			       slave->dev->name,
+			       slave->link);
 			goto out;
 		} /* end of switch (slave->link) */
 
@@ -2721,7 +2760,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 	struct flowi fl;
 	struct rtable *rt;
 
-	for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
+	for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+		if (!targets[i])
+			continue;
 		dprintk("basa: target %x\n", targets[i]);
 		if (list_empty(&bond->vlan_list)) {
 			dprintk("basa: empty vlan: arp_send\n");
@@ -2825,7 +2866,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
  * arp is transmitted to generate traffic. see activebackup_arp_monitor for
  * arp monitoring in active backup mode.
  */
-static void bond_loadbalance_arp_mon(struct net_device *bond_dev)
+void bond_loadbalance_arp_mon(struct net_device *bond_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *slave, *oldcurrent;
@@ -2963,7 +3004,7 @@ out:
 * may have received.
 * see loadbalance_arp_monitor for arp monitoring in load balancing mode
 */
-static void bond_activebackup_arp_mon(struct net_device *bond_dev)
+void bond_activebackup_arp_mon(struct net_device *bond_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *slave;
@@ -3249,6 +3290,8 @@ static void bond_info_show_master(struct seq_file *seq)
 {
 	struct bonding *bond = seq->private;
 	struct slave *curr;
+	int i;
+	u32 target;
 
 	read_lock(&bond->curr_slave_lock);
 	curr = bond->curr_active_slave;
@@ -3257,10 +3300,17 @@ static void bond_info_show_master(struct seq_file *seq)
 	seq_printf(seq, "Bonding Mode: %s\n",
 		   bond_mode_name(bond->params.mode));
 
+	if (bond->params.mode == BOND_MODE_XOR ||
+	    bond->params.mode == BOND_MODE_8023AD) {
+		seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
+			xmit_hashtype_tbl[bond->params.xmit_policy].modename,
+			bond->params.xmit_policy);
+	}
+
 	if (USES_PRIMARY(bond->params.mode)) {
 		seq_printf(seq, "Primary Slave: %s\n",
-			   (bond->params.primary[0]) ?
-			   bond->params.primary : "None");
+			   (bond->primary_slave) ?
+			   bond->primary_slave->dev->name : "None");
 
 		seq_printf(seq, "Currently Active Slave: %s\n",
 			   (curr) ? curr->dev->name : "None");
@@ -3273,6 +3323,27 @@ static void bond_info_show_master(struct seq_file *seq)
3273 seq_printf(seq, "Down Delay (ms): %d\n", 3323 seq_printf(seq, "Down Delay (ms): %d\n",
3274 bond->params.downdelay * bond->params.miimon); 3324 bond->params.downdelay * bond->params.miimon);
3275 3325
3326
3327 /* ARP information */
3328 if(bond->params.arp_interval > 0) {
3329 int printed=0;
3330 seq_printf(seq, "ARP Polling Interval (ms): %d\n",
3331 bond->params.arp_interval);
3332
3333 seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
3334
3335 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
3336 if (!bond->params.arp_targets[i])
3337 continue;
3338 if (printed)
3339 seq_printf(seq, ",");
3340 target = ntohl(bond->params.arp_targets[i]);
3341 seq_printf(seq, " %d.%d.%d.%d", HIPQUAD(target));
3342 printed = 1;
3343 }
3344 seq_printf(seq, "\n");
3345 }
3346
3276 if (bond->params.mode == BOND_MODE_8023AD) { 3347 if (bond->params.mode == BOND_MODE_8023AD) {
3277 struct ad_info ad_info; 3348 struct ad_info ad_info;
3278 3349
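For illustration only: given the seq_printf calls added above, and assuming arp_interval is 1000 with a single target of 10.0.0.1, the new section of /proc/net/bonding/<bond> would read approximately:

ARP Polling Interval (ms): 1000
ARP IP target/s (n.n.n.n form): 10.0.0.1
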
@@ -3478,7 +3549,10 @@ static int bond_event_changename(struct bonding *bond)
3478 bond_remove_proc_entry(bond); 3549 bond_remove_proc_entry(bond);
3479 bond_create_proc_entry(bond); 3550 bond_create_proc_entry(bond);
3480#endif 3551#endif
3481 3552 down_write(&(bonding_rwsem));
3553 bond_destroy_sysfs_entry(bond);
3554 bond_create_sysfs_entry(bond);
3555 up_write(&(bonding_rwsem));
3482 return NOTIFY_DONE; 3556 return NOTIFY_DONE;
3483} 3557}
3484 3558
@@ -3955,6 +4029,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3955 return -EPERM; 4029 return -EPERM;
3956 } 4030 }
3957 4031
4032 down_write(&(bonding_rwsem));
3958 slave_dev = dev_get_by_name(ifr->ifr_slave); 4033 slave_dev = dev_get_by_name(ifr->ifr_slave);
3959 4034
3960 dprintk("slave_dev=%p: \n", slave_dev); 4035 dprintk("slave_dev=%p: \n", slave_dev);
@@ -3987,6 +4062,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3987 dev_put(slave_dev); 4062 dev_put(slave_dev);
3988 } 4063 }
3989 4064
4065 up_write(&(bonding_rwsem));
3990 return res; 4066 return res;
3991} 4067}
3992 4068
@@ -4071,6 +4147,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4071 bond_for_each_slave(bond, slave, i) { 4147 bond_for_each_slave(bond, slave, i) {
4072 dprintk("s %p s->p %p c_m %p\n", slave, 4148 dprintk("s %p s->p %p c_m %p\n", slave,
4073 slave->prev, slave->dev->change_mtu); 4149 slave->prev, slave->dev->change_mtu);
4150
4074 res = dev_set_mtu(slave->dev, new_mtu); 4151 res = dev_set_mtu(slave->dev, new_mtu);
4075 4152
4076 if (res) { 4153 if (res) {
@@ -4397,8 +4474,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4397 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 4474 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4398 if (!skb2) { 4475 if (!skb2) {
4399 printk(KERN_ERR DRV_NAME 4476 printk(KERN_ERR DRV_NAME
4400 ": Error: bond_xmit_broadcast(): " 4477 ": %s: Error: bond_xmit_broadcast(): "
4401 "skb_clone() failed\n"); 4478 "skb_clone() failed\n",
4479 bond_dev->name);
4402 continue; 4480 continue;
4403 } 4481 }
4404 4482
@@ -4431,7 +4509,7 @@ out:
4431/* 4509/*
4432 * set bond mode specific net device operations 4510 * set bond mode specific net device operations
4433 */ 4511 */
4434static inline void bond_set_mode_ops(struct bonding *bond, int mode) 4512void bond_set_mode_ops(struct bonding *bond, int mode)
4435{ 4513{
4436 struct net_device *bond_dev = bond->dev; 4514 struct net_device *bond_dev = bond->dev;
4437 4515
@@ -4467,7 +4545,8 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
4467 default: 4545 default:
4468 /* Should never happen, mode already checked */ 4546 /* Should never happen, mode already checked */
4469 printk(KERN_ERR DRV_NAME 4547 printk(KERN_ERR DRV_NAME
4470 ": Error: Unknown bonding mode %d\n", 4548 ": %s: Error: Unknown bonding mode %d\n",
4549 bond_dev->name,
4471 mode); 4550 mode);
4472 break; 4551 break;
4473 } 4552 }
@@ -4491,7 +4570,7 @@ static struct ethtool_ops bond_ethtool_ops = {
4491 * Does not allocate but creates a /proc entry. 4570 * Does not allocate but creates a /proc entry.
4492 * Allowed to fail. 4571 * Allowed to fail.
4493 */ 4572 */
4494static int __init bond_init(struct net_device *bond_dev, struct bond_params *params) 4573static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4495{ 4574{
4496 struct bonding *bond = bond_dev->priv; 4575 struct bonding *bond = bond_dev->priv;
4497 4576
@@ -4565,7 +4644,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
4565/* De-initialize device specific data. 4644/* De-initialize device specific data.
4566 * Caller must hold rtnl_lock. 4645 * Caller must hold rtnl_lock.
4567 */ 4646 */
4568static inline void bond_deinit(struct net_device *bond_dev) 4647void bond_deinit(struct net_device *bond_dev)
4569{ 4648{
4570 struct bonding *bond = bond_dev->priv; 4649 struct bonding *bond = bond_dev->priv;
4571 4650
@@ -4601,7 +4680,7 @@ static void bond_free_all(void)
4601 * Convert string input module parms. Accept either the 4680 * Convert string input module parms. Accept either the
4602 * number of the mode or its string name. 4681 * number of the mode or its string name.
4603 */ 4682 */
4604static inline int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl) 4683int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl)
4605{ 4684{
4606 int i; 4685 int i;
4607 4686
@@ -4670,7 +4749,7 @@ static int bond_check_params(struct bond_params *params)
4670 if (max_bonds < 1 || max_bonds > INT_MAX) { 4749 if (max_bonds < 1 || max_bonds > INT_MAX) {
4671 printk(KERN_WARNING DRV_NAME 4750 printk(KERN_WARNING DRV_NAME
4672 ": Warning: max_bonds (%d) not in range %d-%d, so it " 4751 ": Warning: max_bonds (%d) not in range %d-%d, so it "
4673 "was reset to BOND_DEFAULT_MAX_BONDS (%d)", 4752 "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4674 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS); 4753 max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4675 max_bonds = BOND_DEFAULT_MAX_BONDS; 4754 max_bonds = BOND_DEFAULT_MAX_BONDS;
4676 } 4755 }
@@ -4881,81 +4960,96 @@ static int bond_check_params(struct bond_params *params)
4881 return 0; 4960 return 0;
4882} 4961}
4883 4962
4963/* Create a new bond based on the specified name and bonding parameters.
4964 * Caller must NOT hold rtnl_lock; we need to release it here before we
4965 * set up our sysfs entries.
4966 */
4967int bond_create(char *name, struct bond_params *params, struct bonding **newbond)
4968{
4969 struct net_device *bond_dev;
4970 int res;
4971
4972 rtnl_lock();
4973 bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup);
4974 if (!bond_dev) {
4975 printk(KERN_ERR DRV_NAME
4976 ": %s: eek! can't alloc netdev!\n",
4977 name);
4978 res = -ENOMEM;
4979 goto out_rtnl;
4980 }
4981
4982 /* bond_init() must be called after dev_alloc_name() (for the
4983 * /proc files), but before register_netdevice(), because we
4984 * need to set function pointers.
4985 */
4986
4987 res = bond_init(bond_dev, params);
4988 if (res < 0) {
4989 goto out_netdev;
4990 }
4991
4992 SET_MODULE_OWNER(bond_dev);
4993
4994 res = register_netdevice(bond_dev);
4995 if (res < 0) {
4996 goto out_bond;
4997 }
4998 if (newbond)
4999 *newbond = bond_dev->priv;
5000
5001 rtnl_unlock(); /* allows sysfs registration of net device */
5002 res = bond_create_sysfs_entry(bond_dev->priv);
5003 goto done;
5004out_bond:
5005 bond_deinit(bond_dev);
5006out_netdev:
5007 free_netdev(bond_dev);
5008out_rtnl:
5009 rtnl_unlock();
5010done:
5011 return res;
5012}
5013
4884static int __init bonding_init(void) 5014static int __init bonding_init(void)
4885{ 5015{
4886 struct bond_params params;
4887 int i; 5016 int i;
4888 int res; 5017 int res;
5018 char new_bond_name[8]; /* Enough room for 999 bonds at init. */
4889 5019
4890 printk(KERN_INFO "%s", version); 5020 printk(KERN_INFO "%s", version);
4891 5021
4892 res = bond_check_params(&params); 5022 res = bond_check_params(&bonding_defaults);
4893 if (res) { 5023 if (res) {
4894 return res; 5024 goto out;
4895 } 5025 }
4896 5026
4897 rtnl_lock();
4898
4899#ifdef CONFIG_PROC_FS 5027#ifdef CONFIG_PROC_FS
4900 bond_create_proc_dir(); 5028 bond_create_proc_dir();
4901#endif 5029#endif
4902
4903 for (i = 0; i < max_bonds; i++) { 5030 for (i = 0; i < max_bonds; i++) {
4904 struct net_device *bond_dev; 5031 sprintf(new_bond_name, "bond%d",i);
4905 5032 res = bond_create(new_bond_name,&bonding_defaults, NULL);
4906 bond_dev = alloc_netdev(sizeof(struct bonding), "", ether_setup); 5033 if (res)
4907 if (!bond_dev) { 5034 goto err;
4908 res = -ENOMEM;
4909 goto out_err;
4910 }
4911
4912 res = dev_alloc_name(bond_dev, "bond%d");
4913 if (res < 0) {
4914 free_netdev(bond_dev);
4915 goto out_err;
4916 }
4917
4918 /* bond_init() must be called after dev_alloc_name() (for the
4919 * /proc files), but before register_netdevice(), because we
4920 * need to set function pointers.
4921 */
4922 res = bond_init(bond_dev, &params);
4923 if (res < 0) {
4924 free_netdev(bond_dev);
4925 goto out_err;
4926 }
4927
4928 SET_MODULE_OWNER(bond_dev);
4929
4930 res = register_netdevice(bond_dev);
4931 if (res < 0) {
4932 bond_deinit(bond_dev);
4933 free_netdev(bond_dev);
4934 goto out_err;
4935 }
4936 } 5035 }
4937 5036
4938 rtnl_unlock(); 5037 res = bond_create_sysfs();
5038 if (res)
5039 goto err;
5040
4939 register_netdevice_notifier(&bond_netdev_notifier); 5041 register_netdevice_notifier(&bond_netdev_notifier);
4940 register_inetaddr_notifier(&bond_inetaddr_notifier); 5042 register_inetaddr_notifier(&bond_inetaddr_notifier);
4941 5043
4942 return 0; 5044 goto out;
4943 5045err:
4944out_err:
4945 /*
4946 * rtnl_unlock() will run netdev_run_todo(), putting the
4947 * thus-far-registered bonding devices into a state which
4948 * unregigister_netdevice() will accept
4949 */
4950 rtnl_unlock();
4951 rtnl_lock(); 5046 rtnl_lock();
4952
4953 /* free and unregister all bonds that were successfully added */
4954 bond_free_all(); 5047 bond_free_all();
4955 5048 bond_destroy_sysfs();
4956 rtnl_unlock(); 5049 rtnl_unlock();
4957 5050out:
4958 return res; 5051 return res;
5052
4959} 5053}
4960 5054
4961static void __exit bonding_exit(void) 5055static void __exit bonding_exit(void)
@@ -4965,6 +5059,7 @@ static void __exit bonding_exit(void)
4965 5059
4966 rtnl_lock(); 5060 rtnl_lock();
4967 bond_free_all(); 5061 bond_free_all();
5062 bond_destroy_sysfs();
4968 rtnl_unlock(); 5063 rtnl_unlock();
4969} 5064}
4970 5065
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
new file mode 100644
index 000000000000..c5f1c52863cb
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -0,0 +1,1399 @@
1
2/*
3 * Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 *
23 * Changes:
24 *
25 * 2004/12/12 - Mitch Williams <mitch.a.williams at intel dot com>
26 * - Initial creation of sysfs interface.
27 *
28 * 2005/06/22 - Radheka Godse <radheka.godse at intel dot com>
29 * - Added ifenslave -c type functionality to sysfs
30 * - Added sysfs files for attributes such as MII Status and
31 * 802.3ad aggregator that are displayed in /proc
32 * - Added "name value" format to sysfs "mode" and
 33 *	  "lacp_rate", e.g., "active-backup 1" or "slow 0", for
34 * consistency and ease of script parsing
35 * - Fixed reversal of octets in arp_ip_targets via sysfs
36 * - sysfs support to handle bond interface re-naming
37 * - Moved all sysfs entries into /sys/class/net instead of
 38 *	  using a standalone subsystem.
39 * - Added sysfs symlinks between masters and slaves
40 * - Corrected bugs in sysfs unload path when creating bonds
41 * with existing interface names.
42 * - Removed redundant sysfs stat file since it duplicates slave info
43 * from the proc file
44 * - Fixed errors in sysfs show/store arp targets.
45 * - For consistency with ifenslave, instead of exiting
46 * with an error, updated bonding sysfs to
47 * close and attempt to enslave an up adapter.
48 * - Fixed NULL dereference when adding a slave interface
49 * that does not exist.
50 * - Added checks in sysfs bonding to reject invalid ip addresses
51 * - Synch up with post linux-2.6.12 bonding changes
52 * - Created sysfs bond attrib for xmit_hash_policy
53 *
54 * 2005/09/19 - Mitch Williams <mitch.a.williams at intel dot com>
55 * - Changed semantics of multi-item files to be command-based
56 * instead of list-based.
57 * - Changed ARP target handler to use in_aton instead of sscanf
58 * - Style changes.
59 * 2005/09/27 - Mitch Williams <mitch.a.williams at intel dot com>
60 * - Made line endings consistent.
61 * - Removed "none" from primary output - just put blank instead
62 * - Fixed bug with long interface names
63 */
64#include <linux/config.h>
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/sched.h>
68#include <linux/device.h>
69#include <linux/sysdev.h>
70#include <linux/fs.h>
71#include <linux/types.h>
72#include <linux/string.h>
73#include <linux/netdevice.h>
74#include <linux/inetdevice.h>
75#include <linux/in.h>
76#include <linux/sysfs.h>
77#include <linux/string.h>
78#include <linux/ctype.h>
79#include <linux/inet.h>
80#include <linux/rtnetlink.h>
81
82/* #define BONDING_DEBUG 1 */
83#include "bonding.h"
84#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
85#define to_net_dev(class) container_of(class, struct net_device, class_dev)
86#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv))
87
88/*---------------------------- Declarations -------------------------------*/
89
90
91extern struct list_head bond_dev_list;
92extern struct bond_params bonding_defaults;
93extern struct bond_parm_tbl bond_mode_tbl[];
94extern struct bond_parm_tbl bond_lacp_tbl[];
95extern struct bond_parm_tbl xmit_hashtype_tbl[];
96
97static int expected_refcount = -1;
98static struct class *netdev_class;
99/*--------------------------- Data Structures -----------------------------*/
100
101/* Bonding sysfs lock. Why can't we just use the subsystem lock?
102 * Because kobject_register tries to acquire the subsystem lock. If
103 * we already hold the lock (which we would if the user was creating
104 * a new bond through the sysfs interface), we deadlock.
105 * This lock is only needed when deleting a bond - we need to make sure
106 * that we don't collide with an ongoing ioctl.
107 */
108
109struct rw_semaphore bonding_rwsem;
110
111
112
113
114/*------------------------------ Functions --------------------------------*/
115
116/*
117 * "show" function for the bond_masters attribute.
118 * The class parameter is ignored.
119 */
120static ssize_t bonding_show_bonds(struct class *cls, char *buffer)
121{
122 int res = 0;
123 struct bonding *bond;
124
125 down_read(&(bonding_rwsem));
126
127 list_for_each_entry(bond, &bond_dev_list, bond_list) {
128 if (res > (PAGE_SIZE - IFNAMSIZ)) {
129 /* not enough space for another interface name */
130 if ((PAGE_SIZE - res) > 10)
131 res = PAGE_SIZE - 10;
132 res += sprintf(buffer + res, "++more++");
133 break;
134 }
135 res += sprintf(buffer + res, "%s ",
136 bond->dev->name);
137 }
138 res += sprintf(buffer + res, "\n");
139 res++;
140 up_read(&(bonding_rwsem));
141 return res;
142}
143
144/*
145 * "store" function for the bond_masters attribute. This is what
146 * creates and deletes entire bonds.
147 *
148 * The class parameter is ignored.
149 *
150 */
151
152static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t count)
153{
154 char command[IFNAMSIZ + 1] = {0, };
155 char *ifname;
156 int res = count;
157 struct bonding *bond;
158 struct bonding *nxt;
159
160 down_write(&(bonding_rwsem));
161 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
162 ifname = command + 1;
163 if ((strlen(command) <= 1) ||
164 !dev_valid_name(ifname))
165 goto err_no_cmd;
166
167 if (command[0] == '+') {
168
169 /* Check to see if the bond already exists. */
170 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
171 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
172 printk(KERN_ERR DRV_NAME
173 ": cannot add bond %s; it already exists\n",
174 ifname);
175 res = -EPERM;
176 goto out;
177 }
178
179 printk(KERN_INFO DRV_NAME
180 ": %s is being created...\n", ifname);
181 if (bond_create(ifname, &bonding_defaults, &bond)) {
182 printk(KERN_INFO DRV_NAME
183 ": %s interface already exists. Bond creation failed.\n",
184 ifname);
185 res = -EPERM;
186 }
187 goto out;
188 }
189
190 if (command[0] == '-') {
191 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
192 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
193 rtnl_lock();
194 /* check the ref count on the bond's kobject.
195 * If it's > expected, then there's a file open,
196 * and we have to fail.
197 */
198 if (atomic_read(&bond->dev->class_dev.kobj.kref.refcount)
199 > expected_refcount){
200 rtnl_unlock();
201 printk(KERN_INFO DRV_NAME
202 				": Unable to remove bond %s due to open references.\n",
203 ifname);
204 res = -EPERM;
205 goto out;
206 }
207 printk(KERN_INFO DRV_NAME
208 ": %s is being deleted...\n",
209 bond->dev->name);
210 unregister_netdevice(bond->dev);
211 bond_deinit(bond->dev);
212 bond_destroy_sysfs_entry(bond);
213 rtnl_unlock();
214 goto out;
215 }
216
217 printk(KERN_ERR DRV_NAME
218 ": unable to delete non-existent bond %s\n", ifname);
219 res = -ENODEV;
220 goto out;
221 }
222
223err_no_cmd:
224 printk(KERN_ERR DRV_NAME
225 ": no command found in bonding_masters. Use +ifname or -ifname.\n");
226 res = -EPERM;
227
228 /* Always return either count or an error. If you return 0, you'll
229 * get called forever, which is bad.
230 */
231out:
232 up_write(&(bonding_rwsem));
233 return res;
234}
235/* class attribute for bond_masters file. This ends up in /sys/class/net */
236static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
237 bonding_show_bonds, bonding_store_bonds);
238
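As a usage illustration of the command-based interface above, here is a minimal user-space sketch (the /sys/class/net/bonding_masters path follows from the class attribute this patch registers; the bond name "bond1" and the lack of error reporting are assumptions of the example, not part of the patch):

/* user-space sketch: drive bonding_masters with "+name" / "-name" commands */
#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {	/* "+bond1" creates, "-bond1" deletes */
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* create an example bond, then tear it down again */
	sysfs_write("/sys/class/net/bonding_masters", "+bond1");
	sysfs_write("/sys/class/net/bonding_masters", "-bond1");
	return 0;
}
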
239int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave)
240{
241 char linkname[IFNAMSIZ+7];
242 int ret = 0;
243
244 /* first, create a link from the slave back to the master */
245 ret = sysfs_create_link(&(slave->class_dev.kobj), &(master->class_dev.kobj),
246 "master");
247 if (ret)
248 return ret;
249 /* next, create a link from the master to the slave */
250 sprintf(linkname,"slave_%s",slave->name);
251 ret = sysfs_create_link(&(master->class_dev.kobj), &(slave->class_dev.kobj),
252 linkname);
253 return ret;
254
255}
256
257void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave)
258{
259 char linkname[IFNAMSIZ+7];
260
261 sysfs_remove_link(&(slave->class_dev.kobj), "master");
262 sprintf(linkname,"slave_%s",slave->name);
263 sysfs_remove_link(&(master->class_dev.kobj), linkname);
264}
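As an example of the naming scheme above (interface names are illustrative): enslaving eth0 to bond0 leaves a "master" link in /sys/class/net/eth0/ pointing at the bond's directory, and a "slave_eth0" link in /sys/class/net/bond0/ pointing back at the slave; the link names come from the sprintf above, while the exact link targets are whatever sysfs_create_link derives from the two kobjects.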
265
266
267/*
268 * Show the slaves in the current bond.
269 */
270static ssize_t bonding_show_slaves(struct class_device *cd, char *buf)
271{
272 struct slave *slave;
273 int i, res = 0;
274 struct bonding *bond = to_bond(cd);
275
276 read_lock_bh(&bond->lock);
277 bond_for_each_slave(bond, slave, i) {
278 if (res > (PAGE_SIZE - IFNAMSIZ)) {
279 /* not enough space for another interface name */
280 if ((PAGE_SIZE - res) > 10)
281 res = PAGE_SIZE - 10;
282 res += sprintf(buf + res, "++more++");
283 break;
284 }
285 res += sprintf(buf + res, "%s ", slave->dev->name);
286 }
287 read_unlock_bh(&bond->lock);
288 res += sprintf(buf + res, "\n");
289 res++;
290 return res;
291}
292
293/*
294 * Set the slaves in the current bond. The bond interface must be
295 * up for this to succeed.
296 * This function is largely the same flow as bonding_update_bonds().
297 */
298static ssize_t bonding_store_slaves(struct class_device *cd, const char *buffer, size_t count)
299{
300 char command[IFNAMSIZ + 1] = { 0, };
301 char *ifname;
302 int i, res, found, ret = count;
303 struct slave *slave;
304 struct net_device *dev = 0;
305 struct bonding *bond = to_bond(cd);
306
307 /* Quick sanity check -- is the bond interface up? */
308 if (!(bond->dev->flags & IFF_UP)) {
309 printk(KERN_ERR DRV_NAME
310 ": %s: Unable to update slaves because interface is down.\n",
311 bond->dev->name);
312 ret = -EPERM;
313 goto out;
314 }
315
316 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
317
318 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
319 ifname = command + 1;
320 if ((strlen(command) <= 1) ||
321 !dev_valid_name(ifname))
322 goto err_no_cmd;
323
324 if (command[0] == '+') {
325
326 /* Got a slave name in ifname. Is it already in the list? */
327 found = 0;
328 read_lock_bh(&bond->lock);
329 bond_for_each_slave(bond, slave, i)
330 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
331 printk(KERN_ERR DRV_NAME
332 ": %s: Interface %s is already enslaved!\n",
333 bond->dev->name, ifname);
334 ret = -EPERM;
335 read_unlock_bh(&bond->lock);
336 goto out;
337 }
338
339 read_unlock_bh(&bond->lock);
340 printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
341 bond->dev->name, ifname);
342 dev = dev_get_by_name(ifname);
343 if (!dev) {
344 printk(KERN_INFO DRV_NAME
345 ": %s: Interface %s does not exist!\n",
346 bond->dev->name, ifname);
347 ret = -EPERM;
348 goto out;
349 }
350 else
351 dev_put(dev);
352
353 if (dev->flags & IFF_UP) {
354 printk(KERN_ERR DRV_NAME
355 ": %s: Error: Unable to enslave %s "
356 "because it is already up.\n",
357 bond->dev->name, dev->name);
358 ret = -EPERM;
359 goto out;
360 }
361 /* If this is the first slave, then we need to set
362 the master's hardware address to be the same as the
363 slave's. */
364 if (!(*((u32 *) & (bond->dev->dev_addr[0])))) {
365 memcpy(bond->dev->dev_addr, dev->dev_addr,
366 dev->addr_len);
367 }
368
369 /* Set the slave's MTU to match the bond */
370 if (dev->mtu != bond->dev->mtu) {
371 if (dev->change_mtu) {
372 res = dev->change_mtu(dev,
373 bond->dev->mtu);
374 if (res) {
375 ret = res;
376 goto out;
377 }
378 } else {
379 dev->mtu = bond->dev->mtu;
380 }
381 }
382 rtnl_lock();
383 res = bond_enslave(bond->dev, dev);
384 rtnl_unlock();
385 if (res) {
386 ret = res;
387 }
388 goto out;
389 }
390
391 if (command[0] == '-') {
392 dev = NULL;
393 bond_for_each_slave(bond, slave, i)
394 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
395 dev = slave->dev;
396 break;
397 }
398 if (dev) {
399 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
400 bond->dev->name, dev->name);
401 rtnl_lock();
402 res = bond_release(bond->dev, dev);
403 rtnl_unlock();
404 if (res) {
405 ret = res;
406 goto out;
407 }
408 /* set the slave MTU to the default */
409 if (dev->change_mtu) {
410 dev->change_mtu(dev, 1500);
411 } else {
412 dev->mtu = 1500;
413 }
414 }
415 else {
416 printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n",
417 ifname, bond->dev->name);
418 ret = -ENODEV;
419 }
420 goto out;
421 }
422
423err_no_cmd:
424 printk(KERN_ERR DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
425 ret = -EPERM;
426
427out:
428 return ret;
429}
430
431static CLASS_DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
432
433/*
434 * Show and set the bonding mode. The bond interface must be down to
435 * change the mode.
436 */
437static ssize_t bonding_show_mode(struct class_device *cd, char *buf)
438{
439 struct bonding *bond = to_bond(cd);
440
441 return sprintf(buf, "%s %d\n",
442 bond_mode_tbl[bond->params.mode].modename,
443 bond->params.mode) + 1;
444}
445
446static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size_t count)
447{
448 int new_value, ret = count;
449 struct bonding *bond = to_bond(cd);
450
451 if (bond->dev->flags & IFF_UP) {
452 printk(KERN_ERR DRV_NAME
453 ": unable to update mode of %s because interface is up.\n",
454 bond->dev->name);
455 ret = -EPERM;
456 goto out;
457 }
458
459 new_value = bond_parse_parm((char *)buf, bond_mode_tbl);
460 if (new_value < 0) {
461 printk(KERN_ERR DRV_NAME
462 ": %s: Ignoring invalid mode value %.*s.\n",
463 bond->dev->name,
464 (int)strlen(buf) - 1, buf);
465 ret = -EINVAL;
466 goto out;
467 } else {
468 bond->params.mode = new_value;
469 bond_set_mode_ops(bond, bond->params.mode);
470 printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n",
471 bond->dev->name, bond_mode_tbl[new_value].modename, new_value);
472 }
473out:
474 return ret;
475}
476static CLASS_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
477
478/*
479 * Show and set the bonding transmit hash method. The bond interface must be down to
480 * change the xmit hash policy.
481 */
482static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf)
483{
484 int count;
485 struct bonding *bond = to_bond(cd);
486
487 if ((bond->params.mode != BOND_MODE_XOR) &&
488 (bond->params.mode != BOND_MODE_8023AD)) {
489 // Not Applicable
490 count = sprintf(buf, "NA\n") + 1;
491 } else {
492 count = sprintf(buf, "%s %d\n",
493 xmit_hashtype_tbl[bond->params.xmit_policy].modename,
494 bond->params.xmit_policy) + 1;
495 }
496
497 return count;
498}
499
500static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf, size_t count)
501{
502 int new_value, ret = count;
503 struct bonding *bond = to_bond(cd);
504
505 if (bond->dev->flags & IFF_UP) {
506 printk(KERN_ERR DRV_NAME
507 		       ": %s: Interface is up. Unable to update xmit policy.\n",
508 bond->dev->name);
509 ret = -EPERM;
510 goto out;
511 }
512
513 if ((bond->params.mode != BOND_MODE_XOR) &&
514 (bond->params.mode != BOND_MODE_8023AD)) {
515 printk(KERN_ERR DRV_NAME
516 		       ": %s: Transmit hash policy is irrelevant in this mode.\n",
517 bond->dev->name);
518 ret = -EPERM;
519 goto out;
520 }
521
522 new_value = bond_parse_parm((char *)buf, xmit_hashtype_tbl);
523 if (new_value < 0) {
524 printk(KERN_ERR DRV_NAME
525 ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
526 bond->dev->name,
527 (int)strlen(buf) - 1, buf);
528 ret = -EINVAL;
529 goto out;
530 } else {
531 bond->params.xmit_policy = new_value;
532 bond_set_mode_ops(bond, bond->params.mode);
533 printk(KERN_INFO DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
534 bond->dev->name, xmit_hashtype_tbl[new_value].modename, new_value);
535 }
536out:
537 return ret;
538}
539static CLASS_DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
540
541/*
542 * Show and set the arp timer interval. There are two tricky bits
543 * here. First, if ARP monitoring is activated, then we must disable
544 * MII monitoring. Second, if the ARP timer isn't running, we must
545 * start it.
546 */
547static ssize_t bonding_show_arp_interval(struct class_device *cd, char *buf)
548{
549 struct bonding *bond = to_bond(cd);
550
551 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1;
552}
553
554static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *buf, size_t count)
555{
556 int new_value, ret = count;
557 struct bonding *bond = to_bond(cd);
558
559 if (sscanf(buf, "%d", &new_value) != 1) {
560 printk(KERN_ERR DRV_NAME
561 ": %s: no arp_interval value specified.\n",
562 bond->dev->name);
563 ret = -EINVAL;
564 goto out;
565 }
566 if (new_value < 0) {
567 printk(KERN_ERR DRV_NAME
568 ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
569 bond->dev->name, new_value, INT_MAX);
570 ret = -EINVAL;
571 goto out;
572 }
573
574 printk(KERN_INFO DRV_NAME
575 ": %s: Setting ARP monitoring interval to %d.\n",
576 bond->dev->name, new_value);
577 bond->params.arp_interval = new_value;
578 if (bond->params.miimon) {
579 printk(KERN_INFO DRV_NAME
580 ": %s: ARP monitoring cannot be used with MII monitoring. "
581 "%s Disabling MII monitoring.\n",
582 bond->dev->name, bond->dev->name);
583 bond->params.miimon = 0;
584 /* Kill MII timer, else it brings bond's link down */
585 		if (bond->mii_timer.function) {
586 printk(KERN_INFO DRV_NAME
587 ": %s: Kill MII timer, else it brings bond's link down...\n",
588 bond->dev->name);
589 del_timer_sync(&bond->mii_timer);
590 }
591 }
592 if (!bond->params.arp_targets[0]) {
593 printk(KERN_INFO DRV_NAME
594 ": %s: ARP monitoring has been set up, "
595 "but no ARP targets have been specified.\n",
596 bond->dev->name);
597 }
598 if (bond->dev->flags & IFF_UP) {
599 /* If the interface is up, we may need to fire off
600 * the ARP timer. If the interface is down, the
601 * timer will get fired off when the open function
602 * is called.
603 */
604 if (bond->arp_timer.function) {
605 /* The timer's already set up, so fire it off */
606 mod_timer(&bond->arp_timer, jiffies + 1);
607 } else {
608 /* Set up the timer. */
609 init_timer(&bond->arp_timer);
610 bond->arp_timer.expires = jiffies + 1;
611 bond->arp_timer.data =
612 (unsigned long) bond->dev;
613 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
614 bond->arp_timer.function =
615 (void *)
616 &bond_activebackup_arp_mon;
617 } else {
618 bond->arp_timer.function =
619 (void *)
620 &bond_loadbalance_arp_mon;
621 }
622 add_timer(&bond->arp_timer);
623 }
624 }
625
626out:
627 return ret;
628}
629static CLASS_DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
630
631/*
632 * Show and set the arp targets.
633 */
634static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf)
635{
636 int i, res = 0;
637 struct bonding *bond = to_bond(cd);
638
639 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
640 if (bond->params.arp_targets[i])
641 res += sprintf(buf + res, "%u.%u.%u.%u ",
642 NIPQUAD(bond->params.arp_targets[i]));
643 }
644 if (res)
645 res--; /* eat the leftover space */
646 res += sprintf(buf + res, "\n");
647 res++;
648 return res;
649}
650
651static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *buf, size_t count)
652{
653 u32 newtarget;
654 int i = 0, done = 0, ret = count;
655 struct bonding *bond = to_bond(cd);
656 u32 *targets;
657
658 targets = bond->params.arp_targets;
659 newtarget = in_aton(buf + 1);
660 /* look for adds */
661 if (buf[0] == '+') {
662 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
663 printk(KERN_ERR DRV_NAME
664 ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n",
665 bond->dev->name, NIPQUAD(newtarget));
666 ret = -EINVAL;
667 goto out;
668 }
669 /* look for an empty slot to put the target in, and check for dupes */
670 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
671 if (targets[i] == newtarget) { /* duplicate */
672 printk(KERN_ERR DRV_NAME
673 ": %s: ARP target %u.%u.%u.%u is already present\n",
674 bond->dev->name, NIPQUAD(newtarget));
675 if (done)
676 targets[i] = 0;
677 ret = -EINVAL;
678 goto out;
679 }
680 if (targets[i] == 0 && !done) {
681 printk(KERN_INFO DRV_NAME
682 ": %s: adding ARP target %d.%d.%d.%d.\n",
683 bond->dev->name, NIPQUAD(newtarget));
684 done = 1;
685 targets[i] = newtarget;
686 }
687 }
688 if (!done) {
689 printk(KERN_ERR DRV_NAME
690 ": %s: ARP target table is full!\n",
691 bond->dev->name);
692 ret = -EINVAL;
693 goto out;
694 }
695
696 }
697 else if (buf[0] == '-') {
698 if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) {
699 printk(KERN_ERR DRV_NAME
700 ": %s: invalid ARP target %d.%d.%d.%d specified for removal\n",
701 bond->dev->name, NIPQUAD(newtarget));
702 ret = -EINVAL;
703 goto out;
704 }
705
706 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
707 if (targets[i] == newtarget) {
708 printk(KERN_INFO DRV_NAME
709 ": %s: removing ARP target %d.%d.%d.%d.\n",
710 bond->dev->name, NIPQUAD(newtarget));
711 targets[i] = 0;
712 done = 1;
713 }
714 }
715 if (!done) {
716 printk(KERN_INFO DRV_NAME
717 ": %s: unable to remove nonexistent ARP target %d.%d.%d.%d.\n",
718 bond->dev->name, NIPQUAD(newtarget));
719 ret = -EINVAL;
720 goto out;
721 }
722 }
723 else {
724 printk(KERN_ERR DRV_NAME ": no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
725 bond->dev->name);
726 ret = -EPERM;
727 goto out;
728 }
729
730out:
731 return ret;
732}
733static CLASS_DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
734
735/*
736 * Show and set the up and down delays. These must be multiples of the
737 * MII monitoring value, and are stored internally as the multiplier.
738 * Thus, we must translate to MS for the real world.
739 */
740static ssize_t bonding_show_downdelay(struct class_device *cd, char *buf)
741{
742 struct bonding *bond = to_bond(cd);
743
744 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1;
745}
746
747static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf, size_t count)
748{
749 int new_value, ret = count;
750 struct bonding *bond = to_bond(cd);
751
752 if (!(bond->params.miimon)) {
753 printk(KERN_ERR DRV_NAME
754 ": %s: Unable to set down delay as MII monitoring is disabled\n",
755 bond->dev->name);
756 ret = -EPERM;
757 goto out;
758 }
759
760 if (sscanf(buf, "%d", &new_value) != 1) {
761 printk(KERN_ERR DRV_NAME
762 ": %s: no down delay value specified.\n",
763 bond->dev->name);
764 ret = -EINVAL;
765 goto out;
766 }
767 if (new_value < 0) {
768 printk(KERN_ERR DRV_NAME
769 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
770 bond->dev->name, new_value, 1, INT_MAX);
771 ret = -EINVAL;
772 goto out;
773 } else {
774 if ((new_value % bond->params.miimon) != 0) {
775 printk(KERN_WARNING DRV_NAME
776 ": %s: Warning: down delay (%d) is not a multiple "
777 "of miimon (%d), delay rounded to %d ms\n",
778 bond->dev->name, new_value, bond->params.miimon,
779 (new_value / bond->params.miimon) *
780 bond->params.miimon);
781 }
782 bond->params.downdelay = new_value / bond->params.miimon;
783 printk(KERN_INFO DRV_NAME ": %s: Setting down delay to %d.\n",
784 bond->dev->name, bond->params.downdelay * bond->params.miimon);
785
786 }
787
788out:
789 return ret;
790}
791static CLASS_DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
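A quick worked example of the multiplier storage described in the comment above, assuming miimon is set to 100: writing 250 to downdelay prints the rounding warning, stores 250 / 100 = 2 in bond->params.downdelay, and subsequent reads report 2 * 100 = 200 ms. The same conversion applies to updelay below.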
792
793static ssize_t bonding_show_updelay(struct class_device *cd, char *buf)
794{
795 struct bonding *bond = to_bond(cd);
796
797 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1;
798
799}
800
801static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, size_t count)
802{
803 int new_value, ret = count;
804 struct bonding *bond = to_bond(cd);
805
806 if (!(bond->params.miimon)) {
807 printk(KERN_ERR DRV_NAME
808 ": %s: Unable to set up delay as MII monitoring is disabled\n",
809 bond->dev->name);
810 ret = -EPERM;
811 goto out;
812 }
813
814 if (sscanf(buf, "%d", &new_value) != 1) {
815 printk(KERN_ERR DRV_NAME
816 ": %s: no up delay value specified.\n",
817 bond->dev->name);
818 ret = -EINVAL;
819 goto out;
820 }
821 if (new_value < 0) {
822 printk(KERN_ERR DRV_NAME
823 			": %s: Invalid up delay value %d not in range %d-%d; rejected.\n",
824 bond->dev->name, new_value, 1, INT_MAX);
825 ret = -EINVAL;
826 goto out;
827 } else {
828 if ((new_value % bond->params.miimon) != 0) {
829 printk(KERN_WARNING DRV_NAME
830 ": %s: Warning: up delay (%d) is not a multiple "
831 "of miimon (%d), updelay rounded to %d ms\n",
832 bond->dev->name, new_value, bond->params.miimon,
833 (new_value / bond->params.miimon) *
834 bond->params.miimon);
835 }
836 bond->params.updelay = new_value / bond->params.miimon;
837 printk(KERN_INFO DRV_NAME ": %s: Setting up delay to %d.\n",
838 bond->dev->name, bond->params.updelay * bond->params.miimon);
839
840 }
841
842out:
843 return ret;
844}
845static CLASS_DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
846
847/*
848 * Show and set the LACP interval. Interface must be down, and the mode
849 * must be set to 802.3ad mode.
850 */
851static ssize_t bonding_show_lacp(struct class_device *cd, char *buf)
852{
853 struct bonding *bond = to_bond(cd);
854
855 return sprintf(buf, "%s %d\n",
856 bond_lacp_tbl[bond->params.lacp_fast].modename,
857 bond->params.lacp_fast) + 1;
858}
859
860static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size_t count)
861{
862 int new_value, ret = count;
863 struct bonding *bond = to_bond(cd);
864
865 if (bond->dev->flags & IFF_UP) {
866 printk(KERN_ERR DRV_NAME
867 ": %s: Unable to update LACP rate because interface is up.\n",
868 bond->dev->name);
869 ret = -EPERM;
870 goto out;
871 }
872
873 if (bond->params.mode != BOND_MODE_8023AD) {
874 printk(KERN_ERR DRV_NAME
875 ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
876 bond->dev->name);
877 ret = -EPERM;
878 goto out;
879 }
880
881 new_value = bond_parse_parm((char *)buf, bond_lacp_tbl);
882
883 if ((new_value == 1) || (new_value == 0)) {
884 bond->params.lacp_fast = new_value;
885 printk(KERN_INFO DRV_NAME
886 ": %s: Setting LACP rate to %s (%d).\n",
887 bond->dev->name, bond_lacp_tbl[new_value].modename, new_value);
888 } else {
889 printk(KERN_ERR DRV_NAME
890 ": %s: Ignoring invalid LACP rate value %.*s.\n",
891 bond->dev->name, (int)strlen(buf) - 1, buf);
892 ret = -EINVAL;
893 }
894out:
895 return ret;
896}
897static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
898
899/*
900 * Show and set the MII monitor interval. There are two tricky bits
901 * here. First, if MII monitoring is activated, then we must disable
902 * ARP monitoring. Second, if the timer isn't running, we must
903 * start it.
904 */
905static ssize_t bonding_show_miimon(struct class_device *cd, char *buf)
906{
907 struct bonding *bond = to_bond(cd);
908
909 return sprintf(buf, "%d\n", bond->params.miimon) + 1;
910}
911
912static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, size_t count)
913{
914 int new_value, ret = count;
915 struct bonding *bond = to_bond(cd);
916
917 if (sscanf(buf, "%d", &new_value) != 1) {
918 printk(KERN_ERR DRV_NAME
919 ": %s: no miimon value specified.\n",
920 bond->dev->name);
921 ret = -EINVAL;
922 goto out;
923 }
924 if (new_value < 0) {
925 printk(KERN_ERR DRV_NAME
926 ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
927 bond->dev->name, new_value, 1, INT_MAX);
928 ret = -EINVAL;
929 goto out;
930 } else {
931 printk(KERN_INFO DRV_NAME
932 ": %s: Setting MII monitoring interval to %d.\n",
933 bond->dev->name, new_value);
934 bond->params.miimon = new_value;
935 if(bond->params.updelay)
936 printk(KERN_INFO DRV_NAME
937 ": %s: Note: Updating updelay (to %d) "
938 "since it is a multiple of the miimon value.\n",
939 bond->dev->name,
940 bond->params.updelay * bond->params.miimon);
941 if(bond->params.downdelay)
942 printk(KERN_INFO DRV_NAME
943 ": %s: Note: Updating downdelay (to %d) "
944 "since it is a multiple of the miimon value.\n",
945 bond->dev->name,
946 bond->params.downdelay * bond->params.miimon);
947 if (bond->params.arp_interval) {
948 printk(KERN_INFO DRV_NAME
949 ": %s: MII monitoring cannot be used with "
950 "ARP monitoring. Disabling ARP monitoring...\n",
951 bond->dev->name);
952 bond->params.arp_interval = 0;
953 /* Kill ARP timer, else it brings bond's link down */
954 			if (bond->arp_timer.function) {
955 printk(KERN_INFO DRV_NAME
956 ": %s: Kill ARP timer, else it brings bond's link down...\n",
957 bond->dev->name);
958 del_timer_sync(&bond->arp_timer);
959 }
960 }
961
962 if (bond->dev->flags & IFF_UP) {
963 /* If the interface is up, we may need to fire off
964 * the MII timer. If the interface is down, the
965 * timer will get fired off when the open function
966 * is called.
967 */
968 if (bond->mii_timer.function) {
969 /* The timer's already set up, so fire it off */
970 mod_timer(&bond->mii_timer, jiffies + 1);
971 } else {
972 /* Set up the timer. */
973 init_timer(&bond->mii_timer);
974 bond->mii_timer.expires = jiffies + 1;
975 bond->mii_timer.data =
976 (unsigned long) bond->dev;
977 bond->mii_timer.function =
978 (void *) &bond_mii_monitor;
979 add_timer(&bond->mii_timer);
980 }
981 }
982 }
983out:
984 return ret;
985}
986static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
987
988/*
989 * Show and set the primary slave. The store function is much
990 * simpler than the bonding_store_slaves function because it only needs to
991 * handle one interface name.
992 * The bond must be in a mode that supports a primary for this to be
993 * set.
994 */
995static ssize_t bonding_show_primary(struct class_device *cd, char *buf)
996{
997 int count = 0;
998 struct bonding *bond = to_bond(cd);
999
1000 if (bond->primary_slave)
1001 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1;
1002 else
1003 count = sprintf(buf, "\n") + 1;
1004
1005 return count;
1006}
1007
1008static ssize_t bonding_store_primary(struct class_device *cd, const char *buf, size_t count)
1009{
1010 int i;
1011 struct slave *slave;
1012 struct bonding *bond = to_bond(cd);
1013
1014 write_lock_bh(&bond->lock);
1015 if (!USES_PRIMARY(bond->params.mode)) {
1016 printk(KERN_INFO DRV_NAME
1017 ": %s: Unable to set primary slave; %s is in mode %d\n",
1018 bond->dev->name, bond->dev->name, bond->params.mode);
1019 } else {
1020 bond_for_each_slave(bond, slave, i) {
1021 if (strnicmp
1022 (slave->dev->name, buf,
1023 strlen(slave->dev->name)) == 0) {
1024 printk(KERN_INFO DRV_NAME
1025 ": %s: Setting %s as primary slave.\n",
1026 bond->dev->name, slave->dev->name);
1027 bond->primary_slave = slave;
1028 bond_select_active_slave(bond);
1029 goto out;
1030 }
1031 }
1032
1033 /* if we got here, then we didn't match the name of any slave */
1034
1035 if (strlen(buf) == 0 || buf[0] == '\n') {
1036 printk(KERN_INFO DRV_NAME
1037 ": %s: Setting primary slave to None.\n",
1038 bond->dev->name);
1039 bond->primary_slave = 0;
1040 bond_select_active_slave(bond);
1041 } else {
1042 printk(KERN_INFO DRV_NAME
1043 ": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
1044 bond->dev->name, (int)strlen(buf) - 1, buf);
1045 }
1046 }
1047out:
1048 write_unlock_bh(&bond->lock);
1049 return count;
1050}
1051static CLASS_DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
1052
1053/*
1054 * Show and set the use_carrier flag.
1055 */
1056static ssize_t bonding_show_carrier(struct class_device *cd, char *buf)
1057{
1058 struct bonding *bond = to_bond(cd);
1059
1060 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1;
1061}
1062
1063static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, size_t count)
1064{
1065 int new_value, ret = count;
1066 struct bonding *bond = to_bond(cd);
1067
1068
1069 if (sscanf(buf, "%d", &new_value) != 1) {
1070 printk(KERN_ERR DRV_NAME
1071 ": %s: no use_carrier value specified.\n",
1072 bond->dev->name);
1073 ret = -EINVAL;
1074 goto out;
1075 }
1076 if ((new_value == 0) || (new_value == 1)) {
1077 bond->params.use_carrier = new_value;
1078 printk(KERN_INFO DRV_NAME ": %s: Setting use_carrier to %d.\n",
1079 bond->dev->name, new_value);
1080 } else {
1081 printk(KERN_INFO DRV_NAME
1082 ": %s: Ignoring invalid use_carrier value %d.\n",
1083 bond->dev->name, new_value);
1084 }
1085out:
1086 return count;
1087}
1088static CLASS_DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
1089
1090
1091/*
1092 * Show and set currently active_slave.
1093 */
1094static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf)
1095{
1096 struct slave *curr;
1097 struct bonding *bond = to_bond(cd);
1098 int count;
1099
1100
1101 read_lock(&bond->curr_slave_lock);
1102 curr = bond->curr_active_slave;
1103 read_unlock(&bond->curr_slave_lock);
1104
1105 if (USES_PRIMARY(bond->params.mode) && curr)
1106 count = sprintf(buf, "%s\n", curr->dev->name) + 1;
1107 else
1108 count = sprintf(buf, "\n") + 1;
1109 return count;
1110}
1111
1112static ssize_t bonding_store_active_slave(struct class_device *cd, const char *buf, size_t count)
1113{
1114 int i;
1115 struct slave *slave;
1116 struct slave *old_active = NULL;
1117 struct slave *new_active = NULL;
1118 struct bonding *bond = to_bond(cd);
1119
1120 write_lock_bh(&bond->lock);
1121 if (!USES_PRIMARY(bond->params.mode)) {
1122 printk(KERN_INFO DRV_NAME
1123 ": %s: Unable to change active slave; %s is in mode %d\n",
1124 bond->dev->name, bond->dev->name, bond->params.mode);
1125 } else {
1126 bond_for_each_slave(bond, slave, i) {
1127 if (strnicmp
1128 (slave->dev->name, buf,
1129 strlen(slave->dev->name)) == 0) {
1130 old_active = bond->curr_active_slave;
1131 new_active = slave;
1132 if (new_active && (new_active == old_active)) {
1133 /* do nothing */
1134 printk(KERN_INFO DRV_NAME
1135 ": %s: %s is already the current active slave.\n",
1136 bond->dev->name, slave->dev->name);
1137 goto out;
1138 }
1139 else {
1140 if ((new_active) &&
1141 (old_active) &&
1142 (new_active->link == BOND_LINK_UP) &&
1143 IS_UP(new_active->dev)) {
1144 printk(KERN_INFO DRV_NAME
1145 ": %s: Setting %s as active slave.\n",
1146 bond->dev->name, slave->dev->name);
1147 bond_change_active_slave(bond, new_active);
1148 }
1149 else {
1150 printk(KERN_INFO DRV_NAME
1151 ": %s: Could not set %s as active slave; "
1152 "either %s is down or the link is down.\n",
1153 bond->dev->name, slave->dev->name,
1154 slave->dev->name);
1155 }
1156 goto out;
1157 }
1158 }
1159 }
1160
1161 /* if we got here, then we didn't match the name of any slave */
1162
1163 if (strlen(buf) == 0 || buf[0] == '\n') {
1164 printk(KERN_INFO DRV_NAME
1165 ": %s: Setting active slave to None.\n",
1166 bond->dev->name);
1167 bond->primary_slave = 0;
1168 bond_select_active_slave(bond);
1169 } else {
1170 printk(KERN_INFO DRV_NAME
1171 ": %s: Unable to set %.*s as active slave as it is not a slave.\n",
1172 bond->dev->name, (int)strlen(buf) - 1, buf);
1173 }
1174 }
1175out:
1176 write_unlock_bh(&bond->lock);
1177 return count;
1178
1179}
1180static CLASS_DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
1181
1182
1183/*
1184 * Show link status of the bond interface.
1185 */
1186static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf)
1187{
1188 struct slave *curr;
1189 struct bonding *bond = to_bond(cd);
1190
1191 read_lock(&bond->curr_slave_lock);
1192 curr = bond->curr_active_slave;
1193 read_unlock(&bond->curr_slave_lock);
1194
1195 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1;
1196}
1197static CLASS_DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1198
1199
1200/*
1201 * Show current 802.3ad aggregator ID.
1202 */
1203static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf)
1204{
1205 int count = 0;
1206 struct bonding *bond = to_bond(cd);
1207
1208 if (bond->params.mode == BOND_MODE_8023AD) {
1209 struct ad_info ad_info;
1210 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id) + 1;
1211 }
1212 else
1213 count = sprintf(buf, "\n") + 1;
1214
1215 return count;
1216}
1217static CLASS_DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
1218
1219
1220/*
1221 * Show number of active 802.3ad ports.
1222 */
1223static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf)
1224{
1225 int count = 0;
1226 struct bonding *bond = to_bond(cd);
1227
1228 if (bond->params.mode == BOND_MODE_8023AD) {
1229 struct ad_info ad_info;
1230 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports) + 1;
1231 }
1232 else
1233 count = sprintf(buf, "\n") + 1;
1234
1235 return count;
1236}
1237static CLASS_DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
1238
1239
1240/*
1241 * Show current 802.3ad actor key.
1242 */
1243static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf)
1244{
1245 int count = 0;
1246 struct bonding *bond = to_bond(cd);
1247
1248 if (bond->params.mode == BOND_MODE_8023AD) {
1249 struct ad_info ad_info;
1250 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key) + 1;
1251 }
1252 else
1253 count = sprintf(buf, "\n") + 1;
1254
1255 return count;
1256}
1257static CLASS_DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
1258
1259
1260/*
1261 * Show current 802.3ad partner key.
1262 */
1263static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf)
1264{
1265 int count = 0;
1266 struct bonding *bond = to_bond(cd);
1267
1268 if (bond->params.mode == BOND_MODE_8023AD) {
1269 struct ad_info ad_info;
1270 count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key) + 1;
1271 }
1272 else
1273 count = sprintf(buf, "\n") + 1;
1274
1275 return count;
1276}
1277static CLASS_DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
1278
1279
1280/*
1281 * Show current 802.3ad partner mac.
1282 */
1283static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf)
1284{
1285 int count = 0;
1286 struct bonding *bond = to_bond(cd);
1287
1288 if (bond->params.mode == BOND_MODE_8023AD) {
1289 struct ad_info ad_info;
1290 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) {
1291 count = sprintf(buf,"%02x:%02x:%02x:%02x:%02x:%02x\n",
1292 ad_info.partner_system[0],
1293 ad_info.partner_system[1],
1294 ad_info.partner_system[2],
1295 ad_info.partner_system[3],
1296 ad_info.partner_system[4],
1297 ad_info.partner_system[5]) + 1;
1298 }
1299 }
1300 else
1301 count = sprintf(buf, "\n") + 1;
1302
1303 return count;
1304}
1305static CLASS_DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1306
1307
1308
1309static struct attribute *per_bond_attrs[] = {
1310 &class_device_attr_slaves.attr,
1311 &class_device_attr_mode.attr,
1312 &class_device_attr_arp_interval.attr,
1313 &class_device_attr_arp_ip_target.attr,
1314 &class_device_attr_downdelay.attr,
1315 &class_device_attr_updelay.attr,
1316 &class_device_attr_lacp_rate.attr,
1317 &class_device_attr_xmit_hash_policy.attr,
1318 &class_device_attr_miimon.attr,
1319 &class_device_attr_primary.attr,
1320 &class_device_attr_use_carrier.attr,
1321 &class_device_attr_active_slave.attr,
1322 &class_device_attr_mii_status.attr,
1323 &class_device_attr_ad_aggregator.attr,
1324 &class_device_attr_ad_num_ports.attr,
1325 &class_device_attr_ad_actor_key.attr,
1326 &class_device_attr_ad_partner_key.attr,
1327 &class_device_attr_ad_partner_mac.attr,
1328 NULL,
1329};
1330
1331static struct attribute_group bonding_group = {
1332 .name = "bonding",
1333 .attrs = per_bond_attrs,
1334};
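Because the attribute group is registered with .name = "bonding", every per-bond file listed above appears under /sys/class/net/<bond>/bonding/. A small read-side sketch, assuming a bond named "bond0" already exists (the name and the output formatting are assumptions of the example):

#include <stdio.h>

int main(void)
{
	char status[16];
	FILE *f = fopen("/sys/class/net/bond0/bonding/mii_status", "r");

	if (!f)
		return 1;
	if (fgets(status, sizeof(status), f))
		printf("bond0 mii_status: %s", status);	/* "up" or "down" */
	fclose(f);
	return 0;
}
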
1335
1336/*
1337 * Initialize sysfs. This sets up the bonding_masters file in
1338 * /sys/class/net.
1339 */
1340int bond_create_sysfs(void)
1341{
1342 int ret = 0;
1343 struct bonding *firstbond;
1344
1345 init_rwsem(&bonding_rwsem);
1346
1347 /* get the netdev class pointer */
1348 firstbond = container_of(bond_dev_list.next, struct bonding, bond_list);
1349 if (!firstbond)
1350 return -ENODEV;
1351
1352 netdev_class = firstbond->dev->class_dev.class;
1353 if (!netdev_class)
1354 return -ENODEV;
1355
1356 ret = class_create_file(netdev_class, &class_attr_bonding_masters);
1357
1358 return ret;
1359
1360}
1361
1362/*
1363 * Remove /sys/class/net/bonding_masters.
1364 */
1365void bond_destroy_sysfs(void)
1366{
1367 if (netdev_class)
1368 class_remove_file(netdev_class, &class_attr_bonding_masters);
1369}
1370
1371/*
1372 * Initialize sysfs for each bond. This sets up and registers
1373 * the 'bonding' directory for each individual bond under /sys/class/net.
1374 */
1375int bond_create_sysfs_entry(struct bonding *bond)
1376{
1377 struct net_device *dev = bond->dev;
1378 int err;
1379
1380 err = sysfs_create_group(&(dev->class_dev.kobj), &bonding_group);
1381 if (err) {
1382 printk(KERN_EMERG "eek! didn't create group!\n");
1383 }
1384
1385 if (expected_refcount < 1)
1386 expected_refcount = atomic_read(&bond->dev->class_dev.kobj.kref.refcount);
1387
1388 return err;
1389}
1390/*
1391 * Remove sysfs entries for each bond.
1392 */
1393void bond_destroy_sysfs_entry(struct bonding *bond)
1394{
1395 struct net_device *dev = bond->dev;
1396
1397 sysfs_remove_group(&(dev->class_dev.kobj), &bonding_group);
1398}
1399
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 1433e91db0f7..d6d085480f21 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -29,6 +29,10 @@
29 * 2005/05/05 - Jason Gabler <jygabler at lbl dot gov> 29 * 2005/05/05 - Jason Gabler <jygabler at lbl dot gov>
30 * - added "xmit_policy" kernel parameter for alternate hashing policy 30 * - added "xmit_policy" kernel parameter for alternate hashing policy
31 * support for mode 2 31 * support for mode 2
32 *
33 * 2005/09/27 - Mitch Williams <mitch.a.williams at intel dot com>
34 * Radheka Godse <radheka.godse at intel dot com>
35 * - Added bonding sysfs interface
32 */ 36 */
33 37
34#ifndef _LINUX_BONDING_H 38#ifndef _LINUX_BONDING_H
@@ -37,11 +41,12 @@
37#include <linux/timer.h> 41#include <linux/timer.h>
38#include <linux/proc_fs.h> 42#include <linux/proc_fs.h>
39#include <linux/if_bonding.h> 43#include <linux/if_bonding.h>
44#include <linux/kobject.h>
40#include "bond_3ad.h" 45#include "bond_3ad.h"
41#include "bond_alb.h" 46#include "bond_alb.h"
42 47
43#define DRV_VERSION "2.6.5" 48#define DRV_VERSION "3.0.0"
44#define DRV_RELDATE "November 4, 2005" 49#define DRV_RELDATE "November 8, 2005"
45#define DRV_NAME "bonding" 50#define DRV_NAME "bonding"
46#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 51#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
47 52
@@ -152,6 +157,11 @@ struct bond_params {
152 u32 arp_targets[BOND_MAX_ARP_TARGETS]; 157 u32 arp_targets[BOND_MAX_ARP_TARGETS];
153}; 158};
154 159
160struct bond_parm_tbl {
161 char *modename;
162 int mode;
163};
164
155struct vlan_entry { 165struct vlan_entry {
156 struct list_head vlan_list; 166 struct list_head vlan_list;
157 u32 vlan_ip; 167 u32 vlan_ip;
@@ -159,7 +169,7 @@ struct vlan_entry {
159}; 169};
160 170
161struct slave { 171struct slave {
162 struct net_device *dev; /* first - usefull for panic debug */ 172 struct net_device *dev; /* first - useful for panic debug */
163 struct slave *next; 173 struct slave *next;
164 struct slave *prev; 174 struct slave *prev;
165 s16 delay; 175 s16 delay;
@@ -185,7 +195,7 @@ struct slave {
185 * beforehand. 195 * beforehand.
186 */ 196 */
187struct bonding { 197struct bonding {
188 struct net_device *dev; /* first - usefull for panic debug */ 198 struct net_device *dev; /* first - useful for panic debug */
189 struct slave *first_slave; 199 struct slave *first_slave;
190 struct slave *curr_active_slave; 200 struct slave *curr_active_slave;
191 struct slave *current_arp_slave; 201 struct slave *current_arp_slave;
@@ -255,6 +265,25 @@ extern inline void bond_set_slave_active_flags(struct slave *slave)
255 265
256struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 266struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
257int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 267int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
268int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
269void bond_deinit(struct net_device *bond_dev);
270int bond_create_sysfs(void);
271void bond_destroy_sysfs(void);
272void bond_destroy_sysfs_entry(struct bonding *bond);
273int bond_create_sysfs_entry(struct bonding *bond);
274int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
275void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
276int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
277int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
278int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev);
279void bond_mii_monitor(struct net_device *bond_dev);
280void bond_loadbalance_arp_mon(struct net_device *bond_dev);
281void bond_activebackup_arp_mon(struct net_device *bond_dev);
282void bond_set_mode_ops(struct bonding *bond, int mode);
283int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
284const char *bond_mode_name(int mode);
285void bond_select_active_slave(struct bonding *bond);
286void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
258 287
259#endif /* _LINUX_BONDING_H */ 288#endif /* _LINUX_BONDING_H */
260 289
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e3a329539f1c..0f030b73cbb3 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -6,7 +6,7 @@
6 * Based on 8260_io/fcc_enet.c 6 * Based on 8260_io/fcc_enet.c
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 9 * Maintainer: Kumar Gala
10 * 10 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
12 * 12 *
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 220084e53341..5065ba82cb76 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -6,7 +6,7 @@
6 * Based on 8260_io/fcc_enet.c 6 * Based on 8260_io/fcc_enet.c
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 9 * Maintainer: Kumar Gala
10 * 10 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
12 * 12 *
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5a2d810ce575..cfa3cd7c91a0 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -6,7 +6,7 @@
6 * Based on e1000 ethtool support 6 * Based on e1000 ethtool support
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 9 * Maintainer: Kumar Gala
10 * 10 *
11 * Copyright (c) 2003,2004 Freescale Semiconductor, Inc. 11 * Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
12 * 12 *
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 9544279e8bcd..04a462c2a5b7 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -5,7 +5,7 @@
5 * Provides Bus interface for MIIM regs 5 * Provides Bus interface for MIIM regs
6 * 6 *
7 * Author: Andy Fleming 7 * Author: Andy Fleming
8 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 8 * Maintainer: Kumar Gala
9 * 9 *
10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
11 * 11 *
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 56e5665d5c9b..e85eb216fb5b 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -5,7 +5,7 @@
5 * Driver for the MDIO bus controller in the Gianfar register space 5 * Driver for the MDIO bus controller in the Gianfar register space
6 * 6 *
7 * Author: Andy Fleming 7 * Author: Andy Fleming
8 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 8 * Maintainer: Kumar Gala
9 * 9 *
10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
11 * 11 *
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9bf34681d3df..2e7882eb7d6f 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -40,6 +40,7 @@
40#include <asm/byteorder.h> 40#include <asm/byteorder.h>
41 41
42#include <linux/pm.h> 42#include <linux/pm.h>
43#include <linux/pm_legacy.h>
43 44
44#include <net/irda/wrapper.h> 45#include <net/irda/wrapper.h>
45#include <net/irda/irda.h> 46#include <net/irda/irda.h>
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 805714ec9a8a..ee717d0e939e 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -59,6 +59,7 @@
59#include <asm/byteorder.h> 59#include <asm/byteorder.h>
60 60
61#include <linux/pm.h> 61#include <linux/pm.h>
62#include <linux/pm_legacy.h>
62 63
63#include <net/irda/wrapper.h> 64#include <net/irda/wrapper.h>
64#include <net/irda/irda.h> 65#include <net/irda/irda.h>
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 1d4d88680db1..3d95fa20cd88 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. 1/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
2 Copyright 1999 Silicon Integrated System Corporation 2 Copyright 1999 Silicon Integrated System Corporation
3 Revision: 1.08.08 Jan. 22 2005 3 Revision: 1.08.09 Sep. 19 2005
4 4
5 Modified from the driver which is originally written by Donald Becker. 5 Modified from the driver which is originally written by Donald Becker.
6 6
@@ -17,6 +17,7 @@
17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, 17 SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
18 preliminary Rev. 1.0 Jan. 18, 1998 18 preliminary Rev. 1.0 Jan. 18, 1998
19 19
20 Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
20 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages 21 Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
21 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support 22 Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
22 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support 23 Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
@@ -76,7 +77,7 @@
76#include "sis900.h" 77#include "sis900.h"
77 78
78#define SIS900_MODULE_NAME "sis900" 79#define SIS900_MODULE_NAME "sis900"
79#define SIS900_DRV_VERSION "v1.08.08 Jan. 22 2005" 80#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
80 81
81static char version[] __devinitdata = 82static char version[] __devinitdata =
82KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; 83KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -538,6 +539,11 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
538 printk("%2.2x:", (u8)net_dev->dev_addr[i]); 539 printk("%2.2x:", (u8)net_dev->dev_addr[i]);
539 printk("%2.2x.\n", net_dev->dev_addr[i]); 540 printk("%2.2x.\n", net_dev->dev_addr[i]);
540 541
542 /* Detect Wake on Lan support */
543 ret = inl(CFGPMC & PMESP);
544 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
 545 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.\n", net_dev->name);
546
541 return 0; 547 return 0;
542 548
543 err_unmap_rx: 549 err_unmap_rx:
@@ -2015,6 +2021,67 @@ static int sis900_nway_reset(struct net_device *net_dev)
2015 return mii_nway_restart(&sis_priv->mii_info); 2021 return mii_nway_restart(&sis_priv->mii_info);
2016} 2022}
2017 2023
2024/**
2025 * sis900_set_wol - Set up Wake on Lan registers
2026 * @net_dev: the net device to probe
2027 * @wol: container for info passed to the driver
2028 *
2029 * Process ethtool command "wol" to setup wake on lan features.
2030 * SiS900 supports sending WoL events if a correct packet is received,
2031 * but there is no simple way to filter them to only a subset (broadcast,
2032 * multicast, unicast or arp).
2033 */
2034
2035static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2036{
2037 struct sis900_private *sis_priv = net_dev->priv;
2038 long pmctrl_addr = net_dev->base_addr + pmctrl;
2039 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2040
2041 if (wol->wolopts == 0) {
2042 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
 2043 cfgpmcsr &= ~PME_EN;
2044 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2045 outl(pmctrl_bits, pmctrl_addr);
2046 if (netif_msg_wol(sis_priv))
2047 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2048 return 0;
2049 }
2050
2051 if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
2052 | WAKE_BCAST | WAKE_ARP))
2053 return -EINVAL;
2054
2055 if (wol->wolopts & WAKE_MAGIC)
2056 pmctrl_bits |= MAGICPKT;
2057 if (wol->wolopts & WAKE_PHY)
2058 pmctrl_bits |= LINKON;
2059
2060 outl(pmctrl_bits, pmctrl_addr);
2061
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr |= PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 if (netif_msg_wol(sis_priv))
2066 printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);
2067
2068 return 0;
2069}
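/*
 * Usage sketch (illustrative, assuming an interface named eth0):
 * "ethtool -s eth0 wol g" requests WAKE_MAGIC, which sets MAGICPKT in the
 * pmctrl register and PME_EN in CFGPMCSR; "ethtool -s eth0 wol d" passes
 * wolopts == 0, clearing PME_EN and writing 0 to pmctrl.
 */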
2070
2071static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2072{
2073 long pmctrl_addr = net_dev->base_addr + pmctrl;
2074 u32 pmctrl_bits;
2075
2076 pmctrl_bits = inl(pmctrl_addr);
2077 if (pmctrl_bits & MAGICPKT)
2078 wol->wolopts |= WAKE_MAGIC;
2079 if (pmctrl_bits & LINKON)
2080 wol->wolopts |= WAKE_PHY;
2081
2082 wol->supported = (WAKE_PHY | WAKE_MAGIC);
2083}
2084
2018static struct ethtool_ops sis900_ethtool_ops = { 2085static struct ethtool_ops sis900_ethtool_ops = {
2019 .get_drvinfo = sis900_get_drvinfo, 2086 .get_drvinfo = sis900_get_drvinfo,
2020 .get_msglevel = sis900_get_msglevel, 2087 .get_msglevel = sis900_get_msglevel,
@@ -2023,6 +2090,8 @@ static struct ethtool_ops sis900_ethtool_ops = {
2023 .get_settings = sis900_get_settings, 2090 .get_settings = sis900_get_settings,
2024 .set_settings = sis900_set_settings, 2091 .set_settings = sis900_set_settings,
2025 .nway_reset = sis900_nway_reset, 2092 .nway_reset = sis900_nway_reset,
2093 .get_wol = sis900_get_wol,
2094 .set_wol = sis900_set_wol
2026}; 2095};
2027 2096
2028/** 2097/**
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index de3c06735d15..4233ea55670f 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -33,6 +33,7 @@ enum sis900_registers {
33 rxcfg=0x34, //Receive Configuration Register 33 rxcfg=0x34, //Receive Configuration Register
34 flctrl=0x38, //Flow Control Register 34 flctrl=0x38, //Flow Control Register
35 rxlen=0x3c, //Receive Packet Length Register 35 rxlen=0x3c, //Receive Packet Length Register
36 cfgpmcsr=0x44, //Configuration Power Management Control/Status Register
36 rfcr=0x48, //Receive Filter Control Register 37 rfcr=0x48, //Receive Filter Control Register
37 rfdr=0x4C, //Receive Filter Data Register 38 rfdr=0x4C, //Receive Filter Data Register
38 pmctrl=0xB0, //Power Management Control Register 39 pmctrl=0xB0, //Power Management Control Register
@@ -140,6 +141,50 @@ enum sis96x_eeprom_command {
140 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100 141 EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
141}; 142};
142 143
144/* PCI Registers */
145enum sis900_pci_registers {
146 CFGPMC = 0x40,
147 CFGPMCSR = 0x44
148};
149
150/* Power management capabilities bits */
151enum sis900_cfgpmc_register_bits {
152 PMVER = 0x00070000,
153 DSI = 0x00100000,
154 PMESP = 0xf8000000
155};
156
157enum sis900_pmesp_bits {
158 PME_D0 = 0x1,
159 PME_D1 = 0x2,
160 PME_D2 = 0x4,
161 PME_D3H = 0x8,
162 PME_D3C = 0x10
163};
164
165/* Power management control/status bits */
166enum sis900_cfgpmcsr_register_bits {
167 PMESTS = 0x00004000,
168 PME_EN = 0x00000100, // Power management enable
169 PWR_STA = 0x00000003 // Current power state
170};
171
172/* Wake-on-LAN support. */
173enum sis900_power_management_control_register_bits {
174 LINKLOSS = 0x00000001,
175 LINKON = 0x00000002,
176 MAGICPKT = 0x00000400,
177 ALGORITHM = 0x00000800,
178 FRM1EN = 0x00100000,
179 FRM2EN = 0x00200000,
180 FRM3EN = 0x00400000,
181 FRM1ACS = 0x01000000,
182 FRM2ACS = 0x02000000,
183 FRM3ACS = 0x04000000,
184 WAKEALL = 0x40000000,
185 GATECLK = 0x80000000
186};
187
143/* Management Data I/O (mdio) frame */ 188/* Management Data I/O (mdio) frame */
144#define MIIread 0x6000 189#define MIIread 0x6000
145#define MIIwrite 0x5002 190#define MIIwrite 0x5002
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
new file mode 100644
index 000000000000..9f89000e5ad5
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,3039 @@
1/*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on earlier sk98lin, and skge driver.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26/*
27 * TODO
28 * - coalescing setting?
29 *
30 * TOTEST
31 * - speed setting
32 * - suspend/resume
33 */
34
35#include <linux/config.h>
36#include <linux/crc32.h>
37#include <linux/kernel.h>
38#include <linux/version.h>
39#include <linux/module.h>
40#include <linux/netdevice.h>
41#include <linux/dma-mapping.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/pci.h>
45#include <linux/ip.h>
46#include <linux/tcp.h>
47#include <linux/in.h>
48#include <linux/delay.h>
49#include <linux/if_vlan.h>
50
51#include <asm/irq.h>
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define SKY2_VLAN_TAG_USED 1
55#endif
56
57#include "sky2.h"
58
59#define DRV_NAME "sky2"
60#define DRV_VERSION "0.7"
61#define PFX DRV_NAME " "
62
63/*
64 * The Yukon II chipset takes 64 bit command blocks (called list elements)
65 * that are organized into three (receive, transmit, status) different rings
66 * similar to Tigon3. A transmit can require several elements;
67 * a receive requires one (or two if using 64 bit dma).
68 */
69
70#ifdef CONFIG_SKY2_EC_A1
71#define is_ec_a1(hw) \
72 ((hw)->chip_id == CHIP_ID_YUKON_EC && \
73 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
74#else
75#define is_ec_a1(hw) 0
76#endif
77
78#define RX_LE_SIZE 256
79#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
80#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
81#define RX_DEF_PENDING 128
82#define RX_COPY_THRESHOLD 256
83
84#define TX_RING_SIZE 512
85#define TX_DEF_PENDING (TX_RING_SIZE - 1)
86#define TX_MIN_PENDING 64
87#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
88
89#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
90#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
91#define ETH_JUMBO_MTU 9000
92#define TX_WATCHDOG (5 * HZ)
93#define NAPI_WEIGHT 64
94#define PHY_RETRIES 1000
95
96static const u32 default_msg =
97 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
98 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
99 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
100
101static int debug = -1; /* defaults above */
102module_param(debug, int, 0);
103MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
104
105static const struct pci_device_id sky2_id_table[] = {
106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
109 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, sky2_id_table);
127
128/* Avoid conditionals by using array */
129static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
130static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
131
132static const char *yukon_name[] = {
133 [CHIP_ID_YUKON_LITE - CHIP_ID_YUKON] = "Lite", /* 0xb0 */
134 [CHIP_ID_YUKON_LP - CHIP_ID_YUKON] = "LP", /* 0xb2 */
135 [CHIP_ID_YUKON_XL - CHIP_ID_YUKON] = "XL", /* 0xb3 */
136
137 [CHIP_ID_YUKON_EC - CHIP_ID_YUKON] = "EC", /* 0xb6 */
138 [CHIP_ID_YUKON_FE - CHIP_ID_YUKON] = "FE", /* 0xb7 */
139};
140
141
142/* Access to external PHY */
143static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
144{
145 int i;
146
147 gma_write16(hw, port, GM_SMI_DATA, val);
148 gma_write16(hw, port, GM_SMI_CTRL,
149 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
150
151 for (i = 0; i < PHY_RETRIES; i++) {
152 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
153 return;
154 udelay(1);
155 }
156 printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
157}
158
159static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
160{
161 int i;
162
163 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
164 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
165
166 for (i = 0; i < PHY_RETRIES; i++) {
167 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
168 goto ready;
169 udelay(1);
170 }
171
172 printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
173ready:
174 return gma_read16(hw, port, GM_SMI_DATA);
175}
176
177static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
178{
179 u16 power_control;
180 u32 reg1;
181 int vaux;
182 int ret = 0;
183
184 pr_debug("sky2_set_power_state %d\n", state);
185 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
186
187 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
188 vaux = (sky2_read8(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
189 (power_control & PCI_PM_CAP_PME_D3cold);
190
191 pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
192
193 power_control |= PCI_PM_CTRL_PME_STATUS;
194 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
195
196 switch (state) {
197 case PCI_D0:
198 /* switch power to VCC (WA for VAUX problem) */
199 sky2_write8(hw, B0_POWER_CTRL,
200 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
201
202 /* disable Core Clock Division, */
203 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
204
205 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
206 /* enable bits are inverted */
207 sky2_write8(hw, B2_Y2_CLK_GATE,
208 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
209 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
210 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
211 else
212 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
213
214 /* Turn off phy power saving */
215 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
216 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
217
218 /* looks like this XL is back asswards .. */
219 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
220 reg1 |= PCI_Y2_PHY1_COMA;
221 if (hw->ports > 1)
222 reg1 |= PCI_Y2_PHY2_COMA;
223 }
224 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
225 break;
226
227 case PCI_D3hot:
228 case PCI_D3cold:
229 /* Turn on phy power saving */
230 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
232 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
233 else
234 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
235 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
236
237 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
238 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
239 else
240 /* enable bits are inverted */
241 sky2_write8(hw, B2_Y2_CLK_GATE,
242 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
243 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
244 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
245
246 /* switch power to VAUX */
247 if (vaux && state != PCI_D3cold)
248 sky2_write8(hw, B0_POWER_CTRL,
249 (PC_VAUX_ENA | PC_VCC_ENA |
250 PC_VAUX_ON | PC_VCC_OFF));
251 break;
252 default:
253 printk(KERN_ERR PFX "Unknown power state %d\n", state);
254 ret = -1;
255 }
256
257 pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
258 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
259 return ret;
260}
261
262static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
263{
264 u16 reg;
265
266 /* disable all GMAC IRQ's */
267 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
268 /* disable PHY IRQs */
269 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
270
271 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
272 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
273 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
274 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
275
276 reg = gma_read16(hw, port, GM_RX_CTRL);
277 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
278 gma_write16(hw, port, GM_RX_CTRL, reg);
279}
280
281static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
282{
283 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
284 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
285
286 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
287 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
288
289 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
290 PHY_M_EC_MAC_S_MSK);
291 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
292
293 if (hw->chip_id == CHIP_ID_YUKON_EC)
294 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
295 else
296 ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
297
298 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
299 }
300
301 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
302 if (hw->copper) {
303 if (hw->chip_id == CHIP_ID_YUKON_FE) {
304 /* enable automatic crossover */
305 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
306 } else {
307 /* disable energy detect */
308 ctrl &= ~PHY_M_PC_EN_DET_MSK;
309
310 /* enable automatic crossover */
311 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
312
313 if (sky2->autoneg == AUTONEG_ENABLE &&
314 hw->chip_id == CHIP_ID_YUKON_XL) {
315 ctrl &= ~PHY_M_PC_DSC_MSK;
316 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
317 }
318 }
319 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
320 } else {
321 /* workaround for deviation #4.88 (CRC errors) */
322 /* disable Automatic Crossover */
323
324 ctrl &= ~PHY_M_PC_MDIX_MSK;
325 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
326
327 if (hw->chip_id == CHIP_ID_YUKON_XL) {
328 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
329 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
330 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
331 ctrl &= ~PHY_M_MAC_MD_MSK;
332 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
333 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
334
335 /* select page 1 to access Fiber registers */
336 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
337 }
338 }
339
340 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
341 if (sky2->autoneg == AUTONEG_DISABLE)
342 ctrl &= ~PHY_CT_ANE;
343 else
344 ctrl |= PHY_CT_ANE;
345
346 ctrl |= PHY_CT_RESET;
347 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
348
349 ctrl = 0;
350 ct1000 = 0;
351 adv = PHY_AN_CSMA;
352
353 if (sky2->autoneg == AUTONEG_ENABLE) {
354 if (hw->copper) {
355 if (sky2->advertising & ADVERTISED_1000baseT_Full)
356 ct1000 |= PHY_M_1000C_AFD;
357 if (sky2->advertising & ADVERTISED_1000baseT_Half)
358 ct1000 |= PHY_M_1000C_AHD;
359 if (sky2->advertising & ADVERTISED_100baseT_Full)
360 adv |= PHY_M_AN_100_FD;
361 if (sky2->advertising & ADVERTISED_100baseT_Half)
362 adv |= PHY_M_AN_100_HD;
363 if (sky2->advertising & ADVERTISED_10baseT_Full)
364 adv |= PHY_M_AN_10_FD;
365 if (sky2->advertising & ADVERTISED_10baseT_Half)
366 adv |= PHY_M_AN_10_HD;
367 } else /* special defines for FIBER (88E1011S only) */
368 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
369
370 /* Set Flow-control capabilities */
371 if (sky2->tx_pause && sky2->rx_pause)
372 adv |= PHY_AN_PAUSE_CAP; /* symmetric */
373 else if (sky2->rx_pause && !sky2->tx_pause)
374 adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
375 else if (!sky2->rx_pause && sky2->tx_pause)
376 adv |= PHY_AN_PAUSE_ASYM; /* local */
377
378 /* Restart Auto-negotiation */
379 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
380 } else {
381 /* forced speed/duplex settings */
382 ct1000 = PHY_M_1000C_MSE;
383
384 if (sky2->duplex == DUPLEX_FULL)
385 ctrl |= PHY_CT_DUP_MD;
386
387 switch (sky2->speed) {
388 case SPEED_1000:
389 ctrl |= PHY_CT_SP1000;
390 break;
391 case SPEED_100:
392 ctrl |= PHY_CT_SP100;
393 break;
394 }
395
396 ctrl |= PHY_CT_RESET;
397 }
398
399 if (hw->chip_id != CHIP_ID_YUKON_FE)
400 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
401
402 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
403 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
404
405 /* Setup Phy LED's */
406 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
407 ledover = 0;
408
409 switch (hw->chip_id) {
410 case CHIP_ID_YUKON_FE:
411 /* on 88E3082 these bits are at 11..9 (shifted left) */
412 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
413
414 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
415
416 /* delete ACT LED control bits */
417 ctrl &= ~PHY_M_FELP_LED1_MSK;
418 /* change ACT LED control to blink mode */
419 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
420 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
421 break;
422
423 case CHIP_ID_YUKON_XL:
424 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
425
426 /* select page 3 to access LED control register */
427 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
428
429 /* set LED Function Control register */
430 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
431 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
432 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
433 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
434
435 /* set Polarity Control register */
436 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
437 (PHY_M_POLC_LS1_P_MIX(4) |
438 PHY_M_POLC_IS0_P_MIX(4) |
439 PHY_M_POLC_LOS_CTRL(2) |
440 PHY_M_POLC_INIT_CTRL(2) |
441 PHY_M_POLC_STA1_CTRL(2) |
442 PHY_M_POLC_STA0_CTRL(2)));
443
444 /* restore page register */
445 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
446 break;
447
448 default:
449 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
450 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
451 /* turn off the Rx LED (LED_RX) */
452 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
453 }
454
455 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
456
457 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
458 /* turn on 100 Mbps LED (LED_LINK100) */
459 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
460 }
461
462 if (ledover)
463 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
464
465 /* Enable phy interrupt on auto-negotiation complete (or link up) */
466 if (sky2->autoneg == AUTONEG_ENABLE)
467 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
468 else
469 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
470}
471
472static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
473{
474 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
475 u16 reg;
476 int i;
477 const u8 *addr = hw->dev[port]->dev_addr;
478
479 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
480 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
481
482 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
483
484 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
485 /* WA DEV_472 -- looks like crossed wires on port 2 */
486 /* clear GMAC 1 Control reset */
487 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
488 do {
489 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
490 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
491 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
492 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
493 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
494 }
495
496 if (sky2->autoneg == AUTONEG_DISABLE) {
497 reg = gma_read16(hw, port, GM_GP_CTRL);
498 reg |= GM_GPCR_AU_ALL_DIS;
499 gma_write16(hw, port, GM_GP_CTRL, reg);
500 gma_read16(hw, port, GM_GP_CTRL);
501
502 switch (sky2->speed) {
503 case SPEED_1000:
504 reg |= GM_GPCR_SPEED_1000;
505 /* fallthru */
506 case SPEED_100:
507 reg |= GM_GPCR_SPEED_100;
508 }
509
510 if (sky2->duplex == DUPLEX_FULL)
511 reg |= GM_GPCR_DUP_FULL;
512 } else
513 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
514
515 if (!sky2->tx_pause && !sky2->rx_pause) {
516 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
517 reg |=
518 GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
519 } else if (sky2->tx_pause && !sky2->rx_pause) {
520 /* disable Rx flow-control */
521 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
522 }
523
524 gma_write16(hw, port, GM_GP_CTRL, reg);
525
526 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
527
528 spin_lock_bh(&hw->phy_lock);
529 sky2_phy_init(hw, port);
530 spin_unlock_bh(&hw->phy_lock);
531
532 /* MIB clear */
533 reg = gma_read16(hw, port, GM_PHY_ADDR);
534 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
535
536 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
537 gma_read16(hw, port, GM_MIB_CNT_BASE + 8 * i);
538 gma_write16(hw, port, GM_PHY_ADDR, reg);
539
540 /* transmit control */
541 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
542
543 /* receive control reg: unicast + multicast + no FCS */
544 gma_write16(hw, port, GM_RX_CTRL,
545 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
546
547 /* transmit flow control */
548 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
549
550 /* transmit parameter */
551 gma_write16(hw, port, GM_TX_PARAM,
552 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
553 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
554 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
555 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
556
557 /* serial mode register */
558 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
559 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
560
561 if (hw->dev[port]->mtu > ETH_DATA_LEN)
562 reg |= GM_SMOD_JUMBO_ENA;
563
564 gma_write16(hw, port, GM_SERIAL_MODE, reg);
565
566 /* virtual address for data */
567 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
568
569 /* physical address: used for pause frames */
570 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
571
572 /* ignore counter overflows */
573 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
574 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
575 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
576
577 /* Configure Rx MAC FIFO */
578 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
579 sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
580 GMF_RX_CTRL_DEF);
581
582 /* Flush Rx MAC FIFO on any flow control or error */
583 reg = GMR_FS_ANY_ERR;
584 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev <= 1)
585 reg = 0; /* WA dev #4.115 */
586
587 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), reg);
588 /* Set threshold to 0xa (64 bytes)
589 * ASF disabled so no need to do WA dev #4.30
590 */
591 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
592
593 /* Configure Tx MAC FIFO */
594 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
595 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
596}
597
598static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
599{
600 u32 end;
601
602 start /= 8;
603 len /= 8;
604 end = start + len - 1;
605
606 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
607 sky2_write32(hw, RB_ADDR(q, RB_START), start);
608 sky2_write32(hw, RB_ADDR(q, RB_END), end);
609 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
610 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
611
612 if (q == Q_R1 || q == Q_R2) {
613 u32 rxup, rxlo;
614
615 rxlo = len/2;
616 rxup = rxlo + len/4;
617
 618 /* Set thresholds on receive queues */
619 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
620 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
621 } else {
 622 /* Enable store & forward on Tx queues because
623 * Tx FIFO is only 1K on Yukon
624 */
625 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
626 }
627
628 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
629 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
630}
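/*
 * Illustrative numbers: callers pass start and len in bytes and
 * sky2_ramset() converts them to the chip's 8-byte units. For example,
 * a 49152-byte RAM buffer split 2/3 for Rx (as in sky2_up() below) gives
 * the Rx queue start 0, length 4096 units and the Tx queue start 4096
 * units, length 2048 units.
 */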
631
632/* Setup Bus Memory Interface */
633static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm)
634{
635 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
636 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
637 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
638 sky2_write32(hw, Q_ADDR(q, Q_WM), wm);
639}
640
641/* Setup prefetch unit registers. This is the interface between
642 * hardware and driver list elements
643 */
644static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
645 u64 addr, u32 last)
646{
647 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
648 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
649 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
650 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
651 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
652 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
653
654 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
655}
656
657static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
658{
659 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
660
661 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
662 return le;
663}
664
665/*
 666 * This is workaround code taken from the SysKonnect sk98lin driver
 667 * to deal with a chip bug on Yukon EC rev 0 in the wraparound case.
668 */
669static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
670 u16 idx, u16 *last, u16 size)
671{
672 if (is_ec_a1(hw) && idx < *last) {
673 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
674
675 if (hwget == 0) {
676 /* Start prefetching again */
677 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
678 goto setnew;
679 }
680
681 if (hwget == size - 1) {
682 /* set watermark to one list element */
683 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
684
685 /* set put index to first list element */
686 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
687 } else /* have hardware go to end of list */
688 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
689 size - 1);
690 } else {
691setnew:
692 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
693 }
694 *last = idx;
695}
696
697
698static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
699{
700 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
701 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
702 return le;
703}
704
705/* Build description to hardware about buffer */
706static inline void sky2_rx_add(struct sky2_port *sky2, struct ring_info *re)
707{
708 struct sky2_rx_le *le;
709 u32 hi = (re->mapaddr >> 16) >> 16;
710
711 re->idx = sky2->rx_put;
712 if (sky2->rx_addr64 != hi) {
713 le = sky2_next_rx(sky2);
714 le->addr = cpu_to_le32(hi);
715 le->ctrl = 0;
716 le->opcode = OP_ADDR64 | HW_OWNER;
717 sky2->rx_addr64 = hi;
718 }
719
720 le = sky2_next_rx(sky2);
721 le->addr = cpu_to_le32((u32) re->mapaddr);
722 le->length = cpu_to_le16(re->maplen);
723 le->ctrl = 0;
724 le->opcode = OP_PACKET | HW_OWNER;
725}
726
727
728/* Tell chip where to start receive checksum.
729 * Actually has two checksums, but set both same to avoid possible byte
730 * order problems.
731 */
732static void rx_set_checksum(struct sky2_port *sky2)
733{
734 struct sky2_rx_le *le;
735
736 le = sky2_next_rx(sky2);
737 le->addr = (ETH_HLEN << 16) | ETH_HLEN;
738 le->ctrl = 0;
739 le->opcode = OP_TCPSTART | HW_OWNER;
740
741 sky2_write32(sky2->hw,
742 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
743 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
744
745}
746
747/*
748 * The RX Stop command will not work for Yukon-2 if the BMU does not
749 * reach the end of packet and since we can't make sure that we have
750 * incoming data, we must reset the BMU while it is not doing a DMA
751 * transfer. Since it is possible that the RX path is still active,
752 * the RX RAM buffer will be stopped first, so any possible incoming
753 * data will not trigger a DMA. After the RAM buffer is stopped, the
754 * BMU is polled until any DMA in progress is ended and only then it
755 * will be reset.
756 */
757static void sky2_rx_stop(struct sky2_port *sky2)
758{
759 struct sky2_hw *hw = sky2->hw;
760 unsigned rxq = rxqaddr[sky2->port];
761 int i;
762
763 /* disable the RAM Buffer receive queue */
764 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
765
766 for (i = 0; i < 0xffff; i++)
767 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
768 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
769 goto stopped;
770
771 printk(KERN_WARNING PFX "%s: receiver stop failed\n",
772 sky2->netdev->name);
773stopped:
774 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
775
776 /* reset the Rx prefetch unit */
777 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
778}
779
780/* Clean out receive buffer area, assumes receiver hardware stopped */
781static void sky2_rx_clean(struct sky2_port *sky2)
782{
783 unsigned i;
784
785 memset(sky2->rx_le, 0, RX_LE_BYTES);
786 for (i = 0; i < sky2->rx_pending; i++) {
787 struct ring_info *re = sky2->rx_ring + i;
788
789 if (re->skb) {
790 pci_unmap_single(sky2->hw->pdev,
791 re->mapaddr, re->maplen,
792 PCI_DMA_FROMDEVICE);
793 kfree_skb(re->skb);
794 re->skb = NULL;
795 }
796 }
797}
798
799#ifdef SKY2_VLAN_TAG_USED
800static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
801{
802 struct sky2_port *sky2 = netdev_priv(dev);
803 struct sky2_hw *hw = sky2->hw;
804 u16 port = sky2->port;
805 unsigned long flags;
806
807 spin_lock_irqsave(&sky2->tx_lock, flags);
808
809 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
810 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
811 sky2->vlgrp = grp;
812
813 spin_unlock_irqrestore(&sky2->tx_lock, flags);
814}
815
816static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
817{
818 struct sky2_port *sky2 = netdev_priv(dev);
819 struct sky2_hw *hw = sky2->hw;
820 u16 port = sky2->port;
821 unsigned long flags;
822
823 spin_lock_irqsave(&sky2->tx_lock, flags);
824
825 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
826 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
827 if (sky2->vlgrp)
828 sky2->vlgrp->vlan_devices[vid] = NULL;
829
830 spin_unlock_irqrestore(&sky2->tx_lock, flags);
831}
832#endif
833
834#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
835static inline unsigned rx_size(const struct sky2_port *sky2)
836{
837 return roundup(sky2->netdev->mtu + ETH_HLEN + 4, 8);
838}
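/*
 * Illustrative values: with the default 1500-byte MTU,
 * rx_size() = roundup(1500 + ETH_HLEN(14) + 4, 8) = 1520 bytes per
 * receive buffer; a 9000-byte jumbo MTU yields roundup(9018, 8) = 9024.
 */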
839
840/*
 841 * Allocate and set up the receive buffer pool.
 842 * With 64 bit DMA, there are twice as many list elements
 843 * available as ring entries,
 844 * and one list element must be reserved so we don't wrap around.
 845 *
 846 * It appears the hardware has a bug in the FIFO logic that
 847 * causes it to hang if the FIFO gets overrun and the receive buffer
848 * is not aligned. This means we can't use skb_reserve to align
849 * the IP header.
850 */
851static int sky2_rx_start(struct sky2_port *sky2)
852{
853 struct sky2_hw *hw = sky2->hw;
854 unsigned size = rx_size(sky2);
855 unsigned rxq = rxqaddr[sky2->port];
856 int i;
857
858 sky2->rx_put = sky2->rx_next = 0;
859 sky2_qset(hw, rxq, is_pciex(hw) ? 0x80 : 0x600);
860 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
861
862 rx_set_checksum(sky2);
863 for (i = 0; i < sky2->rx_pending; i++) {
864 struct ring_info *re = sky2->rx_ring + i;
865
866 re->skb = dev_alloc_skb(size);
867 if (!re->skb)
868 goto nomem;
869
870 re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
871 size, PCI_DMA_FROMDEVICE);
872 re->maplen = size;
873 sky2_rx_add(sky2, re);
874 }
875
876 /* Tell chip about available buffers */
877 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
878 sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
879 return 0;
880nomem:
881 sky2_rx_clean(sky2);
882 return -ENOMEM;
883}
884
885/* Bring up network interface. */
886static int sky2_up(struct net_device *dev)
887{
888 struct sky2_port *sky2 = netdev_priv(dev);
889 struct sky2_hw *hw = sky2->hw;
890 unsigned port = sky2->port;
891 u32 ramsize, rxspace;
892 int err = -ENOMEM;
893
894 if (netif_msg_ifup(sky2))
895 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
896
897 /* must be power of 2 */
898 sky2->tx_le = pci_alloc_consistent(hw->pdev,
899 TX_RING_SIZE *
900 sizeof(struct sky2_tx_le),
901 &sky2->tx_le_map);
902 if (!sky2->tx_le)
903 goto err_out;
904
905 sky2->tx_ring = kzalloc(TX_RING_SIZE * sizeof(struct ring_info),
906 GFP_KERNEL);
907 if (!sky2->tx_ring)
908 goto err_out;
909 sky2->tx_prod = sky2->tx_cons = 0;
910
911 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
912 &sky2->rx_le_map);
913 if (!sky2->rx_le)
914 goto err_out;
915 memset(sky2->rx_le, 0, RX_LE_BYTES);
916
917 sky2->rx_ring = kzalloc(sky2->rx_pending * sizeof(struct ring_info),
918 GFP_KERNEL);
919 if (!sky2->rx_ring)
920 goto err_out;
921
922 sky2_mac_init(hw, port);
923
924 /* Configure RAM buffers */
925 if (hw->chip_id == CHIP_ID_YUKON_FE ||
926 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
927 ramsize = 4096;
928 else {
929 u8 e0 = sky2_read8(hw, B2_E_0);
930 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
931 }
932
933 /* 2/3 for Rx */
934 rxspace = (2 * ramsize) / 3;
935 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
936 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
937
938 /* Make sure SyncQ is disabled */
939 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
940 RB_RST_SET);
941
942 sky2_qset(hw, txqaddr[port], 0x600);
943 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
944 TX_RING_SIZE - 1);
945
946 err = sky2_rx_start(sky2);
947 if (err)
948 goto err_out;
949
950 /* Enable interrupts from phy/mac for port */
951 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
952 sky2_write32(hw, B0_IMSK, hw->intr_mask);
953 return 0;
954
955err_out:
956 if (sky2->rx_le)
957 pci_free_consistent(hw->pdev, RX_LE_BYTES,
958 sky2->rx_le, sky2->rx_le_map);
959 if (sky2->tx_le)
960 pci_free_consistent(hw->pdev,
961 TX_RING_SIZE * sizeof(struct sky2_tx_le),
962 sky2->tx_le, sky2->tx_le_map);
963 if (sky2->tx_ring)
964 kfree(sky2->tx_ring);
965 if (sky2->rx_ring)
966 kfree(sky2->rx_ring);
967
968 return err;
969}
970
971/* Modular subtraction in ring */
972static inline int tx_dist(unsigned tail, unsigned head)
973{
974 return (head >= tail ? head : head + TX_RING_SIZE) - tail;
975}
976
977/* Number of list elements available for next tx */
978static inline int tx_avail(const struct sky2_port *sky2)
979{
980 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
981}
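/*
 * Worked example (hypothetical indices): with TX_RING_SIZE = 512,
 * tx_cons = 500 and tx_prod = 10, tx_dist() = (10 + 512) - 500 = 22
 * list elements are in flight, so tx_avail() = tx_pending - 22.
 */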
982
983/* Estimate of number of transmit list elements required */
984static inline unsigned tx_le_req(const struct sk_buff *skb)
985{
986 unsigned count;
987
988 count = sizeof(dma_addr_t) / sizeof(u32);
989 count += skb_shinfo(skb)->nr_frags * count;
990
991 if (skb_shinfo(skb)->tso_size)
992 ++count;
993
994 if (skb->ip_summed)
995 ++count;
996
997 return count;
998}
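/*
 * Worked example (hypothetical packet): with a 64 bit dma_addr_t,
 * sizeof(dma_addr_t) / sizeof(u32) = 2, so a TSO packet with hardware
 * checksum and 3 page fragments is estimated at 2 + 3*2 + 1 + 1 = 10
 * list elements.
 */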
999
1000/*
1001 * Put one packet in ring for transmit.
1002 * A single packet can generate multiple list elements, and
1003 * the number of ring elements will probably be less than the number
1004 * of list elements used.
1005 */
1006static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1007{
1008 struct sky2_port *sky2 = netdev_priv(dev);
1009 struct sky2_hw *hw = sky2->hw;
1010 struct sky2_tx_le *le = NULL;
1011 struct ring_info *re;
1012 unsigned long flags;
1013 unsigned i, len;
1014 dma_addr_t mapping;
1015 u32 addr64;
1016 u16 mss;
1017 u8 ctrl;
1018
1019 local_irq_save(flags);
1020 if (!spin_trylock(&sky2->tx_lock)) {
1021 local_irq_restore(flags);
1022 return NETDEV_TX_LOCKED;
1023 }
1024
1025 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
1026 netif_stop_queue(dev);
1027 spin_unlock_irqrestore(&sky2->tx_lock, flags);
1028
1029 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1030 dev->name);
1031 return NETDEV_TX_BUSY;
1032 }
1033
1034 if (unlikely(netif_msg_tx_queued(sky2)))
1035 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1036 dev->name, sky2->tx_prod, skb->len);
1037
1038 len = skb_headlen(skb);
1039 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1040 addr64 = (mapping >> 16) >> 16;
1041
1042 re = sky2->tx_ring + sky2->tx_prod;
1043
1044 /* Send high bits if changed */
1045 if (addr64 != sky2->tx_addr64) {
1046 le = get_tx_le(sky2);
1047 le->tx.addr = cpu_to_le32(addr64);
1048 le->ctrl = 0;
1049 le->opcode = OP_ADDR64 | HW_OWNER;
1050 sky2->tx_addr64 = addr64;
1051 }
1052
1053 /* Check for TCP Segmentation Offload */
1054 mss = skb_shinfo(skb)->tso_size;
1055 if (mss != 0) {
1056 /* just drop the packet if non-linear expansion fails */
1057 if (skb_header_cloned(skb) &&
1058 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1059 dev_kfree_skb_any(skb);
1060 goto out_unlock;
1061 }
1062
1063 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
1064 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
1065 mss += ETH_HLEN;
1066 }
1067
1068 if (mss != sky2->tx_last_mss) {
1069 le = get_tx_le(sky2);
1070 le->tx.tso.size = cpu_to_le16(mss);
1071 le->tx.tso.rsvd = 0;
1072 le->opcode = OP_LRGLEN | HW_OWNER;
1073 le->ctrl = 0;
1074 sky2->tx_last_mss = mss;
1075 }
1076
1077 ctrl = 0;
1078#ifdef SKY2_VLAN_TAG_USED
1079 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1080 if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1081 if (!le) {
1082 le = get_tx_le(sky2);
1083 le->tx.addr = 0;
1084 le->opcode = OP_VLAN|HW_OWNER;
1085 le->ctrl = 0;
1086 } else
1087 le->opcode |= OP_VLAN;
1088 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1089 ctrl |= INS_VLAN;
1090 }
1091#endif
1092
1093 /* Handle TCP checksum offload */
1094 if (skb->ip_summed == CHECKSUM_HW) {
1095 u16 hdr = skb->h.raw - skb->data;
1096 u16 offset = hdr + skb->csum;
1097
1098 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1099 if (skb->nh.iph->protocol == IPPROTO_UDP)
1100 ctrl |= UDPTCP;
1101
1102 le = get_tx_le(sky2);
1103 le->tx.csum.start = cpu_to_le16(hdr);
1104 le->tx.csum.offset = cpu_to_le16(offset);
1105 le->length = 0; /* initial checksum value */
1106 le->ctrl = 1; /* one packet */
1107 le->opcode = OP_TCPLISW | HW_OWNER;
1108 }
1109
1110 le = get_tx_le(sky2);
1111 le->tx.addr = cpu_to_le32((u32) mapping);
1112 le->length = cpu_to_le16(len);
1113 le->ctrl = ctrl;
1114 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1115
1116 /* Record the transmit mapping info */
1117 re->skb = skb;
1118 re->mapaddr = mapping;
1119 re->maplen = len;
1120
1121 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1122 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1123 struct ring_info *fre;
1124
1125 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1126 frag->size, PCI_DMA_TODEVICE);
1127 addr64 = (mapping >> 16) >> 16;
1128 if (addr64 != sky2->tx_addr64) {
1129 le = get_tx_le(sky2);
1130 le->tx.addr = cpu_to_le32(addr64);
1131 le->ctrl = 0;
1132 le->opcode = OP_ADDR64 | HW_OWNER;
1133 sky2->tx_addr64 = addr64;
1134 }
1135
1136 le = get_tx_le(sky2);
1137 le->tx.addr = cpu_to_le32((u32) mapping);
1138 le->length = cpu_to_le16(frag->size);
1139 le->ctrl = ctrl;
1140 le->opcode = OP_BUFFER | HW_OWNER;
1141
1142 fre = sky2->tx_ring
1143 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
1144 fre->skb = NULL;
1145 fre->mapaddr = mapping;
1146 fre->maplen = frag->size;
1147 }
1148 re->idx = sky2->tx_prod;
1149 le->ctrl |= EOP;
1150
1151 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
1152 &sky2->tx_last_put, TX_RING_SIZE);
1153
1154 if (tx_avail(sky2) < MAX_SKB_TX_LE + 1)
1155 netif_stop_queue(dev);
1156
1157out_unlock:
1158 mmiowb();
1159 spin_unlock_irqrestore(&sky2->tx_lock, flags);
1160
1161 dev->trans_start = jiffies;
1162 return NETDEV_TX_OK;
1163}
1164
1165/*
 1166 * Free ring elements starting at tx_cons until "done"
1167 *
1168 * NB: the hardware will tell us about partial completion of multi-part
1169 * buffers; these are deferred until completion.
1170 */
1171static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1172{
1173 struct net_device *dev = sky2->netdev;
1174 unsigned i;
1175
1176 if (unlikely(netif_msg_tx_done(sky2)))
1177 printk(KERN_DEBUG "%s: tx done, up to %u\n",
1178 dev->name, done);
1179
1180 spin_lock(&sky2->tx_lock);
1181
1182 while (sky2->tx_cons != done) {
1183 struct ring_info *re = sky2->tx_ring + sky2->tx_cons;
1184 struct sk_buff *skb;
1185
1186 /* Check for partial status */
1187 if (tx_dist(sky2->tx_cons, done)
1188 < tx_dist(sky2->tx_cons, re->idx))
1189 goto out;
1190
1191 skb = re->skb;
1192 pci_unmap_single(sky2->hw->pdev,
1193 re->mapaddr, re->maplen, PCI_DMA_TODEVICE);
1194
1195 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1196 struct ring_info *fre;
1197 fre =
1198 sky2->tx_ring + (sky2->tx_cons + i +
1199 1) % TX_RING_SIZE;
1200 pci_unmap_page(sky2->hw->pdev, fre->mapaddr,
1201 fre->maplen, PCI_DMA_TODEVICE);
1202 }
1203
1204 dev_kfree_skb_any(skb);
1205
1206 sky2->tx_cons = re->idx;
1207 }
1208out:
1209
1210 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1211 netif_wake_queue(dev);
1212 spin_unlock(&sky2->tx_lock);
1213}
1214
1215/* Cleanup all untransmitted buffers, assume transmitter not running */
1216static inline void sky2_tx_clean(struct sky2_port *sky2)
1217{
1218 sky2_tx_complete(sky2, sky2->tx_prod);
1219}
1220
1221/* Network shutdown */
1222static int sky2_down(struct net_device *dev)
1223{
1224 struct sky2_port *sky2 = netdev_priv(dev);
1225 struct sky2_hw *hw = sky2->hw;
1226 unsigned port = sky2->port;
1227 u16 ctrl;
1228
1229 if (netif_msg_ifdown(sky2))
1230 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1231
1232 netif_stop_queue(dev);
1233
1234 sky2_phy_reset(hw, port);
1235
1236 /* Stop transmitter */
1237 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1238 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1239
1240 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1241 RB_RST_SET | RB_DIS_OP_MD);
1242
1243 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1244 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1245 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1246
1247 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1248
1249 /* Workaround shared GMAC reset */
1250 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1251 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1252 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1253
1254 /* Disable Force Sync bit and Enable Alloc bit */
1255 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1256 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1257
1258 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1259 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1260 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1261
1262 /* Reset the PCI FIFO of the async Tx queue */
1263 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1264 BMU_RST_SET | BMU_FIFO_RST);
1265
1266 /* Reset the Tx prefetch units */
1267 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1268 PREF_UNIT_RST_SET);
1269
1270 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1271
1272 sky2_rx_stop(sky2);
1273
1274 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1275 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1276
1277 /* turn off LED's */
1278 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1279
1280 sky2_tx_clean(sky2);
1281 sky2_rx_clean(sky2);
1282
1283 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1284 sky2->rx_le, sky2->rx_le_map);
1285 kfree(sky2->rx_ring);
1286
1287 pci_free_consistent(hw->pdev,
1288 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1289 sky2->tx_le, sky2->tx_le_map);
1290 kfree(sky2->tx_ring);
1291
1292 return 0;
1293}
1294
1295static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1296{
1297 if (!hw->copper)
1298 return SPEED_1000;
1299
1300 if (hw->chip_id == CHIP_ID_YUKON_FE)
1301 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1302
1303 switch (aux & PHY_M_PS_SPEED_MSK) {
1304 case PHY_M_PS_SPEED_1000:
1305 return SPEED_1000;
1306 case PHY_M_PS_SPEED_100:
1307 return SPEED_100;
1308 default:
1309 return SPEED_10;
1310 }
1311}
1312
1313static void sky2_link_up(struct sky2_port *sky2)
1314{
1315 struct sky2_hw *hw = sky2->hw;
1316 unsigned port = sky2->port;
1317 u16 reg;
1318
1319 /* disable Rx GMAC FIFO flush mode */
1320 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_F_FL_OFF);
1321
1322 /* Enable Transmit FIFO Underrun */
1323 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1324
1325 reg = gma_read16(hw, port, GM_GP_CTRL);
1326 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1327 reg |= GM_GPCR_DUP_FULL;
1328
1329 /* enable Rx/Tx */
1330 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1331 gma_write16(hw, port, GM_GP_CTRL, reg);
1332 gma_read16(hw, port, GM_GP_CTRL);
1333
1334 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1335
1336 netif_carrier_on(sky2->netdev);
1337 netif_wake_queue(sky2->netdev);
1338
1339 /* Turn on link LED */
1340 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1341 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1342
1343 if (hw->chip_id == CHIP_ID_YUKON_XL) {
1344 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1345
1346 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1347 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
1348 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1349 SPEED_10 ? 7 : 0) |
1350 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1351 SPEED_100 ? 7 : 0) |
1352 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1353 SPEED_1000 ? 7 : 0));
1354 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1355 }
1356
1357 if (netif_msg_link(sky2))
1358 printk(KERN_INFO PFX
1359 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1360 sky2->netdev->name, sky2->speed,
1361 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1362 (sky2->tx_pause && sky2->rx_pause) ? "both" :
1363 sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
1364}
1365
1366static void sky2_link_down(struct sky2_port *sky2)
1367{
1368 struct sky2_hw *hw = sky2->hw;
1369 unsigned port = sky2->port;
1370 u16 reg;
1371
1372 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1373
1374 reg = gma_read16(hw, port, GM_GP_CTRL);
1375 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1376 gma_write16(hw, port, GM_GP_CTRL, reg);
1377 gma_read16(hw, port, GM_GP_CTRL); /* PCI post */
1378
1379 if (sky2->rx_pause && !sky2->tx_pause) {
1380 /* restore Asymmetric Pause bit */
1381 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1382 gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
1383 | PHY_M_AN_ASP);
1384 }
1385
1386 sky2_phy_reset(hw, port);
1387
1388 netif_carrier_off(sky2->netdev);
1389 netif_stop_queue(sky2->netdev);
1390
1391	/* Turn off link LED */
1392 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1393
1394 if (netif_msg_link(sky2))
1395 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1396 sky2_phy_init(hw, port);
1397}
1398
1399static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1400{
1401 struct sky2_hw *hw = sky2->hw;
1402 unsigned port = sky2->port;
1403 u16 lpa;
1404
1405 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1406
1407 if (lpa & PHY_M_AN_RF) {
1408		printk(KERN_ERR PFX "%s: remote fault\n", sky2->netdev->name);
1409 return -1;
1410 }
1411
1412 if (hw->chip_id != CHIP_ID_YUKON_FE &&
1413 gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1414		printk(KERN_ERR PFX "%s: master/slave fault\n",
1415 sky2->netdev->name);
1416 return -1;
1417 }
1418
1419 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1420		printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
1421 sky2->netdev->name);
1422 return -1;
1423 }
1424
1425 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1426
1427 sky2->speed = sky2_phy_speed(hw, aux);
1428
1429 /* Pause bits are offset (9..8) */
1430 if (hw->chip_id == CHIP_ID_YUKON_XL)
1431 aux >>= 6;
1432
1433 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1434 sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1435
1436 if ((sky2->tx_pause || sky2->rx_pause)
1437 && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1438 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1439 else
1440 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1441
1442 return 0;
1443}
1444
1445/*
1446 * Interrupts from the PHY are handled in a tasklet (soft irq)
1447 * because accessing PHY registers requires a spin wait, which could
1448 * cause excessive interrupt latency.
1449 */
1450static void sky2_phy_task(unsigned long data)
1451{
1452 struct sky2_port *sky2 = (struct sky2_port *)data;
1453 struct sky2_hw *hw = sky2->hw;
1454 u16 istatus, phystat;
1455
1456 spin_lock(&hw->phy_lock);
1457 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
1458 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
1459
1460 if (netif_msg_intr(sky2))
1461 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1462 sky2->netdev->name, istatus, phystat);
1463
1464 if (istatus & PHY_M_IS_AN_COMPL) {
1465 if (sky2_autoneg_done(sky2, phystat) == 0)
1466 sky2_link_up(sky2);
1467 goto out;
1468 }
1469
1470 if (istatus & PHY_M_IS_LSP_CHANGE)
1471 sky2->speed = sky2_phy_speed(hw, phystat);
1472
1473 if (istatus & PHY_M_IS_DUP_CHANGE)
1474 sky2->duplex =
1475 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1476
1477 if (istatus & PHY_M_IS_LST_CHANGE) {
1478 if (phystat & PHY_M_PS_LINK_UP)
1479 sky2_link_up(sky2);
1480 else
1481 sky2_link_down(sky2);
1482 }
1483out:
1484 spin_unlock(&hw->phy_lock);
1485
1486 local_irq_disable();
1487 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1488 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1489 local_irq_enable();
1490}
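Note that the matching mask operation is in sky2_phy_intr() further down: it clears Y2_IS_IRQ_PHY1/Y2_IS_IRQ_PHY2 from hw->intr_mask before scheduling this tasklet, and the two writes at the end of sky2_phy_task() restore that bit once the slow PHY register reads are finished, so the PHY interrupt stays masked for the whole duration of the access.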
1491
1492static void sky2_tx_timeout(struct net_device *dev)
1493{
1494 struct sky2_port *sky2 = netdev_priv(dev);
1495
1496 if (netif_msg_timer(sky2))
1497 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1498
1499 sky2_write32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR), BMU_STOP);
1500 sky2_read32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR));
1501
1502 sky2_tx_clean(sky2);
1503}
1504
1505static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1506{
1507 struct sky2_port *sky2 = netdev_priv(dev);
1508 struct sky2_hw *hw = sky2->hw;
1509 int err;
1510 u16 ctl, mode;
1511
1512 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1513 return -EINVAL;
1514
1515 if (!netif_running(dev)) {
1516 dev->mtu = new_mtu;
1517 return 0;
1518 }
1519
1520 local_irq_disable();
1521 sky2_write32(hw, B0_IMSK, 0);
1522
1523 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1524 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1525 sky2_rx_stop(sky2);
1526 sky2_rx_clean(sky2);
1527
1528 dev->mtu = new_mtu;
1529 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
1530 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1531
1532 if (dev->mtu > ETH_DATA_LEN)
1533 mode |= GM_SMOD_JUMBO_ENA;
1534
1535 gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
1536
1537 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1538
1539 err = sky2_rx_start(sky2);
1540 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
1541
1542 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1543 sky2_read32(hw, B0_IMSK);
1544 local_irq_enable();
1545 return err;
1546}
1547
1548/*
1549 * Receive one packet.
1550 * For small packets or errors, just reuse the existing skb.
1551 * For larger packets, allocate a new buffer.
1552 */
1553static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1554 u16 length, u32 status)
1555{
1556 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1557 struct sk_buff *skb = NULL;
1558 struct net_device *dev;
1559 const unsigned int bufsize = rx_size(sky2);
1560
1561 if (unlikely(netif_msg_rx_status(sky2)))
1562 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1563 sky2->netdev->name, sky2->rx_next, status, length);
1564
1565 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1566
1567 if (!(status & GMR_FS_RX_OK) || (status & GMR_FS_ANY_ERR))
1568 goto error;
1569
1570 if (length < RX_COPY_THRESHOLD) {
1571 skb = alloc_skb(length + 2, GFP_ATOMIC);
1572 if (!skb)
1573 goto resubmit;
1574
1575 skb_reserve(skb, 2);
1576 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
1577 length, PCI_DMA_FROMDEVICE);
1578 memcpy(skb->data, re->skb->data, length);
1579 skb->ip_summed = re->skb->ip_summed;
1580 skb->csum = re->skb->csum;
1581 pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
1582 length, PCI_DMA_FROMDEVICE);
1583 } else {
1584 struct sk_buff *nskb;
1585
1586 nskb = dev_alloc_skb(bufsize);
1587 if (!nskb)
1588 goto resubmit;
1589
1590 skb = re->skb;
1591 re->skb = nskb;
1592 pci_unmap_single(sky2->hw->pdev, re->mapaddr,
1593 re->maplen, PCI_DMA_FROMDEVICE);
1594 prefetch(skb->data);
1595
1596 re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
1597 bufsize, PCI_DMA_FROMDEVICE);
1598 re->maplen = bufsize;
1599 }
1600
1601 skb_put(skb, length);
1602 dev = sky2->netdev;
1603 skb->dev = dev;
1604 skb->protocol = eth_type_trans(skb, dev);
1605 dev->last_rx = jiffies;
1606
1607resubmit:
1608 re->skb->ip_summed = CHECKSUM_NONE;
1609 sky2_rx_add(sky2, re);
1610
1611 /* Tell receiver about new buffers. */
1612 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
1613 &sky2->rx_last_put, RX_LE_SIZE);
1614
1615 return skb;
1616
1617error:
1618 if (status & GMR_FS_GOOD_FC)
1619 goto resubmit;
1620
1621 if (netif_msg_rx_err(sky2))
1622 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1623 sky2->netdev->name, status, length);
1624
1625 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1626 sky2->net_stats.rx_length_errors++;
1627 if (status & GMR_FS_FRAGMENT)
1628 sky2->net_stats.rx_frame_errors++;
1629 if (status & GMR_FS_CRC_ERR)
1630 sky2->net_stats.rx_crc_errors++;
1631 if (status & GMR_FS_RX_FF_OV)
1632 sky2->net_stats.rx_fifo_errors++;
1633
1634 goto resubmit;
1635}
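The copy-break above keeps DMA churn down: frames shorter than RX_COPY_THRESHOLD are copied into a small freshly allocated skb (after the usual 2-byte reserve so the IP header ends up aligned) and the original receive buffer is resubmitted, while larger frames hand the already-mapped buffer up the stack and re-arm the ring slot with a newly mapped replacement.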
1636
1637/* Transmit ring index in reported status block is encoded as:
1638 *
1639 * | TXS2 | TXA2 | TXS1 | TXA1
1640 */
1641static inline u16 tx_index(u8 port, u32 status, u16 len)
1642{
1643 if (port == 0)
1644 return status & 0xfff;
1645 else
1646 return ((status >> 24) & 0xff) | (len & 0xf) << 8;
1647}
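The port-1 branch above stitches the ring index together from two fields of the status LE. A minimal stand-alone sketch (user-space, with made-up status/length values chosen only to illustrate the bit layout, not taken from real hardware):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the port == 1 case of tx_index() above. */
static uint16_t tx_index_port1(uint32_t status, uint16_t len)
{
	/* low 8 bits come from status bits 31..24, bits 11..8 from len bits 3..0 */
	return (uint16_t)(((status >> 24) & 0xff) | ((len & 0xf) << 8));
}

int main(void)
{
	/* status 0x2a000000 and len 0x0003 decode to ring index 0x32a */
	assert(tx_index_port1(0x2a000000u, 0x0003) == 0x32a);
	return 0;
}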
1648
1649/*
1650 * Both ports share the same status interrupt; therefore there is only
1651 * one poll routine.
1652 */
1653static int sky2_poll(struct net_device *dev0, int *budget)
1654{
1655 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
1656 unsigned int to_do = min(dev0->quota, *budget);
1657 unsigned int work_done = 0;
1658 u16 hwidx;
1659
1660 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1661 BUG_ON(hwidx >= STATUS_RING_SIZE);
1662 rmb();
1663
1664 do {
1665 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1666 struct sky2_port *sky2;
1667 struct sk_buff *skb;
1668 u32 status;
1669 u16 length;
1670
1671 /* Are we done yet? */
1672 if (hw->st_idx == hwidx) {
1673 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1674 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1675 if (hwidx == hw->st_idx)
1676 break;
1677 }
1678
1679 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1680 prefetch(&hw->st_le[hw->st_idx]);
1681
1682 BUG_ON(le->link >= hw->ports || !hw->dev[le->link]);
1683
1684 sky2 = netdev_priv(hw->dev[le->link]);
1685 status = le32_to_cpu(le->status);
1686 length = le16_to_cpu(le->length);
1687
1688 switch (le->opcode & ~HW_OWNER) {
1689 case OP_RXSTAT:
1690 skb = sky2_receive(sky2, length, status);
1691 if (!skb)
1692 break;
1693#ifdef SKY2_VLAN_TAG_USED
1694 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
1695 vlan_hwaccel_receive_skb(skb,
1696 sky2->vlgrp,
1697 be16_to_cpu(sky2->rx_tag));
1698 } else
1699#endif
1700 netif_receive_skb(skb);
1701 ++work_done;
1702 break;
1703
1704#ifdef SKY2_VLAN_TAG_USED
1705 case OP_RXVLAN:
1706 sky2->rx_tag = length;
1707 break;
1708
1709 case OP_RXCHKSVLAN:
1710 sky2->rx_tag = length;
1711 /* fall through */
1712#endif
1713 case OP_RXCHKS:
1714 skb = sky2->rx_ring[sky2->rx_next].skb;
1715 skb->ip_summed = CHECKSUM_HW;
1716 skb->csum = le16_to_cpu(status);
1717 break;
1718
1719 case OP_TXINDEXLE:
1720 sky2_tx_complete(sky2,
1721 tx_index(sky2->port, status, length));
1722 break;
1723
1724 default:
1725 if (net_ratelimit())
1726 printk(KERN_WARNING PFX
1727 "unknown status opcode 0x%x\n",
1728 le->opcode);
1729 break;
1730 }
1731
1732 le->opcode = 0; /* paranoia */
1733 } while (work_done < to_do);
1734
1735 mmiowb();
1736
1737 *budget -= work_done;
1738 dev0->quota -= work_done;
1739 if (work_done < to_do) {
1740 /*
1741		 * Another chip workaround: the Tx timer needs to be restarted
1742		 * if a status LE was handled. WA_DEV_43_418
1743 */
1744 if (is_ec_a1(hw)) {
1745 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1746 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1747 }
1748
1749 netif_rx_complete(dev0);
1750 hw->intr_mask |= Y2_IS_STAT_BMU;
1751 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1752 sky2_read32(hw, B0_IMSK);
1753 }
1754
1755 return work_done >= to_do;
1756
1757}
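In the old (2.6.x) NAPI scheme, sky2_poll() returns non-zero while work remains so the core keeps polling; once it comes in under budget it calls netif_rx_complete(), re-enables Y2_IS_STAT_BMU in hw->intr_mask and re-reads B0_IMSK (presumably to flush the posted write), letting the status BMU interrupt fire again.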
1758
1759static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1760{
1761 struct net_device *dev = hw->dev[port];
1762
1763 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1764 dev->name, status);
1765
1766 if (status & Y2_IS_PAR_RD1) {
1767 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1768 dev->name);
1769 /* Clear IRQ */
1770 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1771 }
1772
1773 if (status & Y2_IS_PAR_WR1) {
1774 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1775 dev->name);
1776
1777 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1778 }
1779
1780 if (status & Y2_IS_PAR_MAC1) {
1781 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1782 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1783 }
1784
1785 if (status & Y2_IS_PAR_RX1) {
1786 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1787 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1788 }
1789
1790 if (status & Y2_IS_TCP_TXA1) {
1791 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
1792 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1793 }
1794}
1795
1796static void sky2_hw_intr(struct sky2_hw *hw)
1797{
1798 u32 status = sky2_read32(hw, B0_HWE_ISRC);
1799
1800 if (status & Y2_IS_TIST_OV)
1801 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1802
1803 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1804 u16 pci_err;
1805
1806 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1807 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1808 pci_name(hw->pdev), pci_err);
1809
1810 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1811 pci_write_config_word(hw->pdev, PCI_STATUS,
1812 pci_err | PCI_STATUS_ERROR_BITS);
1813 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1814 }
1815
1816 if (status & Y2_IS_PCI_EXP) {
1817 /* PCI-Express uncorrectable Error occurred */
1818 u32 pex_err;
1819
1820 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1821
1822 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
1823 pci_name(hw->pdev), pex_err);
1824
1825 /* clear the interrupt */
1826 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1827 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1828 0xffffffffUL);
1829 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1830
1831 if (pex_err & PEX_FATAL_ERRORS) {
1832 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
1833 hwmsk &= ~Y2_IS_PCI_EXP;
1834 sky2_write32(hw, B0_HWE_IMSK, hwmsk);
1835 }
1836 }
1837
1838 if (status & Y2_HWE_L1_MASK)
1839 sky2_hw_error(hw, 0, status);
1840 status >>= 8;
1841 if (status & Y2_HWE_L1_MASK)
1842 sky2_hw_error(hw, 1, status);
1843}
1844
1845static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
1846{
1847 struct net_device *dev = hw->dev[port];
1848 struct sky2_port *sky2 = netdev_priv(dev);
1849 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1850
1851 if (netif_msg_intr(sky2))
1852 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
1853 dev->name, status);
1854
1855 if (status & GM_IS_RX_FF_OR) {
1856 ++sky2->net_stats.rx_fifo_errors;
1857 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
1858 }
1859
1860 if (status & GM_IS_TX_FF_UR) {
1861 ++sky2->net_stats.tx_fifo_errors;
1862 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
1863 }
1864}
1865
1866static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1867{
1868 struct net_device *dev = hw->dev[port];
1869 struct sky2_port *sky2 = netdev_priv(dev);
1870
1871 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1872 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1873 tasklet_schedule(&sky2->phy_task);
1874}
1875
1876static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
1877{
1878 struct sky2_hw *hw = dev_id;
1879 struct net_device *dev0 = hw->dev[0];
1880 u32 status;
1881
1882 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
1883 if (status == 0 || status == ~0)
1884 return IRQ_NONE;
1885
1886 if (status & Y2_IS_HW_ERR)
1887 sky2_hw_intr(hw);
1888
1889 /* Do NAPI for Rx and Tx status */
1890 if (status & Y2_IS_STAT_BMU) {
1891 hw->intr_mask &= ~Y2_IS_STAT_BMU;
1892 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1893 prefetch(&hw->st_le[hw->st_idx]);
1894
1895 if (netif_rx_schedule_test(dev0))
1896 __netif_rx_schedule(dev0);
1897 }
1898
1899 if (status & Y2_IS_IRQ_PHY1)
1900 sky2_phy_intr(hw, 0);
1901
1902 if (status & Y2_IS_IRQ_PHY2)
1903 sky2_phy_intr(hw, 1);
1904
1905 if (status & Y2_IS_IRQ_MAC1)
1906 sky2_mac_intr(hw, 0);
1907
1908 if (status & Y2_IS_IRQ_MAC2)
1909 sky2_mac_intr(hw, 1);
1910
1911 sky2_write32(hw, B0_Y2_SP_ICR, 2);
1912
1913 sky2_read32(hw, B0_IMSK);
1914
1915 return IRQ_HANDLED;
1916}
1917
1918#ifdef CONFIG_NET_POLL_CONTROLLER
1919static void sky2_netpoll(struct net_device *dev)
1920{
1921 struct sky2_port *sky2 = netdev_priv(dev);
1922
1923 sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
1924}
1925#endif
1926
1927/* Chip internal frequency for clock calculations */
1928static inline u32 sky2_khz(const struct sky2_hw *hw)
1929{
1930 switch (hw->chip_id) {
1931 case CHIP_ID_YUKON_EC:
1932		return 125000;	/* 125 MHz */
1933	case CHIP_ID_YUKON_FE:
1934		return 100000;	/* 100 MHz */
1935	default:		/* YUKON_XL */
1936		return 156000;	/* 156 MHz */
1937 }
1938}
1939
1940static inline u32 sky2_ms2clk(const struct sky2_hw *hw, u32 ms)
1941{
1942 return sky2_khz(hw) * ms;
1943}
1944
1945static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
1946{
1947 return (sky2_khz(hw) * us) / 1000;
1948}
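As a worked example, assuming a Yukon-EC part (125,000 kHz per sky2_khz() above), sky2_us2clk(hw, 75) evaluates to 125000 * 75 / 1000 = 9375 clock ticks and sky2_ms2clk(hw, 10) to 125000 * 10 = 1,250,000 ticks; these are the values sky2_reset() later writes to B28_DPT_INI and STAT_TX_TIMER_INI respectively.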
1949
1950static int sky2_reset(struct sky2_hw *hw)
1951{
1952 u32 ctst;
1953 u16 status;
1954 u8 t8, pmd_type;
1955 int i;
1956
1957 ctst = sky2_read32(hw, B0_CTST);
1958
1959 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1960 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
1961 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
1962 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
1963 pci_name(hw->pdev), hw->chip_id);
1964 return -EOPNOTSUPP;
1965 }
1966
1967 /* ring for status responses */
1968 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
1969 &hw->st_dma);
1970 if (!hw->st_le)
1971 return -ENOMEM;
1972
1973 /* disable ASF */
1974 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
1975 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1976 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
1977 }
1978
1979 /* do a SW reset */
1980 sky2_write8(hw, B0_CTST, CS_RST_SET);
1981 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1982
1983 /* clear PCI errors, if any */
1984 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
1985 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1986 pci_write_config_word(hw->pdev, PCI_STATUS,
1987 status | PCI_STATUS_ERROR_BITS);
1988
1989 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
1990
1991 /* clear any PEX errors */
1992 if (is_pciex(hw)) {
1993 u16 lstat;
1994 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1995 0xffffffffUL);
1996 pci_read_config_word(hw->pdev, PEX_LNK_STAT, &lstat);
1997 }
1998
1999 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2000 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
2001
2002 hw->ports = 1;
2003 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2004 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2005 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2006 ++hw->ports;
2007 }
2008 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2009
2010 sky2_set_power_state(hw, PCI_D0);
2011
2012 for (i = 0; i < hw->ports; i++) {
2013 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2014 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2015 }
2016
2017 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2018
2019 /* Clear I2C IRQ noise */
2020 sky2_write32(hw, B2_I2C_IRQ, 1);
2021
2022 /* turn off hardware timer (unused) */
2023 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2024 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2025
2026 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2027
2028 /* Turn on descriptor polling (every 75us) */
2029 sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
2030 sky2_write8(hw, B28_DPT_CTRL, DPT_START);
2031
2032 /* Turn off receive timestamp */
2033 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2034 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2035
2036 /* enable the Tx Arbiters */
2037 for (i = 0; i < hw->ports; i++)
2038 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2039
2040 /* Initialize ram interface */
2041 for (i = 0; i < hw->ports; i++) {
2042 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2043
2044 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2045 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2046 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2047 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2048 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2049 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2050 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2051 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2052 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2053 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2054 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2055 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2056 }
2057
2058 if (is_pciex(hw)) {
2059 u16 pctrl;
2060
2061 /* change Max. Read Request Size to 2048 bytes */
2062 pci_read_config_word(hw->pdev, PEX_DEV_CTRL, &pctrl);
2063 pctrl &= ~PEX_DC_MAX_RRS_MSK;
2064 pctrl |= PEX_DC_MAX_RD_RQ_SIZE(4);
2065
2066
2067 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2068 pci_write_config_word(hw->pdev, PEX_DEV_CTRL, pctrl);
2069 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2070 }
2071
2072 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2073
2074 spin_lock_bh(&hw->phy_lock);
2075 for (i = 0; i < hw->ports; i++)
2076 sky2_phy_reset(hw, i);
2077 spin_unlock_bh(&hw->phy_lock);
2078
2079 memset(hw->st_le, 0, STATUS_LE_BYTES);
2080 hw->st_idx = 0;
2081
2082 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2083 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2084
2085 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2086 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2087
2088 /* Set the list last index */
2089 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2090
2091 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
2092
2093 /* These status setup values are copied from SysKonnect's driver */
2094 if (is_ec_a1(hw)) {
2095 /* WA for dev. #4.3 */
2096 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
2097
2098 /* set Status-FIFO watermark */
2099 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
2100
2101 /* set Status-FIFO ISR watermark */
2102 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
2103
2104 } else {
2105 sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
2106
2107 /* set Status-FIFO watermark */
2108 sky2_write8(hw, STAT_FIFO_WM, 0x10);
2109
2110 /* set Status-FIFO ISR watermark */
2111 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2112 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
2113
2114 else /* WA dev 4.109 */
2115 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
2116
2117 sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
2118 }
2119
2120 /* enable status unit */
2121 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2122
2123 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2124 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2125 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2126
2127 return 0;
2128}
2129
2130static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
2131{
2132 u32 modes;
2133 if (hw->copper) {
2134 modes = SUPPORTED_10baseT_Half
2135 | SUPPORTED_10baseT_Full
2136 | SUPPORTED_100baseT_Half
2137 | SUPPORTED_100baseT_Full
2138 | SUPPORTED_Autoneg | SUPPORTED_TP;
2139
2140 if (hw->chip_id != CHIP_ID_YUKON_FE)
2141 modes |= SUPPORTED_1000baseT_Half
2142 | SUPPORTED_1000baseT_Full;
2143 } else
2144 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
2145 | SUPPORTED_Autoneg;
2146 return modes;
2147}
2148
2149static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2150{
2151 struct sky2_port *sky2 = netdev_priv(dev);
2152 struct sky2_hw *hw = sky2->hw;
2153
2154 ecmd->transceiver = XCVR_INTERNAL;
2155 ecmd->supported = sky2_supported_modes(hw);
2156 ecmd->phy_address = PHY_ADDR_MARV;
2157 if (hw->copper) {
2158 ecmd->supported = SUPPORTED_10baseT_Half
2159 | SUPPORTED_10baseT_Full
2160 | SUPPORTED_100baseT_Half
2161 | SUPPORTED_100baseT_Full
2162 | SUPPORTED_1000baseT_Half
2163 | SUPPORTED_1000baseT_Full
2164 | SUPPORTED_Autoneg | SUPPORTED_TP;
2165 ecmd->port = PORT_TP;
2166 } else
2167 ecmd->port = PORT_FIBRE;
2168
2169 ecmd->advertising = sky2->advertising;
2170 ecmd->autoneg = sky2->autoneg;
2171 ecmd->speed = sky2->speed;
2172 ecmd->duplex = sky2->duplex;
2173 return 0;
2174}
2175
2176static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2177{
2178 struct sky2_port *sky2 = netdev_priv(dev);
2179 const struct sky2_hw *hw = sky2->hw;
2180 u32 supported = sky2_supported_modes(hw);
2181
2182 if (ecmd->autoneg == AUTONEG_ENABLE) {
2183 ecmd->advertising = supported;
2184 sky2->duplex = -1;
2185 sky2->speed = -1;
2186 } else {
2187 u32 setting;
2188
2189 switch (ecmd->speed) {
2190 case SPEED_1000:
2191 if (ecmd->duplex == DUPLEX_FULL)
2192 setting = SUPPORTED_1000baseT_Full;
2193 else if (ecmd->duplex == DUPLEX_HALF)
2194 setting = SUPPORTED_1000baseT_Half;
2195 else
2196 return -EINVAL;
2197 break;
2198 case SPEED_100:
2199 if (ecmd->duplex == DUPLEX_FULL)
2200 setting = SUPPORTED_100baseT_Full;
2201 else if (ecmd->duplex == DUPLEX_HALF)
2202 setting = SUPPORTED_100baseT_Half;
2203 else
2204 return -EINVAL;
2205 break;
2206
2207 case SPEED_10:
2208 if (ecmd->duplex == DUPLEX_FULL)
2209 setting = SUPPORTED_10baseT_Full;
2210 else if (ecmd->duplex == DUPLEX_HALF)
2211 setting = SUPPORTED_10baseT_Half;
2212 else
2213 return -EINVAL;
2214 break;
2215 default:
2216 return -EINVAL;
2217 }
2218
2219 if ((setting & supported) == 0)
2220 return -EINVAL;
2221
2222 sky2->speed = ecmd->speed;
2223 sky2->duplex = ecmd->duplex;
2224 }
2225
2226 sky2->autoneg = ecmd->autoneg;
2227 sky2->advertising = ecmd->advertising;
2228
2229 if (netif_running(dev)) {
2230 sky2_down(dev);
2231 sky2_up(dev);
2232 }
2233
2234 return 0;
2235}
2236
2237static void sky2_get_drvinfo(struct net_device *dev,
2238 struct ethtool_drvinfo *info)
2239{
2240 struct sky2_port *sky2 = netdev_priv(dev);
2241
2242 strcpy(info->driver, DRV_NAME);
2243 strcpy(info->version, DRV_VERSION);
2244 strcpy(info->fw_version, "N/A");
2245 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
2246}
2247
2248static const struct sky2_stat {
2249 char name[ETH_GSTRING_LEN];
2250 u16 offset;
2251} sky2_stats[] = {
2252 { "tx_bytes", GM_TXO_OK_HI },
2253 { "rx_bytes", GM_RXO_OK_HI },
2254 { "tx_broadcast", GM_TXF_BC_OK },
2255 { "rx_broadcast", GM_RXF_BC_OK },
2256 { "tx_multicast", GM_TXF_MC_OK },
2257 { "rx_multicast", GM_RXF_MC_OK },
2258 { "tx_unicast", GM_TXF_UC_OK },
2259 { "rx_unicast", GM_RXF_UC_OK },
2260 { "tx_mac_pause", GM_TXF_MPAUSE },
2261 { "rx_mac_pause", GM_RXF_MPAUSE },
2262 { "collisions", GM_TXF_SNG_COL },
2263 { "late_collision",GM_TXF_LAT_COL },
2264 { "aborted", GM_TXF_ABO_COL },
2265 { "multi_collisions", GM_TXF_MUL_COL },
2266 { "fifo_underrun", GM_TXE_FIFO_UR },
2267 { "fifo_overflow", GM_RXE_FIFO_OV },
2268 { "rx_toolong", GM_RXF_LNG_ERR },
2269 { "rx_jabber", GM_RXF_JAB_PKT },
2270 { "rx_runt", GM_RXE_FRAG },
2271 { "rx_too_long", GM_RXF_LNG_ERR },
2272 { "rx_fcs_error", GM_RXF_FCS_ERR },
2273};
2274
2275static u32 sky2_get_rx_csum(struct net_device *dev)
2276{
2277 struct sky2_port *sky2 = netdev_priv(dev);
2278
2279 return sky2->rx_csum;
2280}
2281
2282static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2283{
2284 struct sky2_port *sky2 = netdev_priv(dev);
2285
2286 sky2->rx_csum = data;
2287
2288 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2289 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2290
2291 return 0;
2292}
2293
2294static u32 sky2_get_msglevel(struct net_device *netdev)
2295{
2296 struct sky2_port *sky2 = netdev_priv(netdev);
2297 return sky2->msg_enable;
2298}
2299
2300static int sky2_nway_reset(struct net_device *dev)
2301{
2302 struct sky2_port *sky2 = netdev_priv(dev);
2303 struct sky2_hw *hw = sky2->hw;
2304
2305 if (sky2->autoneg != AUTONEG_ENABLE)
2306 return -EINVAL;
2307
2308 netif_stop_queue(dev);
2309
2310 spin_lock_irq(&hw->phy_lock);
2311 sky2_phy_reset(hw, sky2->port);
2312 sky2_phy_init(hw, sky2->port);
2313 spin_unlock_irq(&hw->phy_lock);
2314
2315 return 0;
2316}
2317
2318static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2319{
2320 struct sky2_hw *hw = sky2->hw;
2321 unsigned port = sky2->port;
2322 int i;
2323
2324 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2325 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2326 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2327 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2328
2329 for (i = 2; i < count; i++)
2330 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
2331}
2332
2333static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2334{
2335 struct sky2_port *sky2 = netdev_priv(netdev);
2336 sky2->msg_enable = value;
2337}
2338
2339static int sky2_get_stats_count(struct net_device *dev)
2340{
2341 return ARRAY_SIZE(sky2_stats);
2342}
2343
2344static void sky2_get_ethtool_stats(struct net_device *dev,
2345 struct ethtool_stats *stats, u64 * data)
2346{
2347 struct sky2_port *sky2 = netdev_priv(dev);
2348
2349 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2350}
2351
2352static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2353{
2354 int i;
2355
2356 switch (stringset) {
2357 case ETH_SS_STATS:
2358 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
2359 memcpy(data + i * ETH_GSTRING_LEN,
2360 sky2_stats[i].name, ETH_GSTRING_LEN);
2361 break;
2362 }
2363}
2364
2365/* Use hardware MIB variables for critical path statistics and
2366 * transmit feedback not reported at interrupt time.
2367 * Other errors are accounted for in the interrupt handler.
2368 */
2369static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2370{
2371 struct sky2_port *sky2 = netdev_priv(dev);
2372 u64 data[13];
2373
2374 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2375
2376 sky2->net_stats.tx_bytes = data[0];
2377 sky2->net_stats.rx_bytes = data[1];
2378 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2379 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2380 sky2->net_stats.multicast = data[5] + data[7];
2381 sky2->net_stats.collisions = data[10];
2382 sky2->net_stats.tx_aborted_errors = data[12];
2383
2384 return &sky2->net_stats;
2385}
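The bare indices in sky2_get_stats() follow the ordering of the sky2_stats[] table above, which is also how sky2_phy_stats() fills data[]: data[2]/data[4]/data[6] are the tx broadcast/multicast/unicast frame counters (hence their sum is tx_packets), data[3]/data[5]/data[7] are the rx counterparts, data[10] maps to the "collisions" entry and data[12] to "aborted".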
2386
2387static int sky2_set_mac_address(struct net_device *dev, void *p)
2388{
2389 struct sky2_port *sky2 = netdev_priv(dev);
2390 struct sockaddr *addr = p;
2391 int err = 0;
2392
2393 if (!is_valid_ether_addr(addr->sa_data))
2394 return -EADDRNOTAVAIL;
2395
2396 sky2_down(dev);
2397 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2398 memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
2399 dev->dev_addr, ETH_ALEN);
2400 memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
2401 dev->dev_addr, ETH_ALEN);
2402 if (dev->flags & IFF_UP)
2403 err = sky2_up(dev);
2404 return err;
2405}
2406
2407static void sky2_set_multicast(struct net_device *dev)
2408{
2409 struct sky2_port *sky2 = netdev_priv(dev);
2410 struct sky2_hw *hw = sky2->hw;
2411 unsigned port = sky2->port;
2412 struct dev_mc_list *list = dev->mc_list;
2413 u16 reg;
2414 u8 filter[8];
2415
2416 memset(filter, 0, sizeof(filter));
2417
2418 reg = gma_read16(hw, port, GM_RX_CTRL);
2419 reg |= GM_RXCR_UCF_ENA;
2420
2421 if (dev->flags & IFF_PROMISC) /* promiscuous */
2422 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2423 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
2424 memset(filter, 0xff, sizeof(filter));
2425 else if (dev->mc_count == 0) /* no multicast */
2426 reg &= ~GM_RXCR_MCF_ENA;
2427 else {
2428 int i;
2429 reg |= GM_RXCR_MCF_ENA;
2430
2431 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2432 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2433 filter[bit / 8] |= 1 << (bit % 8);
2434 }
2435 }
2436
2437 gma_write16(hw, port, GM_MC_ADDR_H1,
2438 (u16) filter[0] | ((u16) filter[1] << 8));
2439 gma_write16(hw, port, GM_MC_ADDR_H2,
2440 (u16) filter[2] | ((u16) filter[3] << 8));
2441 gma_write16(hw, port, GM_MC_ADDR_H3,
2442 (u16) filter[4] | ((u16) filter[5] << 8));
2443 gma_write16(hw, port, GM_MC_ADDR_H4,
2444 (u16) filter[6] | ((u16) filter[7] << 8));
2445
2446 gma_write16(hw, port, GM_RX_CTRL, reg);
2447}
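The hash filter above keys on the low six bits of the Ethernet CRC. A throwaway user-space sketch of the bit placement, using an arbitrary stand-in value rather than a real ether_crc() result:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t filter[8] = { 0 };
	uint32_t crc = 0x2b;		/* stand-in for ether_crc(ETH_ALEN, addr) */
	uint32_t bit = crc & 0x3f;	/* low 6 bits pick one of 64 filter bits */

	filter[bit / 8] |= (uint8_t)(1u << (bit % 8));

	/* bit 43 lands in filter[5], mask 0x08, i.e. the high byte of GM_MC_ADDR_H3 */
	printf("bit %u -> filter[%u] = 0x%02x\n", bit, bit / 8, filter[bit / 8]);
	return 0;
}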
2448
2449/* Can have one global because blinking is controlled by
2450 * ethtool, which always runs under the RTNL mutex
2451 */
2452static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2453{
2454 u16 pg;
2455
2456 spin_lock_bh(&hw->phy_lock);
2457 switch (hw->chip_id) {
2458 case CHIP_ID_YUKON_XL:
2459 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2460 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2461 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
2462 on ? (PHY_M_LEDC_LOS_CTRL(1) |
2463 PHY_M_LEDC_INIT_CTRL(7) |
2464 PHY_M_LEDC_STA1_CTRL(7) |
2465 PHY_M_LEDC_STA0_CTRL(7))
2466 : 0);
2467
2468 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2469 break;
2470
2471 default:
2472 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2473 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2474 on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
2475 PHY_M_LED_MO_10(MO_LED_ON) |
2476 PHY_M_LED_MO_100(MO_LED_ON) |
2477 PHY_M_LED_MO_1000(MO_LED_ON) |
2478 PHY_M_LED_MO_RX(MO_LED_ON)
2479 : PHY_M_LED_MO_DUP(MO_LED_OFF) |
2480 PHY_M_LED_MO_10(MO_LED_OFF) |
2481 PHY_M_LED_MO_100(MO_LED_OFF) |
2482 PHY_M_LED_MO_1000(MO_LED_OFF) |
2483 PHY_M_LED_MO_RX(MO_LED_OFF));
2484
2485 }
2486 spin_unlock_bh(&hw->phy_lock);
2487}
2488
2489/* blink LEDs to identify the board */
2490static int sky2_phys_id(struct net_device *dev, u32 data)
2491{
2492 struct sky2_port *sky2 = netdev_priv(dev);
2493 struct sky2_hw *hw = sky2->hw;
2494 unsigned port = sky2->port;
2495 u16 ledctrl, ledover = 0;
2496 long ms;
2497 int onoff = 1;
2498
2499 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
2500 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2501 else
2502 ms = data * 1000;
2503
2504 /* save initial values */
2505 spin_lock_bh(&hw->phy_lock);
2506 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2507 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2508 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2509 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2510 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2511 } else {
2512 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2513 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2514 }
2515 spin_unlock_bh(&hw->phy_lock);
2516
2517 while (ms > 0) {
2518 sky2_led(hw, port, onoff);
2519 onoff = !onoff;
2520
2521 if (msleep_interruptible(250))
2522 break; /* interrupted */
2523 ms -= 250;
2524 }
2525
2526 /* resume regularly scheduled programming */
2527 spin_lock_bh(&hw->phy_lock);
2528 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2529 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2530 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2531 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
2532 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2533 } else {
2534 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2535 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2536 }
2537 spin_unlock_bh(&hw->phy_lock);
2538
2539 return 0;
2540}
2541
2542static void sky2_get_pauseparam(struct net_device *dev,
2543 struct ethtool_pauseparam *ecmd)
2544{
2545 struct sky2_port *sky2 = netdev_priv(dev);
2546
2547 ecmd->tx_pause = sky2->tx_pause;
2548 ecmd->rx_pause = sky2->rx_pause;
2549 ecmd->autoneg = sky2->autoneg;
2550}
2551
2552static int sky2_set_pauseparam(struct net_device *dev,
2553 struct ethtool_pauseparam *ecmd)
2554{
2555 struct sky2_port *sky2 = netdev_priv(dev);
2556 int err = 0;
2557
2558 sky2->autoneg = ecmd->autoneg;
2559 sky2->tx_pause = ecmd->tx_pause != 0;
2560 sky2->rx_pause = ecmd->rx_pause != 0;
2561
2562 if (netif_running(dev)) {
2563 sky2_down(dev);
2564 err = sky2_up(dev);
2565 }
2566
2567 return err;
2568}
2569
2570#ifdef CONFIG_PM
2571static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2572{
2573 struct sky2_port *sky2 = netdev_priv(dev);
2574
2575 wol->supported = WAKE_MAGIC;
2576 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2577}
2578
2579static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2580{
2581 struct sky2_port *sky2 = netdev_priv(dev);
2582 struct sky2_hw *hw = sky2->hw;
2583
2584 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2585 return -EOPNOTSUPP;
2586
2587 sky2->wol = wol->wolopts == WAKE_MAGIC;
2588
2589 if (sky2->wol) {
2590 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2591
2592 sky2_write16(hw, WOL_CTRL_STAT,
2593 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2594 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2595 } else
2596 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2597
2598 return 0;
2599}
2600#endif
2601
2602static void sky2_get_ringparam(struct net_device *dev,
2603 struct ethtool_ringparam *ering)
2604{
2605 struct sky2_port *sky2 = netdev_priv(dev);
2606
2607 ering->rx_max_pending = RX_MAX_PENDING;
2608 ering->rx_mini_max_pending = 0;
2609 ering->rx_jumbo_max_pending = 0;
2610 ering->tx_max_pending = TX_RING_SIZE - 1;
2611
2612 ering->rx_pending = sky2->rx_pending;
2613 ering->rx_mini_pending = 0;
2614 ering->rx_jumbo_pending = 0;
2615 ering->tx_pending = sky2->tx_pending;
2616}
2617
2618static int sky2_set_ringparam(struct net_device *dev,
2619 struct ethtool_ringparam *ering)
2620{
2621 struct sky2_port *sky2 = netdev_priv(dev);
2622 int err = 0;
2623
2624 if (ering->rx_pending > RX_MAX_PENDING ||
2625 ering->rx_pending < 8 ||
2626 ering->tx_pending < MAX_SKB_TX_LE ||
2627 ering->tx_pending > TX_RING_SIZE - 1)
2628 return -EINVAL;
2629
2630 if (netif_running(dev))
2631 sky2_down(dev);
2632
2633 sky2->rx_pending = ering->rx_pending;
2634 sky2->tx_pending = ering->tx_pending;
2635
2636 if (netif_running(dev))
2637 err = sky2_up(dev);
2638
2639 return err;
2640}
2641
2642static int sky2_get_regs_len(struct net_device *dev)
2643{
2644 return 0x4000;
2645}
2646
2647/*
2648 * Returns a copy of the control register region.
2649 * Note: access to the RAM address register set will cause timeouts.
2650 */
2651static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2652 void *p)
2653{
2654 const struct sky2_port *sky2 = netdev_priv(dev);
2655 const void __iomem *io = sky2->hw->regs;
2656
2657 BUG_ON(regs->len < B3_RI_WTO_R1);
2658 regs->version = 1;
2659 memset(p, 0, regs->len);
2660
2661 memcpy_fromio(p, io, B3_RAM_ADDR);
2662
2663 memcpy_fromio(p + B3_RI_WTO_R1,
2664 io + B3_RI_WTO_R1,
2665 regs->len - B3_RI_WTO_R1);
2666}
2667
2668static struct ethtool_ops sky2_ethtool_ops = {
2669 .get_settings = sky2_get_settings,
2670 .set_settings = sky2_set_settings,
2671 .get_drvinfo = sky2_get_drvinfo,
2672 .get_msglevel = sky2_get_msglevel,
2673 .set_msglevel = sky2_set_msglevel,
2674 .nway_reset = sky2_nway_reset,
2675 .get_regs_len = sky2_get_regs_len,
2676 .get_regs = sky2_get_regs,
2677 .get_link = ethtool_op_get_link,
2678 .get_sg = ethtool_op_get_sg,
2679 .set_sg = ethtool_op_set_sg,
2680 .get_tx_csum = ethtool_op_get_tx_csum,
2681 .set_tx_csum = ethtool_op_set_tx_csum,
2682 .get_tso = ethtool_op_get_tso,
2683 .set_tso = ethtool_op_set_tso,
2684 .get_rx_csum = sky2_get_rx_csum,
2685 .set_rx_csum = sky2_set_rx_csum,
2686 .get_strings = sky2_get_strings,
2687 .get_ringparam = sky2_get_ringparam,
2688 .set_ringparam = sky2_set_ringparam,
2689 .get_pauseparam = sky2_get_pauseparam,
2690 .set_pauseparam = sky2_set_pauseparam,
2691#ifdef CONFIG_PM
2692 .get_wol = sky2_get_wol,
2693 .set_wol = sky2_set_wol,
2694#endif
2695 .phys_id = sky2_phys_id,
2696 .get_stats_count = sky2_get_stats_count,
2697 .get_ethtool_stats = sky2_get_ethtool_stats,
2698 .get_perm_addr = ethtool_op_get_perm_addr,
2699};
2700
2701/* Initialize network device */
2702static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2703 unsigned port, int highmem)
2704{
2705 struct sky2_port *sky2;
2706 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
2707
2708 if (!dev) {
2709		printk(KERN_ERR "sky2 etherdev alloc failed\n");
2710 return NULL;
2711 }
2712
2713 SET_MODULE_OWNER(dev);
2714 SET_NETDEV_DEV(dev, &hw->pdev->dev);
2715 dev->open = sky2_up;
2716 dev->stop = sky2_down;
2717 dev->hard_start_xmit = sky2_xmit_frame;
2718 dev->get_stats = sky2_get_stats;
2719 dev->set_multicast_list = sky2_set_multicast;
2720 dev->set_mac_address = sky2_set_mac_address;
2721 dev->change_mtu = sky2_change_mtu;
2722 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
2723 dev->tx_timeout = sky2_tx_timeout;
2724 dev->watchdog_timeo = TX_WATCHDOG;
2725 if (port == 0)
2726 dev->poll = sky2_poll;
2727 dev->weight = NAPI_WEIGHT;
2728#ifdef CONFIG_NET_POLL_CONTROLLER
2729 dev->poll_controller = sky2_netpoll;
2730#endif
2731
2732 sky2 = netdev_priv(dev);
2733 sky2->netdev = dev;
2734 sky2->hw = hw;
2735 sky2->msg_enable = netif_msg_init(debug, default_msg);
2736
2737 spin_lock_init(&sky2->tx_lock);
2738 /* Auto speed and flow control */
2739 sky2->autoneg = AUTONEG_ENABLE;
2740 sky2->tx_pause = 0;
2741 sky2->rx_pause = 1;
2742 sky2->duplex = -1;
2743 sky2->speed = -1;
2744 sky2->advertising = sky2_supported_modes(hw);
2745 sky2->rx_csum = 1;
2746 tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long)sky2);
2747 sky2->tx_pending = TX_DEF_PENDING;
2748 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
2749
2750 hw->dev[port] = dev;
2751
2752 sky2->port = port;
2753
2754 dev->features |= NETIF_F_LLTX | NETIF_F_TSO;
2755 if (highmem)
2756 dev->features |= NETIF_F_HIGHDMA;
2757 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2758
2759#ifdef SKY2_VLAN_TAG_USED
2760 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2761 dev->vlan_rx_register = sky2_vlan_rx_register;
2762 dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
2763#endif
2764
2765 /* read the mac address */
2766 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
2767 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2768
2769 /* device is off until link detection */
2770 netif_carrier_off(dev);
2771 netif_stop_queue(dev);
2772
2773 return dev;
2774}
2775
2776static inline void sky2_show_addr(struct net_device *dev)
2777{
2778 const struct sky2_port *sky2 = netdev_priv(dev);
2779
2780 if (netif_msg_probe(sky2))
2781 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
2782 dev->name,
2783 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2784 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2785}
2786
2787static int __devinit sky2_probe(struct pci_dev *pdev,
2788 const struct pci_device_id *ent)
2789{
2790 struct net_device *dev, *dev1 = NULL;
2791 struct sky2_hw *hw;
2792 int err, pm_cap, using_dac = 0;
2793
2794 err = pci_enable_device(pdev);
2795 if (err) {
2796 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
2797 pci_name(pdev));
2798 goto err_out;
2799 }
2800
2801 err = pci_request_regions(pdev, DRV_NAME);
2802 if (err) {
2803 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
2804 pci_name(pdev));
2805 goto err_out;
2806 }
2807
2808 pci_set_master(pdev);
2809
2810 /* Find power-management capability. */
2811 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
2812 if (pm_cap == 0) {
2813 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
2814 "aborting.\n");
2815 err = -EIO;
2816 goto err_out_free_regions;
2817 }
2818
2819 if (sizeof(dma_addr_t) > sizeof(u32)) {
2820 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2821 if (!err)
2822 using_dac = 1;
2823 }
2824
2825 if (!using_dac) {
2826 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2827 if (err) {
2828 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
2829 pci_name(pdev));
2830 goto err_out_free_regions;
2831 }
2832 }
2833#ifdef __BIG_ENDIAN
2834 /* byte swap descriptors in hardware */
2835 {
2836 u32 reg;
2837
2838 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
2839 reg |= PCI_REV_DESC;
2840 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
2841 }
2842#endif
2843
2844 err = -ENOMEM;
2845 hw = kmalloc(sizeof(*hw), GFP_KERNEL);
2846 if (!hw) {
2847 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
2848 pci_name(pdev));
2849 goto err_out_free_regions;
2850 }
2851
2852 memset(hw, 0, sizeof(*hw));
2853 hw->pdev = pdev;
2854 spin_lock_init(&hw->phy_lock);
2855
2856 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
2857 if (!hw->regs) {
2858 printk(KERN_ERR PFX "%s: cannot map device registers\n",
2859 pci_name(pdev));
2860 goto err_out_free_hw;
2861 }
2862 hw->pm_cap = pm_cap;
2863
2864 err = sky2_reset(hw);
2865 if (err)
2866 goto err_out_iounmap;
2867
2868 printk(KERN_INFO PFX "addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
2869 pci_resource_start(pdev, 0), pdev->irq,
2870 yukon_name[hw->chip_id - CHIP_ID_YUKON],
2871 hw->chip_id, hw->chip_rev);
2872
2873 dev = sky2_init_netdev(hw, 0, using_dac);
2874 if (!dev)
2875 goto err_out_free_pci;
2876
2877 err = register_netdev(dev);
2878 if (err) {
2879 printk(KERN_ERR PFX "%s: cannot register net device\n",
2880 pci_name(pdev));
2881 goto err_out_free_netdev;
2882 }
2883
2884 sky2_show_addr(dev);
2885
2886 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
2887 if (register_netdev(dev1) == 0)
2888 sky2_show_addr(dev1);
2889 else {
2890 /* Failure to register second port need not be fatal */
2891 printk(KERN_WARNING PFX
2892 "register of second port failed\n");
2893 hw->dev[1] = NULL;
2894 free_netdev(dev1);
2895 }
2896 }
2897
2898 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
2899 if (err) {
2900 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
2901 pci_name(pdev), pdev->irq);
2902 goto err_out_unregister;
2903 }
2904
2905 hw->intr_mask = Y2_IS_BASE;
2906 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2907
2908 pci_set_drvdata(pdev, hw);
2909
2910 return 0;
2911
2912err_out_unregister:
2913 if (dev1) {
2914 unregister_netdev(dev1);
2915 free_netdev(dev1);
2916 }
2917 unregister_netdev(dev);
2918err_out_free_netdev:
2919 free_netdev(dev);
2920err_out_free_pci:
2921 sky2_write8(hw, B0_CTST, CS_RST_SET);
2922 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
2923err_out_iounmap:
2924 iounmap(hw->regs);
2925err_out_free_hw:
2926 kfree(hw);
2927err_out_free_regions:
2928 pci_release_regions(pdev);
2929 pci_disable_device(pdev);
2930err_out:
2931 return err;
2932}
2933
2934static void __devexit sky2_remove(struct pci_dev *pdev)
2935{
2936 struct sky2_hw *hw = pci_get_drvdata(pdev);
2937 struct net_device *dev0, *dev1;
2938
2939 if (!hw)
2940 return;
2941
2942 dev0 = hw->dev[0];
2943 dev1 = hw->dev[1];
2944 if (dev1)
2945 unregister_netdev(dev1);
2946 unregister_netdev(dev0);
2947
2948 sky2_write32(hw, B0_IMSK, 0);
2949 sky2_set_power_state(hw, PCI_D3hot);
2950 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
2951 sky2_write8(hw, B0_CTST, CS_RST_SET);
2952 sky2_read8(hw, B0_CTST);
2953
2954 free_irq(pdev->irq, hw);
2955 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
2956 pci_release_regions(pdev);
2957 pci_disable_device(pdev);
2958
2959 if (dev1)
2960 free_netdev(dev1);
2961 free_netdev(dev0);
2962 iounmap(hw->regs);
2963 kfree(hw);
2964
2965 pci_set_drvdata(pdev, NULL);
2966}
2967
2968#ifdef CONFIG_PM
2969static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
2970{
2971 struct sky2_hw *hw = pci_get_drvdata(pdev);
2972 int i;
2973
2974 for (i = 0; i < 2; i++) {
2975 struct net_device *dev = hw->dev[i];
2976
2977 if (dev) {
2978 if (!netif_running(dev))
2979 continue;
2980
2981 sky2_down(dev);
2982 netif_device_detach(dev);
2983 }
2984 }
2985
2986 return sky2_set_power_state(hw, pci_choose_state(pdev, state));
2987}
2988
2989static int sky2_resume(struct pci_dev *pdev)
2990{
2991 struct sky2_hw *hw = pci_get_drvdata(pdev);
2992 int i;
2993
2994 pci_restore_state(pdev);
2995 pci_enable_wake(pdev, PCI_D0, 0);
2996 sky2_set_power_state(hw, PCI_D0);
2997
2998 sky2_reset(hw);
2999
3000 for (i = 0; i < 2; i++) {
3001 struct net_device *dev = hw->dev[i];
3002 if (dev) {
3003 if (netif_running(dev)) {
3004 netif_device_attach(dev);
3005 sky2_up(dev);
3006 }
3007 }
3008 }
3009 return 0;
3010}
3011#endif
3012
3013static struct pci_driver sky2_driver = {
3014 .name = DRV_NAME,
3015 .id_table = sky2_id_table,
3016 .probe = sky2_probe,
3017 .remove = __devexit_p(sky2_remove),
3018#ifdef CONFIG_PM
3019 .suspend = sky2_suspend,
3020 .resume = sky2_resume,
3021#endif
3022};
3023
3024static int __init sky2_init_module(void)
3025{
3026 return pci_module_init(&sky2_driver);
3027}
3028
3029static void __exit sky2_cleanup_module(void)
3030{
3031 pci_unregister_driver(&sky2_driver);
3032}
3033
3034module_init(sky2_init_module);
3035module_exit(sky2_cleanup_module);
3036
3037MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
3038MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
3039MODULE_LICENSE("GPL");
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
new file mode 100644
index 000000000000..629d08f170fd
--- /dev/null
+++ b/drivers/net/sky2.h
@@ -0,0 +1,1910 @@
1/*
2 * Definitions for the new Marvell Yukon 2 driver.
3 */
4#ifndef _SKY2_H
5#define _SKY2_H
6
7/* PCI config registers */
8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44
10#define PCI_DEV_STATUS 0x7c
11#define PCI_OS_PCI_X (1<<26)
12
13#define PEX_LNK_STAT 0xf2
14#define PEX_UNC_ERR_STAT 0x104
15#define PEX_DEV_CTRL 0xe8
16
17/* Yukon-2 */
18enum pci_dev_reg_1 {
19 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
20 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
21 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
22 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
23 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
24 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
25};
26
27enum pci_dev_reg_2 {
28 PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
29 PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
30 PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
31
32 PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
33 PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
34 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
35 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
36
37 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
38};
39
40
41#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
42 PCI_STATUS_SIG_SYSTEM_ERROR | \
43 PCI_STATUS_REC_MASTER_ABORT | \
44 PCI_STATUS_REC_TARGET_ABORT | \
45 PCI_STATUS_PARITY)
46
47enum pex_dev_ctrl {
48 PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */
49 PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */
50 PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */
51 PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */
52 PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */
53 PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */
54 PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */
55 PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */
56 PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */
57 PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */
58 PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */
59};
60#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
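For reference, PEX_DC_MAX_RD_RQ_SIZE(4) evaluates to (4 << 12) & PEX_DC_MAX_RRS_MSK = 0x4000; in the standard PCI Express encoding of this field (size = 128 << value) that selects a 2048-byte maximum read request, which is what the "change Max. Read Request Size to 2048 bytes" step in sky2_reset() installs.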
61
62/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */
63enum pex_err {
64 PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */
65
66 PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */
67
68 PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */
69
70 PEX_COMP_TO = 1<<14, /* Completion Timeout */
71 PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
72 PEX_POIS_TLP = 1<<12, /* Poisoned TLP */
73
74 PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
75 PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
76};
77
78
79enum csr_regs {
80 B0_RAP = 0x0000,
81 B0_CTST = 0x0004,
82 B0_Y2LED = 0x0005,
83 B0_POWER_CTRL = 0x0007,
84 B0_ISRC = 0x0008,
85 B0_IMSK = 0x000c,
86 B0_HWE_ISRC = 0x0010,
87 B0_HWE_IMSK = 0x0014,
88
89 /* Special ISR registers (Yukon-2 only) */
90 B0_Y2_SP_ISRC2 = 0x001c,
91 B0_Y2_SP_ISRC3 = 0x0020,
92 B0_Y2_SP_EISR = 0x0024,
93 B0_Y2_SP_LISR = 0x0028,
94 B0_Y2_SP_ICR = 0x002c,
95
96 B2_MAC_1 = 0x0100,
97 B2_MAC_2 = 0x0108,
98 B2_MAC_3 = 0x0110,
99 B2_CONN_TYP = 0x0118,
100 B2_PMD_TYP = 0x0119,
101 B2_MAC_CFG = 0x011a,
102 B2_CHIP_ID = 0x011b,
103 B2_E_0 = 0x011c,
104
105 B2_Y2_CLK_GATE = 0x011d,
106 B2_Y2_HW_RES = 0x011e,
107 B2_E_3 = 0x011f,
108 B2_Y2_CLK_CTRL = 0x0120,
109
110 B2_TI_INI = 0x0130,
111 B2_TI_VAL = 0x0134,
112 B2_TI_CTRL = 0x0138,
113 B2_TI_TEST = 0x0139,
114
115 B2_TST_CTRL1 = 0x0158,
116 B2_TST_CTRL2 = 0x0159,
117 B2_GP_IO = 0x015c,
118
119 B2_I2C_CTRL = 0x0160,
120 B2_I2C_DATA = 0x0164,
121 B2_I2C_IRQ = 0x0168,
122 B2_I2C_SW = 0x016c,
123
124 B3_RAM_ADDR = 0x0180,
125 B3_RAM_DATA_LO = 0x0184,
126 B3_RAM_DATA_HI = 0x0188,
127
128/* RAM Interface Registers */
129/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
130/*
131 * The HW spec calls these registers Timeout Value 0..11, but those names are
132 * not usable in SW. Note that these are NOT real timeouts; they are
133 * the number of qWords transferred continuously.
134 */
135#define RAM_BUFFER(port, reg) (reg | (port <<6))
136
137 B3_RI_WTO_R1 = 0x0190,
138 B3_RI_WTO_XA1 = 0x0191,
139 B3_RI_WTO_XS1 = 0x0192,
140 B3_RI_RTO_R1 = 0x0193,
141 B3_RI_RTO_XA1 = 0x0194,
142 B3_RI_RTO_XS1 = 0x0195,
143 B3_RI_WTO_R2 = 0x0196,
144 B3_RI_WTO_XA2 = 0x0197,
145 B3_RI_WTO_XS2 = 0x0198,
146 B3_RI_RTO_R2 = 0x0199,
147 B3_RI_RTO_XA2 = 0x019a,
148 B3_RI_RTO_XS2 = 0x019b,
149 B3_RI_TO_VAL = 0x019c,
150 B3_RI_CTRL = 0x01a0,
151 B3_RI_TEST = 0x01a2,
152 B3_MA_TOINI_RX1 = 0x01b0,
153 B3_MA_TOINI_RX2 = 0x01b1,
154 B3_MA_TOINI_TX1 = 0x01b2,
155 B3_MA_TOINI_TX2 = 0x01b3,
156 B3_MA_TOVAL_RX1 = 0x01b4,
157 B3_MA_TOVAL_RX2 = 0x01b5,
158 B3_MA_TOVAL_TX1 = 0x01b6,
159 B3_MA_TOVAL_TX2 = 0x01b7,
160 B3_MA_TO_CTRL = 0x01b8,
161 B3_MA_TO_TEST = 0x01ba,
162 B3_MA_RCINI_RX1 = 0x01c0,
163 B3_MA_RCINI_RX2 = 0x01c1,
164 B3_MA_RCINI_TX1 = 0x01c2,
165 B3_MA_RCINI_TX2 = 0x01c3,
166 B3_MA_RCVAL_RX1 = 0x01c4,
167 B3_MA_RCVAL_RX2 = 0x01c5,
168 B3_MA_RCVAL_TX1 = 0x01c6,
169 B3_MA_RCVAL_TX2 = 0x01c7,
170 B3_MA_RC_CTRL = 0x01c8,
171 B3_MA_RC_TEST = 0x01ca,
172 B3_PA_TOINI_RX1 = 0x01d0,
173 B3_PA_TOINI_RX2 = 0x01d4,
174 B3_PA_TOINI_TX1 = 0x01d8,
175 B3_PA_TOINI_TX2 = 0x01dc,
176 B3_PA_TOVAL_RX1 = 0x01e0,
177 B3_PA_TOVAL_RX2 = 0x01e4,
178 B3_PA_TOVAL_TX1 = 0x01e8,
179 B3_PA_TOVAL_TX2 = 0x01ec,
180 B3_PA_CTRL = 0x01f0,
181 B3_PA_TEST = 0x01f2,
182
183 Y2_CFG_SPC = 0x1c00,
184};
185
186/* B0_CTST 16 bit Control/Status register */
187enum {
188 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
189 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
190 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
191 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
192 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
193 Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
194 Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
195 Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
196
197 CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
198 CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
199 CS_STOP_DONE = 1<<5, /* Stop Master is finished */
200 CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
201 CS_MRST_CLR = 1<<3, /* Clear Master reset */
202 CS_MRST_SET = 1<<2, /* Set Master reset */
203 CS_RST_CLR = 1<<1, /* Clear Software reset */
204 CS_RST_SET = 1, /* Set Software reset */
205};
206
207/* B0_LED 8 Bit LED register */
208enum {
209/* Bit 7.. 2: reserved */
210 LED_STAT_ON = 1<<1, /* Status LED on */
211 LED_STAT_OFF = 1, /* Status LED off */
212};
213
214/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
215enum {
216 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
217 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
218 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
219 PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
220 PC_VAUX_ON = 1<<3, /* Switch VAUX On */
221 PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
222 PC_VCC_ON = 1<<1, /* Switch VCC On */
223 PC_VCC_OFF = 1<<0, /* Switch VCC Off */
224};
225
226/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
227
228/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
229/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
230/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
231/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
232enum {
233 Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
234 Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
235 Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
236
237 Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
238 Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
239 Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
240 Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
241
242 Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
243 Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
244 Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
245 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
246 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
247
248 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
249 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
250 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
251 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
252 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
253
254 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
255 Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
256 Y2_IS_IRQ_SW | Y2_IS_TIMINT,
257 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
258 Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
259 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
260 Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
261};
262
263/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
264enum {
265 IS_ERR_MSK = 0x00003fff,/* All Error bits */
266
267 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
268 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
269 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
270 IS_IRQ_STAT = 1<<10, /* IRQ status exception */
271 IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
272 IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
273 IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
274 IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
275 IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
276 IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
277 IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
278 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
279 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
280 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
281};
282
283/* Hardware error interrupt mask for Yukon 2 */
284enum {
285 Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
286 Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
287 Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
288 Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
289 Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
290 Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
291 /* Link 2 */
292 Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
293 Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
294 Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
295 Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
296 Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
297 Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
298 /* Link 1 */
299 Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
300 Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
301 Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
302 Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
303 Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
304 Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
305
306 Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
307 Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
308 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
309 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
310
311 Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
312 Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP |
313 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
314};
315
316/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
317enum {
318 DPT_START = 1<<1,
319 DPT_STOP = 1<<0,
320};
321
322/* B2_TST_CTRL1 8 bit Test Control Register 1 */
323enum {
324 TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
325 TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
326 TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
327 TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
328 TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
329 TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
330 TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
331 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
332};
333
334/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
335enum {
336 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
337 /* Bit 3.. 2: reserved */
338 CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
339 CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
340};
341
342/* B2_CHIP_ID 8 bit Chip Identification Number */
343enum {
344 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
345 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
346 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
347 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
348 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
349 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
350 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
351
352 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
353 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
354 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
355};
356
357/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
358enum {
359 Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
360 Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
361 Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
362 Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
363 Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
364 Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
365 Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
366 Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
367};
368
369/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
370enum {
371 CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
372 CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
373 CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
374};
375#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
376#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
377
378
379/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
380enum {
381 Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
382#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
383 Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
384 Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
385#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
386#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
387 Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
388 Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
389};
390
391/* B2_TI_CTRL 8 bit Timer control */
392/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
393enum {
394 TIM_START = 1<<2, /* Start Timer */
395 TIM_STOP = 1<<1, /* Stop Timer */
396 TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
397};
398
399/* B2_TI_TEST 8 Bit Timer Test */
400/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
401/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
402enum {
403 TIM_T_ON = 1<<2, /* Test mode on */
404 TIM_T_OFF = 1<<1, /* Test mode off */
405 TIM_T_STEP = 1<<0, /* Test step */
406};
407
408/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
409 /* Bit 31..19: reserved */
410#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
411/* RAM Interface Registers */
412
413/* B3_RI_CTRL 16 bit RAM Interface Control Register */
414enum {
415 RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
416 RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
417
418 RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
419 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
420};
421
422#define SK_RI_TO_53 36 /* RAM interface timeout */
423
424
425/* Port related registers FIFO, and Arbiter */
426#define SK_REG(port,reg) (((port)<<7)+(reg))
427
428/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
429/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
430/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
431/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
432/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
433
434#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
435
436/* TXA_CTRL 8 bit Tx Arbiter Control Register */
437enum {
438 TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
439 TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
440 TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
441 TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
442 TXA_START_RC = 1<<3, /* Start sync Rate Control */
443 TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
444 TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
445 TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
446};
447
448/*
449 * Bank 4 - 5
450 */
451/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
452enum {
453 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
454 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
455 TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
456 TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
457 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
458 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
459 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
460};
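The per-port FIFO and arbiter blocks above are laid out with a 128-byte stride, which is all SK_REG() encodes. A minimal sketch, assuming an ioremapped register window and the standard iowrite8() helper from <linux/io.h>; it is illustrative only and not part of this patch:

static inline void txa_disable_sketch(void __iomem *io)
{
	/* SK_REG(0, TXA_CTRL) == 0x0210, SK_REG(1, TXA_CTRL) == 0x0290 */
	iowrite8(TXA_DIS_ARB, io + SK_REG(0, TXA_CTRL));
	iowrite8(TXA_DIS_ARB, io + SK_REG(1, TXA_CTRL));
}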
461
462
463enum {
464 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
465 B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
466 B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
467 B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
468 B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
469 B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
470 B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
471 B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
472 B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
473};
474
475/* Queue Register Offsets, use Q_ADDR() to access */
476enum {
477 B8_Q_REGS = 0x0400, /* base of Queue registers */
478 Q_D = 0x00, /* 8*32 bit Current Descriptor */
479 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
480 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
481 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
482 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
483 Q_BC = 0x30, /* 32 bit Current Byte Counter */
484 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
485 Q_F = 0x38, /* 32 bit Flag Register */
486 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
487 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
488 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
489 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
490 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
491 Q_T2 = 0x40, /* 32 bit Test Register 2 */
492 Q_T3 = 0x44, /* 32 bit Test Register 3 */
493
494/* Yukon-2 */
495 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
496 Q_WM = 0x40, /* 16 bit FIFO Watermark */
497 Q_AL = 0x42, /* 8 bit FIFO Alignment */
498 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
499 Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
500 Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
501 Q_RL = 0x4a, /* 8 bit FIFO Read Level */
502 Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
503 Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
504 Q_WL = 0x4e, /* 8 bit FIFO Write Level */
505 Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
506};
507#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
508
509
510/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
511enum {
512 Y2_B8_PREF_REGS = 0x0450,
513
514 PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
515 PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
516 PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
517 PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
518 PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
519 PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
520 PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
521 PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
522 PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
523 PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
524
525 PREF_UNIT_MASK_IDX = 0x0fff,
526};
527#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
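Worked example (not part of the patch) of how Y2_QADDR() composes an address: a queue's prefetch unit sits at Y2_B8_PREF_REGS plus the queue base defined further below (e.g. Q_XA1 = 0x0280) plus the register offset:

/*
 * Y2_QADDR(Q_XA1, PREF_UNIT_GET_IDX) = 0x0450 + 0x0280 + 0x10 = 0x06e0
 * Y2_QADDR(Q_XA1, PREF_UNIT_PUT_IDX) = 0x0450 + 0x0280 + 0x14 = 0x06e4
 */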
528
529/* RAM Buffer Register Offsets */
530enum {
531
532 RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
533 RB_END = 0x04,/* 32 bit RAM Buffer End Address */
534 RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
535 RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
536 RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
537 RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
538 RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
539 RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
540 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
541 RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
542 RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
543 RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
544 RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
545 RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
546};
547
548/* Receive and Transmit Queues */
549enum {
550 Q_R1 = 0x0000, /* Receive Queue 1 */
551 Q_R2 = 0x0080, /* Receive Queue 2 */
552 Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
553 Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
554 Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
555 Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
556};
557
558/* Different PHY Types */
559enum {
560 PHY_ADDR_MARV = 0,
561};
562
563#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
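Worked example (illustrative only) combining Q_ADDR() and RB_ADDR() with the queue bases above; the sums follow directly from the constants in this header:

/*
 * Q_ADDR(Q_R1, Q_CSR)    = 0x0400 + 0x0000 + 0x34 = 0x0434
 * Q_ADDR(Q_XA1, Q_DONE)  = 0x0400 + 0x0280 + 0x24 = 0x06a4
 * RB_ADDR(RB_CTRL, Q_R1) = 0x0800 + 0x0000 + 0x28 = 0x0828
 */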
564
565
566enum {
567 LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
568 LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
569 LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
570 LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
571
572 LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
573
574/* Receive GMAC FIFO (YUKON and Yukon-2) */
575
576 RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
577 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
578 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
579 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
580 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
581 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
582
583 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
584 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
585
586 RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
587
588 RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
589
590 RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
591};
592
593
594/* Q_BC 32 bit Current Byte Counter */
595
596/* BMU Control Status Registers */
597/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
598/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
599/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
600/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
601/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
602/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
603/* Q_CSR 32 bit BMU Control/Status Register */
604
605/* Rx BMU Control / Status Registers (Yukon-2) */
606enum {
607 BMU_IDLE = 1<<31, /* BMU Idle State */
608 BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
609 BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
610
611 BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
612 BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
613 BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
614 BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
615 BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
616 BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment error (Tx) */
617 BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
618 BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
619 BMU_START = 1<<8, /* Start Rx/Tx Queue */
620 BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
621 BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
622 BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
623 BMU_FIFO_RST = 1<<4, /* Reset FIFO */
624 BMU_OP_ON = 1<<3, /* BMU Operational On */
625 BMU_OP_OFF = 1<<2, /* BMU Operational Off */
626 BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
627 BMU_RST_SET = 1<<0, /* Set BMU Reset */
628
629 BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
630 BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
631 BMU_FIFO_ENA | BMU_OP_ON,
632};
633
634/* Tx BMU Control / Status Registers (Yukon-2) */
635 /* Bit 31: same as for Rx */
636enum {
637 BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
638 BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
639 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
640};
641
642/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
643/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
644enum {
645 PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
646 PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
647 PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
648 PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
649};
650
651/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
652/* RB_START 32 bit RAM Buffer Start Address */
653/* RB_END 32 bit RAM Buffer End Address */
654/* RB_WP 32 bit RAM Buffer Write Pointer */
655/* RB_RP 32 bit RAM Buffer Read Pointer */
656/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Packet */
657/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Packet */
658/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
659/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
660/* RB_PC 32 bit RAM Buffer Packet Counter */
661/* RB_LEV 32 bit RAM Buffer Level Register */
662
663#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
664/* RB_TST2 8 bit RAM Buffer Test Register 2 */
665/* RB_TST1 8 bit RAM Buffer Test Register 1 */
666
667/* RB_CTRL 8 bit RAM Buffer Control Register */
668enum {
669 RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
670 RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
671 RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
672 RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
673 RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
674 RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
675};
676
677
678/* Transmit GMAC FIFO (YUKON only) */
679enum {
680 TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
681 TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
682 TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
683
684 TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
685 TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
686 TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
687
688 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
689 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
690 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
691};
692
693/* Descriptor Poll Timer Registers */
694enum {
695 B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
696 B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
697 B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
698
699 B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
700};
701
702/* Time Stamp Timer Registers (YUKON only) */
703enum {
704 GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
705 GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
706 GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
707};
708
709/* Polling Unit Registers (Yukon-2 only) */
710enum {
711 POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
712 POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
713
714 POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
715 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
716};
717
718/* ASF Subsystem Registers (Yukon-2 only) */
719enum {
720 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
721 B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
722 B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
723
724 B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
725 B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
726 B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
727 B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
728 B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
729 B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
730};
731
732/* Status BMU Registers (Yukon-2 only)*/
733enum {
734 STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
735 STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
736
737 STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
738 STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
739 STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
740 STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
741 STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
742 STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
743 STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
744 STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
745
746/* FIFO Control/Status Registers (Yukon-2 only)*/
747 STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
748 STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
749 STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
750 STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
751 STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
752 STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
753 STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
754
755/* Level and ISR Timer Registers (Yukon-2 only)*/
756 STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
757 STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
758 STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
759 STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
760 STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
761 STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
762 STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
763 STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
764 STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
765 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
766 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
767 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
768};
769
770enum {
771 LINKLED_OFF = 0x01,
772 LINKLED_ON = 0x02,
773 LINKLED_LINKSYNC_OFF = 0x04,
774 LINKLED_LINKSYNC_ON = 0x08,
775 LINKLED_BLINK_OFF = 0x10,
776 LINKLED_BLINK_ON = 0x20,
777};
778
779/* GMAC and GPHY Control Registers (YUKON only) */
780enum {
781 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
782 GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
783 GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
784 GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
785 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
786
787/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
788
789 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
790
791 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
792 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
793 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
794 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
795 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
796 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
797 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
798
799/* WOL Pattern Length Registers (YUKON only) */
800
801 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
802 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
803
804/* WOL Pattern Counter Registers (YUKON only) */
805
806
807 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
808 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
809};
810
811enum {
812 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
813 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
814};
815
816enum {
817 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
818 BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
819};
820
821/*
822 * Marvell PHY Registers, indirectly addressed over GMAC
823 */
824enum {
825 PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
826 PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
827 PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
828 PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
829 PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
830 PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
831 PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
832 PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
833 PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
834 /* Marvell-specific registers */
835 PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
836 PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
837 PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
838 PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
839 PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
840 PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
841 PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
842 PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
843 PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
844 PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
845 PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
846 PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
847 PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
848 PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
849 PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
850 PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
851 PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
852 PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
853
854/* for 10/100 Fast Ethernet PHY (88E3082 only) */
855 PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
856 PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
857 PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
858 PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
859 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
860};
861
862enum {
863 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
864 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
865 PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
866 PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
867 PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
868 PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
869 PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
870 PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
871 PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
872 PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
873};
874
875enum {
876 PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
877 PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
878 PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
879};
880
881enum {
882 PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
883
884 PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
885 PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
886 PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
887 PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
888 PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
889 PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
890 PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
891};
892
893enum {
894 PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
895 PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
896 PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
897};
898
899/* different Marvell PHY Ids */
900enum {
901 PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
902
903 PHY_BCOM_ID1_A1 = 0x6041,
904 PHY_BCOM_ID1_B2 = 0x6043,
905 PHY_BCOM_ID1_C0 = 0x6044,
906 PHY_BCOM_ID1_C5 = 0x6047,
907
908 PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
909 PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
910 PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
911 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
912};
913
914/* Advertisement register bits */
915enum {
916 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
917 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
918 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
919
920 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
921 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
922 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
923 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
924 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
925 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
926 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
927 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
928 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
929 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
930 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
931 PHY_AN_100HALF | PHY_AN_100FULL,
932};
933
934/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
935/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
936enum {
937 PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
938 PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
939 PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
940 PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
941 PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
942 PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
943 /* Bit 9..8: reserved */
944 PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
945};
946
947/** Marvell-Specific */
948enum {
949 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
950 PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
951 PHY_M_AN_RF = 1<<13, /* Remote Fault */
952
953 PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
954 PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
955 PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
956 PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
957 PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
958 PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
959 PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
960 PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
961};
962
963/* special defines for FIBER (88E1011S only) */
964enum {
965 PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
966 PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
967 PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
968 PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
969};
970
971/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
972enum {
973 PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
974 PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
975 PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
976 PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
977};
978
979/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
980enum {
981 PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
982 PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
983 PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
984 PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
985 PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
986 PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
987};
988
989/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
990enum {
991 PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
992 PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
993 PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
994 PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
995 PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
996 PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
997 PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
998 PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
999 PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1000 PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1001 PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1002 PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1003};
1004
1005enum {
1006 PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1007 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1008};
1009
1010#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1011
1012enum {
1013 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1014 PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1015 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1016};
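A minimal sketch of how the MDI/MDIX field macro above is meant to be used: a read-modify-write of PHY_MARV_PHY_CTRL. The sky2_hw context and the gm_phy_read()/gm_phy_write() 16-bit PHY accessors are assumed to be provided elsewhere by the driver; this is illustrative only:

static inline void phy_auto_crossover_sketch(struct sky2_hw *hw, unsigned port)
{
	u16 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);

	ctrl &= ~PHY_M_PC_MDIX_MSK;
	ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);	/* 11 = auto crossover */
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
}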
1017
1018/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1019enum {
1020 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1021 PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1022 PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1023 PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1024 PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
1025
1026 PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1027 PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1028
1029 PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1030 PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1031};
1032
1033/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1034enum {
1035 PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1036 PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1037 PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1038 PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1039 PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1040 PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1041 PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1042 PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1043 PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1044 PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1045 PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1046 PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1047 PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1048 PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1049 PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1050 PHY_M_PS_JABBER = 1<<0, /* Jabber */
1051};
1052
1053#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1054
1055/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1056enum {
1057 PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1058 PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1059};
1060
1061enum {
1062 PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1063 PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1064 PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1065 PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1066 PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1067 PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1068 PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1069 PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1070 PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1071 PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1072 PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1073 PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1074
1075 PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1076 PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1077 PHY_M_IS_JABBER = 1<<0, /* Jabber */
1078
1079 PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
1080 | PHY_M_IS_FIFO_ERROR,
1081 PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1082};
1083
1084
1085/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1086enum {
1087 PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1088 PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1089
1090 PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1091 PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1092 /* (88E1011 only) */
1093 PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1094 /* (88E1011 only) */
1095 PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1096 /* (88E1111 only) */
1097 PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1098 /* !!! Errata in spec. (1 = disable) */
1099 PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1100 PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1101 PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1102 PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1103 PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1104 PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */};
1105
1106#define PHY_M_EC_M_DSC(x) ((x)<<10 & PHY_M_EC_M_DSC_MSK)
1107 /* 00=1x; 01=2x; 10=3x; 11=4x */
1108#define PHY_M_EC_S_DSC(x) ((x)<<8 & PHY_M_EC_S_DSC_MSK)
1109 /* 00=dis; 01=1x; 10=2x; 11=3x */
1110#define PHY_M_EC_DSC_2(x) ((x)<<9 & PHY_M_EC_M_DSC_MSK2)
1111 /* 000=1x; 001=2x; 010=3x; 011=4x */
1112#define PHY_M_EC_MAC_S(x) ((x)<<4 & PHY_M_EC_MAC_S_MSK)
1113 /* 01X=0; 110=2.5; 111=25 (MHz) */
1114
1115/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1116enum {
1117 PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
1118 PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
1119 PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
1120};
1121/* !!! Errata in spec.: for PHY_M_PC_DOWN_S_ENA, 1 = disable */
1122
1123#define PHY_M_PC_DSC(x) (((x)<<12) & PHY_M_PC_DSC_MSK)
1124 /* 100=5x; 101=6x; 110=7x; 111=8x */
1125enum {
1126 MAC_TX_CLK_0_MHZ = 2,
1127 MAC_TX_CLK_2_5_MHZ = 6,
1128 MAC_TX_CLK_25_MHZ = 7,
1129};
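Worked example (illustrative only) of the downshift and MAC-speed field macros above; one plausible combination for a copper link would be:

/*
 * PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) = 0x0070   (25 MHz MAC clock)
 * PHY_M_EC_M_DSC(1)                 = 0x0400   (master downshift 2x)
 * PHY_M_EC_S_DSC(1)                 = 0x0100   (slave downshift 1x)
 */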
1130
1131/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1132enum {
1133 PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1134 PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1135 PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1136 PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1137 PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1138 PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1139 PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1140 /* (88E1111 only) */
1141};
1142
1143enum {
1144 PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1145 /* (88E1011 only) */
1146 PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1147 PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1148 PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1149 PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1150 PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1151};
1152
1153#define PHY_M_LED_PULS_DUR(x) (((x)<<12) & PHY_M_LEDC_PULS_MSK)
1154
1155/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/
1156enum {
1157 PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
1158 PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
1159 PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
1160 PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
1161 PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
1162 PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
1163};
1164
1165#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
1166#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
1167#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
1168#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
1169#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
1170#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
1171
1172enum {
1173 PULS_NO_STR = 0,/* no pulse stretching */
1174 PULS_21MS = 1,/* 21 ms to 42 ms */
1175 PULS_42MS = 2,/* 42 ms to 84 ms */
1176 PULS_84MS = 3,/* 84 ms to 170 ms */
1177 PULS_170MS = 4,/* 170 ms to 340 ms */
1178 PULS_340MS = 5,/* 340 ms to 670 ms */
1179 PULS_670MS = 6,/* 670 ms to 1.3 s */
1180 PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1181};
1182
1183#define PHY_M_LED_BLINK_RT(x) (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
1184
1185enum {
1186 BLINK_42MS = 0,/* 42 ms */
1187 BLINK_84MS = 1,/* 84 ms */
1188 BLINK_170MS = 2,/* 170 ms */
1189 BLINK_340MS = 3,/* 340 ms */
1190 BLINK_670MS = 4,/* 670 ms */
1191};
1192
1193/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1194#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1195 /* Bit 13..12: reserved */
1196#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1197#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1198#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1199#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1200#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1201#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1202
1203enum {
1204 MO_LED_NORM = 0,
1205 MO_LED_BLINK = 1,
1206 MO_LED_OFF = 2,
1207 MO_LED_ON = 3,
1208};
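A hedged sketch of how the two LED registers fit together: blink timing goes into PHY_MARV_LED_CTRL, manual forcing into PHY_MARV_LED_OVER. The gm_phy_write() 16-bit PHY write helper is assumed to exist elsewhere in the driver, and the particular values are only an example:

static inline void led_sketch(struct sky2_hw *hw, unsigned port)
{
	/* 170 ms pulse stretch, 84 ms blink rate: 0x4000 | 0x0100 */
	u16 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS) |
		      PHY_M_LED_BLINK_RT(BLINK_84MS);
	/* force the 1000 Mbps LED on and the Rx LED off */
	u16 ledover = PHY_M_LED_MO_1000(MO_LED_ON) |
		      PHY_M_LED_MO_RX(MO_LED_OFF);

	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
	gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
}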
1209
1210/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1211enum {
1212 PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1213 PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1214 PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1215 PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1216 PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1217};
1218
1219/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1220enum {
1221 PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1222 PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1223 PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1224 PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1225 PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1226 PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1227 PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1228 /* (88E1111 only) */
1229
1230 PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1231 PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1232 PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1233};
1234
1235/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1236/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1237 /* Bit 15..12: reserved (used internally) */
1238enum {
1239 PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1240 PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1241 PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1242};
1243
1244#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
1245#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
1246#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
1247
1248enum {
1249 LED_PAR_CTRL_COLX = 0x00,
1250 LED_PAR_CTRL_ERROR = 0x01,
1251 LED_PAR_CTRL_DUPLEX = 0x02,
1252 LED_PAR_CTRL_DP_COL = 0x03,
1253 LED_PAR_CTRL_SPEED = 0x04,
1254 LED_PAR_CTRL_LINK = 0x05,
1255 LED_PAR_CTRL_TX = 0x06,
1256 LED_PAR_CTRL_RX = 0x07,
1257 LED_PAR_CTRL_ACT = 0x08,
1258 LED_PAR_CTRL_LNK_RX = 0x09,
1259 LED_PAR_CTRL_LNK_AC = 0x0a,
1260 LED_PAR_CTRL_ACT_BL = 0x0b,
1261 LED_PAR_CTRL_TX_BL = 0x0c,
1262 LED_PAR_CTRL_RX_BL = 0x0d,
1263 LED_PAR_CTRL_COL_BL = 0x0e,
1264 LED_PAR_CTRL_INACT = 0x0f
1265};
1266
1267/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1268enum {
1269 PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1270 PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1271 PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1272};
1273
1274/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
1275/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1276enum {
1277 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1278 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1279 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1280 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
1281};
1282#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
1283
1284/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1285enum {
1286 PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1287 PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1288 PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1289 PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1290};
1291
1292#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1293#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1294#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1295#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1296
1297/* GMAC registers */
1298/* Port Registers */
1299enum {
1300 GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1301 GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1302 GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1303 GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1304 GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1305 GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1306 GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1307/* Source Address Registers */
1308 GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1309 GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1310 GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1311 GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1312 GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1313 GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1314
1315/* Multicast Address Hash Registers */
1316 GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1317 GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1318 GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1319 GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1320
1321/* Interrupt Source Registers */
1322 GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1323 GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1324 GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1325
1326/* Interrupt Mask Registers */
1327 GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1328 GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1329 GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1330
1331/* Serial Management Interface (SMI) Registers */
1332 GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1333 GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1334 GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1335};
1336
1337/* MIB Counters */
1338#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1339#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1340
1341/*
1342 * MIB Counters base address definitions (low word) -
1343 * use offset 4 for access to high word (32 bit r/o)
1344 */
1345enum {
1346 GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1347 GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1348 GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1349 GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1350 GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1351 /* GM_MIB_CNT_BASE + 40: reserved */
1352 GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1353 GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1354 GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1355 GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1356 GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1357 GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1358 GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1359 GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
1360 GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
1361 GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
1362 GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
1363 GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
1364 GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
1365 GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
1366 GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
1367 /* GM_MIB_CNT_BASE + 168: reserved */
1368 GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
1369 /* GM_MIB_CNT_BASE + 184: reserved */
1370 GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
1371 GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
1372 GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
1373 GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
1374 GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
1375 GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
1376 GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
1377 GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
1378 GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
1379 GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
1380 GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
1381 GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
1382 GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
1383
1384 GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
1385 GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
1386 GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
1387 GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
1388 GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
1389 GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1390};
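The note above means each counter's upper 16 bits sit four bytes past the listed offset. A minimal read sketch, assuming a gma_read16()-style 16-bit GMAC accessor (placeholder name, not part of this patch):

static inline u32 gm_mib_read32_sketch(struct sky2_hw *hw, unsigned port,
				       unsigned reg)
{
	/* low word at "reg", high word at "reg + 4" */
	return (u32)gma_read16(hw, port, reg) |
	       (u32)gma_read16(hw, port, reg + 4) << 16;
}
/* GM_RXO_OK_LO/GM_RXO_OK_HI (and the ERR pair) are in turn the low and
 * high 32-bit halves of a 64-bit octet count. */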
1391
1392/* GMAC Bit Definitions */
1393/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1394enum {
1395 GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1396 GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1397 GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1398 GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1399 GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1400 GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1401 GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1402 GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1403
1404 GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1405 GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1406 GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1407 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1408 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1409};
1410
1411/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1412enum {
1413 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1414 GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1415 GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1416 GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1417 GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1418 GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1419 GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1420 GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1421 GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1422 GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1423 GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1424 GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1425 GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1426 GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1427 GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1428};
1429
1430#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1431#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
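Worked example (illustrative only): with auto-updates disabled, speed and duplex are forced directly through GM_GP_CTRL, e.g. for 1000 Mbps full duplex:

/*
 * GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL | GM_GPCR_AU_ALL_DIS
 *	= 0x0088 | 0x0020 | 0x0007 = 0x00af
 * (receive/transmit would additionally be enabled via
 *  GM_GPCR_RX_ENA / GM_GPCR_TX_ENA)
 */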
1432
1433/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1434enum {
1435 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1436 GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1437 GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1438 GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
1439};
1440
1441#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1442#define TX_COL_DEF 0x04
1443
1444/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1445enum {
1446 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1447 GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1448 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1449 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1450};
1451
1452/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1453enum {
1454 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1455 GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1456 GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1457 GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
1458
1459 TX_JAM_LEN_DEF = 0x03,
1460 TX_JAM_IPG_DEF = 0x0b,
1461 TX_IPG_JAM_DEF = 0x1c,
1462 TX_BOF_LIM_DEF = 0x04,
1463};
1464
1465#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1466#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1467#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
1468#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
1469
1470
1471/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1472enum {
1473 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1474 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
1475 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
1476 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
1477 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1478};
1479
1480#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1481#define DATA_BLIND_DEF 0x04
1482
1483#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
1484#define IPG_DATA_DEF 0x1e
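Worked example (illustrative only): the *_DEF values above plug straight into the corresponding field macros, giving these register images:

/*
 * GM_TX_PARAM:    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
 *                 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 *                 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)  = 0xd7c0
 *
 * GM_SERIAL_MODE: DATA_BLIND_VAL(DATA_BLIND_DEF) |
 *                 GM_SMOD_VLAN_ENA |
 *                 IPG_DATA_VAL(IPG_DATA_DEF)       = 0x221e
 */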
1485
1486/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1487enum {
1488 GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
1489 GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
1490 GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1491 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1492 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1493};
1494
1495#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
1496#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
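A hedged sketch of the SMI access sequence these bits imply, assuming gma_read16()/gma_write16()-style 16-bit GMAC accessors (placeholder names). To read a PHY register:

static inline u16 smi_phy_read_sketch(struct sky2_hw *hw, unsigned port, u16 reg)
{
	/* address the PHY and register, opcode = read */
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) |
		    GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	/* busy-wait until the read has completed (no timeout in this sketch) */
	while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
		cpu_relax();

	return gma_read16(hw, port, GM_SMI_DATA);
}

A write goes the other way round: put the value in GM_SMI_DATA first, issue the same control word without GM_SMI_CT_OP_RD, and poll until GM_SMI_CT_BUSY clears.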
1497
1498/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1499enum {
1500 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1501 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1502};
1503
1504/* Receive Frame Status Encoding */
1505enum {
1506 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1507 GMR_FS_VLAN = 1<<13, /* VLAN Packet */
1508 GMR_FS_JABBER = 1<<12, /* Jabber Packet */
1509 GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
1510 GMR_FS_MC = 1<<10, /* Multicast Packet */
1511 GMR_FS_BC = 1<<9, /* Broadcast Packet */
1512 GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
1513 GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
1514 GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
1515 GMR_FS_MII_ERR = 1<<5, /* MII Error */
1516 GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
1517 GMR_FS_FRAGMENT = 1<<3, /* Fragment */
1518
1519 GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
1520 GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
1521
1522 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1523 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1524 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1525 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1526};
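/*
 * Illustrative helper (not from the original patch): how the receive frame
 * status word is meant to be decoded - the length occupies the upper
 * 16 bits (GMR_FS_LEN) and a frame is usable only when GMR_FS_RX_OK is set
 * and none of the GMR_FS_ANY_ERR bits are.
 */
static inline int gmr_fs_frame_ok(u32 status, unsigned int expected_len)
{
	unsigned int len = status >> 16;	/* GMR_FS_LEN field */

	return (status & GMR_FS_RX_OK) &&
		!(status & GMR_FS_ANY_ERR) &&
		len == expected_len;
}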
1527
1528/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1529enum {
1530 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1531 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1532 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1533 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1534
1535 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1536 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1537 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1538
1539 GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1540 GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1541 GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1542 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1543 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1544 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1545 GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
1546
1547 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1548 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1549 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1550 GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1551
1552 RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1553
1554 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1555};
1556
1557
1558/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1559enum {
1560 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1561 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1562
1563 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1564 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1565 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
1566
1567 GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1568 GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1569 GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1570};
1571
1572/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1573enum {
1574 GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1575 GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1576 GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1577};
1578
1579/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
1580enum {
1581 Y2_ASF_OS_PRES = 1<<4, /* ASF operation system present */
1582 Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
1583 Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
1584 Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
1585 Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
1586
1587 Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
1588 Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
1589};
1590
1591/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
1592enum {
1593 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1594 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1595};
1596
1597/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1598enum {
1599 SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
1600 SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
1601 SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
1602 SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
1603 SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
1604};
1605
1606/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1607enum {
1608 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1609 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1610 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
1611 GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
1612 GMC_PAUSE_ON = 1<<3, /* Pause On */
1613 GMC_PAUSE_OFF = 1<<2, /* Pause Off */
1614 GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
1615 GMC_RST_SET = 1<<0, /* Set GMAC Reset */
1616};
1617
1618/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1619enum {
1620 GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
1621 GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
1622 GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
1623 GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
1624 GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
1625 GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
1626 GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
1627 GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
1628 GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
1629 GPC_ANEG_0 = 1<<19, /* ANEG[0] */
1630 GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
1631 GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
1632 GPC_ANEG_3 = 1<<16, /* ANEG[3] */
1633 GPC_ANEG_2 = 1<<15, /* ANEG[2] */
1634 GPC_ANEG_1 = 1<<14, /* ANEG[1] */
1635 GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
1636 GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
1637 GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
1638 GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
1639 GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
1640 GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
1641 /* Bits 7..2: reserved */
1642 GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
1643 GPC_RST_SET = 1<<0, /* Set GPHY Reset */
1644};
1645
1646/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1647/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1648enum {
1649 GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
1650 GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
1651 GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
1652 GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
1653 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1654 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1655
1656#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1657
1658/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1659 /* Bits 15.. 2: reserved */
1660 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1661 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1662
1663
1664/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1665 WOL_CTL_LINK_CHG_OCC = 1<<15,
1666 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1667 WOL_CTL_PATTERN_OCC = 1<<13,
1668 WOL_CTL_CLEAR_RESULT = 1<<12,
1669 WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
1670 WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
1671 WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
1672 WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
1673 WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
1674 WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
1675 WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
1676 WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
1677 WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
1678 WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
1679 WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
1680 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1681};
1682
1683#define WOL_CTL_DEFAULT \
1684 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1685 WOL_CTL_DIS_PME_ON_PATTERN | \
1686 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1687 WOL_CTL_DIS_LINK_CHG_UNIT | \
1688 WOL_CTL_DIS_PATTERN_UNIT | \
1689 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1690
1691/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1692#define WOL_CTL_PATT_ENA(x) (1 << (x))
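/*
 * Illustrative sketch (not from the original patch): enabling wake-on-LAN
 * for magic packets only, starting from the all-disabled default above.
 * WOL_CTRL_STAT is assumed to be the WOL control/status register offset
 * defined earlier in this header; sky2_write16() is defined near the end.
 *
 *	u16 wol = WOL_CTL_DEFAULT;
 *	wol &= ~(WOL_CTL_DIS_PME_ON_MAGIC_PKT | WOL_CTL_DIS_MAGIC_PKT_UNIT);
 *	wol |= WOL_CTL_ENA_PME_ON_MAGIC_PKT | WOL_CTL_ENA_MAGIC_PKT_UNIT;
 *	sky2_write16(hw, WOL_CTRL_STAT, wol);
 */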
1693
1694
1695/* Control flags */
1696enum {
1697 UDPTCP = 1<<0,
1698 CALSUM = 1<<1,
1699 WR_SUM = 1<<2,
1700 INIT_SUM= 1<<3,
1701 LOCK_SUM= 1<<4,
1702 INS_VLAN= 1<<5,
1703 FRC_STAT= 1<<6,
1704 EOP = 1<<7,
1705};
1706
1707enum {
1708 HW_OWNER = 1<<7,
1709 OP_TCPWRITE = 0x11,
1710 OP_TCPSTART = 0x12,
1711 OP_TCPINIT = 0x14,
1712 OP_TCPLCK = 0x18,
1713 OP_TCPCHKSUM = OP_TCPSTART,
1714 OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
1715 OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
1716 OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
1717 OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
1718
1719 OP_ADDR64 = 0x21,
1720 OP_VLAN = 0x22,
1721 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
1722 OP_LRGLEN = 0x24,
1723 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
1724 OP_BUFFER = 0x40,
1725 OP_PACKET = 0x41,
1726 OP_LARGESEND = 0x43,
1727
1728/* YUKON-2 STATUS opcodes defines */
1729 OP_RXSTAT = 0x60,
1730 OP_RXTIMESTAMP = 0x61,
1731 OP_RXVLAN = 0x62,
1732 OP_RXCHKS = 0x64,
1733 OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
1734 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
1735 OP_RSS_HASH = 0x65,
1736 OP_TXINDEXLE = 0x68,
1737};
1738
1739/* Yukon 2 hardware interface
1740 * Not tested on big endian
1741 */
1742struct sky2_tx_le {
1743 union {
1744 u32 addr;
1745 struct {
1746 u16 offset;
1747 u16 start;
1748 } csum __attribute((packed));
1749 struct {
1750 u16 size;
1751 u16 rsvd;
1752 } tso __attribute((packed));
1753 } tx;
1754 u16 length; /* also vlan tag or checksum start */
1755 u8 ctrl;
1756 u8 opcode;
1757} __attribute((packed));
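/*
 * Illustrative sketch (not from the original patch): a single-fragment
 * packet is described by one transmit list element roughly as below, where
 * "mapping" is a DMA address from pci_map_single() and the checksum/VLAN
 * flags from the control-flag enum above are OR-ed into ctrl when used.
 * Buffers above 4 GB need a preceding OP_ADDR64 element carrying the upper
 * address bits.
 *
 *	le->tx.addr = cpu_to_le32((u32) mapping);
 *	le->length = cpu_to_le16(len);
 *	le->ctrl = EOP;
 *	le->opcode = OP_PACKET | HW_OWNER;
 */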
1758
1759struct sky2_rx_le {
1760 u32 addr;
1761 u16 length;
1762 u8 ctrl;
1763 u8 opcode;
1764} __attribute((packed));
1765
1766struct sky2_status_le {
1767 u32 status; /* also checksum */
1768 u16 length; /* also vlan tag */
1769 u8 link;
1770 u8 opcode;
1771} __attribute((packed));
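/*
 * Illustrative sketch (not from the original patch): the hardware posts
 * completed work into a ring of these status elements.  Completed entries
 * carry HW_OWNER in the opcode; masking it off leaves one of the Yukon-2
 * status opcodes above, with le->link selecting the port, le->length
 * carrying the frame length or VLAN tag, and le->status the opcode-specific
 * payload (frame status bits, checksum, timestamp, or transmit index).
 *
 *	switch (le->opcode & ~HW_OWNER) {
 *	case OP_RXSTAT:
 *	case OP_RXCHKS:
 *	case OP_RXVLAN:
 *	case OP_RXTIMESTAMP:
 *	case OP_TXINDEXLE:
 *		break;
 *	}
 */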
1772
1773struct ring_info {
1774 struct sk_buff *skb;
1775 dma_addr_t mapaddr;
1776 u16 maplen;
1777 u16 idx;
1778};
1779
1780struct sky2_port {
1781 struct sky2_hw *hw;
1782 struct net_device *netdev;
1783 unsigned port;
1784 u32 msg_enable;
1785
1786 struct ring_info *tx_ring;
1787 struct sky2_tx_le *tx_le;
1788 spinlock_t tx_lock;
1789 u32 tx_addr64;
1790 u16 tx_cons; /* next le to check */
1791 u16 tx_prod; /* next le to use */
1792 u16 tx_pending;
1793 u16 tx_last_put;
1794 u16 tx_last_mss;
1795
1796 struct ring_info *rx_ring;
1797 struct sky2_rx_le *rx_le;
1798 u32 rx_addr64;
1799 u16 rx_next; /* next le to check */
1800 u16 rx_put; /* next le index to use */
1801 u16 rx_pending;
1802 u16 rx_last_put;
1803#ifdef SKY2_VLAN_TAG_USED
1804 u16 rx_tag;
1805 struct vlan_group *vlgrp;
1806#endif
1807
1808 dma_addr_t rx_le_map;
1809 dma_addr_t tx_le_map;
1810 u32 advertising; /* ADVERTISED_ bits */
1811 u16 speed; /* SPEED_1000, SPEED_100, ... */
1812 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1813 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1814 u8 rx_pause;
1815 u8 tx_pause;
1816 u8 rx_csum;
1817 u8 wol;
1818
1819 struct tasklet_struct phy_task;
1820 struct net_device_stats net_stats;
1821};
1822
1823struct sky2_hw {
1824 void __iomem *regs;
1825 struct pci_dev *pdev;
1826 u32 intr_mask;
1827 struct net_device *dev[2];
1828
1829 int pm_cap;
1830 u8 chip_id;
1831 u8 chip_rev;
1832 u8 copper;
1833 u8 ports;
1834
1835 struct sky2_status_le *st_le;
1836 u32 st_idx;
1837 dma_addr_t st_dma;
1838
1839 spinlock_t phy_lock;
1840};
1841
1842/* Register accessor for memory mapped device */
1843static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
1844{
1845 return readl(hw->regs + reg);
1846}
1847
1848static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
1849{
1850 return readw(hw->regs + reg);
1851}
1852
1853static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
1854{
1855 return readb(hw->regs + reg);
1856}
1857
1858/* This should probably go away, bus-based tweaks suck */
1859static inline int is_pciex(const struct sky2_hw *hw)
1860{
1861 u32 status;
1862 pci_read_config_dword(hw->pdev, PCI_DEV_STATUS, &status);
1863 return (status & PCI_OS_PCI_X) == 0;
1864}
1865
1866static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
1867{
1868 writel(val, hw->regs + reg);
1869}
1870
1871static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
1872{
1873 writew(val, hw->regs + reg);
1874}
1875
1876static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
1877{
1878 writeb(val, hw->regs + reg);
1879}
1880
1881/* Yukon PHY related registers */
1882#define SK_GMAC_REG(port,reg) \
1883 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
1884#define GM_PHY_RETRIES 100
1885
1886static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
1887{
1888 return sky2_read16(hw, SK_GMAC_REG(port,reg));
1889}
1890
1891static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
1892{
1893 unsigned base = SK_GMAC_REG(port, reg);
1894 return (u32) sky2_read16(hw, base)
1895 | (u32) sky2_read16(hw, base+4) << 16;
1896}
1897
1898static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
1899{
1900 sky2_write16(hw, SK_GMAC_REG(port,r), v);
1901}
1902
1903static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
1904 const u8 *addr)
1905{
1906 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
1907 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
1908 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
1909}
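/*
 * Illustrative sketch (not from the original patch): gma_set_addr() writes
 * a 6-byte Ethernet address into three consecutive 16-bit GMAC registers,
 * e.g. to program the station address:
 *
 *	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
 *	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
 *
 * where GM_SRC_ADDR_1L and GM_SRC_ADDR_2L are assumed to be the source
 * address register offsets defined earlier in this header.
 */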
1910#endif
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a10cd184d597..5c2824be4ee6 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -100,14 +100,14 @@
100#define SMC_IO_SHIFT 0 100#define SMC_IO_SHIFT 0
101#define SMC_NOWAIT 1 101#define SMC_NOWAIT 1
102 102
103#define SMC_inb(a, r) inb((a) + (r)) 103#define SMC_inb(a, r) readb((a) + (r))
104#define SMC_insb(a, r, p, l) insb((a) + (r), p, (l)) 104#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
105#define SMC_inw(a, r) inw((a) + (r)) 105#define SMC_inw(a, r) readw((a) + (r))
106#define SMC_insw(a, r, p, l) insw((a) + (r), p, l) 106#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
107#define SMC_outb(v, a, r) outb(v, (a) + (r)) 107#define SMC_outb(v, a, r) writeb(v, (a) + (r))
108#define SMC_outsb(a, r, p, l) outsb((a) + (r), p, (l)) 108#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
109#define SMC_outw(v, a, r) outw(v, (a) + (r)) 109#define SMC_outw(v, a, r) writew(v, (a) + (r))
110#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l) 110#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
111 111
112#define set_irq_type(irq, type) do {} while (0) 112#define set_irq_type(irq, type) do {} while (0)
113 113
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index de399563a9db..081717d01374 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -128,6 +128,8 @@ static struct pci_device_id gem_pci_tbl[] = {
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
129 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 129 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
131 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
131 {0, } 133 {0, }
132}; 134};
133 135
diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c
index 7c2cf2e76300..032c0f81928e 100644
--- a/drivers/net/wan/sdladrv.c
+++ b/drivers/net/wan/sdladrv.c
@@ -1994,7 +1994,7 @@ static int detect_s514 (sdlahw_t* hw)
1994 modname, hw->irq); 1994 modname, hw->irq);
1995 1995
1996 /* map the physical PCI memory to virtual memory */ 1996 /* map the physical PCI memory to virtual memory */
1997 (void *)hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr, 1997 hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr,
1998 (unsigned long)MAX_SIZEOF_S514_MEMORY); 1998 (unsigned long)MAX_SIZEOF_S514_MEMORY);
1999 /* map the physical control register memory to virtual memory */ 1999 /* map the physical control register memory to virtual memory */
2000 hw->vector = (unsigned long)ioremap( 2000 hw->vector = (unsigned long)ioremap(
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index b0d195d1721a..5e7c7e944c9d 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1110,8 +1110,7 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1110 error->elem_len = elem_len; 1110 error->elem_len = elem_len;
1111 error->log_len = log_len; 1111 error->log_len = log_len;
1112 error->elem = (struct ipw_error_elem *)error->payload; 1112 error->elem = (struct ipw_error_elem *)error->payload;
1113 error->log = (struct ipw_event *)(error->elem + 1113 error->log = (struct ipw_event *)(error->elem + elem_len);
1114 (sizeof(*error->elem) * elem_len));
1115 1114
1116 ipw_capture_event_log(priv, log_len, error->log); 1115 ipw_capture_event_log(priv, log_len, error->log);
1117 1116
@@ -8926,6 +8925,10 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8926 struct ipw_scan_request_ext scan; 8925 struct ipw_scan_request_ext scan;
8927 int err = 0, scan_type; 8926 int err = 0, scan_type;
8928 8927
8928 if (!(priv->status & STATUS_INIT) ||
8929 (priv->status & STATUS_EXIT_PENDING))
8930 return 0;
8931
8929 down(&priv->sem); 8932 down(&priv->sem);
8930 8933
8931 if (priv->status & STATUS_RF_KILL_MASK) { 8934 if (priv->status & STATUS_RF_KILL_MASK) {