Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/fore200e.h | 1
-rw-r--r--  drivers/atm/fore200e_mkfirm.c | 2
-rw-r--r--  drivers/atm/he.h | 2
-rw-r--r--  drivers/atm/idt77252.c | 7
-rw-r--r--  drivers/atm/idt77252.h | 4
-rw-r--r--  drivers/atm/nicstarmac.copyright | 2
-rw-r--r--  drivers/net/3c509.c | 17
-rw-r--r--  drivers/net/3c515.c | 4
-rw-r--r--  drivers/net/Kconfig | 6
-rw-r--r--  drivers/net/atlx/atl1.c | 5
-rw-r--r--  drivers/net/au1000_eth.c | 7
-rw-r--r--  drivers/net/bfin_mac.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 706
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 81
-rw-r--r--  drivers/net/bonding/bonding.h | 13
-rw-r--r--  drivers/net/cassini.c | 11
-rw-r--r--  drivers/net/cpmac.c | 234
-rw-r--r--  drivers/net/cxgb3/adapter.h | 18
-rw-r--r--  drivers/net/cxgb3/common.h | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 19
-rw-r--r--  drivers/net/cxgb3/sge.c | 391
-rw-r--r--  drivers/net/cxgb3/t3_cpl.h | 11
-rw-r--r--  drivers/net/dl2k.c | 8
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ehea/ehea_main.c | 5
-rw-r--r--  drivers/net/forcedeth.c | 60
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 33
-rw-r--r--  drivers/net/hamachi.c | 12
-rw-r--r--  drivers/net/hamradio/scc.c | 3
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1078
-rw-r--r--  drivers/net/ns83820.c | 9
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 12
-rw-r--r--  drivers/net/pcnet32.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 11
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/broadcom.c | 201
-rw-r--r--  drivers/net/phy/mdio-ofgpio.c | 205
-rw-r--r--  drivers/net/phy/phy_device.c | 1
-rw-r--r--  drivers/net/ppp_generic.c | 22
-rw-r--r--  drivers/net/pppol2tp.c | 13
-rw-r--r--  drivers/net/s2io-regs.h | 2
-rw-r--r--  drivers/net/s2io.c | 496
-rw-r--r--  drivers/net/s2io.h | 22
-rw-r--r--  drivers/net/sb1250-mac.c | 67
-rw-r--r--  drivers/net/sc92031.c | 8
-rw-r--r--  drivers/net/sfc/Kconfig | 2
-rw-r--r--  drivers/net/sfc/Makefile | 2
-rw-r--r--  drivers/net/sfc/bitfield.h | 7
-rw-r--r--  drivers/net/sfc/boards.c | 11
-rw-r--r--  drivers/net/sfc/boards.h | 3
-rw-r--r--  drivers/net/sfc/efx.c | 86
-rw-r--r--  drivers/net/sfc/falcon.c | 161
-rw-r--r--  drivers/net/sfc/falcon.h | 5
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h | 4
-rw-r--r--  drivers/net/sfc/falcon_io.h | 29
-rw-r--r--  drivers/net/sfc/falcon_xmac.c | 10
-rw-r--r--  drivers/net/sfc/i2c-direct.c | 381
-rw-r--r--  drivers/net/sfc/i2c-direct.h | 91
-rw-r--r--  drivers/net/sfc/net_driver.h | 55
-rw-r--r--  drivers/net/sfc/rx.c | 48
-rw-r--r--  drivers/net/sfc/selftest.c | 14
-rw-r--r--  drivers/net/sfc/sfe4001.c | 126
-rw-r--r--  drivers/net/sfc/tenxpress.c | 4
-rw-r--r--  drivers/net/sfc/tx.c | 11
-rw-r--r--  drivers/net/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/sfc/xfp_phy.c | 4
-rw-r--r--  drivers/net/sky2.c | 202
-rw-r--r--  drivers/net/sky2.h | 23
-rw-r--r--  drivers/net/tg3.c | 1268
-rw-r--r--  drivers/net/tg3.h | 40
-rw-r--r--  drivers/net/tlan.c | 490
-rw-r--r--  drivers/net/tlan.h | 26
-rw-r--r--  drivers/net/tokenring/3c359.h | 2
-rw-r--r--  drivers/net/tokenring/olympic.h | 2
-rw-r--r--  drivers/net/tsi108_eth.c | 6
-rw-r--r--  drivers/net/tulip/uli526x.c | 16
-rw-r--r--  drivers/net/ucc_geth.c | 9
-rw-r--r--  drivers/net/usb/asix.c | 4
-rw-r--r--  drivers/net/usb/catc.c | 5
-rw-r--r--  drivers/net/usb/rndis_host.c | 6
-rw-r--r--  drivers/net/via-velocity.c | 25
-rw-r--r--  drivers/net/virtio_net.c | 3
-rw-r--r--  drivers/net/wan/hdlc.c | 19
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 82
-rw-r--r--  drivers/net/wireless/airo.c | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 9
-rw-r--r--  drivers/net/wireless/ipw2200.c | 1
-rw-r--r--  drivers/net/wireless/libertas/ethtool.c | 27
-rw-r--r--  drivers/net/wireless/libertas/main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_cs.c | 1
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 14
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 6
99 files changed, 4672 insertions(+), 2511 deletions(-)
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index 183841cc8fdf..8dd4aa76c3bd 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -1,4 +1,3 @@
-/* $Id: fore200e.h,v 1.4 2000/04/14 10:10:34 davem Exp $ */
 #ifndef _FORE200E_H
 #define _FORE200E_H
 
diff --git a/drivers/atm/fore200e_mkfirm.c b/drivers/atm/fore200e_mkfirm.c
index 2ebe1a1e6f8b..520e14b488ff 100644
--- a/drivers/atm/fore200e_mkfirm.c
+++ b/drivers/atm/fore200e_mkfirm.c
@@ -1,6 +1,4 @@
 /*
-  $Id: fore200e_mkfirm.c,v 1.1 2000/02/21 16:04:32 davem Exp $
-
   mkfirm.c: generates a C readable file from a binary firmware image
 
   Christophe Lizzi (lizzi@{csti.fr, cnam.fr}), June 1999.
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index 1dc277547a73..fe6cd15a78a4 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -1,5 +1,3 @@
-/* $Id: he.h,v 1.4 2003/05/06 22:48:00 chas Exp $ */
-
 /*
 
   he.h
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 28d77b5195de..3a504e94a4d9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1,8 +1,4 @@
 /*******************************************************************
- * ident "$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $"
- *
- * $Author: ecd $
- * $Date: 2001/11/11 08:13:54 $
  *
  * Copyright (c) 2000 ATecoM GmbH
  *
@@ -29,9 +25,6 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  *******************************************************************/
-static char const rcsid[] =
-"$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $";
-
 
 #include <linux/module.h>
 #include <linux/pci.h>
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 6f2b4a5875fb..e83eaf120da0 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -1,8 +1,4 @@
 /*******************************************************************
- * ident "$Id: idt77252.h,v 1.2 2001/11/11 08:13:54 ecd Exp $"
- *
- * $Author: ecd $
- * $Date: 2001/11/11 08:13:54 $
  *
  * Copyright (c) 2000 ATecoM GmbH
  *
diff --git a/drivers/atm/nicstarmac.copyright b/drivers/atm/nicstarmac.copyright
index 2e15b39fac4f..180531a83c62 100644
--- a/drivers/atm/nicstarmac.copyright
+++ b/drivers/atm/nicstarmac.copyright
@@ -13,7 +13,7 @@
  *
  * Modified to work with the IDT7721 nicstar -- AAL5 (tested) only.
  *
- * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997 $Revision: 1.1 $ $Date: 1999/08/20 11:00:11 $
+ * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997
  *
  * Linux driver for the IDT77201 NICStAR PCI ATM controller.
  * PHY component is expected to be 155 Mbps S/UNI-Lite or IDT 77155;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index e6c545fe5f58..b9d097c9f6bb 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -413,7 +413,7 @@ static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
 {
 	short i;
 	int ioaddr, irq, if_port;
-	u16 phys_addr[3];
+	__be16 phys_addr[3];
 	struct net_device *dev = NULL;
 	int err;
 
@@ -605,7 +605,7 @@ static int __init el3_mca_probe(struct device *device)
 
 	short i;
 	int ioaddr, irq, if_port;
-	u16 phys_addr[3];
+	__be16 phys_addr[3];
 	struct net_device *dev = NULL;
 	u_char pos4, pos5;
 	struct mca_device *mdev = to_mca_device(device);
@@ -635,14 +635,13 @@ static int __init el3_mca_probe(struct device *device)
 		printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
 	}
 	EL3WINDOW(0);
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < 3; i++)
 		phys_addr[i] = htons(read_eeprom(ioaddr, i));
-	}
 
 	dev = alloc_etherdev(sizeof (struct el3_private));
 	if (dev == NULL) {
 		release_region(ioaddr, EL3_IO_EXTENT);
 		return -ENOMEM;
 	}
 
 	netdev_boot_setup_check(dev);
@@ -668,7 +667,7 @@ static int __init el3_eisa_probe (struct device *device)
 {
 	short i;
 	int ioaddr, irq, if_port;
-	u16 phys_addr[3];
+	__be16 phys_addr[3];
 	struct net_device *dev = NULL;
 	struct eisa_device *edev;
 	int err;
@@ -1063,7 +1062,6 @@ el3_rx(struct net_device *dev)
 			struct sk_buff *skb;
 
 			skb = dev_alloc_skb(pkt_len+5);
-			dev->stats.rx_bytes += pkt_len;
 			if (el3_debug > 4)
 				printk("Receiving packet size %d status %4.4x.\n",
 				       pkt_len, rx_status);
@@ -1078,6 +1076,7 @@ el3_rx(struct net_device *dev)
 				skb->protocol = eth_type_trans(skb,dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
+				dev->stats.rx_bytes += pkt_len;
 				dev->stats.rx_packets++;
 				continue;
 			}
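
The u16 to __be16 change in this file is an endianness annotation for sparse (make C=1): read_eeprom() returns a CPU-order u16, htons() converts it to wire order, and the __be16 type records that fact so that mixed-order assignments get flagged. A minimal sketch of the pattern, assuming standard kernel headers; fill_mac() is a hypothetical helper, not part of this patch:

	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical helper: pack three EEPROM words into a MAC address. */
	static void fill_mac(u8 *dev_addr, const u16 *eeprom_words)
	{
		__be16 phys_addr[3];	/* big-endian, as stored on the wire */
		int i;

		for (i = 0; i < 3; i++)
			phys_addr[i] = htons(eeprom_words[i]);	/* u16 -> __be16 */

		memcpy(dev_addr, phys_addr, sizeof(phys_addr));	/* 6-byte MAC */
	}

With a plain u16 array, sparse would warn that the htons() result (a __be16) is being stored into a CPU-order variable.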
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 105a8c7ca7e9..e4e3241628d6 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -572,12 +572,16 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
 	int irq;
 	DECLARE_MAC_BUF(mac);
 
+#ifdef __ISAPNP__
 	if (idev) {
 		irq = pnp_irq(idev, 0);
 		vp->dev = &idev->dev;
 	} else {
 		irq = inw(ioaddr + 0x2002) & 15;
 	}
+#else
+	irq = inw(ioaddr + 0x2002) & 15;
+#endif
 
 	dev->base_addr = ioaddr;
 	dev->irq = irq;
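
The added preprocessor guards matter because ISAPnP helpers such as pnp_irq() only exist in ISAPnP-enabled builds; without __ISAPNP__ the driver must always read the IRQ from the card's own register window. A standalone sketch of the shape; corkscrew_find_irq() is a hypothetical helper, and the headers are assumed available in the respective configurations:

	#include <linux/io.h>
	#ifdef __ISAPNP__
	#include <linux/isapnp.h>
	#endif

	struct pnp_dev;	/* forward declaration for the non-PnP build */

	/* Hypothetical helper: prefer the PnP-assigned IRQ when available. */
	static int corkscrew_find_irq(int ioaddr, struct pnp_dev *idev)
	{
		int irq;

	#ifdef __ISAPNP__
		if (idev)
			irq = pnp_irq(idev, 0);		 /* IRQ from ISAPnP */
		else
			irq = inw(ioaddr + 0x2002) & 15; /* ask the card */
	#else
		irq = inw(ioaddr + 0x2002) & 15;	 /* no ISAPnP support */
	#endif
		return irq;
	}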
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e52533d75ae1..654a78c31087 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1670,7 +1670,7 @@ config SUNDANCE_MMIO
 
 config TLAN
 	tristate "TI ThunderLAN support"
-	depends on NET_PCI && (PCI || EISA) && !64BIT
+	depends on NET_PCI && (PCI || EISA)
 	---help---
 	  If you have a PCI Ethernet network card based on the ThunderLAN chip
 	  which is supported by this driver, say Y and read the
@@ -2228,6 +2228,7 @@ config VIA_VELOCITY
 config TIGON3
 	tristate "Broadcom Tigon3 support"
 	depends on PCI
+	select PHYLIB
 	help
 	  This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
 
@@ -2420,8 +2421,9 @@ config CHELSIO_T1_NAPI
 
 config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
-	depends on PCI
+	depends on PCI && INET
 	select FW_LOADER
+	select INET_LRO
 	help
 	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
 	  adapters.
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9c2394d49428..db04bfb3460f 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1876,7 +1876,8 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 
 		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(adapter->netdev,
+				       adapter->rx_buffer_len + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->net_stats.rx_dropped++;
@@ -2135,7 +2136,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
 			return -1;
 		}
 
-	if (skb->protocol == ntohs(ETH_P_IP)) {
+	if (skb->protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 
 		real_len = (((unsigned char *)iph - skb->data) +
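
Two idioms worth noting in this file: netdev_alloc_skb() ties the new skb to the receiving device at allocation time, and the protocol test now swaps the constant instead of the field. skb->protocol is __be16 (already network order), so comparing it against htons(ETH_P_IP) is sparse-clean and free at run time, because htons() of a constant folds at compile time. A small sketch; is_ipv4_frame() is hypothetical:

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper: true for IPv4 Ethernet frames. */
	static inline bool is_ipv4_frame(const struct sk_buff *skb)
	{
		/* the byte swap happens on the constant, at compile time */
		return skb->protocol == htons(ETH_P_IP);
	}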
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3634b5fd7919..7023d77bf380 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
  */
 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-
-	if (dev == NULL) {
-		printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
-		return IRQ_RETVAL(1);
-	}
+	struct net_device *dev = dev_id;
 
 	/* Handle RX interrupts first to minimize chance of overrun */
 
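
The deleted NULL check was itself buggy: it printed dev->name after concluding that dev was NULL. Since dev_id is whatever pointer the driver passed to request_irq(), the driver controls it and no cast from void * is needed in C. A minimal sketch of the idiomatic handler shape, with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id; /* implicit void * conversion */

		/* ... service the device's RX/TX events here ... */

		return IRQ_HANDLED;
	}

	/* At probe time the driver hands dev in as dev_id, e.g.:
	 * request_irq(dev->irq, example_interrupt, IRQF_SHARED, dev->name, dev);
	 */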
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 89c0018132ec..41443435ab1c 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -22,7 +22,6 @@
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
-#include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 50a40e433154..5b4af3cc2a44 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
 #define BOND_LINK_ARP_INTERV 0
 
 static int max_bonds = BOND_DEFAULT_MAX_BONDS;
+static int num_grat_arp = 1;
 static int miimon = BOND_LINK_MON_INTERV;
 static int updelay = 0;
 static int downdelay = 0;
@@ -99,11 +100,13 @@ static char *xmit_hash_policy = NULL;
 static int arp_interval = BOND_LINK_ARP_INTERV;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
 static char *arp_validate = NULL;
-static int fail_over_mac = 0;
+static char *fail_over_mac = NULL;
 struct bond_params bonding_defaults;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
+module_param(num_grat_arp, int, 0644);
+MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
 module_param(miimon, int, 0);
 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 module_param(updelay, int, 0);
@@ -133,8 +136,8 @@ module_param_array(arp_ip_target, charp, NULL, 0);
 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 module_param(arp_validate, charp, 0);
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
-module_param(fail_over_mac, int, 0);
-MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. 0 of off (default), 1 for on.");
+module_param(fail_over_mac, charp, 0);
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -187,6 +190,13 @@ struct bond_parm_tbl arp_validate_tbl[] = {
 {	NULL, -1},
 };
 
+struct bond_parm_tbl fail_over_mac_tbl[] = {
+{	"none",		BOND_FOM_NONE},
+{	"active",	BOND_FOM_ACTIVE},
+{	"follow",	BOND_FOM_FOLLOW},
+{	NULL,		-1},
+};
+
 /*-------------------------- Forward declarations ---------------------------*/
 
 static void bond_send_gratuitous_arp(struct bonding *bond);
@@ -261,14 +271,14 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
  */
 static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
 {
-	struct vlan_entry *vlan, *next;
+	struct vlan_entry *vlan;
 	int res = -ENODEV;
 
 	dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
 
 	write_lock_bh(&bond->lock);
 
-	list_for_each_entry_safe(vlan, next, &bond->vlan_list, vlan_list) {
+	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		if (vlan->vlan_id == vlan_id) {
 			list_del(&vlan->vlan_list);
 
@@ -970,6 +980,82 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct
 	}
 }
 
+/*
+ * bond_do_fail_over_mac
+ *
+ * Perform special MAC address swapping for fail_over_mac settings
+ *
+ * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ */
+static void bond_do_fail_over_mac(struct bonding *bond,
+				  struct slave *new_active,
+				  struct slave *old_active)
+{
+	u8 tmp_mac[ETH_ALEN];
+	struct sockaddr saddr;
+	int rv;
+
+	switch (bond->params.fail_over_mac) {
+	case BOND_FOM_ACTIVE:
+		if (new_active)
+			memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
+			       new_active->dev->addr_len);
+		break;
+	case BOND_FOM_FOLLOW:
+		/*
+		 * if new_active && old_active, swap them
+		 * if just old_active, do nothing (going to no active slave)
+		 * if just new_active, set new_active to bond's MAC
+		 */
+		if (!new_active)
+			return;
+
+		write_unlock_bh(&bond->curr_slave_lock);
+		read_unlock(&bond->lock);
+
+		if (old_active) {
+			memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
+			memcpy(saddr.sa_data, old_active->dev->dev_addr,
+			       ETH_ALEN);
+			saddr.sa_family = new_active->dev->type;
+		} else {
+			memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+			saddr.sa_family = bond->dev->type;
+		}
+
+		rv = dev_set_mac_address(new_active->dev, &saddr);
+		if (rv) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Error %d setting MAC of slave %s\n",
+			       bond->dev->name, -rv, new_active->dev->name);
+			goto out;
+		}
+
+		if (!old_active)
+			goto out;
+
+		memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+		saddr.sa_family = old_active->dev->type;
+
+		rv = dev_set_mac_address(old_active->dev, &saddr);
+		if (rv)
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Error %d setting MAC of slave %s\n",
+			       bond->dev->name, -rv, new_active->dev->name);
+out:
+		read_lock(&bond->lock);
+		write_lock_bh(&bond->curr_slave_lock);
+		break;
+	default:
+		printk(KERN_ERR DRV_NAME
+		       ": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
+		       bond->dev->name, bond->params.fail_over_mac);
+		break;
+	}
+
+}
+
+
 /**
  * find_best_interface - select the best available slave to be the active one
  * @bond: our bonding struct
@@ -1037,7 +1123,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
  * because it is apparently the best available slave we have, even though its
  * updelay hasn't timed out yet.
  *
- * Warning: Caller must hold curr_slave_lock for writing.
+ * If new_active is not NULL, caller must hold bond->lock for read and
+ * curr_slave_lock for write_bh.
  */
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
@@ -1048,6 +1135,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 	}
 
 	if (new_active) {
+		new_active->jiffies = jiffies;
+
 		if (new_active->link == BOND_LINK_BACK) {
 			if (USES_PRIMARY(bond->params.mode)) {
 				printk(KERN_INFO DRV_NAME
@@ -1059,7 +1148,6 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 
 			new_active->delay = 0;
 			new_active->link = BOND_LINK_UP;
-			new_active->jiffies = jiffies;
 
 			if (bond->params.mode == BOND_MODE_8023AD) {
 				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -1103,20 +1191,21 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 			bond_set_slave_active_flags(new_active);
 		}
 
-		/* when bonding does not set the slave MAC address, the bond MAC
-		 * address is the one of the active slave.
-		 */
 		if (new_active && bond->params.fail_over_mac)
-			memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
-			       new_active->dev->addr_len);
+			bond_do_fail_over_mac(bond, new_active, old_active);
+
+		bond->send_grat_arp = bond->params.num_grat_arp;
 		if (bond->curr_active_slave &&
 		    test_bit(__LINK_STATE_LINKWATCH_PENDING,
 			     &bond->curr_active_slave->dev->state)) {
 			dprintk("delaying gratuitous arp on %s\n",
 				bond->curr_active_slave->dev->name);
-			bond->send_grat_arp = 1;
-		} else
-			bond_send_gratuitous_arp(bond);
+		} else {
+			if (bond->send_grat_arp > 0) {
+				bond_send_gratuitous_arp(bond);
+				bond->send_grat_arp--;
+			}
+		}
 	}
 }
 
@@ -1129,7 +1218,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
  * - The primary_slave has got its link back.
  * - A slave has got its link back and there's no old curr_active_slave.
  *
- * Warning: Caller must hold curr_slave_lock for writing.
+ * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
  */
 void bond_select_active_slave(struct bonding *bond)
 {
@@ -1376,14 +1465,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			printk(KERN_WARNING DRV_NAME
 			       ": %s: Warning: The first slave device "
 			       "specified does not support setting the MAC "
-			       "address. Enabling the fail_over_mac option.",
+			       "address. Setting fail_over_mac to active.",
 			       bond_dev->name);
-			bond->params.fail_over_mac = 1;
-		} else if (!bond->params.fail_over_mac) {
+			bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+		} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
 			printk(KERN_ERR DRV_NAME
 			       ": %s: Error: The slave device specified "
 			       "does not support setting the MAC address, "
-			       "but fail_over_mac is not enabled.\n"
+			       "but fail_over_mac is not set to active.\n"
 			       , bond_dev->name);
 			res = -EOPNOTSUPP;
 			goto err_undo_flags;
@@ -1490,6 +1579,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	bond_compute_features(bond);
 
+	write_unlock_bh(&bond->lock);
+
+	read_lock(&bond->lock);
+
 	new_slave->last_arp_rx = jiffies;
 
 	if (bond->params.miimon && !bond->params.use_carrier) {
@@ -1566,6 +1659,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}
 
+	write_lock_bh(&bond->curr_slave_lock);
+
 	switch (bond->params.mode) {
 	case BOND_MODE_ACTIVEBACKUP:
 		bond_set_slave_inactive_flags(new_slave);
@@ -1613,9 +1708,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		break;
 	} /* switch(bond_mode) */
 
+	write_unlock_bh(&bond->curr_slave_lock);
+
 	bond_set_carrier(bond);
 
-	write_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 
 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
 	if (res)
@@ -1639,6 +1736,10 @@ err_unset_master:
 
 err_restore_mac:
 	if (!bond->params.fail_over_mac) {
+		/* XXX TODO - fom follow mode needs to change master's
+		 * MAC if this slave's MAC is in use by the bond, or at
+		 * least print a warning.
+		 */
 		memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
 		addr.sa_family = slave_dev->type;
 		dev_set_mac_address(slave_dev, &addr);
@@ -1693,20 +1794,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		return -EINVAL;
 	}
 
-	mac_addr_differ = memcmp(bond_dev->dev_addr,
-				 slave->perm_hwaddr,
-				 ETH_ALEN);
-	if (!mac_addr_differ && (bond->slave_cnt > 1)) {
-		printk(KERN_WARNING DRV_NAME
-		       ": %s: Warning: the permanent HWaddr of %s - "
-		       "%s - is still in use by %s. "
-		       "Set the HWaddr of %s to a different address "
-		       "to avoid conflicts.\n",
-		       bond_dev->name,
-		       slave_dev->name,
-		       print_mac(mac, slave->perm_hwaddr),
-		       bond_dev->name,
-		       slave_dev->name);
+	if (!bond->params.fail_over_mac) {
+		mac_addr_differ = memcmp(bond_dev->dev_addr, slave->perm_hwaddr,
+					 ETH_ALEN);
+		if (!mac_addr_differ && (bond->slave_cnt > 1))
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: Warning: the permanent HWaddr of %s - "
+			       "%s - is still in use by %s. "
+			       "Set the HWaddr of %s to a different address "
+			       "to avoid conflicts.\n",
+			       bond_dev->name, slave_dev->name,
+			       print_mac(mac, slave->perm_hwaddr),
+			       bond_dev->name, slave_dev->name);
 	}
 
 	/* Inform AD package of unbinding of slave. */
@@ -1833,7 +1932,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	if (!bond->params.fail_over_mac) {
+	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
 		/* restore original ("permanent") mac address */
 		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
 		addr.sa_family = slave_dev->type;
@@ -2144,7 +2243,7 @@ static int __bond_mii_monitor(struct bonding *bond, int have_locks)
 			dprintk("sending delayed gratuitous arp on on %s\n",
 				bond->curr_active_slave->dev->name);
 			bond_send_gratuitous_arp(bond);
-			bond->send_grat_arp = 0;
+			bond->send_grat_arp--;
 		}
 	}
 	read_lock(&bond->curr_slave_lock);
@@ -2397,7 +2496,7 @@ void bond_mii_monitor(struct work_struct *work)
 		read_lock(&bond->lock);
 	}
 
-	delay = ((bond->params.miimon * HZ) / 1000) ? : 1;
+	delay = msecs_to_jiffies(bond->params.miimon);
 	read_unlock(&bond->lock);
 	queue_delayed_work(bond->wq, &bond->mii_work, delay);
 }
@@ -2426,37 +2525,14 @@ out:
 	return addr;
 }
 
-static int bond_has_ip(struct bonding *bond)
-{
-	struct vlan_entry *vlan, *vlan_next;
-
-	if (bond->master_ip)
-		return 1;
-
-	if (list_empty(&bond->vlan_list))
-		return 0;
-
-	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
-				 vlan_list) {
-		if (vlan->vlan_ip)
-			return 1;
-	}
-
-	return 0;
-}
-
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
-	struct vlan_entry *vlan, *vlan_next;
+	struct vlan_entry *vlan;
 
 	if (ip == bond->master_ip)
 		return 1;
 
-	if (list_empty(&bond->vlan_list))
-		return 0;
-
-	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
-				 vlan_list) {
+	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		if (ip == vlan->vlan_ip)
 			return 1;
 	}
@@ -2498,7 +2574,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2498{ 2574{
2499 int i, vlan_id, rv; 2575 int i, vlan_id, rv;
2500 __be32 *targets = bond->params.arp_targets; 2576 __be32 *targets = bond->params.arp_targets;
2501 struct vlan_entry *vlan, *vlan_next; 2577 struct vlan_entry *vlan;
2502 struct net_device *vlan_dev; 2578 struct net_device *vlan_dev;
2503 struct flowi fl; 2579 struct flowi fl;
2504 struct rtable *rt; 2580 struct rtable *rt;
@@ -2545,8 +2621,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 	}
 
 	vlan_id = 0;
-	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
-				 vlan_list) {
+	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		if (vlan_dev == rt->u.dst.dev) {
 			vlan_id = vlan->vlan_id;
@@ -2707,7 +2782,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 
 	read_lock(&bond->lock);
 
-	delta_in_ticks = (bond->params.arp_interval * HZ) / 1000;
+	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
 	if (bond->kill_timers) {
 		goto out;
@@ -2764,8 +2839,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 		 * if we don't know our ip yet
 		 */
 		if (time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) ||
-		    (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks) &&
-		     bond_has_ip(bond))) {
+		    (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
 
 			slave->link = BOND_LINK_DOWN;
 			slave->state = BOND_STATE_BACKUP;
@@ -2813,246 +2887,299 @@ out:
 }
 
 /*
- * When using arp monitoring in active-backup mode, this function is
- * called to determine if any backup slaves have went down or a new
- * current slave needs to be found.
- * The backup slaves never generate traffic, they are considered up by merely
- * receiving traffic. If the current slave goes down, each backup slave will
- * be given the opportunity to tx/rx an arp before being taken down - this
- * prevents all slaves from being taken down due to the current slave not
- * sending any traffic for the backups to receive. The arps are not necessarily
- * necessary, any tx and rx traffic will keep the current slave up. While any
- * rx traffic will keep the backup slaves up, the current slave is responsible
- * for generating traffic to keep them up regardless of any other traffic they
- * may have received.
- * see loadbalance_arp_monitor for arp monitoring in load balancing mode
+ * Called to inspect slaves for active-backup mode ARP monitor link state
+ * changes.  Sets new_link in slaves to specify what action should take
+ * place for the slave.  Returns 0 if no changes are found, >0 if changes
+ * to link states must be committed.
+ *
+ * Called with bond->lock held for read.
  */
-void bond_activebackup_arp_mon(struct work_struct *work)
+static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 {
-	struct bonding *bond = container_of(work, struct bonding,
-					    arp_work.work);
 	struct slave *slave;
-	int delta_in_ticks;
-	int i;
+	int i, commit = 0;
 
-	read_lock(&bond->lock);
+	bond_for_each_slave(bond, slave, i) {
+		slave->new_link = BOND_LINK_NOCHANGE;
 
-	delta_in_ticks = (bond->params.arp_interval * HZ) / 1000;
+		if (slave->link != BOND_LINK_UP) {
+			if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
+					   delta_in_ticks)) {
+				slave->new_link = BOND_LINK_UP;
+				commit++;
+			}
 
-	if (bond->kill_timers) {
-		goto out;
-	}
+			continue;
+		}
 
-	if (bond->slave_cnt == 0) {
-		goto re_arm;
+		/*
+		 * Give slaves 2*delta after being enslaved or made
+		 * active.  This avoids bouncing, as the last receive
+		 * times need a full ARP monitor cycle to be updated.
+		 */
+		if (!time_after_eq(jiffies, slave->jiffies +
+				   2 * delta_in_ticks))
+			continue;
+
+		/*
+		 * Backup slave is down if:
+		 * - No current_arp_slave AND
+		 * - more than 3*delta since last receive AND
+		 * - the bond has an IP address
+		 *
+		 * Note: a non-null current_arp_slave indicates
+		 * the curr_active_slave went down and we are
+		 * searching for a new one; under this condition
+		 * we only take the curr_active_slave down - this
+		 * gives each slave a chance to tx/rx traffic
+		 * before being taken out
+		 */
+		if (slave->state == BOND_STATE_BACKUP &&
+		    !bond->current_arp_slave &&
+		    time_after(jiffies, slave_last_rx(bond, slave) +
+			       3 * delta_in_ticks)) {
+			slave->new_link = BOND_LINK_DOWN;
+			commit++;
+		}
+
+		/*
+		 * Active slave is down if:
+		 * - more than 2*delta since transmitting OR
+		 * - (more than 2*delta since receive AND
+		 *    the bond has an IP address)
+		 */
+		if ((slave->state == BOND_STATE_ACTIVE) &&
+		    (time_after_eq(jiffies, slave->dev->trans_start +
+				   2 * delta_in_ticks) ||
+		     (time_after_eq(jiffies, slave_last_rx(bond, slave)
+				    + 2 * delta_in_ticks)))) {
+			slave->new_link = BOND_LINK_DOWN;
+			commit++;
+		}
 	}
 
-	/* determine if any slave has come up or any backup slave has
-	 * gone down
-	 * TODO: what about up/down delay in arp mode? it wasn't here before
-	 * so it can wait
-	 */
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->link != BOND_LINK_UP) {
-			if (time_before_eq(jiffies,
-			    slave_last_rx(bond, slave) + delta_in_ticks)) {
+	read_lock(&bond->curr_slave_lock);
+
+	/*
+	 * Trigger a commit if the primary option setting has changed.
+	 */
+	if (bond->primary_slave &&
+	    (bond->primary_slave != bond->curr_active_slave) &&
+	    (bond->primary_slave->link == BOND_LINK_UP))
+		commit++;
 
-				slave->link = BOND_LINK_UP;
+	read_unlock(&bond->curr_slave_lock);
 
-				write_lock_bh(&bond->curr_slave_lock);
+	return commit;
+}
 
-				if ((!bond->curr_active_slave) &&
-				    time_before_eq(jiffies, slave->dev->trans_start + delta_in_ticks)) {
-					bond_change_active_slave(bond, slave);
-					bond->current_arp_slave = NULL;
-				} else if (bond->curr_active_slave != slave) {
-					/* this slave has just come up but we
-					 * already have a current slave; this
-					 * can also happen if bond_enslave adds
-					 * a new slave that is up while we are
-					 * searching for a new slave
-					 */
-					bond_set_slave_inactive_flags(slave);
-					bond->current_arp_slave = NULL;
-				}
+/*
+ * Called to commit link state changes noted by inspection step of
+ * active-backup mode ARP monitor.
+ *
+ * Called with RTNL and bond->lock for read.
+ */
+static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
+{
+	struct slave *slave;
+	int i;
 
-				bond_set_carrier(bond);
+	bond_for_each_slave(bond, slave, i) {
+		switch (slave->new_link) {
+		case BOND_LINK_NOCHANGE:
+			continue;
 
-				if (slave == bond->curr_active_slave) {
-					printk(KERN_INFO DRV_NAME
-					       ": %s: %s is up and now the "
-					       "active interface\n",
-					       bond->dev->name,
-					       slave->dev->name);
-					netif_carrier_on(bond->dev);
-				} else {
-					printk(KERN_INFO DRV_NAME
-					       ": %s: backup interface %s is "
-					       "now up\n",
-					       bond->dev->name,
-					       slave->dev->name);
-				}
+		case BOND_LINK_UP:
+			write_lock_bh(&bond->curr_slave_lock);
 
-				write_unlock_bh(&bond->curr_slave_lock);
-			}
-		} else {
-			read_lock(&bond->curr_slave_lock);
+			if (!bond->curr_active_slave &&
+			    time_before_eq(jiffies, slave->dev->trans_start +
+					   delta_in_ticks)) {
+				slave->link = BOND_LINK_UP;
+				bond_change_active_slave(bond, slave);
+				bond->current_arp_slave = NULL;
 
-			if ((slave != bond->curr_active_slave) &&
-			    (!bond->current_arp_slave) &&
-			    (time_after_eq(jiffies, slave_last_rx(bond, slave) + 3*delta_in_ticks) &&
-			     bond_has_ip(bond))) {
-				/* a backup slave has gone down; three times
-				 * the delta allows the current slave to be
-				 * taken out before the backup slave.
-				 * note: a non-null current_arp_slave indicates
-				 * the curr_active_slave went down and we are
-				 * searching for a new one; under this
-				 * condition we only take the curr_active_slave
-				 * down - this gives each slave a chance to
-				 * tx/rx traffic before being taken out
-				 */
+				printk(KERN_INFO DRV_NAME
+				       ": %s: %s is up and now the "
+				       "active interface\n",
+				       bond->dev->name, slave->dev->name);
 
-				read_unlock(&bond->curr_slave_lock);
+			} else if (bond->curr_active_slave != slave) {
+				/* this slave has just come up but we
+				 * already have a current slave; this can
+				 * also happen if bond_enslave adds a new
+				 * slave that is up while we are searching
+				 * for a new slave
+				 */
+				slave->link = BOND_LINK_UP;
+				bond_set_slave_inactive_flags(slave);
+				bond->current_arp_slave = NULL;
 
-				slave->link = BOND_LINK_DOWN;
+				printk(KERN_INFO DRV_NAME
+				       ": %s: backup interface %s is now up\n",
+				       bond->dev->name, slave->dev->name);
+			}
 
-				if (slave->link_failure_count < UINT_MAX) {
-					slave->link_failure_count++;
-				}
+			write_unlock_bh(&bond->curr_slave_lock);
 
-				bond_set_slave_inactive_flags(slave);
+			break;
+
+		case BOND_LINK_DOWN:
+			if (slave->link_failure_count < UINT_MAX)
+				slave->link_failure_count++;
+
+			slave->link = BOND_LINK_DOWN;
+
+			if (slave == bond->curr_active_slave) {
+				printk(KERN_INFO DRV_NAME
+				       ": %s: link status down for active "
+				       "interface %s, disabling it\n",
+				       bond->dev->name, slave->dev->name);
 
+				bond_set_slave_inactive_flags(slave);
+
+				write_lock_bh(&bond->curr_slave_lock);
+
+				bond_select_active_slave(bond);
+				if (bond->curr_active_slave)
+					bond->curr_active_slave->jiffies =
+						jiffies;
+
+				write_unlock_bh(&bond->curr_slave_lock);
+
+				bond->current_arp_slave = NULL;
+
+			} else if (slave->state == BOND_STATE_BACKUP) {
 				printk(KERN_INFO DRV_NAME
 				       ": %s: backup interface %s is now down\n",
-				       bond->dev->name,
-				       slave->dev->name);
-			} else {
-				read_unlock(&bond->curr_slave_lock);
+				       bond->dev->name, slave->dev->name);
+
+				bond_set_slave_inactive_flags(slave);
 			}
+			break;
+
+		default:
+			printk(KERN_ERR DRV_NAME
+			       ": %s: impossible: new_link %d on slave %s\n",
+			       bond->dev->name, slave->new_link,
+			       slave->dev->name);
 		}
 	}
 
-	read_lock(&bond->curr_slave_lock);
-	slave = bond->curr_active_slave;
-	read_unlock(&bond->curr_slave_lock);
-
-	if (slave) {
-		/* if we have sent traffic in the past 2*arp_intervals but
-		 * haven't xmit and rx traffic in that time interval, select
-		 * a different slave. slave->jiffies is only updated when
-		 * a slave first becomes the curr_active_slave - not necessarily
-		 * after every arp; this ensures the slave has a full 2*delta
-		 * before being taken out. if a primary is being used, check
-		 * if it is up and needs to take over as the curr_active_slave
-		 */
-		if ((time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) ||
-		     (time_after_eq(jiffies, slave_last_rx(bond, slave) + 2*delta_in_ticks) &&
-		      bond_has_ip(bond))) &&
-		    time_after_eq(jiffies, slave->jiffies + 2*delta_in_ticks)) {
+	/*
+	 * No race with changes to primary via sysfs, as we hold rtnl.
+	 */
+	if (bond->primary_slave &&
+	    (bond->primary_slave != bond->curr_active_slave) &&
+	    (bond->primary_slave->link == BOND_LINK_UP)) {
+		write_lock_bh(&bond->curr_slave_lock);
+		bond_change_active_slave(bond, bond->primary_slave);
+		write_unlock_bh(&bond->curr_slave_lock);
+	}
 
-			slave->link = BOND_LINK_DOWN;
+	bond_set_carrier(bond);
+}
 
-			if (slave->link_failure_count < UINT_MAX) {
-				slave->link_failure_count++;
-			}
+/*
+ * Send ARP probes for active-backup mode ARP monitor.
+ *
+ * Called with bond->lock held for read.
+ */
+static void bond_ab_arp_probe(struct bonding *bond)
+{
+	struct slave *slave;
+	int i;
 
-			printk(KERN_INFO DRV_NAME
-			       ": %s: link status down for active interface "
-			       "%s, disabling it\n",
-			       bond->dev->name,
-			       slave->dev->name);
+	read_lock(&bond->curr_slave_lock);
 
-			write_lock_bh(&bond->curr_slave_lock);
+	if (bond->current_arp_slave && bond->curr_active_slave)
+		printk("PROBE: c_arp %s && cas %s BAD\n",
+		       bond->current_arp_slave->dev->name,
+		       bond->curr_active_slave->dev->name);
 
-			bond_select_active_slave(bond);
-			slave = bond->curr_active_slave;
+	if (bond->curr_active_slave) {
+		bond_arp_send_all(bond, bond->curr_active_slave);
+		read_unlock(&bond->curr_slave_lock);
+		return;
+	}
 
-			write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->curr_slave_lock);
 
-			bond->current_arp_slave = slave;
+	/* if we don't have a curr_active_slave, search for the next available
+	 * backup slave from the current_arp_slave and make it the candidate
+	 * for becoming the curr_active_slave
+	 */
 
-			if (slave) {
-				slave->jiffies = jiffies;
-			}
-		} else if ((bond->primary_slave) &&
-			   (bond->primary_slave != slave) &&
-			   (bond->primary_slave->link == BOND_LINK_UP)) {
-			/* at this point, slave is the curr_active_slave */
-			printk(KERN_INFO DRV_NAME
-			       ": %s: changing from interface %s to primary "
-			       "interface %s\n",
-			       bond->dev->name,
-			       slave->dev->name,
-			       bond->primary_slave->dev->name);
+	if (!bond->current_arp_slave) {
+		bond->current_arp_slave = bond->first_slave;
+		if (!bond->current_arp_slave)
+			return;
+	}
 
-			/* primary is up so switch to it */
-			write_lock_bh(&bond->curr_slave_lock);
-			bond_change_active_slave(bond, bond->primary_slave);
-			write_unlock_bh(&bond->curr_slave_lock);
+	bond_set_slave_inactive_flags(bond->current_arp_slave);
 
-			slave = bond->primary_slave;
+	/* search for next candidate */
+	bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
+		if (IS_UP(slave->dev)) {
+			slave->link = BOND_LINK_BACK;
+			bond_set_slave_active_flags(slave);
+			bond_arp_send_all(bond, slave);
 			slave->jiffies = jiffies;
-		} else {
-			bond->current_arp_slave = NULL;
+			bond->current_arp_slave = slave;
+			break;
 		}
 
-		/* the current slave must tx an arp to ensure backup slaves
-		 * rx traffic
+		/* if the link state is up at this point, we
+		 * mark it down - this can happen if we have
+		 * simultaneous link failures and
+		 * reselect_active_interface doesn't make this
+		 * one the current slave so it is still marked
+		 * up when it is actually down
 		 */
-		if (slave && bond_has_ip(bond)) {
-			bond_arp_send_all(bond, slave);
+		if (slave->link == BOND_LINK_UP) {
+			slave->link = BOND_LINK_DOWN;
+			if (slave->link_failure_count < UINT_MAX)
+				slave->link_failure_count++;
+
+			bond_set_slave_inactive_flags(slave);
+
+			printk(KERN_INFO DRV_NAME
+			       ": %s: backup interface %s is now down.\n",
+			       bond->dev->name, slave->dev->name);
 		}
 	}
+}
 
-	/* if we don't have a curr_active_slave, search for the next available
-	 * backup slave from the current_arp_slave and make it the candidate
-	 * for becoming the curr_active_slave
-	 */
-	if (!slave) {
-		if (!bond->current_arp_slave) {
-			bond->current_arp_slave = bond->first_slave;
-		}
+void bond_activebackup_arp_mon(struct work_struct *work)
+{
+	struct bonding *bond = container_of(work, struct bonding,
+					    arp_work.work);
+	int delta_in_ticks;
 
-		if (bond->current_arp_slave) {
-			bond_set_slave_inactive_flags(bond->current_arp_slave);
+	read_lock(&bond->lock);
 
-			/* search for next candidate */
-			bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
-				if (IS_UP(slave->dev)) {
-					slave->link = BOND_LINK_BACK;
-					bond_set_slave_active_flags(slave);
-					bond_arp_send_all(bond, slave);
-					slave->jiffies = jiffies;
-					bond->current_arp_slave = slave;
-					break;
-				}
+	if (bond->kill_timers)
+		goto out;
 
-				/* if the link state is up at this point, we
-				 * mark it down - this can happen if we have
-				 * simultaneous link failures and
-				 * reselect_active_interface doesn't make this
-				 * one the current slave so it is still marked
-				 * up when it is actually down
-				 */
-				if (slave->link == BOND_LINK_UP) {
-					slave->link = BOND_LINK_DOWN;
-					if (slave->link_failure_count < UINT_MAX) {
-						slave->link_failure_count++;
-					}
+	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-					bond_set_slave_inactive_flags(slave);
+	if (bond->slave_cnt == 0)
+		goto re_arm;
 
-					printk(KERN_INFO DRV_NAME
-					       ": %s: backup interface %s is "
-					       "now down.\n",
-					       bond->dev->name,
-					       slave->dev->name);
-				}
-			}
-		}
+	if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
+		read_unlock(&bond->lock);
+		rtnl_lock();
+		read_lock(&bond->lock);
+
+		bond_ab_arp_commit(bond, delta_in_ticks);
+
+		read_unlock(&bond->lock);
+		rtnl_unlock();
+		read_lock(&bond->lock);
 	}
 
+	bond_ab_arp_probe(bond);
+
 re_arm:
 	if (bond->params.arp_interval) {
 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
@@ -3128,7 +3255,8 @@ static void bond_info_show_master(struct seq_file *seq)
 
 	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
 	    bond->params.fail_over_mac)
-		seq_printf(seq, " (fail_over_mac)");
+		seq_printf(seq, " (fail_over_mac %s)",
+			   fail_over_mac_tbl[bond->params.fail_over_mac].modename);
 
 	seq_printf(seq, "\n");
 
@@ -3500,13 +3628,13 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
 {
 	struct in_ifaddr *ifa = ptr;
 	struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
-	struct bonding *bond, *bond_next;
-	struct vlan_entry *vlan, *vlan_next;
+	struct bonding *bond;
+	struct vlan_entry *vlan;
 
 	if (dev_net(ifa->ifa_dev->dev) != &init_net)
 		return NOTIFY_DONE;
 
-	list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bond_dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:
@@ -3520,11 +3648,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
 		}
 	}
 
-		if (list_empty(&bond->vlan_list))
-			continue;
-
-		list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
-					 vlan_list) {
+		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 			vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 			if (vlan_dev == event_dev) {
 				switch (event) {
@@ -4060,10 +4184,10 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 	dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
 
 	/*
-	 * If fail_over_mac is enabled, do nothing and return success.
-	 * Returning an error causes ifenslave to fail.
+	 * If fail_over_mac is set to active, do nothing and return
+	 * success.  Returning an error causes ifenslave to fail.
 	 */
-	if (bond->params.fail_over_mac)
+	if (bond->params.fail_over_mac == BOND_FOM_ACTIVE)
 		return 0;
 
 	if (!is_valid_ether_addr(sa->sa_data)) {
@@ -4568,7 +4692,7 @@ int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
 
 static int bond_check_params(struct bond_params *params)
 {
-	int arp_validate_value;
+	int arp_validate_value, fail_over_mac_value;
 
 	/*
 	 * Convert string parameters.
@@ -4658,6 +4782,13 @@ static int bond_check_params(struct bond_params *params)
 		use_carrier = 1;
 	}
 
+	if (num_grat_arp < 0 || num_grat_arp > 255) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Warning: num_grat_arp (%d) not in range 0-255 so it "
+		       "was reset to 1 \n", num_grat_arp);
+		num_grat_arp = 1;
+	}
+
 	/* reset values for 802.3ad */
 	if (bond_mode == BOND_MODE_8023AD) {
 		if (!miimon) {
@@ -4836,15 +4967,29 @@ static int bond_check_params(struct bond_params *params)
 		primary = NULL;
 	}
 
-	if (fail_over_mac && (bond_mode != BOND_MODE_ACTIVEBACKUP))
-		printk(KERN_WARNING DRV_NAME
-		       ": Warning: fail_over_mac only affects "
-		       "active-backup mode.\n");
+	if (fail_over_mac) {
+		fail_over_mac_value = bond_parse_parm(fail_over_mac,
+						      fail_over_mac_tbl);
+		if (fail_over_mac_value == -1) {
+			printk(KERN_ERR DRV_NAME
+			       ": Error: invalid fail_over_mac \"%s\"\n",
+			       arp_validate == NULL ? "NULL" : arp_validate);
+			return -EINVAL;
+		}
+
+		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
+			printk(KERN_WARNING DRV_NAME
+			       ": Warning: fail_over_mac only affects "
+			       "active-backup mode.\n");
+	} else {
+		fail_over_mac_value = BOND_FOM_NONE;
+	}
 
 	/* fill params struct with the proper values */
 	params->mode = bond_mode;
 	params->xmit_policy = xmit_hashtype;
 	params->miimon = miimon;
+	params->num_grat_arp = num_grat_arp;
 	params->arp_interval = arp_interval;
 	params->arp_validate = arp_validate_value;
 	params->updelay = updelay;
@@ -4852,7 +4997,7 @@ static int bond_check_params(struct bond_params *params)
 	params->use_carrier = use_carrier;
 	params->lacp_fast = lacp_fast;
 	params->primary[0] = 0;
-	params->fail_over_mac = fail_over_mac;
+	params->fail_over_mac = fail_over_mac_value;
 
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
@@ -4871,10 +5016,10 @@ static struct lock_class_key bonding_netdev_xmit_lock_key;
4871 * Caller must NOT hold rtnl_lock; we need to release it here before we 5016 * Caller must NOT hold rtnl_lock; we need to release it here before we
4872 * set up our sysfs entries. 5017 * set up our sysfs entries.
4873 */ 5018 */
4874int bond_create(char *name, struct bond_params *params, struct bonding **newbond) 5019int bond_create(char *name, struct bond_params *params)
4875{ 5020{
4876 struct net_device *bond_dev; 5021 struct net_device *bond_dev;
4877 struct bonding *bond, *nxt; 5022 struct bonding *bond;
4878 int res; 5023 int res;
4879 5024
4880 rtnl_lock(); 5025 rtnl_lock();
@@ -4882,7 +5027,7 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4882 5027
4883 /* Check to see if the bond already exists. */ 5028 /* Check to see if the bond already exists. */
4884 if (name) { 5029 if (name) {
4885 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 5030 list_for_each_entry(bond, &bond_dev_list, bond_list)
4886 if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { 5031 if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
4887 printk(KERN_ERR DRV_NAME 5032 printk(KERN_ERR DRV_NAME
4888 ": cannot add bond %s; it already exists\n", 5033 ": cannot add bond %s; it already exists\n",
@@ -4925,9 +5070,6 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4925 5070
4926 lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key); 5071 lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key);
4927 5072
4928 if (newbond)
4929 *newbond = bond_dev->priv;
4930
4931 netif_carrier_off(bond_dev); 5073 netif_carrier_off(bond_dev);
4932 5074
4933 up_write(&bonding_rwsem); 5075 up_write(&bonding_rwsem);
@@ -4957,7 +5099,7 @@ static int __init bonding_init(void)
4957{ 5099{
4958 int i; 5100 int i;
4959 int res; 5101 int res;
4960 struct bonding *bond, *nxt; 5102 struct bonding *bond;
4961 5103
4962 printk(KERN_INFO "%s", version); 5104 printk(KERN_INFO "%s", version);
4963 5105
@@ -4973,7 +5115,7 @@ static int __init bonding_init(void)
4973 init_rwsem(&bonding_rwsem); 5115 init_rwsem(&bonding_rwsem);
4974 5116
4975 for (i = 0; i < max_bonds; i++) { 5117 for (i = 0; i < max_bonds; i++) {
4976 res = bond_create(NULL, &bonding_defaults, NULL); 5118 res = bond_create(NULL, &bonding_defaults);
4977 if (res) 5119 if (res)
4978 goto err; 5120 goto err;
4979 } 5121 }
@@ -4987,7 +5129,7 @@ static int __init bonding_init(void)
4987 5129
4988 goto out; 5130 goto out;
4989err: 5131err:
4990 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) { 5132 list_for_each_entry(bond, &bond_dev_list, bond_list) {
4991 bond_work_cancel_all(bond); 5133 bond_work_cancel_all(bond);
4992 destroy_workqueue(bond->wq); 5134 destroy_workqueue(bond->wq);
4993 } 5135 }
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 08f3d396bcd6..dd265c69b0df 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -50,6 +50,7 @@ extern struct bond_parm_tbl bond_mode_tbl[];
50extern struct bond_parm_tbl bond_lacp_tbl[]; 50extern struct bond_parm_tbl bond_lacp_tbl[];
51extern struct bond_parm_tbl xmit_hashtype_tbl[]; 51extern struct bond_parm_tbl xmit_hashtype_tbl[];
52extern struct bond_parm_tbl arp_validate_tbl[]; 52extern struct bond_parm_tbl arp_validate_tbl[];
53extern struct bond_parm_tbl fail_over_mac_tbl[];
53 54
54static int expected_refcount = -1; 55static int expected_refcount = -1;
55static struct class *netdev_class; 56static struct class *netdev_class;
@@ -111,7 +112,6 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
111 char *ifname; 112 char *ifname;
112 int rv, res = count; 113 int rv, res = count;
113 struct bonding *bond; 114 struct bonding *bond;
114 struct bonding *nxt;
115 115
116 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/ 116 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
117 ifname = command + 1; 117 ifname = command + 1;
@@ -122,7 +122,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
122 if (command[0] == '+') { 122 if (command[0] == '+') {
123 printk(KERN_INFO DRV_NAME 123 printk(KERN_INFO DRV_NAME
124 ": %s is being created...\n", ifname); 124 ": %s is being created...\n", ifname);
125 rv = bond_create(ifname, &bonding_defaults, &bond); 125 rv = bond_create(ifname, &bonding_defaults);
126 if (rv) { 126 if (rv) {
127 printk(KERN_INFO DRV_NAME ": Bond creation failed.\n"); 127 printk(KERN_INFO DRV_NAME ": Bond creation failed.\n");
128 res = rv; 128 res = rv;
@@ -134,7 +134,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
134 rtnl_lock(); 134 rtnl_lock();
135 down_write(&bonding_rwsem); 135 down_write(&bonding_rwsem);
136 136
137 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 137 list_for_each_entry(bond, &bond_dev_list, bond_list)
138 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) { 138 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
139 /* check the ref count on the bond's kobject. 139 /* check the ref count on the bond's kobject.
140 * If it's > expected, then there's a file open, 140 * If it's > expected, then there's a file open,
@@ -548,42 +548,37 @@ static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attrib
548{ 548{
549 struct bonding *bond = to_bond(d); 549 struct bonding *bond = to_bond(d);
550 550
551 return sprintf(buf, "%d\n", bond->params.fail_over_mac) + 1; 551 return sprintf(buf, "%s %d\n",
552 fail_over_mac_tbl[bond->params.fail_over_mac].modename,
553 bond->params.fail_over_mac);
552} 554}
553 555
554static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count) 556static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count)
555{ 557{
556 int new_value; 558 int new_value;
557 int ret = count;
558 struct bonding *bond = to_bond(d); 559 struct bonding *bond = to_bond(d);
559 560
560 if (bond->slave_cnt != 0) { 561 if (bond->slave_cnt != 0) {
561 printk(KERN_ERR DRV_NAME 562 printk(KERN_ERR DRV_NAME
562 ": %s: Can't alter fail_over_mac with slaves in bond.\n", 563 ": %s: Can't alter fail_over_mac with slaves in bond.\n",
563 bond->dev->name); 564 bond->dev->name);
564 ret = -EPERM; 565 return -EPERM;
565 goto out;
566 } 566 }
567 567
568 if (sscanf(buf, "%d", &new_value) != 1) { 568 new_value = bond_parse_parm(buf, fail_over_mac_tbl);
569 if (new_value < 0) {
569 printk(KERN_ERR DRV_NAME 570 printk(KERN_ERR DRV_NAME
570 ": %s: no fail_over_mac value specified.\n", 571 ": %s: Ignoring invalid fail_over_mac value %s.\n",
571 bond->dev->name); 572 bond->dev->name, buf);
572 ret = -EINVAL; 573 return -EINVAL;
573 goto out;
574 } 574 }
575 575
576 if ((new_value == 0) || (new_value == 1)) { 576 bond->params.fail_over_mac = new_value;
577 bond->params.fail_over_mac = new_value; 577 printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
578 printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %d.\n", 578 bond->dev->name, fail_over_mac_tbl[new_value].modename,
579 bond->dev->name, new_value); 579 new_value);
580 } else { 580
581 printk(KERN_INFO DRV_NAME 581 return count;
582 ": %s: Ignoring invalid fail_over_mac value %d.\n",
583 bond->dev->name, new_value);
584 }
585out:
586 return ret;
587} 582}
588 583
589static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac); 584static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac);
@@ -952,6 +947,45 @@ out:
952static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); 947static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
953 948
954/* 949/*
950 * Show and set the number of grat ARP to send after a failover event.
951 */
952static ssize_t bonding_show_n_grat_arp(struct device *d,
953 struct device_attribute *attr,
954 char *buf)
955{
956 struct bonding *bond = to_bond(d);
957
958 return sprintf(buf, "%d\n", bond->params.num_grat_arp);
959}
960
961static ssize_t bonding_store_n_grat_arp(struct device *d,
962 struct device_attribute *attr,
963 const char *buf, size_t count)
964{
965 int new_value, ret = count;
966 struct bonding *bond = to_bond(d);
967
968 if (sscanf(buf, "%d", &new_value) != 1) {
969 printk(KERN_ERR DRV_NAME
970 ": %s: no num_grat_arp value specified.\n",
971 bond->dev->name);
972 ret = -EINVAL;
973 goto out;
974 }
975 if (new_value < 0 || new_value > 255) {
976 printk(KERN_ERR DRV_NAME
977 ": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
978 bond->dev->name, new_value);
979 ret = -EINVAL;
980 goto out;
981 } else {
982 bond->params.num_grat_arp = new_value;
983 }
984out:
985 return ret;
986}
987static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_n_grat_arp, bonding_store_n_grat_arp);
988/*
955 * Show and set the MII monitor interval. There are two tricky bits 989 * Show and set the MII monitor interval. There are two tricky bits
956 * here. First, if MII monitoring is activated, then we must disable 990 * here. First, if MII monitoring is activated, then we must disable
957 * ARP monitoring. Second, if the timer isn't running, we must 991 * ARP monitoring. Second, if the timer isn't running, we must
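
The num_grat_arp store handler above follows the usual sysfs pattern: parse with sscanf, range-check, and return either the byte count or -EINVAL. A stand-alone sketch of just that validation step (store_num_grat_arp is an illustrative name, not the driver function):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* accept 0-255 like bonding_store_n_grat_arp; reject anything else */
static int store_num_grat_arp(const char *buf, size_t count, int *param)
{
	int new_value;

	if (sscanf(buf, "%d", &new_value) != 1)
		return -EINVAL;		/* no integer supplied */
	if (new_value < 0 || new_value > 255)
		return -EINVAL;		/* out of range */
	*param = new_value;
	return (int)count;		/* sysfs convention: bytes consumed */
}

int main(void)
{
	int num_grat_arp = 1;

	printf("ret=%d num_grat_arp=%d\n",
	       store_num_grat_arp("5\n", 2, &num_grat_arp), num_grat_arp);
	printf("ret=%d num_grat_arp=%d\n",
	       store_num_grat_arp("300\n", 4, &num_grat_arp), num_grat_arp);
	return 0;
}
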
@@ -1388,6 +1422,7 @@ static struct attribute *per_bond_attrs[] = {
1388 &dev_attr_updelay.attr, 1422 &dev_attr_updelay.attr,
1389 &dev_attr_lacp_rate.attr, 1423 &dev_attr_lacp_rate.attr,
1390 &dev_attr_xmit_hash_policy.attr, 1424 &dev_attr_xmit_hash_policy.attr,
1425 &dev_attr_num_grat_arp.attr,
1391 &dev_attr_miimon.attr, 1426 &dev_attr_miimon.attr,
1392 &dev_attr_primary.attr, 1427 &dev_attr_primary.attr,
1393 &dev_attr_use_carrier.attr, 1428 &dev_attr_use_carrier.attr,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a3c74e20aa53..89fd9963db7a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -125,6 +125,7 @@ struct bond_params {
125 int mode; 125 int mode;
126 int xmit_policy; 126 int xmit_policy;
127 int miimon; 127 int miimon;
128 int num_grat_arp;
128 int arp_interval; 129 int arp_interval;
129 int arp_validate; 130 int arp_validate;
130 int use_carrier; 131 int use_carrier;
@@ -157,6 +158,7 @@ struct slave {
157 unsigned long jiffies; 158 unsigned long jiffies;
158 unsigned long last_arp_rx; 159 unsigned long last_arp_rx;
159 s8 link; /* one of BOND_LINK_XXXX */ 160 s8 link; /* one of BOND_LINK_XXXX */
161 s8 new_link;
160 s8 state; /* one of BOND_STATE_XXXX */ 162 s8 state; /* one of BOND_STATE_XXXX */
161 u32 original_flags; 163 u32 original_flags;
162 u32 original_mtu; 164 u32 original_mtu;
@@ -169,6 +171,11 @@ struct slave {
169}; 171};
170 172
171/* 173/*
174 * Link pseudo-state only used internally by monitors
175 */
176#define BOND_LINK_NOCHANGE -1
177
178/*
172 * Here are the locking policies for the two bonding locks: 179 * Here are the locking policies for the two bonding locks:
173 * 180 *
174 * 1) Get bond->lock when reading/writing slave list. 181 * 1) Get bond->lock when reading/writing slave list.
@@ -241,6 +248,10 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
241 return (struct bonding *)slave->dev->master->priv; 248 return (struct bonding *)slave->dev->master->priv;
242} 249}
243 250
251#define BOND_FOM_NONE 0
252#define BOND_FOM_ACTIVE 1
253#define BOND_FOM_FOLLOW 2
254
244#define BOND_ARP_VALIDATE_NONE 0 255#define BOND_ARP_VALIDATE_NONE 0
245#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) 256#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
246#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) 257#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
@@ -301,7 +312,7 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
301 312
302struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 313struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
303int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 314int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
304int bond_create(char *name, struct bond_params *params, struct bonding **newbond); 315int bond_create(char *name, struct bond_params *params);
305void bond_destroy(struct bonding *bond); 316void bond_destroy(struct bonding *bond);
306int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); 317int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
307int bond_create_sysfs(void); 318int bond_create_sysfs(void);
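
The new_link field and BOND_LINK_NOCHANGE above support a two-phase monitor: an inspect pass records intended transitions without heavy locking, and a commit pass applies them later under the proper locks. A rough user-space sketch of that pattern with simplified state values (the real BOND_LINK_* set also has FAIL/BACK intermediate states, which are elided here):

#include <stdio.h>

enum { LINK_UP, LINK_DOWN };
#define LINK_NOCHANGE -1

struct slave {
	int link;
	int new_link;
};

/* phase 1: observe carrier, record the intended transition only */
static void inspect(struct slave *s, int carrier)
{
	int want = carrier ? LINK_UP : LINK_DOWN;

	s->new_link = (want == s->link) ? LINK_NOCHANGE : want;
}

/* phase 2: apply the recorded transition (under locks in bonding) */
static void commit(struct slave *s)
{
	if (s->new_link == LINK_NOCHANGE)
		return;
	s->link = s->new_link;
	s->new_link = LINK_NOCHANGE;
	printf("slave link -> %s\n", s->link == LINK_UP ? "up" : "down");
}

int main(void)
{
	struct slave s = { LINK_DOWN, LINK_NOCHANGE };

	inspect(&s, 1);
	commit(&s);		/* transitions to up */
	inspect(&s, 1);
	commit(&s);		/* no change: nothing to do */
	return 0;
}
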
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 93e13636f8dd..83768df27806 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -142,8 +142,8 @@
142 142
143#define DRV_MODULE_NAME "cassini" 143#define DRV_MODULE_NAME "cassini"
144#define PFX DRV_MODULE_NAME ": " 144#define PFX DRV_MODULE_NAME ": "
145#define DRV_MODULE_VERSION "1.5" 145#define DRV_MODULE_VERSION "1.6"
146#define DRV_MODULE_RELDATE "4 Jan 2008" 146#define DRV_MODULE_RELDATE "21 May 2008"
147 147
148#define CAS_DEF_MSG_ENABLE \ 148#define CAS_DEF_MSG_ENABLE \
149 (NETIF_MSG_DRV | \ 149 (NETIF_MSG_DRV | \
@@ -2136,9 +2136,12 @@ end_copy_pkt:
2136 if (addr) 2136 if (addr)
2137 cas_page_unmap(addr); 2137 cas_page_unmap(addr);
2138 } 2138 }
2139 skb->csum = csum_unfold(~csum);
2140 skb->ip_summed = CHECKSUM_COMPLETE;
2141 skb->protocol = eth_type_trans(skb, cp->dev); 2139 skb->protocol = eth_type_trans(skb, cp->dev);
2140 if (skb->protocol == htons(ETH_P_IP)) {
2141 skb->csum = csum_unfold(~csum);
2142 skb->ip_summed = CHECKSUM_COMPLETE;
2143 } else
2144 skb->ip_summed = CHECKSUM_NONE;
2142 return len; 2145 return len;
2143} 2146}
2144 2147
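
The cassini change reports CHECKSUM_COMPLETE only for IPv4: the hardware hands back a 16-bit one's-complement sum over the payload, the driver passes its complement up, and the stack folds the pseudo-header back in, expecting the total to come out as 0xffff. A rough user-space sketch of that arithmetic (csum16 is an illustrative helper; the kernel's csum_unfold merely widens the folded 16-bit value):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum, as used by IP/TCP/UDP checksums */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)	/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };
	uint16_t sum = csum16(pkt, sizeof(pkt));

	/* ~sum is what the driver stores in skb->csum; adding it back
	 * to the data sum folds to 0xffff, i.e. a valid checksum */
	printf("sum=%04x ~sum=%04x\n", sum, (uint16_t)~sum);
	return 0;
}
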
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ae07100bb935..7f3f62e1b113 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,6 +38,7 @@
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <asm/gpio.h> 40#include <asm/gpio.h>
41#include <asm/atomic.h>
41 42
42MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 43MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
43MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 44MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
187#define CPMAC_EOQ 0x1000 188#define CPMAC_EOQ 0x1000
188 struct sk_buff *skb; 189 struct sk_buff *skb;
189 struct cpmac_desc *next; 190 struct cpmac_desc *next;
191 struct cpmac_desc *prev;
190 dma_addr_t mapping; 192 dma_addr_t mapping;
191 dma_addr_t data_mapping; 193 dma_addr_t data_mapping;
192}; 194};
@@ -208,6 +210,7 @@ struct cpmac_priv {
208 struct work_struct reset_work; 210 struct work_struct reset_work;
209 struct platform_device *pdev; 211 struct platform_device *pdev;
210 struct napi_struct napi; 212 struct napi_struct napi;
213 atomic_t reset_pending;
211}; 214};
212 215
213static irqreturn_t cpmac_irq(int, void *); 216static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
241 printk("\n"); 244 printk("\n");
242} 245}
243 246
247static void cpmac_dump_all_desc(struct net_device *dev)
248{
249 struct cpmac_priv *priv = netdev_priv(dev);
250 struct cpmac_desc *dump = priv->rx_head;
251 do {
252 cpmac_dump_desc(dev, dump);
253 dump = dump->next;
254 } while (dump != priv->rx_head);
255}
256
244static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) 257static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
245{ 258{
246 int i; 259 int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
412static int cpmac_poll(struct napi_struct *napi, int budget) 425static int cpmac_poll(struct napi_struct *napi, int budget)
413{ 426{
414 struct sk_buff *skb; 427 struct sk_buff *skb;
415 struct cpmac_desc *desc; 428 struct cpmac_desc *desc, *restart;
416 int received = 0;
417 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 429 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
430 int received = 0, processed = 0;
418 431
419 spin_lock(&priv->rx_lock); 432 spin_lock(&priv->rx_lock);
420 if (unlikely(!priv->rx_head)) { 433 if (unlikely(!priv->rx_head)) {
421 if (netif_msg_rx_err(priv) && net_ratelimit()) 434 if (netif_msg_rx_err(priv) && net_ratelimit())
422 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 435 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
423 priv->dev->name); 436 priv->dev->name);
437 spin_unlock(&priv->rx_lock);
424 netif_rx_complete(priv->dev, napi); 438 netif_rx_complete(priv->dev, napi);
425 return 0; 439 return 0;
426 } 440 }
427 441
428 desc = priv->rx_head; 442 desc = priv->rx_head;
443 restart = NULL;
429 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 444 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
445 processed++;
446
447 if ((desc->dataflags & CPMAC_EOQ) != 0) {
448 /* The last update to eoq->hw_next didn't happen
449 * soon enough, and the receiver stopped here.
450 * Remember this descriptor so we can restart
451 * the receiver after freeing some space.
452 */
453 if (unlikely(restart)) {
454 if (netif_msg_rx_err(priv))
455 printk(KERN_ERR "%s: poll found a"
456 " duplicate EOQ: %p and %p\n",
457 priv->dev->name, restart, desc);
458 goto fatal_error;
459 }
460
461 restart = desc->next;
462 }
463
430 skb = cpmac_rx_one(priv, desc); 464 skb = cpmac_rx_one(priv, desc);
431 if (likely(skb)) { 465 if (likely(skb)) {
432 netif_receive_skb(skb); 466 netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
435 desc = desc->next; 469 desc = desc->next;
436 } 470 }
437 471
472 if (desc != priv->rx_head) {
473 /* We freed some buffers, but not the whole ring,
474 * add what we did free to the rx list */
475 desc->prev->hw_next = (u32)0;
476 priv->rx_head->prev->hw_next = priv->rx_head->mapping;
477 }
478
479 /* Optimization: If we did not actually process an EOQ (perhaps because
480 * of quota limits), check to see if the tail of the queue has EOQ set.
481 * We should immediately restart in that case so that the receiver can
482 * restart and run in parallel with more packet processing.
483 * This lets us handle slightly larger bursts before running
484 * out of ring space (assuming dev->weight < ring_size) */
485
486 if (!restart &&
487 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
488 == CPMAC_EOQ &&
489 (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
490 /* reset EOQ so the poll loop (above) doesn't try to
491 * restart this when it eventually gets to this descriptor.
492 */
493 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
494 restart = priv->rx_head;
495 }
496
497 if (restart) {
498 priv->dev->stats.rx_errors++;
499 priv->dev->stats.rx_fifo_errors++;
500 if (netif_msg_rx_err(priv) && net_ratelimit())
501 printk(KERN_WARNING "%s: rx dma ring overrun\n",
502 priv->dev->name);
503
504 if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
505 if (netif_msg_drv(priv))
506 printk(KERN_ERR "%s: cpmac_poll is trying to "
507 "restart rx from a descriptor that's "
508 "not free: %p\n",
509 priv->dev->name, restart);
510 goto fatal_error;
511 }
512
513 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
514 }
515
438 priv->rx_head = desc; 516 priv->rx_head = desc;
439 spin_unlock(&priv->rx_lock); 517 spin_unlock(&priv->rx_lock);
440 if (unlikely(netif_msg_rx_status(priv))) 518 if (unlikely(netif_msg_rx_status(priv)))
441 printk(KERN_DEBUG "%s: poll processed %d packets\n", 519 printk(KERN_DEBUG "%s: poll processed %d packets\n",
442 priv->dev->name, received); 520 priv->dev->name, received);
443 if (desc->dataflags & CPMAC_OWN) { 521 if (processed == 0) {
522 /* we ran out of packets to read,
523 * revert to interrupt-driven mode */
444 netif_rx_complete(priv->dev, napi); 524 netif_rx_complete(priv->dev, napi);
445 cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
446 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 525 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
447 return 0; 526 return 0;
448 } 527 }
449 528
450 return 1; 529 return 1;
530
531fatal_error:
532 /* Something went horribly wrong.
533 * Reset hardware to try to recover rather than wedging. */
534
535 if (netif_msg_drv(priv)) {
536 printk(KERN_ERR "%s: cpmac_poll is confused. "
537 "Resetting hardware\n", priv->dev->name);
538 cpmac_dump_all_desc(priv->dev);
539 printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
540 priv->dev->name,
541 cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
542 cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
543 }
544
545 spin_unlock(&priv->rx_lock);
546 netif_rx_complete(priv->dev, napi);
547 netif_stop_queue(priv->dev);
548 napi_disable(&priv->napi);
549
550 atomic_inc(&priv->reset_pending);
551 cpmac_hw_stop(priv->dev);
552 if (!schedule_work(&priv->reset_work))
553 atomic_dec(&priv->reset_pending);
554 return 0;
555
451} 556}
452 557
453static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 558static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
456 struct cpmac_desc *desc; 561 struct cpmac_desc *desc;
457 struct cpmac_priv *priv = netdev_priv(dev); 562 struct cpmac_priv *priv = netdev_priv(dev);
458 563
564 if (unlikely(atomic_read(&priv->reset_pending)))
565 return NETDEV_TX_BUSY;
566
459 if (unlikely(skb_padto(skb, ETH_ZLEN))) 567 if (unlikely(skb_padto(skb, ETH_ZLEN)))
460 return NETDEV_TX_OK; 568 return NETDEV_TX_OK;
461 569
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
621 desc->dataflags = CPMAC_OWN; 729 desc->dataflags = CPMAC_OWN;
622 dev->stats.rx_dropped++; 730 dev->stats.rx_dropped++;
623 } 731 }
732 desc->hw_next = desc->next->mapping;
624 desc = desc->next; 733 desc = desc->next;
625 } 734 }
735 priv->rx_head->prev->hw_next = 0;
626} 736}
627 737
628static void cpmac_clear_tx(struct net_device *dev) 738static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
635 priv->desc_ring[i].dataflags = 0; 745 priv->desc_ring[i].dataflags = 0;
636 if (priv->desc_ring[i].skb) { 746 if (priv->desc_ring[i].skb) {
637 dev_kfree_skb_any(priv->desc_ring[i].skb); 747 dev_kfree_skb_any(priv->desc_ring[i].skb);
638 if (netif_subqueue_stopped(dev, i)) 748 priv->desc_ring[i].skb = NULL;
639 netif_wake_subqueue(dev, i);
640 } 749 }
641 } 750 }
642} 751}
643 752
644static void cpmac_hw_error(struct work_struct *work) 753static void cpmac_hw_error(struct work_struct *work)
645{ 754{
755 int i;
646 struct cpmac_priv *priv = 756 struct cpmac_priv *priv =
647 container_of(work, struct cpmac_priv, reset_work); 757 container_of(work, struct cpmac_priv, reset_work);
648 758
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
651 spin_unlock(&priv->rx_lock); 761 spin_unlock(&priv->rx_lock);
652 cpmac_clear_tx(priv->dev); 762 cpmac_clear_tx(priv->dev);
653 cpmac_hw_start(priv->dev); 763 cpmac_hw_start(priv->dev);
654 napi_enable(&priv->napi); 764 barrier();
655 netif_start_queue(priv->dev); 765 atomic_dec(&priv->reset_pending);
766
767 for (i = 0; i < CPMAC_QUEUES; i++)
768 netif_wake_subqueue(priv->dev, i);
769 netif_wake_queue(priv->dev);
770 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
771}
772
773static void cpmac_check_status(struct net_device *dev)
774{
775 struct cpmac_priv *priv = netdev_priv(dev);
776
777 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
778 int rx_channel = (macstatus >> 8) & 7;
779 int rx_code = (macstatus >> 12) & 15;
780 int tx_channel = (macstatus >> 16) & 7;
781 int tx_code = (macstatus >> 20) & 15;
782
783 if (rx_code || tx_code) {
784 if (netif_msg_drv(priv) && net_ratelimit()) {
785 /* Can't find any documentation on what these
786 * error codes actually are, so just log them and hope.
787 */
788 if (rx_code)
789 printk(KERN_WARNING "%s: host error %d on rx "
790 "channel %d (macstatus %08x), resetting\n",
791 dev->name, rx_code, rx_channel, macstatus);
792 if (tx_code)
793 printk(KERN_WARNING "%s: host error %d on tx "
794 "channel %d (macstatus %08x), resetting\n",
795 dev->name, tx_code, tx_channel, macstatus);
796 }
797
798 netif_stop_queue(dev);
799 cpmac_hw_stop(dev);
800 if (schedule_work(&priv->reset_work))
801 atomic_inc(&priv->reset_pending);
802 if (unlikely(netif_msg_hw(priv)))
803 cpmac_dump_regs(dev);
804 }
805 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
656} 806}
657 807
658static irqreturn_t cpmac_irq(int irq, void *dev_id) 808static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
683 833
684 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 834 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
685 835
686 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { 836 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
687 if (netif_msg_drv(priv) && net_ratelimit()) 837 cpmac_check_status(dev);
688 printk(KERN_ERR "%s: hw error, resetting...\n",
689 dev->name);
690 netif_stop_queue(dev);
691 napi_disable(&priv->napi);
692 cpmac_hw_stop(dev);
693 schedule_work(&priv->reset_work);
694 if (unlikely(netif_msg_hw(priv)))
695 cpmac_dump_regs(dev);
696 }
697 838
698 return IRQ_HANDLED; 839 return IRQ_HANDLED;
699} 840}
700 841
701static void cpmac_tx_timeout(struct net_device *dev) 842static void cpmac_tx_timeout(struct net_device *dev)
702{ 843{
703 struct cpmac_priv *priv = netdev_priv(dev);
704 int i; 844 int i;
845 struct cpmac_priv *priv = netdev_priv(dev);
705 846
706 spin_lock(&priv->lock); 847 spin_lock(&priv->lock);
707 dev->stats.tx_errors++; 848 dev->stats.tx_errors++;
708 spin_unlock(&priv->lock); 849 spin_unlock(&priv->lock);
709 if (netif_msg_tx_err(priv) && net_ratelimit()) 850 if (netif_msg_tx_err(priv) && net_ratelimit())
710 printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 851 printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
711 /* 852
712 * FIXME: waking up random queue is not the best thing to 853 atomic_inc(&priv->reset_pending);
713 * do... on the other hand why we got here at all? 854 barrier();
714 */ 855 cpmac_clear_tx(dev);
715#ifdef CONFIG_NETDEVICES_MULTIQUEUE 856 barrier();
857 atomic_dec(&priv->reset_pending);
858
859 netif_wake_queue(priv->dev);
716 for (i = 0; i < CPMAC_QUEUES; i++) 860 for (i = 0; i < CPMAC_QUEUES; i++)
717 if (priv->desc_ring[i].skb) { 861 netif_wake_subqueue(dev, i);
718 priv->desc_ring[i].dataflags = 0;
719 dev_kfree_skb_any(priv->desc_ring[i].skb);
720 netif_wake_subqueue(dev, i);
721 break;
722 }
723#else
724 priv->desc_ring[0].dataflags = 0;
725 if (priv->desc_ring[0].skb)
726 dev_kfree_skb_any(priv->desc_ring[0].skb);
727 netif_wake_queue(dev);
728#endif
729} 862}
730 863
731static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 864static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
901 desc->buflen = CPMAC_SKB_SIZE; 1034 desc->buflen = CPMAC_SKB_SIZE;
902 desc->dataflags = CPMAC_OWN; 1035 desc->dataflags = CPMAC_OWN;
903 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 1036 desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
1037 desc->next->prev = desc;
904 desc->hw_next = (u32)desc->next->mapping; 1038 desc->hw_next = (u32)desc->next->mapping;
905 } 1039 }
906 1040
1041 priv->rx_head->prev->hw_next = (u32)0;
1042
907 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1043 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
908 dev->name, dev))) { 1044 dev->name, dev))) {
909 if (netif_msg_drv(priv)) 1045 if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
912 goto fail_irq; 1048 goto fail_irq;
913 } 1049 }
914 1050
1051 atomic_set(&priv->reset_pending, 0);
915 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1052 INIT_WORK(&priv->reset_work, cpmac_hw_error);
916 cpmac_hw_start(dev); 1053 cpmac_hw_start(dev);
917 1054
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1007 1144
1008 if (phy_id == PHY_MAX_ADDR) { 1145 if (phy_id == PHY_MAX_ADDR) {
1009 if (external_switch || dumb_switch) { 1146 if (external_switch || dumb_switch) {
1010 struct fixed_phy_status status = {}; 1147 mdio_bus_id = 0; /* fixed phys bus */
1011 1148 phy_id = pdev->id;
1012 /*
1013 * FIXME: this should be in the platform code!
1014 * Since there is not platform code at all (that is,
1015 * no mainline users of that driver), place it here
1016 * for now.
1017 */
1018 phy_id = 0;
1019 status.link = 1;
1020 status.duplex = 1;
1021 status.speed = 100;
1022 fixed_phy_add(PHY_POLL, phy_id, &status);
1023 } else { 1149 } else {
1024 printk(KERN_ERR "cpmac: no PHY present\n"); 1150 dev_err(&pdev->dev, "no PHY present\n");
1025 return -ENODEV; 1151 return -ENODEV;
1026 } 1152 }
1027 } 1153 }
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1064 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1190 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1065 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1191 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
1066 1192
1067 snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1193 priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
1068 1194 &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1069 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
1070 PHY_INTERFACE_MODE_MII);
1071 if (IS_ERR(priv->phy)) { 1195 if (IS_ERR(priv->phy)) {
1072 if (netif_msg_drv(priv)) 1196 if (netif_msg_drv(priv))
1073 printk(KERN_ERR "%s: Could not attach to PHY\n", 1197 printk(KERN_ERR "%s: Could not attach to PHY\n",
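
The descriptor-ring relinking that the reworked cpmac_poll performs is easier to see in isolation: after draining part of the ring, the new tail is terminated (hw_next = 0) and the freed run is spliced back behind the old tail. A stand-alone simulation of that pointer surgery (field names follow struct cpmac_desc; the DMA addresses are fake):

#include <stdio.h>
#include <stdint.h>

#define RING 4

struct desc {
	struct desc *next, *prev;
	uint32_t hw_next;	/* pointer the MAC follows; 0 ends the chain */
	uint32_t mapping;	/* stand-in for this descriptor's DMA address */
};

int main(void)
{
	struct desc ring[RING];
	struct desc *rx_head = &ring[0], *desc;
	int i, freed = 2;

	for (i = 0; i < RING; i++)
		ring[i].mapping = 0x1000 + i * 0x10;
	for (i = 0; i < RING; i++) {
		ring[i].next = &ring[(i + 1) % RING];
		ring[i].next->prev = &ring[i];
		ring[i].hw_next = ring[i].next->mapping;
	}
	rx_head->prev->hw_next = 0;	/* terminate the chain at the tail */

	/* mimic cpmac_poll after freeing 'freed' buffers: terminate at
	 * the new tail, then splice the freed run in behind the old one */
	desc = rx_head;
	for (i = 0; i < freed; i++)
		desc = desc->next;
	desc->prev->hw_next = 0;
	rx_head->prev->hw_next = rx_head->mapping;
	rx_head = desc;

	printf("new head at %#x\n", (unsigned int)rx_head->mapping);
	for (i = 0; i < RING; i++)
		printf("desc %d: hw_next=%#x\n", i,
		       (unsigned int)ring[i].hw_next);
	return 0;
}
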
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index acebe431d068..271140433b09 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,6 +42,7 @@
42#include <linux/cache.h> 42#include <linux/cache.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/inet_lro.h>
45#include "t3cdev.h" 46#include "t3cdev.h"
46#include <asm/io.h> 47#include <asm/io.h>
47 48
@@ -92,6 +93,7 @@ struct sge_fl { /* SGE per free-buffer list state */
92 unsigned int gen; /* free list generation */ 93 unsigned int gen; /* free list generation */
93 struct fl_pg_chunk pg_chunk;/* page chunk cache */ 94 struct fl_pg_chunk pg_chunk;/* page chunk cache */
94 unsigned int use_pages; /* whether FL uses pages or sk_buffs */ 95 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
96 unsigned int order; /* order of page allocations */
95 struct rx_desc *desc; /* address of HW Rx descriptor ring */ 97 struct rx_desc *desc; /* address of HW Rx descriptor ring */
96 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ 98 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
97 dma_addr_t phys_addr; /* physical address of HW ring start */ 99 dma_addr_t phys_addr; /* physical address of HW ring start */
@@ -116,12 +118,15 @@ struct sge_rspq { /* state for an SGE response queue */
116 unsigned int polling; /* is the queue serviced through NAPI? */ 118 unsigned int polling; /* is the queue serviced through NAPI? */
117 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */ 119 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
118 unsigned int next_holdoff; /* holdoff time for next interrupt */ 120 unsigned int next_holdoff; /* holdoff time for next interrupt */
121 unsigned int rx_recycle_buf; /* whether recycling occurred
122 within current sop-eop */
119 struct rsp_desc *desc; /* address of HW response ring */ 123 struct rsp_desc *desc; /* address of HW response ring */
120 dma_addr_t phys_addr; /* physical address of the ring */ 124 dma_addr_t phys_addr; /* physical address of the ring */
121 unsigned int cntxt_id; /* SGE context id for the response q */ 125 unsigned int cntxt_id; /* SGE context id for the response q */
122 spinlock_t lock; /* guards response processing */ 126 spinlock_t lock; /* guards response processing */
123 struct sk_buff *rx_head; /* offload packet receive queue head */ 127 struct sk_buff *rx_head; /* offload packet receive queue head */
124 struct sk_buff *rx_tail; /* offload packet receive queue tail */ 128 struct sk_buff *rx_tail; /* offload packet receive queue tail */
129 struct sk_buff *pg_skb; /* used to build frag list in napi handler */
125 130
126 unsigned long offload_pkts; 131 unsigned long offload_pkts;
127 unsigned long offload_bundles; 132 unsigned long offload_bundles;
@@ -169,16 +174,29 @@ enum { /* per port SGE statistics */
169 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ 174 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
170 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ 175 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
171 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ 176 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
177 SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */
178 SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */
179 SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */
172 180
173 SGE_PSTAT_MAX /* must be last */ 181 SGE_PSTAT_MAX /* must be last */
174}; 182};
175 183
184#define T3_MAX_LRO_SES 8
185#define T3_MAX_LRO_MAX_PKTS 64
186
176struct sge_qset { /* an SGE queue set */ 187struct sge_qset { /* an SGE queue set */
177 struct adapter *adap; 188 struct adapter *adap;
178 struct napi_struct napi; 189 struct napi_struct napi;
179 struct sge_rspq rspq; 190 struct sge_rspq rspq;
180 struct sge_fl fl[SGE_RXQ_PER_SET]; 191 struct sge_fl fl[SGE_RXQ_PER_SET];
181 struct sge_txq txq[SGE_TXQ_PER_SET]; 192 struct sge_txq txq[SGE_TXQ_PER_SET];
193 struct net_lro_mgr lro_mgr;
194 struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
195 struct skb_frag_struct *lro_frag_tbl;
196 int lro_nfrags;
197 int lro_enabled;
198 int lro_frag_len;
199 void *lro_va;
182 struct net_device *netdev; 200 struct net_device *netdev;
183 unsigned long txq_stopped; /* which Tx queues are stopped */ 201 unsigned long txq_stopped; /* which Tx queues are stopped */
184 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ 202 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 579bee42a5cb..d444f5881f56 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -351,6 +351,7 @@ struct tp_params {
351 351
352struct qset_params { /* SGE queue set parameters */ 352struct qset_params { /* SGE queue set parameters */
353 unsigned int polling; /* polling/interrupt service for rspq */ 353 unsigned int polling; /* polling/interrupt service for rspq */
354 unsigned int lro; /* large receive offload */
354 unsigned int coalesce_usecs; /* irq coalescing timer */ 355 unsigned int coalesce_usecs; /* irq coalescing timer */
355 unsigned int rspq_size; /* # of entries in response queue */ 356 unsigned int rspq_size; /* # of entries in response queue */
356 unsigned int fl_size; /* # of entries in regular free list */ 357 unsigned int fl_size; /* # of entries in regular free list */
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index 0a82fcddf2d8..68200a14065e 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -90,6 +90,7 @@ struct ch_qset_params {
90 int32_t fl_size[2]; 90 int32_t fl_size[2];
91 int32_t intr_lat; 91 int32_t intr_lat;
92 int32_t polling; 92 int32_t polling;
93 int32_t lro;
93 int32_t cong_thres; 94 int32_t cong_thres;
94}; 95};
95 96
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 3a3127216791..5447f3e60f07 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1212,6 +1212,9 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
1212 "VLANinsertions ", 1212 "VLANinsertions ",
1213 "TxCsumOffload ", 1213 "TxCsumOffload ",
1214 "RxCsumGood ", 1214 "RxCsumGood ",
1215 "LroAggregated ",
1216 "LroFlushed ",
1217 "LroNoDesc ",
1215 "RxDrops ", 1218 "RxDrops ",
1216 1219
1217 "CheckTXEnToggled ", 1220 "CheckTXEnToggled ",
@@ -1340,6 +1343,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1340 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); 1343 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1341 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1344 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1342 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1345 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1346 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1347 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1348 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1343 *data++ = s->rx_cong_drops; 1349 *data++ = s->rx_cong_drops;
1344 1350
1345 *data++ = s->num_toggled; 1351 *data++ = s->num_toggled;
@@ -1558,6 +1564,13 @@ static int set_rx_csum(struct net_device *dev, u32 data)
1558 struct port_info *p = netdev_priv(dev); 1564 struct port_info *p = netdev_priv(dev);
1559 1565
1560 p->rx_csum_offload = data; 1566 p->rx_csum_offload = data;
1567 if (!data) {
1568 struct adapter *adap = p->adapter;
1569 int i;
1570
1571 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1572 adap->sge.qs[i].lro_enabled = 0;
1573 }
1561 return 0; 1574 return 0;
1562} 1575}
1563 1576
@@ -1830,6 +1843,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1830 } 1843 }
1831 } 1844 }
1832 } 1845 }
1846 if (t.lro >= 0) {
1847 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1848 q->lro = t.lro;
1849 qs->lro_enabled = t.lro;
1850 }
1833 break; 1851 break;
1834 } 1852 }
1835 case CHELSIO_GET_QSET_PARAMS:{ 1853 case CHELSIO_GET_QSET_PARAMS:{
@@ -1849,6 +1867,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1849 t.fl_size[0] = q->fl_size; 1867 t.fl_size[0] = q->fl_size;
1850 t.fl_size[1] = q->jumbo_size; 1868 t.fl_size[1] = q->jumbo_size;
1851 t.polling = q->polling; 1869 t.polling = q->polling;
1870 t.lro = q->lro;
1852 t.intr_lat = q->coalesce_usecs; 1871 t.intr_lat = q->coalesce_usecs;
1853 t.cong_thres = q->cong_thres; 1872 t.cong_thres = q->cong_thres;
1854 1873
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 796eb305cdc3..a96331c875e6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -55,6 +55,9 @@
55 * directly. 55 * directly.
56 */ 56 */
57#define FL0_PG_CHUNK_SIZE 2048 57#define FL0_PG_CHUNK_SIZE 2048
58#define FL0_PG_ORDER 0
59#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
60#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
58 61
59#define SGE_RX_DROP_THRES 16 62#define SGE_RX_DROP_THRES 16
60 63
@@ -359,7 +362,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
359 } 362 }
360 363
361 if (q->pg_chunk.page) { 364 if (q->pg_chunk.page) {
362 __free_page(q->pg_chunk.page); 365 __free_pages(q->pg_chunk.page, q->order);
363 q->pg_chunk.page = NULL; 366 q->pg_chunk.page = NULL;
364 } 367 }
365} 368}
@@ -376,13 +379,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
376 * Add a buffer of the given length to the supplied HW and SW Rx 379 * Add a buffer of the given length to the supplied HW and SW Rx
377 * descriptors. 380 * descriptors.
378 */ 381 */
379static inline void add_one_rx_buf(void *va, unsigned int len, 382static inline int add_one_rx_buf(void *va, unsigned int len,
380 struct rx_desc *d, struct rx_sw_desc *sd, 383 struct rx_desc *d, struct rx_sw_desc *sd,
381 unsigned int gen, struct pci_dev *pdev) 384 unsigned int gen, struct pci_dev *pdev)
382{ 385{
383 dma_addr_t mapping; 386 dma_addr_t mapping;
384 387
385 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); 388 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
389 if (unlikely(pci_dma_mapping_error(mapping)))
390 return -ENOMEM;
391
386 pci_unmap_addr_set(sd, dma_addr, mapping); 392 pci_unmap_addr_set(sd, dma_addr, mapping);
387 393
388 d->addr_lo = cpu_to_be32(mapping); 394 d->addr_lo = cpu_to_be32(mapping);
@@ -390,12 +396,14 @@ static inline void add_one_rx_buf(void *va, unsigned int len,
390 wmb(); 396 wmb();
391 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); 397 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
392 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); 398 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
399 return 0;
393} 400}
394 401
395static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp) 402static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
403 unsigned int order)
396{ 404{
397 if (!q->pg_chunk.page) { 405 if (!q->pg_chunk.page) {
398 q->pg_chunk.page = alloc_page(gfp); 406 q->pg_chunk.page = alloc_pages(gfp, order);
399 if (unlikely(!q->pg_chunk.page)) 407 if (unlikely(!q->pg_chunk.page))
400 return -ENOMEM; 408 return -ENOMEM;
401 q->pg_chunk.va = page_address(q->pg_chunk.page); 409 q->pg_chunk.va = page_address(q->pg_chunk.page);
@@ -404,7 +412,7 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
404 sd->pg_chunk = q->pg_chunk; 412 sd->pg_chunk = q->pg_chunk;
405 413
406 q->pg_chunk.offset += q->buf_size; 414 q->pg_chunk.offset += q->buf_size;
407 if (q->pg_chunk.offset == PAGE_SIZE) 415 if (q->pg_chunk.offset == (PAGE_SIZE << order))
408 q->pg_chunk.page = NULL; 416 q->pg_chunk.page = NULL;
409 else { 417 else {
410 q->pg_chunk.va += q->buf_size; 418 q->pg_chunk.va += q->buf_size;
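
alloc_pg_chunk now carves fixed-size buffers out of a single higher-order page: the offset advances by buf_size until PAGE_SIZE << order is consumed, and only then is a new block allocated. A user-space sketch of the carving logic with FL0's parameters (2 KB chunks from order-0 pages; alloc_chunk is an illustrative name, and the demo deliberately skips the refcounting and freeing the driver does through its sw descriptors):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct pg_chunk {
	void *page;		/* current block, NULL once exhausted */
	char *va;		/* next free byte within the block */
	unsigned int offset;
};

static void *alloc_chunk(struct pg_chunk *q, unsigned int buf_size,
			 unsigned int order)
{
	void *va;

	if (!q->page) {
		q->page = malloc((size_t)PAGE_SIZE << order);
		if (!q->page)
			return NULL;
		q->va = q->page;
		q->offset = 0;
	}
	va = q->va;
	q->offset += buf_size;
	if (q->offset == (PAGE_SIZE << order))
		q->page = NULL;		/* block fully carved up */
	else
		q->va += buf_size;
	return va;
}

int main(void)
{
	struct pg_chunk q = { 0 };
	int i;

	for (i = 0; i < 3; i++)		/* third chunk forces a new block */
		printf("chunk %d at %p\n", i, alloc_chunk(&q, 2048, 0));
	return 0;
}
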
@@ -424,15 +432,18 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
424 * allocated with the supplied gfp flags. The caller must assure that 432 * allocated with the supplied gfp flags. The caller must assure that
425 * @n does not exceed the queue's capacity. 433 * @n does not exceed the queue's capacity.
426 */ 434 */
427static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 435static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
428{ 436{
429 void *buf_start; 437 void *buf_start;
430 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 438 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
431 struct rx_desc *d = &q->desc[q->pidx]; 439 struct rx_desc *d = &q->desc[q->pidx];
440 unsigned int count = 0;
432 441
433 while (n--) { 442 while (n--) {
443 int err;
444
434 if (q->use_pages) { 445 if (q->use_pages) {
435 if (unlikely(alloc_pg_chunk(q, sd, gfp))) { 446 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
436nomem: q->alloc_failed++; 447nomem: q->alloc_failed++;
437 break; 448 break;
438 } 449 }
@@ -447,8 +458,16 @@ nomem: q->alloc_failed++;
447 buf_start = skb->data; 458 buf_start = skb->data;
448 } 459 }
449 460
450 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, 461 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
451 adap->pdev); 462 adap->pdev);
463 if (unlikely(err)) {
464 if (!q->use_pages) {
465 kfree_skb(sd->skb);
466 sd->skb = NULL;
467 }
468 break;
469 }
470
452 d++; 471 d++;
453 sd++; 472 sd++;
454 if (++q->pidx == q->size) { 473 if (++q->pidx == q->size) {
@@ -458,14 +477,19 @@ nomem: q->alloc_failed++;
458 d = q->desc; 477 d = q->desc;
459 } 478 }
460 q->credits++; 479 q->credits++;
480 count++;
461 } 481 }
462 wmb(); 482 wmb();
463 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 483 if (likely(count))
484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
485
486 return count;
464} 487}
465 488
466static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) 489static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
467{ 490{
468 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC); 491 refill_fl(adap, fl, min(16U, fl->size - fl->credits),
492 GFP_ATOMIC | __GFP_COMP);
469} 493}
470 494
471/** 495/**
@@ -560,6 +584,8 @@ static void t3_reset_qset(struct sge_qset *q)
560 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 584 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
561 q->txq_stopped = 0; 585 q->txq_stopped = 0;
562 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer)); 586 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
587 kfree(q->lro_frag_tbl);
588 q->lro_nfrags = q->lro_frag_len = 0;
563} 589}
564 590
565 591
@@ -740,19 +766,22 @@ use_orig_buf:
740 * that are page chunks rather than sk_buffs. 766 * that are page chunks rather than sk_buffs.
741 */ 767 */
742static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, 768static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
743 unsigned int len, unsigned int drop_thres) 769 struct sge_rspq *q, unsigned int len,
770 unsigned int drop_thres)
744{ 771{
745 struct sk_buff *skb = NULL; 772 struct sk_buff *newskb, *skb;
746 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 773 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
747 774
748 if (len <= SGE_RX_COPY_THRES) { 775 newskb = skb = q->pg_skb;
749 skb = alloc_skb(len, GFP_ATOMIC); 776
750 if (likely(skb != NULL)) { 777 if (!skb && (len <= SGE_RX_COPY_THRES)) {
751 __skb_put(skb, len); 778 newskb = alloc_skb(len, GFP_ATOMIC);
779 if (likely(newskb != NULL)) {
780 __skb_put(newskb, len);
752 pci_dma_sync_single_for_cpu(adap->pdev, 781 pci_dma_sync_single_for_cpu(adap->pdev,
753 pci_unmap_addr(sd, dma_addr), len, 782 pci_unmap_addr(sd, dma_addr), len,
754 PCI_DMA_FROMDEVICE); 783 PCI_DMA_FROMDEVICE);
755 memcpy(skb->data, sd->pg_chunk.va, len); 784 memcpy(newskb->data, sd->pg_chunk.va, len);
756 pci_dma_sync_single_for_device(adap->pdev, 785 pci_dma_sync_single_for_device(adap->pdev,
757 pci_unmap_addr(sd, dma_addr), len, 786 pci_unmap_addr(sd, dma_addr), len,
758 PCI_DMA_FROMDEVICE); 787 PCI_DMA_FROMDEVICE);
@@ -761,14 +790,16 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
761recycle: 790recycle:
762 fl->credits--; 791 fl->credits--;
763 recycle_rx_buf(adap, fl, fl->cidx); 792 recycle_rx_buf(adap, fl, fl->cidx);
764 return skb; 793 q->rx_recycle_buf++;
794 return newskb;
765 } 795 }
766 796
767 if (unlikely(fl->credits <= drop_thres)) 797 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
768 goto recycle; 798 goto recycle;
769 799
770 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); 800 if (!skb)
771 if (unlikely(!skb)) { 801 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
802 if (unlikely(!newskb)) {
772 if (!drop_thres) 803 if (!drop_thres)
773 return NULL; 804 return NULL;
774 goto recycle; 805 goto recycle;
@@ -776,21 +807,29 @@ recycle:
776 807
777 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 808 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
778 fl->buf_size, PCI_DMA_FROMDEVICE); 809 fl->buf_size, PCI_DMA_FROMDEVICE);
779 __skb_put(skb, SGE_RX_PULL_LEN); 810 if (!skb) {
780 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); 811 __skb_put(newskb, SGE_RX_PULL_LEN);
781 skb_fill_page_desc(skb, 0, sd->pg_chunk.page, 812 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
782 sd->pg_chunk.offset + SGE_RX_PULL_LEN, 813 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
783 len - SGE_RX_PULL_LEN); 814 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
784 skb->len = len; 815 len - SGE_RX_PULL_LEN);
785 skb->data_len = len - SGE_RX_PULL_LEN; 816 newskb->len = len;
786 skb->truesize += skb->data_len; 817 newskb->data_len = len - SGE_RX_PULL_LEN;
818 } else {
819 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
820 sd->pg_chunk.page,
821 sd->pg_chunk.offset, len);
822 newskb->len += len;
823 newskb->data_len += len;
824 }
825 newskb->truesize += newskb->data_len;
787 826
788 fl->credits--; 827 fl->credits--;
789 /* 828 /*
790 * We do not refill FLs here, we let the caller do it to overlap a 829 * We do not refill FLs here, we let the caller do it to overlap a
791 * prefetch. 830 * prefetch.
792 */ 831 */
793 return skb; 832 return newskb;
794} 833}
795 834
796/** 835/**
@@ -1831,9 +1870,10 @@ static void restart_tx(struct sge_qset *qs)
1831 * if it was immediate data in a response. 1870 * if it was immediate data in a response.
1832 */ 1871 */
1833static void rx_eth(struct adapter *adap, struct sge_rspq *rq, 1872static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1834 struct sk_buff *skb, int pad) 1873 struct sk_buff *skb, int pad, int lro)
1835{ 1874{
1836 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); 1875 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1876 struct sge_qset *qs = rspq_to_qset(rq);
1837 struct port_info *pi; 1877 struct port_info *pi;
1838 1878
1839 skb_pull(skb, sizeof(*p) + pad); 1879 skb_pull(skb, sizeof(*p) + pad);
@@ -1850,18 +1890,202 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1850 if (unlikely(p->vlan_valid)) { 1890 if (unlikely(p->vlan_valid)) {
1851 struct vlan_group *grp = pi->vlan_grp; 1891 struct vlan_group *grp = pi->vlan_grp;
1852 1892
1853 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++; 1893 qs->port_stats[SGE_PSTAT_VLANEX]++;
1854 if (likely(grp)) 1894 if (likely(grp))
1855 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 1895 if (lro)
1856 rq->polling); 1896 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1897 grp,
1898 ntohs(p->vlan),
1899 p);
1900 else
1901 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1902 rq->polling);
1857 else 1903 else
1858 dev_kfree_skb_any(skb); 1904 dev_kfree_skb_any(skb);
1859 } else if (rq->polling) 1905 } else if (rq->polling) {
1860 netif_receive_skb(skb); 1906 if (lro)
1861 else 1907 lro_receive_skb(&qs->lro_mgr, skb, p);
1908 else
1909 netif_receive_skb(skb);
1910 } else
1862 netif_rx(skb); 1911 netif_rx(skb);
1863} 1912}
1864 1913
1914static inline int is_eth_tcp(u32 rss)
1915{
1916 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1917}
1918
1919/**
1920 * lro_frame_ok - check if an ingress packet is eligible for LRO
1921 * @p: the CPL header of the packet
1922 *
1923 * Returns true if a received packet is eligible for LRO.
1924 * The following conditions must be true:
1925 * - packet is TCP/IP Ethernet II (checked elsewhere)
1926 * - not an IP fragment
1927 * - no IP options
1928 * - TCP/IP checksums are correct
1929 * - the packet is for this host
1930 */
1931static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1932{
1933 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1934 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1935
1936 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1938}
1939
1940#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
1941 TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
1942 TCP_FLAG_SYN | TCP_FLAG_FIN)
1943#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
1944 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
1945
1946/**
1947 * lro_segment_ok - check if a TCP segment is eligible for LRO
1948 * @tcph: the TCP header of the packet
1949 *
1950 * Returns true if a TCP packet is eligible for LRO. This requires that
1951 * the packet have only the ACK flag set and no TCP options besides
1952 * time stamps.
1953 */
1954static inline int lro_segment_ok(const struct tcphdr *tcph)
1955{
1956 int optlen;
1957
1958 if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
1959 return 0;
1960
1961 optlen = (tcph->doff << 2) - sizeof(*tcph);
1962 if (optlen) {
1963 const u32 *opt = (const u32 *)(tcph + 1);
1964
1965 if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
1966 *opt != htonl(TSTAMP_WORD) || !opt[2])
1967 return 0;
1968 }
1969 return 1;
1970}
1971
1972static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1973 u64 *hdr_flags, void *priv)
1974{
1975 const struct cpl_rx_pkt *cpl = priv;
1976
1977 if (!lro_frame_ok(cpl))
1978 return -1;
1979
1980 *eh = (struct ethhdr *)(cpl + 1);
1981 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1982 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1983
1984 if (!lro_segment_ok(*tcph))
1985 return -1;
1986
1987 *hdr_flags = LRO_IPV4 | LRO_TCP;
1988 return 0;
1989}
1990
1991static int t3_get_skb_header(struct sk_buff *skb,
1992 void **iph, void **tcph, u64 *hdr_flags,
1993 void *priv)
1994{
1995 void *eh;
1996
1997 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1998}
1999
2000static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
2001 void **iph, void **tcph, u64 *hdr_flags,
2002 void *priv)
2003{
2004 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
2005}
2006
2007/**
2008 * lro_add_page - add a page chunk to an LRO session
2009 * @adap: the adapter
2010 * @qs: the associated queue set
2011 * @fl: the free list containing the page chunk to add
2012 * @len: packet length
2013 * @complete: Indicates the last fragment of a frame
2014 *
2015 * Add a received packet contained in a page chunk to an existing LRO
2016 * session.
2017 */
2018static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2019 struct sge_fl *fl, int len, int complete)
2020{
2021 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2022 struct cpl_rx_pkt *cpl;
2023 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
2024 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
2025 int offset = 0;
2026
2027 if (!nr_frags) {
2028 offset = 2 + sizeof(struct cpl_rx_pkt);
2029 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2030 }
2031
2032 fl->credits--;
2033
2034 len -= offset;
2035 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2036 fl->buf_size, PCI_DMA_FROMDEVICE);
2037
2038 rx_frag += nr_frags;
2039 rx_frag->page = sd->pg_chunk.page;
2040 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2041 rx_frag->size = len;
2042 frag_len += len;
2043 qs->lro_nfrags++;
2044 qs->lro_frag_len = frag_len;
2045
2046 if (!complete)
2047 return;
2048
2049 qs->lro_nfrags = qs->lro_frag_len = 0;
2050 cpl = qs->lro_va;
2051
2052 if (unlikely(cpl->vlan_valid)) {
2053 struct net_device *dev = qs->netdev;
2054 struct port_info *pi = netdev_priv(dev);
2055 struct vlan_group *grp = pi->vlan_grp;
2056
2057 if (likely(grp != NULL)) {
2058 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2059 qs->lro_frag_tbl,
2060 frag_len, frag_len,
2061 grp, ntohs(cpl->vlan),
2062 cpl, 0);
2063 return;
2064 }
2065 }
2066 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2067 frag_len, frag_len, cpl, 0);
2068}
2069
2070/**
2071 * init_lro_mgr - initialize a LRO manager object
2072 * @lro_mgr: the LRO manager object
2073 */
2074static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2075{
2076 lro_mgr->dev = qs->netdev;
2077 lro_mgr->features = LRO_F_NAPI;
2078 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2079 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2080 lro_mgr->max_desc = T3_MAX_LRO_SES;
2081 lro_mgr->lro_arr = qs->lro_desc;
2082 lro_mgr->get_frag_header = t3_get_frag_header;
2083 lro_mgr->get_skb_header = t3_get_skb_header;
2084 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2085 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2086 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2087}
2088
1865/** 2089/**
1866 * handle_rsp_cntrl_info - handles control information in a response 2090 * handle_rsp_cntrl_info - handles control information in a response
1867 * @qs: the queue set corresponding to the response 2091 * @qs: the queue set corresponding to the response
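
The eligibility test in lro_segment_ok is a two-part filter: the flag word must carry exactly ACK, and any TCP options must be exactly one aligned timestamp. A stand-alone version operating on a host-order flag word (a simplification: the kernel compares in network order via the htonl'd TCP_FLAG_* constants; the TSTAMP_WORD value matches the definition in the hunk above):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TCP_FLAG_ACK	0x00100000u
#define TCP_FLAG_MASK	0x00FF0000u	/* CWR|ECE|URG|ACK|PSH|RST|SYN|FIN */
#define TSTAMP_WORD	0x0101080Au	/* NOP NOP TIMESTAMP(8) len=10 */
#define TSTAMP_ALIGNED	12

/* mirror lro_segment_ok: pure ACK, and no options beyond timestamps */
static int segment_ok(uint32_t flag_word, int doff_words,
		      const uint32_t *opts)
{
	int optlen = doff_words * 4 - 20;	/* bytes past the base header */

	if ((flag_word & TCP_FLAG_MASK) != TCP_FLAG_ACK)
		return 0;
	if (optlen == 0)
		return 1;
	if (optlen != TSTAMP_ALIGNED || ntohl(opts[0]) != TSTAMP_WORD ||
	    !opts[2])			/* echoed timestamp must be set */
		return 0;
	return 1;
}

int main(void)
{
	uint32_t ts[3] = { htonl(TSTAMP_WORD), htonl(1), htonl(2) };

	printf("plain ACK:      %d\n", segment_ok(0x00100000, 5, NULL));
	printf("ACK+timestamps: %d\n", segment_ok(0x00100000, 8, ts));
	printf("SYN:            %d\n", segment_ok(0x00020000, 5, NULL));
	return 0;
}
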
@@ -1947,6 +2171,12 @@ static inline int is_new_response(const struct rsp_desc *r,
1947 return (r->intr_gen & F_RSPD_GEN2) == q->gen; 2171 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1948} 2172}
1949 2173
2174static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2175{
2176 q->pg_skb = NULL;
2177 q->rx_recycle_buf = 0;
2178}
2179
1950#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) 2180#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1951#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ 2181#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1952 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ 2182 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
@@ -1984,10 +2214,11 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1984 q->next_holdoff = q->holdoff_tmr; 2214 q->next_holdoff = q->holdoff_tmr;
1985 2215
1986 while (likely(budget_left && is_new_response(r, q))) { 2216 while (likely(budget_left && is_new_response(r, q))) {
1987 int eth, ethpad = 2; 2217 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
1988 struct sk_buff *skb = NULL; 2218 struct sk_buff *skb = NULL;
1989 u32 len, flags = ntohl(r->flags); 2219 u32 len, flags = ntohl(r->flags);
1990 __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val; 2220 __be32 rss_hi = *(const __be32 *)r,
2221 rss_lo = r->rss_hdr.rss_hash_val;
1991 2222
1992 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2223 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1993 2224
@@ -2015,6 +2246,9 @@ no_mem:
2015 } else if ((len = ntohl(r->len_cq)) != 0) { 2246 } else if ((len = ntohl(r->len_cq)) != 0) {
2016 struct sge_fl *fl; 2247 struct sge_fl *fl;
2017 2248
2249 if (eth)
2250 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2251
2018 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2252 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2019 if (fl->use_pages) { 2253 if (fl->use_pages) {
2020 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2254 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
@@ -2024,9 +2258,18 @@ no_mem:
2024 prefetch(addr + L1_CACHE_BYTES); 2258 prefetch(addr + L1_CACHE_BYTES);
2025#endif 2259#endif
2026 __refill_fl(adap, fl); 2260 __refill_fl(adap, fl);
2261 if (lro > 0) {
2262 lro_add_page(adap, qs, fl,
2263 G_RSPD_LEN(len),
2264 flags & F_RSPD_EOP);
2265 goto next_fl;
2266 }
2027 2267
2028 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len), 2268 skb = get_packet_pg(adap, fl, q,
2029 eth ? SGE_RX_DROP_THRES : 0); 2269 G_RSPD_LEN(len),
2270 eth ?
2271 SGE_RX_DROP_THRES : 0);
2272 q->pg_skb = skb;
2030 } else 2273 } else
2031 skb = get_packet(adap, fl, G_RSPD_LEN(len), 2274 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2032 eth ? SGE_RX_DROP_THRES : 0); 2275 eth ? SGE_RX_DROP_THRES : 0);
@@ -2036,7 +2279,7 @@ no_mem:
2036 q->rx_drops++; 2279 q->rx_drops++;
2037 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) 2280 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2038 __skb_pull(skb, 2); 2281 __skb_pull(skb, 2);
2039 2282next_fl:
2040 if (++fl->cidx == fl->size) 2283 if (++fl->cidx == fl->size)
2041 fl->cidx = 0; 2284 fl->cidx = 0;
2042 } else 2285 } else
@@ -2060,9 +2303,13 @@ no_mem:
2060 q->credits = 0; 2303 q->credits = 0;
2061 } 2304 }
2062 2305
2063 if (likely(skb != NULL)) { 2306 packet_complete = flags &
2307 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2308 F_RSPD_ASYNC_NOTIF);
2309
2310 if (skb != NULL && packet_complete) {
2064 if (eth) 2311 if (eth)
2065 rx_eth(adap, q, skb, ethpad); 2312 rx_eth(adap, q, skb, ethpad, lro);
2066 else { 2313 else {
2067 q->offload_pkts++; 2314 q->offload_pkts++;
2068 /* Preserve the RSS info in csum & priority */ 2315 /* Preserve the RSS info in csum & priority */
@@ -2072,11 +2319,19 @@ no_mem:
2072 offload_skbs, 2319 offload_skbs,
2073 ngathered); 2320 ngathered);
2074 } 2321 }
2322
2323 if (flags & F_RSPD_EOP)
2324 clear_rspq_bufstate(q);
2075 } 2325 }
2076 --budget_left; 2326 --budget_left;
2077 } 2327 }
2078 2328
2079 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2329 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2330 lro_flush_all(&qs->lro_mgr);
2331 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2332 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2333 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2334
2080 if (sleeping) 2335 if (sleeping)
2081 check_ring_db(adap, qs, sleeping); 2336 check_ring_db(adap, qs, sleeping);
2082 2337
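Within process_responses(), LRO is attempted only when the RSS header says the frame is Ethernet TCP (is_eth_tcp() on rss_hi), and a buffered skb is delivered only once the response flags mark it complete. The completeness test is just a mask; a standalone check, with hypothetical bit positions:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical bit positions; the driver takes these from t3_cpl.h. */
#define F_RSPD_EOP		(1u << 0)
#define F_RSPD_IMM_DATA_VALID	(1u << 1)
#define F_RSPD_ASYNC_NOTIF	(1u << 2)

static int packet_complete(uint32_t flags)
{
	return !!(flags & (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
			   F_RSPD_ASYNC_NOTIF));
}

int main(void)
{
	printf("%d\n", packet_complete(F_RSPD_EOP));	/* 1: deliver skb */
	printf("%d\n", packet_complete(0));		/* 0: wait for more */
	return 0;
}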
@@ -2618,8 +2873,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2618 int irq_vec_idx, const struct qset_params *p, 2873 int irq_vec_idx, const struct qset_params *p,
2619 int ntxq, struct net_device *dev) 2874 int ntxq, struct net_device *dev)
2620{ 2875{
2621 int i, ret = -ENOMEM; 2876 int i, avail, ret = -ENOMEM;
2622 struct sge_qset *q = &adapter->sge.qs[id]; 2877 struct sge_qset *q = &adapter->sge.qs[id];
2878 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2623 2879
2624 init_qset_cntxt(q, id); 2880 init_qset_cntxt(q, id);
2625 init_timer(&q->tx_reclaim_timer); 2881 init_timer(&q->tx_reclaim_timer);
@@ -2687,11 +2943,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2687#else 2943#else
2688 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); 2944 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2689#endif 2945#endif
2690 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; 2946#if FL1_PG_CHUNK_SIZE > 0
2947 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2948#else
2691 q->fl[1].buf_size = is_offload(adapter) ? 2949 q->fl[1].buf_size = is_offload(adapter) ?
2692 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 2950 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2693 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); 2951 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2952#endif
2694 2953
2954 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2955 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2956 q->fl[0].order = FL0_PG_ORDER;
2957 q->fl[1].order = FL1_PG_ORDER;
2958
2959 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2960 sizeof(struct skb_frag_struct),
2961 GFP_KERNEL);
2962 q->lro_nfrags = q->lro_frag_len = 0;
2695 spin_lock_irq(&adapter->sge.reg_lock); 2963 spin_lock_irq(&adapter->sge.reg_lock);
2696 2964
2697 /* FL threshold comparison uses < */ 2965 /* FL threshold comparison uses < */
@@ -2742,8 +3010,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2742 q->netdev = dev; 3010 q->netdev = dev;
2743 t3_update_qset_coalesce(q, p); 3011 t3_update_qset_coalesce(q, p);
2744 3012
2745 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); 3013 init_lro_mgr(q, lro_mgr);
2746 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); 3014
3015 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3016 GFP_KERNEL | __GFP_COMP);
3017 if (!avail) {
3018 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3019 goto err;
3020 }
3021 if (avail < q->fl[0].size)
3022 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3023 avail);
3024
3025 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3026 GFP_KERNEL | __GFP_COMP);
3027 if (avail < q->fl[1].size)
3028 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3029 avail);
2747 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); 3030 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2748 3031
2749 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 3032 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
@@ -2752,9 +3035,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2752 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3035 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2753 return 0; 3036 return 0;
2754 3037
2755 err_unlock: 3038err_unlock:
2756 spin_unlock_irq(&adapter->sge.reg_lock); 3039 spin_unlock_irq(&adapter->sge.reg_lock);
2757 err: 3040err:
2758 t3_free_qset(adapter, q); 3041 t3_free_qset(adapter, q);
2759 return ret; 3042 return ret;
2760} 3043}
@@ -2876,7 +3159,7 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
2876 q->coalesce_usecs = 5; 3159 q->coalesce_usecs = 5;
2877 q->rspq_size = 1024; 3160 q->rspq_size = 1024;
2878 q->fl_size = 1024; 3161 q->fl_size = 1024;
2879 q->jumbo_size = 512; 3162 q->jumbo_size = 512;
2880 q->txq_size[TXQ_ETH] = 1024; 3163 q->txq_size[TXQ_ETH] = 1024;
2881 q->txq_size[TXQ_OFLD] = 1024; 3164 q->txq_size[TXQ_OFLD] = 1024;
2882 q->txq_size[TXQ_CTRL] = 256; 3165 q->txq_size[TXQ_CTRL] = 256;
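The tail of t3_sge_alloc_qset() now treats refill_fl()'s return value as a credit count: zero buffers on free list 0 aborts qset bringup, while a partial fill only logs a warning. A sketch of that policy in plain C, where refill() and the log macros are stand-ins:

#include <stdio.h>

#define ALERT(...) fprintf(stderr, "alert: " __VA_ARGS__)
#define WARN(...) fprintf(stderr, "warn: " __VA_ARGS__)

/* Stand-in for refill_fl(): returns the number of buffers allocated. */
static int refill(int wanted)
{
	return wanted - 3;	/* pretend a few allocations failed */
}

static int bring_up_free_list(int id, int size, int fatal_if_empty)
{
	int avail = refill(size);

	if (!avail && fatal_if_empty) {
		ALERT("free list queue %d initialization failed\n", id);
		return -1;	/* caller unwinds, as the err: label does */
	}
	if (avail < size)
		WARN("free list queue %d enabled with %d credits\n",
		     id, avail);
	return 0;
}

int main(void)
{
	return bring_up_free_list(0, 1024, 1);
}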
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
index b7a1a310dfd4..a666c5d51cc0 100644
--- a/drivers/net/cxgb3/t3_cpl.h
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -174,6 +174,13 @@ enum { /* TCP congestion control algorithms */
174 CONG_ALG_HIGHSPEED 174 CONG_ALG_HIGHSPEED
175}; 175};
176 176
177enum { /* RSS hash type */
178 RSS_HASH_NONE = 0,
179 RSS_HASH_2_TUPLE = 1,
180 RSS_HASH_4_TUPLE = 2,
181 RSS_HASH_TCPV6 = 3
182};
183
177union opcode_tid { 184union opcode_tid {
178 __be32 opcode_tid; 185 __be32 opcode_tid;
179 __u8 opcode; 186 __u8 opcode;
@@ -184,6 +191,10 @@ union opcode_tid {
184#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) 191#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
185#define G_TID(x) ((x) & 0xFFFFFF) 192#define G_TID(x) ((x) & 0xFFFFFF)
186 193
194#define S_HASHTYPE 22
195#define M_HASHTYPE 0x3
196#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
197
187/* tid is assumed to be 24-bits */ 198/* tid is assumed to be 24-bits */
188#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) 199#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
189 200
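G_HASHTYPE follows the cxgb3 S_/M_/G_ idiom: shift the word right by the field's start bit, then mask. A standalone check that the macro pulls RSS_HASH_4_TUPLE back out of bits 23:22:

#include <stdio.h>
#include <stdint.h>

#define S_HASHTYPE 22
#define M_HASHTYPE 0x3
#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)

enum { RSS_HASH_NONE, RSS_HASH_2_TUPLE, RSS_HASH_4_TUPLE, RSS_HASH_TCPV6 };

int main(void)
{
	/* place a 4-tuple hash type in bits 23:22 of an RSS word */
	uint32_t rss = (uint32_t)RSS_HASH_4_TUPLE << S_HASHTYPE;

	printf("hash type = %u\n", (unsigned)G_HASHTYPE(rss));	/* 2 */
	return G_HASHTYPE(rss) == RSS_HASH_4_TUPLE ? 0 : 1;
}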
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e233d04a2132..8277e89e552d 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -499,7 +499,7 @@ rio_timer (unsigned long data)
499 entry = np->old_rx % RX_RING_SIZE; 499 entry = np->old_rx % RX_RING_SIZE;
500 /* Dropped packets don't need to re-allocate */ 500 /* Dropped packets don't need to re-allocate */
501 if (np->rx_skbuff[entry] == NULL) { 501 if (np->rx_skbuff[entry] == NULL) {
502 skb = dev_alloc_skb (np->rx_buf_sz); 502 skb = netdev_alloc_skb (dev, np->rx_buf_sz);
503 if (skb == NULL) { 503 if (skb == NULL) {
504 np->rx_ring[entry].fraginfo = 0; 504 np->rx_ring[entry].fraginfo = 0;
505 printk (KERN_INFO 505 printk (KERN_INFO
@@ -570,7 +570,7 @@ alloc_list (struct net_device *dev)
570 /* Allocate the rx buffers */ 570 /* Allocate the rx buffers */
571 for (i = 0; i < RX_RING_SIZE; i++) { 571 for (i = 0; i < RX_RING_SIZE; i++) {
572 /* Allocated fixed size of skbuff */ 572 /* Allocated fixed size of skbuff */
573 struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz); 573 struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
574 np->rx_skbuff[i] = skb; 574 np->rx_skbuff[i] = skb;
575 if (skb == NULL) { 575 if (skb == NULL) {
576 printk (KERN_ERR 576 printk (KERN_ERR
@@ -867,7 +867,7 @@ receive_packet (struct net_device *dev)
867 PCI_DMA_FROMDEVICE); 867 PCI_DMA_FROMDEVICE);
868 skb_put (skb = np->rx_skbuff[entry], pkt_len); 868 skb_put (skb = np->rx_skbuff[entry], pkt_len);
869 np->rx_skbuff[entry] = NULL; 869 np->rx_skbuff[entry] = NULL;
870 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { 870 } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
871 pci_dma_sync_single_for_cpu(np->pdev, 871 pci_dma_sync_single_for_cpu(np->pdev,
872 desc_to_dma(desc), 872 desc_to_dma(desc),
873 np->rx_buf_sz, 873 np->rx_buf_sz,
@@ -904,7 +904,7 @@ receive_packet (struct net_device *dev)
904 struct sk_buff *skb; 904 struct sk_buff *skb;
905 /* Dropped packets don't need to re-allocate */ 905 /* Dropped packets don't need to re-allocate */
906 if (np->rx_skbuff[entry] == NULL) { 906 if (np->rx_skbuff[entry] == NULL) {
907 skb = dev_alloc_skb (np->rx_buf_sz); 907 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
908 if (skb == NULL) { 908 if (skb == NULL) {
909 np->rx_ring[entry].fraginfo = 0; 909 np->rx_ring[entry].fraginfo = 0;
910 printk (KERN_INFO 910 printk (KERN_INFO
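netdev_alloc_skb() differs from dev_alloc_skb() in that it associates the skb with the device at allocation time, which is why receive paths prefer it and no longer set skb->dev by hand. A kernel-style sketch of the refill step in the shape dl2k now uses (the helper name and rx_buf_sz are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate one rx buffer for @dev; returns NULL on failure so the caller
 * can leave the ring slot empty and retry later, as rio_timer() does. */
static struct sk_buff *demo_rx_refill_one(struct net_device *dev,
					  unsigned int rx_buf_sz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, rx_buf_sz);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* 16-byte align the IP header */
	return skb;		/* skb->dev is already set */
}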
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 32a9a922f153..08a7365a7d10 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
903 if (netif_msg_ifdown(db)) 903 if (netif_msg_ifdown(db))
904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
905 905
906 cancel_delayed_work(&db->phy_poll); 906 cancel_delayed_work_sync(&db->phy_poll);
907 907
908 netif_stop_queue(ndev); 908 netif_stop_queue(ndev);
909 netif_carrier_off(ndev); 909 netif_carrier_off(ndev);
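cancel_delayed_work() only removes a pending timer; a work item already executing keeps running, which races with the teardown that follows. cancel_delayed_work_sync() also waits for a running instance to finish. The ordering dm9000 now relies on, sketched with a hypothetical private struct:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work phy_poll;
};

static void demo_stop(struct net_device *ndev, struct demo_priv *db)
{
	/* wait for a concurrently running poll before tearing down */
	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
}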
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8cbb40f3a506..cab1835173cd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4201 struct e1000_adapter *adapter; 4201 struct e1000_adapter *adapter;
4202 struct e1000_hw *hw; 4202 struct e1000_hw *hw;
4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
4204 unsigned long mmio_start, mmio_len; 4204 resource_size_t mmio_start, mmio_len;
4205 unsigned long flash_start, flash_len; 4205 resource_size_t flash_start, flash_len;
4206 4206
4207 static int cards_found; 4207 static int cards_found;
4208 int i, err, pci_using_dac; 4208 int i, err, pci_using_dac;
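On 32-bit kernels a 64-bit BAR can sit above 4 GB, so unsigned long truncates it; resource_size_t is typed to hold any resource address. A probe-time sketch (demo_probe() is illustrative, the pci_resource_* accessors are the real API):

#include <linux/io.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev)
{
	/* resource_size_t is 64-bit whenever resources can be */
	resource_size_t mmio_start = pci_resource_start(pdev, 0);
	resource_size_t mmio_len = pci_resource_len(pdev, 0);
	void __iomem *regs;

	regs = ioremap(mmio_start, mmio_len);
	if (!regs)
		return -EIO;
	/* ... use the registers ... */
	iounmap(regs);
	return 0;
}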
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index d1b6d4e7495d..287a61918739 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2213 goto out; 2213 goto out;
2214 } 2214 }
2215 2215
2216 memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
2217
2218 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2216 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2219 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2217 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2220 if (hret != H_SUCCESS) 2218 if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ out_err:
3178 3176
3179static void ehea_shutdown_single_port(struct ehea_port *port) 3177static void ehea_shutdown_single_port(struct ehea_port *port)
3180{ 3178{
3179 struct ehea_adapter *adapter = port->adapter;
3181 unregister_netdev(port->netdev); 3180 unregister_netdev(port->netdev);
3182 ehea_unregister_port(port); 3181 ehea_unregister_port(port);
3183 kfree(port->mc_list); 3182 kfree(port->mc_list);
3184 free_netdev(port->netdev); 3183 free_netdev(port->netdev);
3185 port->adapter->active_ports--; 3184 adapter->active_ports--;
3186} 3185}
3187 3186
3188static int ehea_setup_ports(struct ehea_adapter *adapter) 3187static int ehea_setup_ports(struct ehea_adapter *adapter)
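The ehea change fixes a use-after-free: port is allocated inside the netdev's private area, so port->adapter must be read before free_netdev() releases that memory. The safe ordering, reduced to demo types:

#include <linux/netdevice.h>

struct demo_adapter {
	int active_ports;
};

struct demo_port {
	struct net_device *netdev;
	struct demo_adapter *adapter;
};

static void demo_shutdown_port(struct demo_port *port)
{
	/* read this first: 'port' lives in the netdev's private area
	 * and is freed by free_netdev() below */
	struct demo_adapter *adapter = port->adapter;

	unregister_netdev(port->netdev);
	free_netdev(port->netdev);
	adapter->active_ports--;	/* adapter outlives the port */
}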
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 35f66d4a4595..e4d697894364 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -426,6 +426,7 @@ union ring_type {
426#define NV_PCI_REGSZ_VER1 0x270 426#define NV_PCI_REGSZ_VER1 0x270
427#define NV_PCI_REGSZ_VER2 0x2d4 427#define NV_PCI_REGSZ_VER2 0x2d4
428#define NV_PCI_REGSZ_VER3 0x604 428#define NV_PCI_REGSZ_VER3 0x604
429#define NV_PCI_REGSZ_MAX 0x604
429 430
430/* various timeout delays: all in usec */ 431/* various timeout delays: all in usec */
431#define NV_TXRX_RESET_DELAY 4 432#define NV_TXRX_RESET_DELAY 4
@@ -784,6 +785,9 @@ struct fe_priv {
784 785
785 /* flow control */ 786 /* flow control */
786 u32 pause_flags; 787 u32 pause_flags;
788
789 /* power saved state */
790 u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
787}; 791};
788 792
789/* 793/*
@@ -5785,49 +5789,66 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5785{ 5789{
5786 struct net_device *dev = pci_get_drvdata(pdev); 5790 struct net_device *dev = pci_get_drvdata(pdev);
5787 struct fe_priv *np = netdev_priv(dev); 5791 struct fe_priv *np = netdev_priv(dev);
5792 u8 __iomem *base = get_hwbase(dev);
5793 int i;
5788 5794
5789 if (!netif_running(dev)) 5795 if (netif_running(dev)) {
5790 goto out; 5796 /* Gross. */
5791 5797 nv_close(dev);
5798 }
5792 netif_device_detach(dev); 5799 netif_device_detach(dev);
5793 5800
5794 // Gross. 5801 /* save non-pci configuration space */
5795 nv_close(dev); 5802 for (i = 0; i < np->register_size/sizeof(u32); i++)
5803 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5796 5804
5797 pci_save_state(pdev); 5805 pci_save_state(pdev);
5798 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5806 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5807 pci_disable_device(pdev);
5799 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5808 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5800out:
5801 return 0; 5809 return 0;
5802} 5810}
5803 5811
5804static int nv_resume(struct pci_dev *pdev) 5812static int nv_resume(struct pci_dev *pdev)
5805{ 5813{
5806 struct net_device *dev = pci_get_drvdata(pdev); 5814 struct net_device *dev = pci_get_drvdata(pdev);
5815 struct fe_priv *np = netdev_priv(dev);
5807 u8 __iomem *base = get_hwbase(dev); 5816 u8 __iomem *base = get_hwbase(dev);
5808 int rc = 0; 5817 int i, rc = 0;
5809 u32 txreg;
5810
5811 if (!netif_running(dev))
5812 goto out;
5813
5814 netif_device_attach(dev);
5815 5818
5816 pci_set_power_state(pdev, PCI_D0); 5819 pci_set_power_state(pdev, PCI_D0);
5817 pci_restore_state(pdev); 5820 pci_restore_state(pdev);
5821 /* ack any pending wake events, disable PME */
5818 pci_enable_wake(pdev, PCI_D0, 0); 5822 pci_enable_wake(pdev, PCI_D0, 0);
5819 5823
5820 /* restore mac address reverse flag */ 5824 /* restore non-pci configuration space */
5821 txreg = readl(base + NvRegTransmitPoll); 5825 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5821 txreg = readl(base + NvRegTransmitPoll); 5825 for (i = 0; i < np->register_size/sizeof(u32); i++)
5823 writel(txreg, base + NvRegTransmitPoll);
5824 5827
5825 rc = nv_open(dev); 5828 netif_device_attach(dev);
5826out: 5829 if (netif_running(dev)) {
5830 rc = nv_open(dev);
5831 nv_set_multicast(dev);
5832 }
5827 return rc; 5833 return rc;
5828} 5834}
5835
5836static void nv_shutdown(struct pci_dev *pdev)
5837{
5838 struct net_device *dev = pci_get_drvdata(pdev);
5839 struct fe_priv *np = netdev_priv(dev);
5840
5841 if (netif_running(dev))
5842 nv_close(dev);
5843
5844 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5845 pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
5846 pci_disable_device(pdev);
5847 pci_set_power_state(pdev, PCI_D3hot);
5848}
5829#else 5849#else
5830#define nv_suspend NULL 5850#define nv_suspend NULL
5851#define nv_shutdown NULL
5831#define nv_resume NULL 5852#define nv_resume NULL
5832#endif /* CONFIG_PM */ 5853#endif /* CONFIG_PM */
5833 5854
@@ -5998,6 +6019,7 @@ static struct pci_driver driver = {
5998 .remove = __devexit_p(nv_remove), 6019 .remove = __devexit_p(nv_remove),
5999 .suspend = nv_suspend, 6020 .suspend = nv_suspend,
6000 .resume = nv_resume, 6021 .resume = nv_resume,
6022 .shutdown = nv_shutdown,
6001}; 6023};
6002 6024
6003static int __init init_nic(void) 6025static int __init init_nic(void)
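Rather than patching individual registers on resume, forcedeth now snapshots the whole MMIO register window word by word at suspend and replays it afterwards. A sketch of the pair, with the loop bound kept strictly below the save array's size so the last word still lands in bounds:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_REGSZ_MAX 0x604

struct demo_priv {
	unsigned int register_size;	/* bytes, <= DEMO_REGSZ_MAX */
	u32 saved_config_space[DEMO_REGSZ_MAX / 4];
};

static void demo_save_regs(struct demo_priv *np, u8 __iomem *base)
{
	unsigned int i;

	for (i = 0; i < np->register_size / sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i * sizeof(u32));
}

static void demo_restore_regs(struct demo_priv *np, u8 __iomem *base)
{
	unsigned int i;

	for (i = 0; i < np->register_size / sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i * sizeof(u32));
}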
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 67b4b0728fce..fb7c47790bd6 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -43,6 +43,7 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#ifdef CONFIG_PPC_CPM_NEW_BINDING 45#ifdef CONFIG_PPC_CPM_NEW_BINDING
46#include <linux/of_gpio.h>
46#include <asm/of_platform.h> 47#include <asm/of_platform.h>
47#endif 48#endif
48 49
@@ -1093,7 +1094,7 @@ err:
1093 if (registered) 1094 if (registered)
1094 unregister_netdev(ndev); 1095 unregister_netdev(ndev);
1095 1096
1096 if (fep != NULL) { 1097 if (fep && fep->ops) {
1097 (*fep->ops->free_bd)(ndev); 1098 (*fep->ops->free_bd)(ndev);
1098 (*fep->ops->cleanup_data)(ndev); 1099 (*fep->ops->cleanup_data)(ndev);
1099 } 1100 }
@@ -1172,8 +1173,7 @@ static int __devinit find_phy(struct device_node *np,
1172 struct fs_platform_info *fpi) 1173 struct fs_platform_info *fpi)
1173{ 1174{
1174 struct device_node *phynode, *mdionode; 1175 struct device_node *phynode, *mdionode;
1175 struct resource res; 1176 int ret = 0, len, bus_id;
1176 int ret = 0, len;
1177 const u32 *data; 1177 const u32 *data;
1178 1178
1179 data = of_get_property(np, "fixed-link", NULL); 1179 data = of_get_property(np, "fixed-link", NULL);
@@ -1190,19 +1190,28 @@ static int __devinit find_phy(struct device_node *np,
1190 if (!phynode) 1190 if (!phynode)
1191 return -EINVAL; 1191 return -EINVAL;
1192 1192
1193 mdionode = of_get_parent(phynode); 1193 data = of_get_property(phynode, "reg", &len);
1194 if (!mdionode) 1194 if (!data || len != 4) {
1195 ret = -EINVAL;
1195 goto out_put_phy; 1196 goto out_put_phy;
1197 }
1196 1198
1197 ret = of_address_to_resource(mdionode, 0, &res); 1199 mdionode = of_get_parent(phynode);
1198 if (ret) 1200 if (!mdionode) {
1199 goto out_put_mdio; 1201 ret = -EINVAL;
1202 goto out_put_phy;
1203 }
1200 1204
1201 data = of_get_property(phynode, "reg", &len); 1205 bus_id = of_get_gpio(mdionode, 0);
1202 if (!data || len != 4) 1206 if (bus_id < 0) {
1203 goto out_put_mdio; 1207 struct resource res;
1208 ret = of_address_to_resource(mdionode, 0, &res);
1209 if (ret)
1210 goto out_put_mdio;
1211 bus_id = res.start;
1212 }
1204 1213
1205 snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data); 1214 snprintf(fpi->bus_id, 16, "%x:%02x", bus_id, *data);
1206 1215
1207out_put_mdio: 1216out_put_mdio:
1208 of_node_put(mdionode); 1217 of_node_put(mdionode);
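find_phy() now prefers a GPIO specifier on the MDIO node to form the bus id, falling back to the controller's MMIO address when none exists. The fallback shape, with stand-in error handling (header locations for these OF helpers vary across kernel versions):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>

static int demo_mdio_bus_id(struct device_node *mdionode)
{
	int bus_id = of_get_gpio(mdionode, 0);

	if (bus_id < 0) {	/* not a GPIO bitbang bus: use registers */
		struct resource res;

		if (of_address_to_resource(mdionode, 0, &res))
			return -EINVAL;
		bus_id = res.start;
	}
	return bus_id;
}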
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index e5c2380f50ca..3199526bcecb 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1140,11 +1140,11 @@ static void hamachi_tx_timeout(struct net_device *dev)
1140 } 1140 }
1141 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1141 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1142 for (i = 0; i < RX_RING_SIZE; i++) { 1142 for (i = 0; i < RX_RING_SIZE; i++) {
1143 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1143 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
1144 hmp->rx_skbuff[i] = skb; 1144 hmp->rx_skbuff[i] = skb;
1145 if (skb == NULL) 1145 if (skb == NULL)
1146 break; 1146 break;
1147 skb->dev = dev; /* Mark as being used by this device. */ 1147
1148 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 1148 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1149 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 1149 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1150 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1150 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1178,14 +1178,6 @@ static void hamachi_init_ring(struct net_device *dev)
1178 hmp->cur_rx = hmp->cur_tx = 0; 1178 hmp->cur_rx = hmp->cur_tx = 0;
1179 hmp->dirty_rx = hmp->dirty_tx = 0; 1179 hmp->dirty_rx = hmp->dirty_tx = 0;
1180 1180
1181#if 0
1182 /* This is wrong. I'm not sure what the original plan was, but this
1183 * is wrong. An MTU of 1 gets you a buffer of 1536, while an MTU
1184 * of 1501 gets a buffer of 1533? -KDU
1185 */
1186 hmp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1187#endif
1188 /* My attempt at a reasonable correction */
1189 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 1181 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1190 * card needs room to do 8 byte alignment, +2 so we can reserve 1182 * card needs room to do 8 byte alignment, +2 so we can reserve
1191 * the first 2 bytes, and +16 gets room for the status word from the 1183 * the first 2 bytes, and +16 gets room for the status word from the
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f90515935833..45ae9d1191d7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns
1340 case PARAM_RTS: 1340 case PARAM_RTS:
1341 if ( !(scc->wreg[R5] & RTS) ) 1341 if ( !(scc->wreg[R5] & RTS) )
1342 { 1342 {
1343 if (arg != TX_OFF) 1343 if (arg != TX_OFF) {
1344 scc_key_trx(scc, TX_ON); 1344 scc_key_trx(scc, TX_ON);
1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); 1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1346 }
1346 } else { 1347 } else {
1347 if (arg == TX_OFF) 1348 if (arg == TX_OFF)
1348 { 1349 {
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 484cb2ba717f..7111c65f0b30 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -108,14 +108,14 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
108 if (unlikely(!netif_running(nds[desc->channel]))) 108 if (unlikely(!netif_running(nds[desc->channel])))
109 goto err; 109 goto err;
110 110
111 skb = dev_alloc_skb(desc->pkt_length + 2); 111 skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
112 if (likely(skb != NULL)) { 112 if (likely(skb != NULL)) {
113 skb_reserve(skb, 2); 113 skb_reserve(skb, 2);
114 skb_copy_to_linear_data(skb, buf, desc->pkt_length); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117 117
118 skb->dev->last_rx = jiffies; 118 dev->last_rx = jiffies;
119 119
120 netif_receive_skb(skb); 120 netif_receive_skb(skb);
121 } 121 }
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c91b12ea26ad..93007d38df57 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -49,6 +49,7 @@
49#include <linux/if_ether.h> 49#include <linux/if_ether.h>
50#include <linux/if_vlan.h> 50#include <linux/if_vlan.h>
51#include <linux/inet_lro.h> 51#include <linux/inet_lro.h>
52#include <linux/dca.h>
52#include <linux/ip.h> 53#include <linux/ip.h>
53#include <linux/inet.h> 54#include <linux/inet.h>
54#include <linux/in.h> 55#include <linux/in.h>
@@ -185,11 +186,18 @@ struct myri10ge_slice_state {
185 dma_addr_t fw_stats_bus; 186 dma_addr_t fw_stats_bus;
186 int watchdog_tx_done; 187 int watchdog_tx_done;
187 int watchdog_tx_req; 188 int watchdog_tx_req;
189#ifdef CONFIG_DCA
190 int cached_dca_tag;
191 int cpu;
192 __be32 __iomem *dca_tag;
193#endif
194 char irq_desc[32];
188}; 195};
189 196
190struct myri10ge_priv { 197struct myri10ge_priv {
191 struct myri10ge_slice_state ss; 198 struct myri10ge_slice_state *ss;
192 int tx_boundary; /* boundary transmits cannot cross */ 199 int tx_boundary; /* boundary transmits cannot cross */
200 int num_slices;
193 int running; /* running? */ 201 int running; /* running? */
194 int csum_flag; /* rx_csums? */ 202 int csum_flag; /* rx_csums? */
195 int small_bytes; 203 int small_bytes;
@@ -208,6 +216,11 @@ struct myri10ge_priv {
208 dma_addr_t cmd_bus; 216 dma_addr_t cmd_bus;
209 struct pci_dev *pdev; 217 struct pci_dev *pdev;
210 int msi_enabled; 218 int msi_enabled;
219 int msix_enabled;
220 struct msix_entry *msix_vectors;
221#ifdef CONFIG_DCA
222 int dca_enabled;
223#endif
211 u32 link_state; 224 u32 link_state;
212 unsigned int rdma_tags_available; 225 unsigned int rdma_tags_available;
213 int intr_coal_delay; 226 int intr_coal_delay;
@@ -244,6 +257,8 @@ struct myri10ge_priv {
244 257
245static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat"; 258static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
246static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; 259static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
260static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
261static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
247 262
248static char *myri10ge_fw_name = NULL; 263static char *myri10ge_fw_name = NULL;
249module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 264module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -321,6 +336,18 @@ static int myri10ge_wcfifo = 0;
321module_param(myri10ge_wcfifo, int, S_IRUGO); 336module_param(myri10ge_wcfifo, int, S_IRUGO);
322MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); 337MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
323 338
339static int myri10ge_max_slices = 1;
340module_param(myri10ge_max_slices, int, S_IRUGO);
341MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
342
343static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
344module_param(myri10ge_rss_hash, int, S_IRUGO);
345MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
346
347static int myri10ge_dca = 1;
348module_param(myri10ge_dca, int, S_IRUGO);
349MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
350
324#define MYRI10GE_FW_OFFSET 1024*1024 351#define MYRI10GE_FW_OFFSET 1024*1024
325#define MYRI10GE_HIGHPART_TO_U32(X) \ 352#define MYRI10GE_HIGHPART_TO_U32(X) \
326(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0) 353(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -631,7 +658,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
631 return status; 658 return status;
632} 659}
633 660
634int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 661static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
635{ 662{
636 struct myri10ge_cmd cmd; 663 struct myri10ge_cmd cmd;
637 int status; 664 int status;
@@ -657,7 +684,7 @@ int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
657 return 0; 684 return 0;
658} 685}
659 686
660static int myri10ge_load_firmware(struct myri10ge_priv *mgp) 687static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
661{ 688{
662 char __iomem *submit; 689 char __iomem *submit;
663 __be32 buf[16] __attribute__ ((__aligned__(8))); 690 __be32 buf[16] __attribute__ ((__aligned__(8)));
@@ -667,6 +694,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
667 size = 0; 694 size = 0;
668 status = myri10ge_load_hotplug_firmware(mgp, &size); 695 status = myri10ge_load_hotplug_firmware(mgp, &size);
669 if (status) { 696 if (status) {
697 if (!adopt)
698 return status;
670 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n"); 699 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
671 700
672 /* Do not attempt to adopt firmware if there 701 /* Do not attempt to adopt firmware if there
@@ -859,8 +888,12 @@ abort:
859static int myri10ge_reset(struct myri10ge_priv *mgp) 888static int myri10ge_reset(struct myri10ge_priv *mgp)
860{ 889{
861 struct myri10ge_cmd cmd; 890 struct myri10ge_cmd cmd;
862 int status; 891 struct myri10ge_slice_state *ss;
892 int i, status;
863 size_t bytes; 893 size_t bytes;
894#ifdef CONFIG_DCA
895 unsigned long dca_tag_off;
896#endif
864 897
865 /* try to send a reset command to the card to see if it 898 /* try to send a reset command to the card to see if it
866 * is alive */ 899 * is alive */
@@ -872,20 +905,74 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
872 } 905 }
873 906
874 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST); 907 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
908 /*
909 * Use the non-NDIS mcp_slot (e.g. 4 bytes total, no
910 * Toeplitz hash value returned). Older firmware will
911 * not understand this command, but will use the correctly
912 * sized mcp_slot, so we ignore error returns.
913 */
914 cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
915 (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
875 916
876 /* Now exchange information about interrupts */ 917 /* Now exchange information about interrupts */
877 918
878 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 919 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
879 memset(mgp->ss.rx_done.entry, 0, bytes);
880 cmd.data0 = (u32) bytes; 920 cmd.data0 = (u32) bytes;
881 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 921 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
882 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); 922
883 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); 923 /*
884 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); 924 * Even though we already know how many slices are supported
925 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
926 * has magic side effects, and must be called after a reset.
927 * It must be called prior to calling any RSS related cmds,
928 * including assigning an interrupt queue for anything but
929 * slice 0. It must also be called *after*
930 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
931 * the firmware to compute offsets.
932 */
933
934 if (mgp->num_slices > 1) {
935
936 /* ask the firmware for the maximum number of slices it supports */
937 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
938 &cmd, 0);
939 if (status != 0) {
940 dev_err(&mgp->pdev->dev,
941 "failed to get number of slices\n");
942 }
943
944 /*
945 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
946 * to setting up the interrupt queue DMA
947 */
948
949 cmd.data0 = mgp->num_slices;
950 cmd.data1 = 1; /* use MSI-X */
951 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
952 &cmd, 0);
953 if (status != 0) {
954 dev_err(&mgp->pdev->dev,
955 "failed to set number of slices\n");
956
957 return status;
958 }
959 }
960 for (i = 0; i < mgp->num_slices; i++) {
961 ss = &mgp->ss[i];
962 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
963 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
964 cmd.data2 = i;
965 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
966 &cmd, 0);
967 }
885 968
886 status |= 969 status |=
887 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); 970 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
888 mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); 971 for (i = 0; i < mgp->num_slices; i++) {
972 ss = &mgp->ss[i];
973 ss->irq_claim =
974 (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
975 }
889 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 976 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
890 &cmd, 0); 977 &cmd, 0);
891 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); 978 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -899,24 +986,116 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
899 } 986 }
900 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
901 988
902 memset(mgp->ss.rx_done.entry, 0, bytes); 989#ifdef CONFIG_DCA
990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
991 dca_tag_off = cmd.data0;
992 for (i = 0; i < mgp->num_slices; i++) {
993 ss = &mgp->ss[i];
994 if (status == 0) {
995 ss->dca_tag = (__iomem __be32 *)
996 (mgp->sram + dca_tag_off + 4 * i);
997 } else {
998 ss->dca_tag = NULL;
999 }
1000 }
1001#endif /* CONFIG_DCA */
903 1002
904 /* reset mcp/driver shared state back to 0 */ 1003 /* reset mcp/driver shared state back to 0 */
905 mgp->ss.tx.req = 0; 1004
906 mgp->ss.tx.done = 0;
907 mgp->ss.tx.pkt_start = 0;
908 mgp->ss.tx.pkt_done = 0;
909 mgp->ss.rx_big.cnt = 0;
910 mgp->ss.rx_small.cnt = 0;
911 mgp->ss.rx_done.idx = 0;
912 mgp->ss.rx_done.cnt = 0;
913 mgp->link_changes = 0; 1005 mgp->link_changes = 0;
1006 for (i = 0; i < mgp->num_slices; i++) {
1007 ss = &mgp->ss[i];
1008
1009 memset(ss->rx_done.entry, 0, bytes);
1010 ss->tx.req = 0;
1011 ss->tx.done = 0;
1012 ss->tx.pkt_start = 0;
1013 ss->tx.pkt_done = 0;
1014 ss->rx_big.cnt = 0;
1015 ss->rx_small.cnt = 0;
1016 ss->rx_done.idx = 0;
1017 ss->rx_done.cnt = 0;
1018 ss->tx.wake_queue = 0;
1019 ss->tx.stop_queue = 0;
1020 }
1021
914 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 1022 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
915 myri10ge_change_pause(mgp, mgp->pause); 1023 myri10ge_change_pause(mgp, mgp->pause);
916 myri10ge_set_multicast_list(mgp->dev); 1024 myri10ge_set_multicast_list(mgp->dev);
917 return status; 1025 return status;
918} 1026}
919 1027
1028#ifdef CONFIG_DCA
1029static void
1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1031{
1032 ss->cpu = cpu;
1033 ss->cached_dca_tag = tag;
1034 put_be32(htonl(tag), ss->dca_tag);
1035}
1036
1037static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1038{
1039 int cpu = get_cpu();
1040 int tag;
1041
1042 if (cpu != ss->cpu) {
1043 tag = dca_get_tag(cpu);
1044 if (ss->cached_dca_tag != tag)
1045 myri10ge_write_dca(ss, cpu, tag);
1046 }
1047 put_cpu();
1048}
1049
1050static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1051{
1052 int err, i;
1053 struct pci_dev *pdev = mgp->pdev;
1054
1055 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1056 return;
1057 if (!myri10ge_dca) {
1058 dev_err(&pdev->dev, "dca disabled by administrator\n");
1059 return;
1060 }
1061 err = dca_add_requester(&pdev->dev);
1062 if (err) {
1063 dev_err(&pdev->dev,
1064 "dca_add_requester() failed, err=%d\n", err);
1065 return;
1066 }
1067 mgp->dca_enabled = 1;
1068 for (i = 0; i < mgp->num_slices; i++)
1069 myri10ge_write_dca(&mgp->ss[i], -1, 0);
1070}
1071
1072static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
1073{
1074 struct pci_dev *pdev = mgp->pdev;
1075 int err;
1076
1077 if (!mgp->dca_enabled)
1078 return;
1079 mgp->dca_enabled = 0;
1080 err = dca_remove_requester(&pdev->dev);
1081}
1082
1083static int myri10ge_notify_dca_device(struct device *dev, void *data)
1084{
1085 struct myri10ge_priv *mgp;
1086 unsigned long event;
1087
1088 mgp = dev_get_drvdata(dev);
1089 event = *(unsigned long *)data;
1090
1091 if (event == DCA_PROVIDER_ADD)
1092 myri10ge_setup_dca(mgp);
1093 else if (event == DCA_PROVIDER_REMOVE)
1094 myri10ge_teardown_dca(mgp);
1095 return 0;
1096}
1097#endif /* CONFIG_DCA */
1098
920static inline void 1099static inline void
921myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst, 1100myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
922 struct mcp_kreq_ether_recv *src) 1101 struct mcp_kreq_ether_recv *src)
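myri10ge_update_dca() re-tags the NIC only when the NAPI context has migrated to a different CPU and the tag actually changed, so the hot path is a pair of compares. A condensed model (demo types; dca_get_tag() is the real kernel API):

#include <linux/dca.h>
#include <linux/smp.h>

struct demo_slice {
	int cpu;
	int cached_dca_tag;
};

/* Called from the poll loop: write a new tag only when this slice has
 * moved to another CPU and the tag differs from the cached one. */
static void demo_update_dca(struct demo_slice *ss)
{
	int cpu = get_cpu();

	if (cpu != ss->cpu) {
		int tag = dca_get_tag(cpu);

		if (tag != ss->cached_dca_tag) {
			ss->cached_dca_tag = tag;
			/* put_be32(htonl(tag), ss->dca_tag) in the driver */
		}
		ss->cpu = cpu;
	}
	put_cpu();
}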
@@ -1095,9 +1274,10 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1095 rx_frags[0].size -= MXGEFW_PAD; 1274 rx_frags[0].size -= MXGEFW_PAD;
1096 len -= MXGEFW_PAD; 1275 len -= MXGEFW_PAD;
1097 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, 1276 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
1098 len, len,
1099 /* opaque, will come back in get_frag_header */ 1277 /* opaque, will come back in get_frag_header */
1278 len, len,
1100 (void *)(__force unsigned long)csum, csum); 1279 (void *)(__force unsigned long)csum, csum);
1280
1101 return 1; 1281 return 1;
1102 } 1282 }
1103 1283
@@ -1236,7 +1416,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1236 1416
1237static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1417static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1238{ 1418{
1239 struct mcp_irq_data *stats = mgp->ss.fw_stats; 1419 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1240 1420
1241 if (unlikely(stats->stats_updated)) { 1421 if (unlikely(stats->stats_updated)) {
1242 unsigned link_up = ntohl(stats->link_up); 1422 unsigned link_up = ntohl(stats->link_up);
@@ -1283,6 +1463,11 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1283 struct net_device *netdev = ss->mgp->dev; 1463 struct net_device *netdev = ss->mgp->dev;
1284 int work_done; 1464 int work_done;
1285 1465
1466#ifdef CONFIG_DCA
1467 if (ss->mgp->dca_enabled)
1468 myri10ge_update_dca(ss);
1469#endif
1470
1286 /* process as many rx events as NAPI will allow */ 1471 /* process as many rx events as NAPI will allow */
1287 work_done = myri10ge_clean_rx_done(ss, budget); 1472 work_done = myri10ge_clean_rx_done(ss, budget);
1288 1473
@@ -1302,6 +1487,13 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1302 u32 send_done_count; 1487 u32 send_done_count;
1303 int i; 1488 int i;
1304 1489
1490 /* an interrupt on a non-zero slice is implicitly valid
1491 * since MSI-X irqs are not shared */
1492 if (ss != mgp->ss) {
1493 netif_rx_schedule(ss->dev, &ss->napi);
1494 return (IRQ_HANDLED);
1495 }
1496
1305 /* make sure it is our IRQ, and that the DMA has finished */ 1497 /* make sure it is our IRQ, and that the DMA has finished */
1306 if (unlikely(!stats->valid)) 1498 if (unlikely(!stats->valid))
1307 return (IRQ_NONE); 1499 return (IRQ_NONE);
@@ -1311,7 +1503,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1311 if (stats->valid & 1) 1503 if (stats->valid & 1)
1312 netif_rx_schedule(ss->dev, &ss->napi); 1504 netif_rx_schedule(ss->dev, &ss->napi);
1313 1505
1314 if (!mgp->msi_enabled) { 1506 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1315 put_be32(0, mgp->irq_deassert); 1507 put_be32(0, mgp->irq_deassert);
1316 if (!myri10ge_deassert_wait) 1508 if (!myri10ge_deassert_wait)
1317 stats->valid = 0; 1509 stats->valid = 0;
@@ -1446,10 +1638,10 @@ myri10ge_get_ringparam(struct net_device *netdev,
1446{ 1638{
1447 struct myri10ge_priv *mgp = netdev_priv(netdev); 1639 struct myri10ge_priv *mgp = netdev_priv(netdev);
1448 1640
1449 ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; 1641 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1450 ring->rx_max_pending = mgp->ss.rx_big.mask + 1; 1642 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1451 ring->rx_jumbo_max_pending = 0; 1643 ring->rx_jumbo_max_pending = 0;
1452 ring->tx_max_pending = mgp->ss.rx_small.mask + 1; 1644 ring->tx_max_pending = mgp->ss[0].rx_small.mask + 1;
1453 ring->rx_mini_pending = ring->rx_mini_max_pending; 1645 ring->rx_mini_pending = ring->rx_mini_max_pending;
1454 ring->rx_pending = ring->rx_max_pending; 1646 ring->rx_pending = ring->rx_max_pending;
1455 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; 1647 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1497,9 +1689,12 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1497 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", 1689 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1498 "tx_heartbeat_errors", "tx_window_errors", 1690 "tx_heartbeat_errors", "tx_window_errors",
1499 /* device-specific stats */ 1691 /* device-specific stats */
1500 "tx_boundary", "WC", "irq", "MSI", 1692 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1501 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1693 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1502 "serial_number", "watchdog_resets", 1694 "serial_number", "watchdog_resets",
1695#ifdef CONFIG_DCA
1696 "dca_capable", "dca_enabled",
1697#endif
1503 "link_changes", "link_up", "dropped_link_overflow", 1698 "link_changes", "link_up", "dropped_link_overflow",
1504 "dropped_link_error_or_filtered", 1699 "dropped_link_error_or_filtered",
1505 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1700 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
@@ -1524,23 +1719,31 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1524static void 1719static void
1525myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1720myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
1526{ 1721{
1722 struct myri10ge_priv *mgp = netdev_priv(netdev);
1723 int i;
1724
1527 switch (stringset) { 1725 switch (stringset) {
1528 case ETH_SS_STATS: 1726 case ETH_SS_STATS:
1529 memcpy(data, *myri10ge_gstrings_main_stats, 1727 memcpy(data, *myri10ge_gstrings_main_stats,
1530 sizeof(myri10ge_gstrings_main_stats)); 1728 sizeof(myri10ge_gstrings_main_stats));
1531 data += sizeof(myri10ge_gstrings_main_stats); 1729 data += sizeof(myri10ge_gstrings_main_stats);
1532 memcpy(data, *myri10ge_gstrings_slice_stats, 1730 for (i = 0; i < mgp->num_slices; i++) {
1533 sizeof(myri10ge_gstrings_slice_stats)); 1731 memcpy(data, *myri10ge_gstrings_slice_stats,
1534 data += sizeof(myri10ge_gstrings_slice_stats); 1732 sizeof(myri10ge_gstrings_slice_stats));
1733 data += sizeof(myri10ge_gstrings_slice_stats);
1734 }
1535 break; 1735 break;
1536 } 1736 }
1537} 1737}
1538 1738
1539static int myri10ge_get_sset_count(struct net_device *netdev, int sset) 1739static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
1540{ 1740{
1741 struct myri10ge_priv *mgp = netdev_priv(netdev);
1742
1541 switch (sset) { 1743 switch (sset) {
1542 case ETH_SS_STATS: 1744 case ETH_SS_STATS:
1543 return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; 1745 return MYRI10GE_MAIN_STATS_LEN +
1746 mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
1544 default: 1747 default:
1545 return -EOPNOTSUPP; 1748 return -EOPNOTSUPP;
1546 } 1749 }
@@ -1552,6 +1755,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1552{ 1755{
1553 struct myri10ge_priv *mgp = netdev_priv(netdev); 1756 struct myri10ge_priv *mgp = netdev_priv(netdev);
1554 struct myri10ge_slice_state *ss; 1757 struct myri10ge_slice_state *ss;
1758 int slice;
1555 int i; 1759 int i;
1556 1760
1557 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1761 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
@@ -1561,15 +1765,20 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1561 data[i++] = (unsigned int)mgp->wc_enabled; 1765 data[i++] = (unsigned int)mgp->wc_enabled;
1562 data[i++] = (unsigned int)mgp->pdev->irq; 1766 data[i++] = (unsigned int)mgp->pdev->irq;
1563 data[i++] = (unsigned int)mgp->msi_enabled; 1767 data[i++] = (unsigned int)mgp->msi_enabled;
1768 data[i++] = (unsigned int)mgp->msix_enabled;
1564 data[i++] = (unsigned int)mgp->read_dma; 1769 data[i++] = (unsigned int)mgp->read_dma;
1565 data[i++] = (unsigned int)mgp->write_dma; 1770 data[i++] = (unsigned int)mgp->write_dma;
1566 data[i++] = (unsigned int)mgp->read_write_dma; 1771 data[i++] = (unsigned int)mgp->read_write_dma;
1567 data[i++] = (unsigned int)mgp->serial_number; 1772 data[i++] = (unsigned int)mgp->serial_number;
1568 data[i++] = (unsigned int)mgp->watchdog_resets; 1773 data[i++] = (unsigned int)mgp->watchdog_resets;
1774#ifdef CONFIG_DCA
1775 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1776 data[i++] = (unsigned int)(mgp->dca_enabled);
1777#endif
1569 data[i++] = (unsigned int)mgp->link_changes; 1778 data[i++] = (unsigned int)mgp->link_changes;
1570 1779
1571 /* firmware stats are useful only in the first slice */ 1780 /* firmware stats are useful only in the first slice */
1572 ss = &mgp->ss; 1781 ss = &mgp->ss[0];
1573 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); 1782 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1574 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); 1783 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1575 data[i++] = 1784 data[i++] =
@@ -1585,24 +1794,27 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1585 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); 1794 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1586 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); 1795 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1587 1796
1588 data[i++] = 0; 1797 for (slice = 0; slice < mgp->num_slices; slice++) {
1589 data[i++] = (unsigned int)ss->tx.pkt_start; 1798 ss = &mgp->ss[slice];
1590 data[i++] = (unsigned int)ss->tx.pkt_done; 1799 data[i++] = slice;
1591 data[i++] = (unsigned int)ss->tx.req; 1800 data[i++] = (unsigned int)ss->tx.pkt_start;
1592 data[i++] = (unsigned int)ss->tx.done; 1801 data[i++] = (unsigned int)ss->tx.pkt_done;
1593 data[i++] = (unsigned int)ss->rx_small.cnt; 1802 data[i++] = (unsigned int)ss->tx.req;
1594 data[i++] = (unsigned int)ss->rx_big.cnt; 1803 data[i++] = (unsigned int)ss->tx.done;
1595 data[i++] = (unsigned int)ss->tx.wake_queue; 1804 data[i++] = (unsigned int)ss->rx_small.cnt;
1596 data[i++] = (unsigned int)ss->tx.stop_queue; 1805 data[i++] = (unsigned int)ss->rx_big.cnt;
1597 data[i++] = (unsigned int)ss->tx.linearized; 1806 data[i++] = (unsigned int)ss->tx.wake_queue;
1598 data[i++] = ss->rx_done.lro_mgr.stats.aggregated; 1807 data[i++] = (unsigned int)ss->tx.stop_queue;
1599 data[i++] = ss->rx_done.lro_mgr.stats.flushed; 1808 data[i++] = (unsigned int)ss->tx.linearized;
1600 if (ss->rx_done.lro_mgr.stats.flushed) 1809 data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
1601 data[i++] = ss->rx_done.lro_mgr.stats.aggregated / 1810 data[i++] = ss->rx_done.lro_mgr.stats.flushed;
1602 ss->rx_done.lro_mgr.stats.flushed; 1811 if (ss->rx_done.lro_mgr.stats.flushed)
1603 else 1812 data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
1604 data[i++] = 0; 1813 ss->rx_done.lro_mgr.stats.flushed;
1605 data[i++] = ss->rx_done.lro_mgr.stats.no_desc; 1814 else
1815 data[i++] = 0;
1816 data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
1817 }
1606} 1818}
1607 1819
1608static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) 1820static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1645,12 +1857,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1645 struct net_device *dev = mgp->dev; 1857 struct net_device *dev = mgp->dev;
1646 int tx_ring_size, rx_ring_size; 1858 int tx_ring_size, rx_ring_size;
1647 int tx_ring_entries, rx_ring_entries; 1859 int tx_ring_entries, rx_ring_entries;
1648 int i, status; 1860 int i, slice, status;
1649 size_t bytes; 1861 size_t bytes;
1650 1862
1651 /* get ring sizes */ 1863 /* get ring sizes */
1864 slice = ss - mgp->ss;
1865 cmd.data0 = slice;
1652 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1866 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
1653 tx_ring_size = cmd.data0; 1867 tx_ring_size = cmd.data0;
1868 cmd.data0 = slice;
1654 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 1869 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
1655 if (status != 0) 1870 if (status != 0)
1656 return status; 1871 return status;
@@ -1715,15 +1930,17 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1715 mgp->small_bytes + MXGEFW_PAD, 0); 1930 mgp->small_bytes + MXGEFW_PAD, 0);
1716 1931
1717 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 1932 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
1718 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1933 printk(KERN_ERR
1719 dev->name, ss->rx_small.fill_cnt); 1934 "myri10ge: %s:slice-%d: alloced only %d small bufs\n",
1935 dev->name, slice, ss->rx_small.fill_cnt);
1720 goto abort_with_rx_small_ring; 1936 goto abort_with_rx_small_ring;
1721 } 1937 }
1722 1938
1723 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 1939 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1724 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 1940 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
1725 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1941 printk(KERN_ERR
1726 dev->name, ss->rx_big.fill_cnt); 1942 "myri10ge: %s:slice-%d: alloced only %d big bufs\n",
1943 dev->name, slice, ss->rx_big.fill_cnt);
1727 goto abort_with_rx_big_ring; 1944 goto abort_with_rx_big_ring;
1728 } 1945 }
1729 1946
@@ -1775,6 +1992,10 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
1775 struct myri10ge_tx_buf *tx; 1992 struct myri10ge_tx_buf *tx;
1776 int i, len, idx; 1993 int i, len, idx;
1777 1994
1995 /* If not allocated, skip it */
1996 if (ss->tx.req_list == NULL)
1997 return;
1998
1778 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1999 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
1779 idx = i & ss->rx_big.mask; 2000 idx = i & ss->rx_big.mask;
1780 if (i == ss->rx_big.fill_cnt - 1) 2001 if (i == ss->rx_big.fill_cnt - 1)
@@ -1837,25 +2058,67 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
1837static int myri10ge_request_irq(struct myri10ge_priv *mgp) 2058static int myri10ge_request_irq(struct myri10ge_priv *mgp)
1838{ 2059{
1839 struct pci_dev *pdev = mgp->pdev; 2060 struct pci_dev *pdev = mgp->pdev;
2061 struct myri10ge_slice_state *ss;
2062 struct net_device *netdev = mgp->dev;
2063 int i;
1840 int status; 2064 int status;
1841 2065
2066 mgp->msi_enabled = 0;
2067 mgp->msix_enabled = 0;
2068 status = 0;
1842 if (myri10ge_msi) { 2069 if (myri10ge_msi) {
1843 status = pci_enable_msi(pdev); 2070 if (mgp->num_slices > 1) {
1844 if (status != 0) 2071 status =
1845 dev_err(&pdev->dev, 2072 pci_enable_msix(pdev, mgp->msix_vectors,
1846 "Error %d setting up MSI; falling back to xPIC\n", 2073 mgp->num_slices);
1847 status); 2074 if (status == 0) {
1848 else 2075 mgp->msix_enabled = 1;
1849 mgp->msi_enabled = 1; 2076 } else {
1850 } else { 2077 dev_err(&pdev->dev,
1851 mgp->msi_enabled = 0; 2078 "Error %d setting up MSI-X\n", status);
2079 return status;
2080 }
2081 }
2082 if (mgp->msix_enabled == 0) {
2083 status = pci_enable_msi(pdev);
2084 if (status != 0) {
2085 dev_err(&pdev->dev,
2086 "Error %d setting up MSI; falling back to xPIC\n",
2087 status);
2088 } else {
2089 mgp->msi_enabled = 1;
2090 }
2091 }
1852 } 2092 }
1853 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, 2093 if (mgp->msix_enabled) {
1854 mgp->dev->name, mgp); 2094 for (i = 0; i < mgp->num_slices; i++) {
1855 if (status != 0) { 2095 ss = &mgp->ss[i];
1856 dev_err(&pdev->dev, "failed to allocate IRQ\n"); 2096 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
1857 if (mgp->msi_enabled) 2097 "%s:slice-%d", netdev->name, i);
1858 pci_disable_msi(pdev); 2098 status = request_irq(mgp->msix_vectors[i].vector,
2099 myri10ge_intr, 0, ss->irq_desc,
2100 ss);
2101 if (status != 0) {
2102 dev_err(&pdev->dev,
2103 "slice %d failed to allocate IRQ\n", i);
2104 i--;
2105 while (i >= 0) {
2106 free_irq(mgp->msix_vectors[i].vector,
2107 &mgp->ss[i]);
2108 i--;
2109 }
2110 pci_disable_msix(pdev);
2111 return status;
2112 }
2113 }
2114 } else {
2115 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
2116 mgp->dev->name, &mgp->ss[0]);
2117 if (status != 0) {
2118 dev_err(&pdev->dev, "failed to allocate IRQ\n");
2119 if (mgp->msi_enabled)
2120 pci_disable_msi(pdev);
2121 }
1859 } 2122 }
1860 return status; 2123 return status;
1861} 2124}
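The per-slice IRQ wiring follows the standard MSI-X pattern: enable the vector table, request one named handler per slice, and on failure free only the vectors already requested before disabling MSI-X. A condensed version, with demo types and the pci_*/request_irq() calls of that era (pci_enable_msix() is treated here as all-or-nothing):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>

static irqreturn_t demo_intr(int irq, void *arg)
{
	return IRQ_HANDLED;
}

static int demo_request_msix(struct pci_dev *pdev, struct msix_entry *vec,
			     void **slices, char names[][32], int n)
{
	int i, err;

	err = pci_enable_msix(pdev, vec, n);	/* <0 error, >0 fewer vectors */
	if (err)
		return err;

	for (i = 0; i < n; i++) {
		snprintf(names[i], 32, "%s:slice-%d", pci_name(pdev), i);
		err = request_irq(vec[i].vector, demo_intr, 0, names[i],
				  slices[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* free only the vectors we got */
		free_irq(vec[i].vector, slices[i]);
	pci_disable_msix(pdev);
	return err;
}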
@@ -1863,10 +2126,18 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
1863static void myri10ge_free_irq(struct myri10ge_priv *mgp) 2126static void myri10ge_free_irq(struct myri10ge_priv *mgp)
1864{ 2127{
1865 struct pci_dev *pdev = mgp->pdev; 2128 struct pci_dev *pdev = mgp->pdev;
2129 int i;
1866 2130
1867 free_irq(pdev->irq, mgp); 2131 if (mgp->msix_enabled) {
2132 for (i = 0; i < mgp->num_slices; i++)
2133 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2134 } else {
2135 free_irq(pdev->irq, &mgp->ss[0]);
2136 }
1868 if (mgp->msi_enabled) 2137 if (mgp->msi_enabled)
1869 pci_disable_msi(pdev); 2138 pci_disable_msi(pdev);
2139 if (mgp->msix_enabled)
2140 pci_disable_msix(pdev);
1870} 2141}
1871 2142
1872static int 2143static int
@@ -1928,12 +2199,82 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
1928 return 0; 2199 return 0;
1929} 2200}
1930 2201
2202static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2203{
2204 struct myri10ge_cmd cmd;
2205 struct myri10ge_slice_state *ss;
2206 int status;
2207
2208 ss = &mgp->ss[slice];
2209 cmd.data0 = 0; /* single slice for now */
2210 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
2211 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2212 (mgp->sram + cmd.data0);
2213
2214 cmd.data0 = slice;
2215 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
2216 &cmd, 0);
2217 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2218 (mgp->sram + cmd.data0);
2219
2220 cmd.data0 = slice;
2221 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
2222 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2223 (mgp->sram + cmd.data0);
2224
2225 if (myri10ge_wcfifo && mgp->wc_enabled) {
2226 ss->tx.wc_fifo = (u8 __iomem *)
2227 mgp->sram + MXGEFW_ETH_SEND_4 + 64 * slice;
2228 ss->rx_small.wc_fifo = (u8 __iomem *)
2229 mgp->sram + MXGEFW_ETH_RECV_SMALL + 64 * slice;
2230 ss->rx_big.wc_fifo = (u8 __iomem *)
2231 mgp->sram + MXGEFW_ETH_RECV_BIG + 64 * slice;
2232 } else {
2233 ss->tx.wc_fifo = NULL;
2234 ss->rx_small.wc_fifo = NULL;
2235 ss->rx_big.wc_fifo = NULL;
2236 }
2237 return status;
2238
2239}
2240
2241static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
2242{
2243 struct myri10ge_cmd cmd;
2244 struct myri10ge_slice_state *ss;
2245 int status;
2246
2247 ss = &mgp->ss[slice];
2248 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2249 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2250 cmd.data2 = sizeof(struct mcp_irq_data);
2251 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
2252 if (status == -ENOSYS) {
2253 dma_addr_t bus = ss->fw_stats_bus;
2254 if (slice != 0)
2255 return -EINVAL;
2256 bus += offsetof(struct mcp_irq_data, send_done_count);
2257 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
2258 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
2259 status = myri10ge_send_cmd(mgp,
2260 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2261 &cmd, 0);
2262 /* Firmware cannot support multicast without STATS_DMA_V2 */
2263 mgp->fw_multicast_support = 0;
2264 } else {
2265 mgp->fw_multicast_support = 1;
2266 }
2267 return 0;
2268}
2269
1931static int myri10ge_open(struct net_device *dev) 2270static int myri10ge_open(struct net_device *dev)
1932{ 2271{
2272 struct myri10ge_slice_state *ss;
1933 struct myri10ge_priv *mgp = netdev_priv(dev); 2273 struct myri10ge_priv *mgp = netdev_priv(dev);
1934 struct myri10ge_cmd cmd; 2274 struct myri10ge_cmd cmd;
2275 int i, status, big_pow2, slice;
2276 u8 *itable;
1935 struct net_lro_mgr *lro_mgr; 2277 struct net_lro_mgr *lro_mgr;
1936 int status, big_pow2;
1937 2278
1938 if (mgp->running != MYRI10GE_ETH_STOPPED) 2279 if (mgp->running != MYRI10GE_ETH_STOPPED)
1939 return -EBUSY; 2280 return -EBUSY;
@@ -1945,6 +2286,48 @@ static int myri10ge_open(struct net_device *dev)
1945 goto abort_with_nothing; 2286 goto abort_with_nothing;
1946 } 2287 }
1947 2288
2289 if (mgp->num_slices > 1) {
2290 cmd.data0 = mgp->num_slices;
2291 cmd.data1 = 1; /* use MSI-X */
2292 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2293 &cmd, 0);
2294 if (status != 0) {
2295 printk(KERN_ERR
2296 "myri10ge: %s: failed to set number of slices\n",
2297 dev->name);
2298 goto abort_with_nothing;
2299 }
2300 /* setup the indirection table */
2301 cmd.data0 = mgp->num_slices;
2302 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
2303 &cmd, 0);
2304
2305 status |= myri10ge_send_cmd(mgp,
2306 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2307 &cmd, 0);
2308 if (status != 0) {
2309 printk(KERN_ERR
2310 "myri10ge: %s: failed to setup rss tables\n",
2311 dev->name);
2312 }
2313
2314 /* just enable an identity mapping */
2315 itable = mgp->sram + cmd.data0;
2316 for (i = 0; i < mgp->num_slices; i++)
2317 __raw_writeb(i, &itable[i]);
2318
2319 cmd.data0 = 1;
2320 cmd.data1 = myri10ge_rss_hash;
2321 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2322 &cmd, 0);
2323 if (status != 0) {
2324 printk(KERN_ERR
2325 "myri10ge: %s: failed to enable slices\n",
2326 dev->name);
2327 goto abort_with_nothing;
2328 }
2329 }
2330
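The itable written above is the RSS indirection table: the NIC hashes each incoming flow and uses the low bits of the hash to index the table, which names the receiving slice, so the identity mapping simply feeds bucket i to slice i. A host-side model of that selection, purely illustrative since the real lookup happens in firmware:

static u8 example_rss_pick_slice(const u8 *itable, u32 hash,
				 unsigned int table_size)
{
	/* mask/modulo the hash into the table, read the slice id */
	return itable[hash % table_size];
}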
1948 status = myri10ge_request_irq(mgp); 2331 status = myri10ge_request_irq(mgp);
1949 if (status != 0) 2332 if (status != 0)
1950 goto abort_with_nothing; 2333 goto abort_with_nothing;
@@ -1968,41 +2351,6 @@ static int myri10ge_open(struct net_device *dev)
1968 if (myri10ge_small_bytes > 0) 2351 if (myri10ge_small_bytes > 0)
1969 mgp->small_bytes = myri10ge_small_bytes; 2352 mgp->small_bytes = myri10ge_small_bytes;
1970 2353
1971 /* get the lanai pointers to the send and receive rings */
1972
1973 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
1974 mgp->ss.tx.lanai =
1975 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
1976
1977 status |=
1978 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
1979 mgp->ss.rx_small.lanai =
1980 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1981
1982 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
1983 mgp->ss.rx_big.lanai =
1984 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1985
1986 if (status != 0) {
1987 printk(KERN_ERR
1988 "myri10ge: %s: failed to get ring sizes or locations\n",
1989 dev->name);
1990 mgp->running = MYRI10GE_ETH_STOPPED;
1991 goto abort_with_irq;
1992 }
1993
1994 if (myri10ge_wcfifo && mgp->wc_enabled) {
1995 mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
1996 mgp->ss.rx_small.wc_fifo =
1997 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
1998 mgp->ss.rx_big.wc_fifo =
1999 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
2000 } else {
2001 mgp->ss.tx.wc_fifo = NULL;
2002 mgp->ss.rx_small.wc_fifo = NULL;
2003 mgp->ss.rx_big.wc_fifo = NULL;
2004 }
2005
2006 /* Firmware needs the big buff size as a power of 2. Lie and 2354 /* Firmware needs the big buff size as a power of 2. Lie and
2007 * tell him the buffer is larger, because we only use 1 2355 * tell him the buffer is larger, because we only use 1
2008 * buffer/pkt, and the mtu will prevent overruns. 2356 * buffer/pkt, and the mtu will prevent overruns.
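A sketch of the rounding that comment describes, assuming the roundup_pow_of_two() helper from <linux/log2.h>; the driver's actual computation sits just past this hunk and may iterate instead:

static unsigned int example_big_pow2(unsigned int mtu)
{
	unsigned int bytes = mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;

	/* firmware wants a power of two; the true byte count is
	 * kept separately for host-side accounting */
	return is_power_of_2(bytes) ? bytes : roundup_pow_of_two(bytes);
}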
@@ -2017,9 +2365,44 @@ static int myri10ge_open(struct net_device *dev)
2017 mgp->big_bytes = big_pow2; 2365 mgp->big_bytes = big_pow2;
2018 } 2366 }
2019 2367
2020 status = myri10ge_allocate_rings(&mgp->ss); 2368 /* setup the per-slice data structures */
2021 if (status != 0) 2369 for (slice = 0; slice < mgp->num_slices; slice++) {
2022 goto abort_with_irq; 2370 ss = &mgp->ss[slice];
2371
2372 status = myri10ge_get_txrx(mgp, slice);
2373 if (status != 0) {
2374 printk(KERN_ERR
2375 "myri10ge: %s: failed to get ring sizes or locations\n",
2376 dev->name);
2377 goto abort_with_rings;
2378 }
2379 status = myri10ge_allocate_rings(ss);
2380 if (status != 0)
2381 goto abort_with_rings;
2382 if (slice == 0)
2383 status = myri10ge_set_stats(mgp, slice);
2384 if (status) {
2385 printk(KERN_ERR
2386 "myri10ge: %s: Couldn't set stats DMA\n",
2387 dev->name);
2388 goto abort_with_rings;
2389 }
2390
2391 lro_mgr = &ss->rx_done.lro_mgr;
2392 lro_mgr->dev = dev;
2393 lro_mgr->features = LRO_F_NAPI;
2394 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2395 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2396 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2397 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2398 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2399 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2400 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2401 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2402
2403 /* must happen prior to any irq */
 2404 napi_enable(&ss->napi);
2405 }
2023 2406
2024 /* now give firmware buffers sizes, and MTU */ 2407 /* now give firmware buffers sizes, and MTU */
2025 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; 2408 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2036,25 +2419,15 @@ static int myri10ge_open(struct net_device *dev)
2036 goto abort_with_rings; 2419 goto abort_with_rings;
2037 } 2420 }
2038 2421
2039 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); 2422 /*
2040 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); 2423 * Set Linux style TSO mode; this is needed only on newer
2041 cmd.data2 = sizeof(struct mcp_irq_data); 2424 * firmware versions. Older versions default to Linux
2042 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2425 * style TSO
2043 if (status == -ENOSYS) { 2426 */
2044 dma_addr_t bus = mgp->ss.fw_stats_bus; 2427 cmd.data0 = 0;
2045 bus += offsetof(struct mcp_irq_data, send_done_count); 2428 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2046 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2429 if (status && status != -ENOSYS) {
2047 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); 2430 printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n",
2048 status = myri10ge_send_cmd(mgp,
2049 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2050 &cmd, 0);
2051 /* Firmware cannot support multicast without STATS_DMA_V2 */
2052 mgp->fw_multicast_support = 0;
2053 } else {
2054 mgp->fw_multicast_support = 1;
2055 }
2056 if (status) {
2057 printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
2058 dev->name); 2431 dev->name);
2059 goto abort_with_rings; 2432 goto abort_with_rings;
2060 } 2433 }
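The TSO-mode command above tolerates -ENOSYS because older firmware predates the command but already defaults to the desired behavior. The general shape of such an optional-command helper, as a hypothetical sketch:

/* send a command that older firmware may not implement; treat
 * "unimplemented" as success, anything else as a real error */
static int example_send_optional_cmd(struct myri10ge_priv *mgp,
				     int cmd_id, struct myri10ge_cmd *cmd)
{
	int status = myri10ge_send_cmd(mgp, cmd_id, cmd, 0);

	return (status == -ENOSYS) ? 0 : status;
}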
@@ -2062,21 +2435,6 @@ static int myri10ge_open(struct net_device *dev)
2062 mgp->link_state = ~0U; 2435 mgp->link_state = ~0U;
2063 mgp->rdma_tags_available = 15; 2436 mgp->rdma_tags_available = 15;
2064 2437
2065 lro_mgr = &mgp->ss.rx_done.lro_mgr;
2066 lro_mgr->dev = dev;
2067 lro_mgr->features = LRO_F_NAPI;
2068 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2069 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2070 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2071 lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
2072 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2073 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2074 lro_mgr->frag_align_pad = 2;
2075 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2076 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2077
2078 napi_enable(&mgp->ss.napi); /* must happen prior to any irq */
2079
2080 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2438 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2081 if (status) { 2439 if (status) {
2082 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n", 2440 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
@@ -2084,8 +2442,6 @@ static int myri10ge_open(struct net_device *dev)
2084 goto abort_with_rings; 2442 goto abort_with_rings;
2085 } 2443 }
2086 2444
2087 mgp->ss.tx.wake_queue = 0;
2088 mgp->ss.tx.stop_queue = 0;
2089 mgp->running = MYRI10GE_ETH_RUNNING; 2445 mgp->running = MYRI10GE_ETH_RUNNING;
2090 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2446 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2091 add_timer(&mgp->watchdog_timer); 2447 add_timer(&mgp->watchdog_timer);
@@ -2093,9 +2449,9 @@ static int myri10ge_open(struct net_device *dev)
2093 return 0; 2449 return 0;
2094 2450
2095abort_with_rings: 2451abort_with_rings:
2096 myri10ge_free_rings(&mgp->ss); 2452 for (i = 0; i < mgp->num_slices; i++)
2453 myri10ge_free_rings(&mgp->ss[i]);
2097 2454
2098abort_with_irq:
2099 myri10ge_free_irq(mgp); 2455 myri10ge_free_irq(mgp);
2100 2456
2101abort_with_nothing: 2457abort_with_nothing:
@@ -2108,16 +2464,19 @@ static int myri10ge_close(struct net_device *dev)
2108 struct myri10ge_priv *mgp = netdev_priv(dev); 2464 struct myri10ge_priv *mgp = netdev_priv(dev);
2109 struct myri10ge_cmd cmd; 2465 struct myri10ge_cmd cmd;
2110 int status, old_down_cnt; 2466 int status, old_down_cnt;
2467 int i;
2111 2468
2112 if (mgp->running != MYRI10GE_ETH_RUNNING) 2469 if (mgp->running != MYRI10GE_ETH_RUNNING)
2113 return 0; 2470 return 0;
2114 2471
2115 if (mgp->ss.tx.req_bytes == NULL) 2472 if (mgp->ss[0].tx.req_bytes == NULL)
2116 return 0; 2473 return 0;
2117 2474
2118 del_timer_sync(&mgp->watchdog_timer); 2475 del_timer_sync(&mgp->watchdog_timer);
2119 mgp->running = MYRI10GE_ETH_STOPPING; 2476 mgp->running = MYRI10GE_ETH_STOPPING;
2120 napi_disable(&mgp->ss.napi); 2477 for (i = 0; i < mgp->num_slices; i++) {
2478 napi_disable(&mgp->ss[i].napi);
2479 }
2121 netif_carrier_off(dev); 2480 netif_carrier_off(dev);
2122 netif_stop_queue(dev); 2481 netif_stop_queue(dev);
2123 old_down_cnt = mgp->down_cnt; 2482 old_down_cnt = mgp->down_cnt;
@@ -2133,7 +2492,8 @@ static int myri10ge_close(struct net_device *dev)
2133 2492
2134 netif_tx_disable(dev); 2493 netif_tx_disable(dev);
2135 myri10ge_free_irq(mgp); 2494 myri10ge_free_irq(mgp);
2136 myri10ge_free_rings(&mgp->ss); 2495 for (i = 0; i < mgp->num_slices; i++)
2496 myri10ge_free_rings(&mgp->ss[i]);
2137 2497
2138 mgp->running = MYRI10GE_ETH_STOPPED; 2498 mgp->running = MYRI10GE_ETH_STOPPED;
2139 return 0; 2499 return 0;
@@ -2254,7 +2614,7 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
2254 u8 flags, odd_flag; 2614 u8 flags, odd_flag;
2255 2615
2256 /* always transmit through slot 0 */ 2616 /* always transmit through slot 0 */
2257 ss = &mgp->ss; 2617 ss = mgp->ss;
2258 tx = &ss->tx; 2618 tx = &ss->tx;
2259again: 2619again:
2260 req = tx->req_list; 2620 req = tx->req_list;
@@ -2559,7 +2919,21 @@ drop:
2559static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) 2919static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2560{ 2920{
2561 struct myri10ge_priv *mgp = netdev_priv(dev); 2921 struct myri10ge_priv *mgp = netdev_priv(dev);
2562 return &mgp->stats; 2922 struct myri10ge_slice_netstats *slice_stats;
2923 struct net_device_stats *stats = &mgp->stats;
2924 int i;
2925
2926 memset(stats, 0, sizeof(*stats));
2927 for (i = 0; i < mgp->num_slices; i++) {
2928 slice_stats = &mgp->ss[i].stats;
2929 stats->rx_packets += slice_stats->rx_packets;
2930 stats->tx_packets += slice_stats->tx_packets;
2931 stats->rx_bytes += slice_stats->rx_bytes;
2932 stats->tx_bytes += slice_stats->tx_bytes;
2933 stats->rx_dropped += slice_stats->rx_dropped;
2934 stats->tx_dropped += slice_stats->tx_dropped;
2935 }
2936 return stats;
2563} 2937}
2564 2938
2565static void myri10ge_set_multicast_list(struct net_device *dev) 2939static void myri10ge_set_multicast_list(struct net_device *dev)
@@ -2770,10 +3144,10 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2770 * 3144 *
2771 * If the driver can neither enable ECRC nor verify that it has 3145 * If the driver can neither enable ECRC nor verify that it has
2772 * already been enabled, then it must use a firmware image which works 3146 * already been enabled, then it must use a firmware image which works
2773 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 3147 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
2774 * should also ensure that it never gives the device a Read-DMA which is 3148 * should also ensure that it never gives the device a Read-DMA which is
2775 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 3149 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
2776 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 3150 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
2777 * firmware image, and set tx_boundary to 4KB. 3151 * firmware image, and set tx_boundary to 4KB.
2778 */ 3152 */
2779 3153
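The policy that comment lays out, condensed into code. myri10ge_select_firmware() is the real implementation, so treat this only as a summary of the rule; 'ecrc_enabled' is a stand-in for its probe result:

static void example_pick_firmware(struct myri10ge_priv *mgp, int ecrc_enabled)
{
	if (ecrc_enabled) {
		/* aligned completions guaranteed: 4KB read DMA is safe */
		mgp->fw_name = myri10ge_fw_aligned;
		mgp->tx_boundary = 4096;
	} else {
		/* use the unaligned-workaround (ethp) firmware */
		mgp->fw_name = myri10ge_fw_unaligned;
		mgp->tx_boundary = 2048;
	}
}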
@@ -2802,7 +3176,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
2802 * completions) in order to see if it works on this host. 3176 * completions) in order to see if it works on this host.
2803 */ 3177 */
2804 mgp->fw_name = myri10ge_fw_aligned; 3178 mgp->fw_name = myri10ge_fw_aligned;
2805 status = myri10ge_load_firmware(mgp); 3179 status = myri10ge_load_firmware(mgp, 1);
2806 if (status != 0) { 3180 if (status != 0) {
2807 goto abort; 3181 goto abort;
2808 } 3182 }
@@ -2983,6 +3357,7 @@ static void myri10ge_watchdog(struct work_struct *work)
2983 struct myri10ge_tx_buf *tx; 3357 struct myri10ge_tx_buf *tx;
2984 u32 reboot; 3358 u32 reboot;
2985 int status; 3359 int status;
3360 int i;
2986 u16 cmd, vendor; 3361 u16 cmd, vendor;
2987 3362
2988 mgp->watchdog_resets++; 3363 mgp->watchdog_resets++;
@@ -3030,20 +3405,26 @@ static void myri10ge_watchdog(struct work_struct *work)
3030 3405
3031 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3406 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
3032 mgp->dev->name); 3407 mgp->dev->name);
3033 tx = &mgp->ss.tx; 3408 for (i = 0; i < mgp->num_slices; i++) {
3034 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3409 tx = &mgp->ss[i].tx;
3035 mgp->dev->name, tx->req, tx->done, 3410 printk(KERN_INFO
3036 tx->pkt_start, tx->pkt_done, 3411 "myri10ge: %s: (%d): %d %d %d %d %d\n",
3037 (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3412 mgp->dev->name, i, tx->req, tx->done,
3038 msleep(2000); 3413 tx->pkt_start, tx->pkt_done,
3039 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3414 (int)ntohl(mgp->ss[i].fw_stats->
3040 mgp->dev->name, tx->req, tx->done, 3415 send_done_count));
3041 tx->pkt_start, tx->pkt_done, 3416 msleep(2000);
3042 (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3417 printk(KERN_INFO
3418 "myri10ge: %s: (%d): %d %d %d %d %d\n",
3419 mgp->dev->name, i, tx->req, tx->done,
3420 tx->pkt_start, tx->pkt_done,
3421 (int)ntohl(mgp->ss[i].fw_stats->
3422 send_done_count));
3423 }
3043 } 3424 }
3044 rtnl_lock(); 3425 rtnl_lock();
3045 myri10ge_close(mgp->dev); 3426 myri10ge_close(mgp->dev);
3046 status = myri10ge_load_firmware(mgp); 3427 status = myri10ge_load_firmware(mgp, 1);
3047 if (status != 0) 3428 if (status != 0)
3048 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n", 3429 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
3049 mgp->dev->name); 3430 mgp->dev->name);
@@ -3063,47 +3444,241 @@ static void myri10ge_watchdog_timer(unsigned long arg)
3063{ 3444{
3064 struct myri10ge_priv *mgp; 3445 struct myri10ge_priv *mgp;
3065 struct myri10ge_slice_state *ss; 3446 struct myri10ge_slice_state *ss;
3447 int i, reset_needed;
3066 u32 rx_pause_cnt; 3448 u32 rx_pause_cnt;
3067 3449
3068 mgp = (struct myri10ge_priv *)arg; 3450 mgp = (struct myri10ge_priv *)arg;
3069 3451
3070 rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); 3452 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3453 for (i = 0, reset_needed = 0;
3454 i < mgp->num_slices && reset_needed == 0; ++i) {
3455
3456 ss = &mgp->ss[i];
3457 if (ss->rx_small.watchdog_needed) {
3458 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3459 mgp->small_bytes + MXGEFW_PAD,
3460 1);
3461 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3462 myri10ge_fill_thresh)
3463 ss->rx_small.watchdog_needed = 0;
3464 }
3465 if (ss->rx_big.watchdog_needed) {
3466 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3467 mgp->big_bytes, 1);
3468 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3469 myri10ge_fill_thresh)
3470 ss->rx_big.watchdog_needed = 0;
3471 }
3071 3472
3072 ss = &mgp->ss; 3473 if (ss->tx.req != ss->tx.done &&
3073 if (ss->rx_small.watchdog_needed) { 3474 ss->tx.done == ss->watchdog_tx_done &&
3074 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 3475 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3075 mgp->small_bytes + MXGEFW_PAD, 1); 3476 /* nic seems like it might be stuck.. */
3076 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= 3477 if (rx_pause_cnt != mgp->watchdog_pause) {
3077 myri10ge_fill_thresh) 3478 if (net_ratelimit())
 3078 ss->rx_small.watchdog_needed = 0; 3479 printk(KERN_WARNING "myri10ge %s: "
3079 } 3480 "TX paused, check link partner\n",
3080 if (ss->rx_big.watchdog_needed) { 3481 mgp->dev->name);
3081 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); 3482 } else {
3082 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= 3483 reset_needed = 1;
3083 myri10ge_fill_thresh) 3484 }
3084 ss->rx_big.watchdog_needed = 0;
3085 }
3086
3087 if (ss->tx.req != ss->tx.done &&
3088 ss->tx.done == ss->watchdog_tx_done &&
3089 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3090 /* nic seems like it might be stuck.. */
3091 if (rx_pause_cnt != mgp->watchdog_pause) {
3092 if (net_ratelimit())
3093 printk(KERN_WARNING "myri10ge %s:"
3094 "TX paused, check link partner\n",
3095 mgp->dev->name);
3096 } else {
3097 schedule_work(&mgp->watchdog_work);
3098 return;
3099 } 3485 }
3486 ss->watchdog_tx_done = ss->tx.done;
3487 ss->watchdog_tx_req = ss->tx.req;
3100 } 3488 }
3101 /* rearm timer */
3102 mod_timer(&mgp->watchdog_timer,
3103 jiffies + myri10ge_watchdog_timeout * HZ);
3104 ss->watchdog_tx_done = ss->tx.done;
3105 ss->watchdog_tx_req = ss->tx.req;
3106 mgp->watchdog_pause = rx_pause_cnt; 3489 mgp->watchdog_pause = rx_pause_cnt;
3490
3491 if (reset_needed) {
3492 schedule_work(&mgp->watchdog_work);
3493 } else {
3494 /* rearm timer */
3495 mod_timer(&mgp->watchdog_timer,
3496 jiffies + myri10ge_watchdog_timeout * HZ);
3497 }
3498}
3499
3500static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3501{
3502 struct myri10ge_slice_state *ss;
3503 struct pci_dev *pdev = mgp->pdev;
3504 size_t bytes;
3505 int i;
3506
3507 if (mgp->ss == NULL)
3508 return;
3509
3510 for (i = 0; i < mgp->num_slices; i++) {
3511 ss = &mgp->ss[i];
3512 if (ss->rx_done.entry != NULL) {
3513 bytes = mgp->max_intr_slots *
3514 sizeof(*ss->rx_done.entry);
3515 dma_free_coherent(&pdev->dev, bytes,
3516 ss->rx_done.entry, ss->rx_done.bus);
3517 ss->rx_done.entry = NULL;
3518 }
3519 if (ss->fw_stats != NULL) {
3520 bytes = sizeof(*ss->fw_stats);
3521 dma_free_coherent(&pdev->dev, bytes,
3522 ss->fw_stats, ss->fw_stats_bus);
3523 ss->fw_stats = NULL;
3524 }
3525 }
3526 kfree(mgp->ss);
3527 mgp->ss = NULL;
3528}
3529
3530static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3531{
3532 struct myri10ge_slice_state *ss;
3533 struct pci_dev *pdev = mgp->pdev;
3534 size_t bytes;
3535 int i;
3536
3537 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3538 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3539 if (mgp->ss == NULL) {
3540 return -ENOMEM;
3541 }
3542
3543 for (i = 0; i < mgp->num_slices; i++) {
3544 ss = &mgp->ss[i];
3545 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3546 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3547 &ss->rx_done.bus,
3548 GFP_KERNEL);
3549 if (ss->rx_done.entry == NULL)
3550 goto abort;
3551 memset(ss->rx_done.entry, 0, bytes);
3552 bytes = sizeof(*ss->fw_stats);
3553 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3554 &ss->fw_stats_bus,
3555 GFP_KERNEL);
3556 if (ss->fw_stats == NULL)
3557 goto abort;
3558 ss->mgp = mgp;
3559 ss->dev = mgp->dev;
3560 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3561 myri10ge_napi_weight);
3562 }
3563 return 0;
3564abort:
3565 myri10ge_free_slices(mgp);
3566 return -ENOMEM;
3567}
3568
3569/*
3570 * This function determines the number of slices supported.
 3571 * The number of slices is the minimum of the number of CPUs,
 3572 * the number of MSI-X irqs supported, and the number of slices
 3573 * supported by the firmware.
3574 */
3575static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3576{
3577 struct myri10ge_cmd cmd;
3578 struct pci_dev *pdev = mgp->pdev;
3579 char *old_fw;
3580 int i, status, ncpus, msix_cap;
3581
3582 mgp->num_slices = 1;
3583 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3584 ncpus = num_online_cpus();
3585
3586 if (myri10ge_max_slices == 1 || msix_cap == 0 ||
3587 (myri10ge_max_slices == -1 && ncpus < 2))
3588 return;
3589
 3590 /* try to load the slice-aware RSS firmware */
3591 old_fw = mgp->fw_name;
3592 if (old_fw == myri10ge_fw_aligned)
3593 mgp->fw_name = myri10ge_fw_rss_aligned;
3594 else
3595 mgp->fw_name = myri10ge_fw_rss_unaligned;
3596 status = myri10ge_load_firmware(mgp, 0);
3597 if (status != 0) {
 3598 dev_info(&pdev->dev, "RSS firmware not found\n");
3599 return;
3600 }
3601
3602 /* hit the board with a reset to ensure it is alive */
3603 memset(&cmd, 0, sizeof(cmd));
3604 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
3605 if (status != 0) {
3606 dev_err(&mgp->pdev->dev, "failed reset\n");
3607 goto abort_with_fw;
3609 }
3610
3611 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
3612
3613 /* tell it the size of the interrupt queues */
3614 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
3615 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
3616 if (status != 0) {
3617 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
3618 goto abort_with_fw;
3619 }
3620
3621 /* ask the maximum number of slices it supports */
3622 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
3623 if (status != 0)
3624 goto abort_with_fw;
3625 else
3626 mgp->num_slices = cmd.data0;
3627
3628 /* Only allow multiple slices if MSI-X is usable */
3629 if (!myri10ge_msi) {
3630 goto abort_with_fw;
3631 }
3632
3633 /* if the admin did not specify a limit to how many
3634 * slices we should use, cap it automatically to the
3635 * number of CPUs currently online */
3636 if (myri10ge_max_slices == -1)
3637 myri10ge_max_slices = ncpus;
3638
3639 if (mgp->num_slices > myri10ge_max_slices)
3640 mgp->num_slices = myri10ge_max_slices;
3641
3642 /* Now try to allocate as many MSI-X vectors as we have
3643 * slices. We give up on MSI-X if we can only get a single
3644 * vector. */
3645
3646 mgp->msix_vectors = kzalloc(mgp->num_slices *
3647 sizeof(*mgp->msix_vectors), GFP_KERNEL);
3648 if (mgp->msix_vectors == NULL)
3649 goto disable_msix;
3650 for (i = 0; i < mgp->num_slices; i++) {
3651 mgp->msix_vectors[i].entry = i;
3652 }
3653
3654 while (mgp->num_slices > 1) {
3655 /* make sure it is a power of two */
3656 while (!is_power_of_2(mgp->num_slices))
3657 mgp->num_slices--;
3658 if (mgp->num_slices == 1)
3659 goto disable_msix;
3660 status = pci_enable_msix(pdev, mgp->msix_vectors,
3661 mgp->num_slices);
3662 if (status == 0) {
3663 pci_disable_msix(pdev);
3664 return;
3665 }
3666 if (status > 0)
3667 mgp->num_slices = status;
3668 else
3669 goto disable_msix;
3670 }
3671
3672disable_msix:
3673 if (mgp->msix_vectors != NULL) {
3674 kfree(mgp->msix_vectors);
3675 mgp->msix_vectors = NULL;
3676 }
3677
3678abort_with_fw:
3679 mgp->num_slices = 1;
3680 mgp->fw_name = old_fw;
3681 myri10ge_load_firmware(mgp, 0);
3107} 3682}
3108 3683
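The retry loop above leans on the old pci_enable_msix() contract: 0 on success, a positive count of vectors actually available when the request was too large, and a negative errno on hard failure. The idiom in isolation (the driver additionally rounds the count down to a power of two, a firmware RSS requirement):

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int want)
{
	int ret;

	while (want > 1) {
		ret = pci_enable_msix(pdev, entries, want);
		if (ret == 0)
			return want;	/* got everything we asked for */
		if (ret < 0)
			return ret;	/* hard failure */
		want = ret;		/* retry with what is available */
	}
	return -ENOSPC;
}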
3109static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3684static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3111,7 +3686,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3111 struct net_device *netdev; 3686 struct net_device *netdev;
3112 struct myri10ge_priv *mgp; 3687 struct myri10ge_priv *mgp;
3113 struct device *dev = &pdev->dev; 3688 struct device *dev = &pdev->dev;
3114 size_t bytes;
3115 int i; 3689 int i;
3116 int status = -ENXIO; 3690 int status = -ENXIO;
3117 int dac_enabled; 3691 int dac_enabled;
@@ -3126,7 +3700,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3126 3700
3127 mgp = netdev_priv(netdev); 3701 mgp = netdev_priv(netdev);
3128 mgp->dev = netdev; 3702 mgp->dev = netdev;
3129 netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
3130 mgp->pdev = pdev; 3703 mgp->pdev = pdev;
3131 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3704 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
3132 mgp->pause = myri10ge_flow_control; 3705 mgp->pause = myri10ge_flow_control;
@@ -3172,11 +3745,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3172 if (mgp->cmd == NULL) 3745 if (mgp->cmd == NULL)
3173 goto abort_with_netdev; 3746 goto abort_with_netdev;
3174 3747
3175 mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3176 &mgp->ss.fw_stats_bus, GFP_KERNEL);
3177 if (mgp->ss.fw_stats == NULL)
3178 goto abort_with_cmd;
3179
3180 mgp->board_span = pci_resource_len(pdev, 0); 3748 mgp->board_span = pci_resource_len(pdev, 0);
3181 mgp->iomem_base = pci_resource_start(pdev, 0); 3749 mgp->iomem_base = pci_resource_start(pdev, 0);
3182 mgp->mtrr = -1; 3750 mgp->mtrr = -1;
@@ -3213,28 +3781,28 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3213 for (i = 0; i < ETH_ALEN; i++) 3781 for (i = 0; i < ETH_ALEN; i++)
3214 netdev->dev_addr[i] = mgp->mac_addr[i]; 3782 netdev->dev_addr[i] = mgp->mac_addr[i];
3215 3783
3216 /* allocate rx done ring */
3217 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3218 mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3219 &mgp->ss.rx_done.bus, GFP_KERNEL);
3220 if (mgp->ss.rx_done.entry == NULL)
3221 goto abort_with_ioremap;
3222 memset(mgp->ss.rx_done.entry, 0, bytes);
3223
3224 myri10ge_select_firmware(mgp); 3784 myri10ge_select_firmware(mgp);
3225 3785
3226 status = myri10ge_load_firmware(mgp); 3786 status = myri10ge_load_firmware(mgp, 1);
3227 if (status != 0) { 3787 if (status != 0) {
3228 dev_err(&pdev->dev, "failed to load firmware\n"); 3788 dev_err(&pdev->dev, "failed to load firmware\n");
3229 goto abort_with_rx_done; 3789 goto abort_with_ioremap;
3790 }
3791 myri10ge_probe_slices(mgp);
3792 status = myri10ge_alloc_slices(mgp);
3793 if (status != 0) {
3794 dev_err(&pdev->dev, "failed to alloc slice state\n");
3795 goto abort_with_firmware;
3230 } 3796 }
3231 3797
3232 status = myri10ge_reset(mgp); 3798 status = myri10ge_reset(mgp);
3233 if (status != 0) { 3799 if (status != 0) {
3234 dev_err(&pdev->dev, "failed reset\n"); 3800 dev_err(&pdev->dev, "failed reset\n");
3235 goto abort_with_firmware; 3801 goto abort_with_slices;
3236 } 3802 }
3237 3803#ifdef CONFIG_DCA
3804 myri10ge_setup_dca(mgp);
3805#endif
3238 pci_set_drvdata(pdev, mgp); 3806 pci_set_drvdata(pdev, mgp);
3239 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) 3807 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
3240 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 3808 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
@@ -3277,24 +3845,27 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3277 dev_err(&pdev->dev, "register_netdev failed: %d\n", status); 3845 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
3278 goto abort_with_state; 3846 goto abort_with_state;
3279 } 3847 }
3280 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3848 if (mgp->msix_enabled)
3281 (mgp->msi_enabled ? "MSI" : "xPIC"), 3849 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
3282 netdev->irq, mgp->tx_boundary, mgp->fw_name, 3850 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
3283 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3851 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3852 else
3853 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
3854 mgp->msi_enabled ? "MSI" : "xPIC",
3855 netdev->irq, mgp->tx_boundary, mgp->fw_name,
3856 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3284 3857
3285 return 0; 3858 return 0;
3286 3859
3287abort_with_state: 3860abort_with_state:
3288 pci_restore_state(pdev); 3861 pci_restore_state(pdev);
3289 3862
3863abort_with_slices:
3864 myri10ge_free_slices(mgp);
3865
3290abort_with_firmware: 3866abort_with_firmware:
3291 myri10ge_dummy_rdma(mgp, 0); 3867 myri10ge_dummy_rdma(mgp, 0);
3292 3868
3293abort_with_rx_done:
3294 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3295 dma_free_coherent(&pdev->dev, bytes,
3296 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3297
3298abort_with_ioremap: 3869abort_with_ioremap:
3299 iounmap(mgp->sram); 3870 iounmap(mgp->sram);
3300 3871
@@ -3303,10 +3874,6 @@ abort_with_wc:
3303 if (mgp->mtrr >= 0) 3874 if (mgp->mtrr >= 0)
3304 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3875 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3305#endif 3876#endif
3306 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3307 mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
3308
3309abort_with_cmd:
3310 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3877 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3311 mgp->cmd, mgp->cmd_bus); 3878 mgp->cmd, mgp->cmd_bus);
3312 3879
@@ -3327,7 +3894,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
3327{ 3894{
3328 struct myri10ge_priv *mgp; 3895 struct myri10ge_priv *mgp;
3329 struct net_device *netdev; 3896 struct net_device *netdev;
3330 size_t bytes;
3331 3897
3332 mgp = pci_get_drvdata(pdev); 3898 mgp = pci_get_drvdata(pdev);
3333 if (mgp == NULL) 3899 if (mgp == NULL)
@@ -3337,24 +3903,23 @@ static void myri10ge_remove(struct pci_dev *pdev)
3337 netdev = mgp->dev; 3903 netdev = mgp->dev;
3338 unregister_netdev(netdev); 3904 unregister_netdev(netdev);
3339 3905
3906#ifdef CONFIG_DCA
3907 myri10ge_teardown_dca(mgp);
3908#endif
3340 myri10ge_dummy_rdma(mgp, 0); 3909 myri10ge_dummy_rdma(mgp, 0);
3341 3910
3342 /* avoid a memory leak */ 3911 /* avoid a memory leak */
3343 pci_restore_state(pdev); 3912 pci_restore_state(pdev);
3344 3913
3345 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3346 dma_free_coherent(&pdev->dev, bytes,
3347 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3348
3349 iounmap(mgp->sram); 3914 iounmap(mgp->sram);
3350 3915
3351#ifdef CONFIG_MTRR 3916#ifdef CONFIG_MTRR
3352 if (mgp->mtrr >= 0) 3917 if (mgp->mtrr >= 0)
3353 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3918 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3354#endif 3919#endif
3355 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3920 myri10ge_free_slices(mgp);
3356 mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3921 if (mgp->msix_vectors != NULL)
3357 3922 kfree(mgp->msix_vectors);
3358 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3923 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3359 mgp->cmd, mgp->cmd_bus); 3924 mgp->cmd, mgp->cmd_bus);
3360 3925
@@ -3383,10 +3948,42 @@ static struct pci_driver myri10ge_driver = {
3383#endif 3948#endif
3384}; 3949};
3385 3950
3951#ifdef CONFIG_DCA
3952static int
3953myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
3954{
3955 int err = driver_for_each_device(&myri10ge_driver.driver,
3956 NULL, &event,
3957 myri10ge_notify_dca_device);
3958
3959 if (err)
3960 return NOTIFY_BAD;
3961 return NOTIFY_DONE;
3962}
3963
3964static struct notifier_block myri10ge_dca_notifier = {
3965 .notifier_call = myri10ge_notify_dca,
3966 .next = NULL,
3967 .priority = 0,
3968};
3969#endif /* CONFIG_DCA */
3970
3386static __init int myri10ge_init_module(void) 3971static __init int myri10ge_init_module(void)
3387{ 3972{
3388 printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name, 3973 printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
3389 MYRI10GE_VERSION_STR); 3974 MYRI10GE_VERSION_STR);
3975
3976 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_SRC_PORT ||
3977 myri10ge_rss_hash < MXGEFW_RSS_HASH_TYPE_IPV4) {
3978 printk(KERN_ERR
3979 "%s: Illegal rssh hash type %d, defaulting to source port\n",
3980 myri10ge_driver.name, myri10ge_rss_hash);
3981 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
3982 }
3983#ifdef CONFIG_DCA
3984 dca_register_notify(&myri10ge_dca_notifier);
3985#endif
3986
3390 return pci_register_driver(&myri10ge_driver); 3987 return pci_register_driver(&myri10ge_driver);
3391} 3988}
3392 3989
@@ -3394,6 +3991,9 @@ module_init(myri10ge_init_module);
3394 3991
3395static __exit void myri10ge_cleanup_module(void) 3992static __exit void myri10ge_cleanup_module(void)
3396{ 3993{
3994#ifdef CONFIG_DCA
3995 dca_unregister_notify(&myri10ge_dca_notifier);
3996#endif
3397 pci_unregister_driver(&myri10ge_driver); 3997 pci_unregister_driver(&myri10ge_driver);
3398} 3998}
3399 3999
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b42c05f84be1..ff449619f047 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -585,16 +585,13 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
585 for (i=0; i<NR_RX_DESC; i++) { 585 for (i=0; i<NR_RX_DESC; i++) {
586 struct sk_buff *skb; 586 struct sk_buff *skb;
587 long res; 587 long res;
588
588 /* extra 16 bytes for alignment */ 589 /* extra 16 bytes for alignment */
589 skb = __dev_alloc_skb(REAL_RX_BUF_SIZE+16, gfp); 590 skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
590 if (unlikely(!skb)) 591 if (unlikely(!skb))
591 break; 592 break;
592 593
593 res = (long)skb->data & 0xf; 594 skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16));
594 res = 0x10 - res;
595 res &= 0xf;
596 skb_reserve(skb, res);
597
598 if (gfp != GFP_ATOMIC) 595 if (gfp != GFP_ATOMIC)
599 spin_lock_irqsave(&dev->rx_info.lock, flags); 596 spin_lock_irqsave(&dev->rx_info.lock, flags);
600 res = ns83820_add_rx_skb(dev, skb); 597 res = ns83820_add_rx_skb(dev, skb);
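The three-step pad computation removed above, (0x10 - (data & 0xf)) & 0xf, reduces to a single expression; a compact equivalent for reference:

/* bytes needed to advance p to the next 16-byte boundary,
 * zero if it is already aligned */
static inline int example_align_pad(const void *p)
{
	return -(unsigned long)p & 15;
}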
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 8f328a03847b..a550c9bd126f 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
391 cardtype = CONTEC; 391 cardtype = CONTEC;
392 break; 392 break;
393 case MANFID_FUJITSU: 393 case MANFID_FUJITSU:
394 if (link->card_id == PRODID_FUJITSU_MBH10302) 394 if (link->conf.ConfigBase == 0x0fe0)
395 cardtype = MBH10302;
396 else if (link->card_id == PRODID_FUJITSU_MBH10302)
395 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 397 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
396 but these are MBH10304 based card. */ 398 but these are MBH10304 based card. */
397 cardtype = MBH10304; 399 cardtype = MBH10304;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index d041f831a18d..f6c4698ce738 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1461,22 +1461,25 @@ static void
1461set_multicast_list(struct net_device *dev) 1461set_multicast_list(struct net_device *dev)
1462{ 1462{
1463 unsigned int ioaddr = dev->base_addr; 1463 unsigned int ioaddr = dev->base_addr;
1464 unsigned value;
1464 1465
1465 SelectPage(0x42); 1466 SelectPage(0x42);
1467 value = GetByte(XIRCREG42_SWC1) & 0xC0;
1468
1466 if (dev->flags & IFF_PROMISC) { /* snoop */ 1469 if (dev->flags & IFF_PROMISC) { /* snoop */
1467 PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ 1470 PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
1468 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1471 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
1469 PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ 1472 PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
1470 } else if (dev->mc_count) { 1473 } else if (dev->mc_count) {
1471 /* the chip can filter 9 addresses perfectly */ 1474 /* the chip can filter 9 addresses perfectly */
1472 PutByte(XIRCREG42_SWC1, 0x01); 1475 PutByte(XIRCREG42_SWC1, value | 0x01);
1473 SelectPage(0x40); 1476 SelectPage(0x40);
1474 PutByte(XIRCREG40_CMD0, Offline); 1477 PutByte(XIRCREG40_CMD0, Offline);
1475 set_addresses(dev); 1478 set_addresses(dev);
1476 SelectPage(0x40); 1479 SelectPage(0x40);
1477 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1480 PutByte(XIRCREG40_CMD0, EnableRecv | Online);
1478 } else { /* standard usage */ 1481 } else { /* standard usage */
1479 PutByte(XIRCREG42_SWC1, 0x00); 1482 PutByte(XIRCREG42_SWC1, value | 0x00);
1480 } 1483 }
1481 SelectPage(0); 1484 SelectPage(0);
1482} 1485}
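The change above converts blind writes to SWC1 into read-modify-write so the chip's two high configuration bits survive mode updates. The same idiom isolated; SelectPage/GetByte/PutByte mirror the driver's register macros and 'mode' is illustrative:

static void example_update_swc1(unsigned int ioaddr, u8 mode)
{
	u8 value;

	SelectPage(0x42);
	value = GetByte(XIRCREG42_SWC1) & 0xC0;	/* keep bits 7:6 */
	PutByte(XIRCREG42_SWC1, value | (mode & 0x3f));
	SelectPage(0);
}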
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)
1722 1725
1723 /* enable receiver and put the mac online */ 1726 /* enable receiver and put the mac online */
1724 if (full) { 1727 if (full) {
1728 set_multicast_list(dev);
1725 SelectPage(0x40); 1729 SelectPage(0x40);
1726 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1730 PutByte(XIRCREG40_CMD0, EnableRecv | Online);
1727 } 1731 }
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 81fd85214b98..ca8c0e037400 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
325static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 325static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
326 void *ptr); 326 void *ptr);
327static void pcnet32_purge_tx_ring(struct net_device *dev); 327static void pcnet32_purge_tx_ring(struct net_device *dev);
328static int pcnet32_alloc_ring(struct net_device *dev, char *name); 328static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
329static void pcnet32_free_ring(struct net_device *dev); 329static void pcnet32_free_ring(struct net_device *dev);
330static void pcnet32_check_media(struct net_device *dev, int verbose); 330static void pcnet32_check_media(struct net_device *dev, int verbose);
331 331
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1983} 1983}
1984 1984
1985/* if any allocation fails, caller must also call pcnet32_free_ring */ 1985/* if any allocation fails, caller must also call pcnet32_free_ring */
1986static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1986static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1987{ 1987{
1988 struct pcnet32_private *lp = netdev_priv(dev); 1988 struct pcnet32_private *lp = netdev_priv(dev);
1989 1989
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6bf9e76b0a00..d55932acd887 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
5menuconfig PHYLIB 5menuconfig PHYLIB
6 tristate "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390 7 depends on !S390
8 depends on NET_ETHERNET && (BROKEN || !S390) 8 depends on NET_ETHERNET
9 help 9 help
10 Ethernet controllers are usually attached to PHY 10 Ethernet controllers are usually attached to PHY
11 devices. This option provides infrastructure for 11 devices. This option provides infrastructure for
@@ -53,7 +53,8 @@ config SMSC_PHY
53config BROADCOM_PHY 53config BROADCOM_PHY
54 tristate "Drivers for Broadcom PHYs" 54 tristate "Drivers for Broadcom PHYs"
55 ---help--- 55 ---help---
56 Currently supports the BCM5411, BCM5421 and BCM5461 PHYs. 56 Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481
57 and BCM5482 PHYs.
57 58
58config ICPLUS_PHY 59config ICPLUS_PHY
59 tristate "Drivers for ICPlus PHYs" 60 tristate "Drivers for ICPlus PHYs"
@@ -83,4 +84,10 @@ config MDIO_BITBANG
83 84
84 If in doubt, say N. 85 If in doubt, say N.
85 86
87config MDIO_OF_GPIO
88 tristate "Support for GPIO lib-based bitbanged MDIO buses"
89 depends on MDIO_BITBANG && OF_GPIO
90 ---help---
 91 Supports GPIO lib-based MDIO buses.
92
86endif # PHYLIB 93endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 5997d6ef702b..eee329fa6f53 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
15obj-$(CONFIG_REALTEK_PHY) += realtek.o 15obj-$(CONFIG_REALTEK_PHY) += realtek.o
16obj-$(CONFIG_FIXED_PHY) += fixed.o 16obj-$(CONFIG_FIXED_PHY) += fixed.o
17obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o 17obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
18obj-$(CONFIG_MDIO_OF_GPIO) += mdio-ofgpio.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60c5cfe96918..4b4dc98ad165 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -24,6 +24,12 @@
24#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ 24#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
25#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ 25#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
26 26
27#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
28#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
29#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
30#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
31
32#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
27#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ 33#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
28#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ 34#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
29#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ 35#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
@@ -42,10 +48,120 @@
42#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ 48#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
43#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ 49#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
44 50
51#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
52#define MII_BCM54XX_SHD_WRITE 0x8000
53#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
54#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
55
56/*
57 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
58 * BCM5482, and possibly some others.
59 */
60#define BCM_LED_SRC_LINKSPD1 0x0
61#define BCM_LED_SRC_LINKSPD2 0x1
62#define BCM_LED_SRC_XMITLED 0x2
63#define BCM_LED_SRC_ACTIVITYLED 0x3
64#define BCM_LED_SRC_FDXLED 0x4
65#define BCM_LED_SRC_SLAVE 0x5
66#define BCM_LED_SRC_INTR 0x6
67#define BCM_LED_SRC_QUALITY 0x7
68#define BCM_LED_SRC_RCVLED 0x8
69#define BCM_LED_SRC_MULTICOLOR1 0xa
70#define BCM_LED_SRC_OPENSHORT 0xb
71#define BCM_LED_SRC_OFF 0xe /* Tied high */
72#define BCM_LED_SRC_ON 0xf /* Tied low */
73
74/*
75 * BCM5482: Shadow registers
76 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
77 * register to access.
78 */
79#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
80 /* LED3 / ~LINKSPD[2] selector */
81#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
82 /* LED1 / ~LINKSPD[1] selector */
83#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
84#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
85#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
86#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
87#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
88#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
89
90/*
91 * BCM5482: Secondary SerDes registers
92 */
93#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
94#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
95#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
96#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
97#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
98
99/*
100 * Device flags for PHYs that can be configured for different operating
101 * modes.
102 */
103#define PHY_BCM_FLAGS_VALID 0x80000000
104#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
105#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
106#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
107#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
108
45MODULE_DESCRIPTION("Broadcom PHY driver"); 109MODULE_DESCRIPTION("Broadcom PHY driver");
46MODULE_AUTHOR("Maciej W. Rozycki"); 110MODULE_AUTHOR("Maciej W. Rozycki");
47MODULE_LICENSE("GPL"); 111MODULE_LICENSE("GPL");
48 112
113/*
114 * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
115 * 0x1c shadow registers.
116 */
117static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
118{
119 phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
120 return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
121}
122
123static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val)
124{
125 return phy_write(phydev, MII_BCM54XX_SHD,
126 MII_BCM54XX_SHD_WRITE |
127 MII_BCM54XX_SHD_VAL(shadow) |
128 MII_BCM54XX_SHD_DATA(val));
129}
130
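Register 0x1c packs a write strobe (bit 15), a 5-bit shadow selector (bits 14:10) and 10 bits of data, which is exactly what the macros above encode. A typical read-modify-write through that window, using a register and bit defined further down in this patch purely as an example:

static int example_shadow_set_bit(struct phy_device *phydev)
{
	int val = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE);

	if (val < 0)
		return val;
	return bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE,
				    val | BCM5482_SHD_MODE_1000BX);
}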
131/*
132 * Indirect register access functions for the Expansion Registers
133 * and Secondary SerDes registers (when sec_serdes=1).
134 */
135static int bcm54xx_exp_read(struct phy_device *phydev,
136 int sec_serdes, u8 regnum)
137{
138 int val;
139
140 phy_write(phydev, MII_BCM54XX_EXP_SEL,
141 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD :
142 MII_BCM54XX_EXP_SEL_ER) |
143 regnum);
144 val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
145 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
146
147 return val;
148}
149
150static int bcm54xx_exp_write(struct phy_device *phydev,
151 int sec_serdes, u8 regnum, u16 val)
152{
153 int ret;
154
155 phy_write(phydev, MII_BCM54XX_EXP_SEL,
156 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD :
157 MII_BCM54XX_EXP_SEL_ER) |
158 regnum);
159 ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
160 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
161
162 return ret;
163}
164
49static int bcm54xx_config_init(struct phy_device *phydev) 165static int bcm54xx_config_init(struct phy_device *phydev)
50{ 166{
51 int reg, err; 167 int reg, err;
@@ -70,6 +186,87 @@ static int bcm54xx_config_init(struct phy_device *phydev)
70 return 0; 186 return 0;
71} 187}
72 188
189static int bcm5482_config_init(struct phy_device *phydev)
190{
191 int err, reg;
192
193 err = bcm54xx_config_init(phydev);
194
195 if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
196 /*
197 * Enable secondary SerDes and its use as an LED source
198 */
199 reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_SSD);
200 bcm54xx_shadow_write(phydev, BCM5482_SHD_SSD,
201 reg |
202 BCM5482_SHD_SSD_LEDM |
203 BCM5482_SHD_SSD_EN);
204
205 /*
206 * Enable SGMII slave mode and auto-detection
207 */
208 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_SGMII_SLAVE);
209 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_SGMII_SLAVE,
210 reg |
211 BCM5482_SSD_SGMII_SLAVE_EN |
212 BCM5482_SSD_SGMII_SLAVE_AD);
213
214 /*
215 * Disable secondary SerDes powerdown
216 */
217 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_1000BX_CTL);
218 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_1000BX_CTL,
219 reg & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
220
221 /*
222 * Select 1000BASE-X register set (primary SerDes)
223 */
224 reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE);
225 bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE,
226 reg | BCM5482_SHD_MODE_1000BX);
227
228 /*
229 * LED1=ACTIVITYLED, LED3=LINKSPD[2]
230 * (Use LED1 as secondary SerDes ACTIVITY LED)
231 */
232 bcm54xx_shadow_write(phydev, BCM5482_SHD_LEDS1,
233 BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) |
234 BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2));
235
236 /*
237 * Auto-negotiation doesn't seem to work quite right
238 * in this mode, so we disable it and force it to the
239 * right speed/duplex setting. Only 'link status'
240 * is important.
241 */
242 phydev->autoneg = AUTONEG_DISABLE;
243 phydev->speed = SPEED_1000;
244 phydev->duplex = DUPLEX_FULL;
245 }
246
247 return err;
248}
249
250static int bcm5482_read_status(struct phy_device *phydev)
251{
252 int err;
253
254 err = genphy_read_status(phydev);
255
256 if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
257 /*
258 * Only link status matters for 1000Base-X mode, so force
259 * 1000 Mbit/s full-duplex status
260 */
261 if (phydev->link) {
262 phydev->speed = SPEED_1000;
263 phydev->duplex = DUPLEX_FULL;
264 }
265 }
266
267 return err;
268}
269
73static int bcm54xx_ack_interrupt(struct phy_device *phydev) 270static int bcm54xx_ack_interrupt(struct phy_device *phydev)
74{ 271{
75 int reg; 272 int reg;
@@ -210,9 +407,9 @@ static struct phy_driver bcm5482_driver = {
210 .name = "Broadcom BCM5482", 407 .name = "Broadcom BCM5482",
211 .features = PHY_GBIT_FEATURES, 408 .features = PHY_GBIT_FEATURES,
212 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 409 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
213 .config_init = bcm54xx_config_init, 410 .config_init = bcm5482_config_init,
214 .config_aneg = genphy_config_aneg, 411 .config_aneg = genphy_config_aneg,
215 .read_status = genphy_read_status, 412 .read_status = bcm5482_read_status,
216 .ack_interrupt = bcm54xx_ack_interrupt, 413 .ack_interrupt = bcm54xx_ack_interrupt,
217 .config_intr = bcm54xx_config_intr, 414 .config_intr = bcm54xx_config_intr,
218 .driver = { .owner = THIS_MODULE }, 415 .driver = { .owner = THIS_MODULE },
diff --git a/drivers/net/phy/mdio-ofgpio.c b/drivers/net/phy/mdio-ofgpio.c
new file mode 100644
index 000000000000..7edfc0c34835
--- /dev/null
+++ b/drivers/net/phy/mdio-ofgpio.c
@@ -0,0 +1,205 @@
1/*
2 * OpenFirmware GPIO based MDIO bitbang driver.
3 *
4 * Copyright (c) 2008 CSE Semaphore Belgium.
5 * by Laurent Pinchart <laurentp@cse-semaphore.com>
6 *
7 * Based on earlier work by
8 *
9 * Copyright (c) 2003 Intracom S.A.
10 * by Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2005 (c) MontaVista Software, Inc.
13 * Vitaly Bordug <vbordug@ru.mvista.com>
14 *
15 * This file is licensed under the terms of the GNU General Public License
16 * version 2. This program is licensed "as is" without any warranty of any
17 * kind, whether express or implied.
18 */
19
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/mdio-bitbang.h>
25#include <linux/of_gpio.h>
26#include <linux/of_platform.h>
27
28struct mdio_gpio_info {
29 struct mdiobb_ctrl ctrl;
30 int mdc, mdio;
31};
32
33static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
34{
35 struct mdio_gpio_info *bitbang =
36 container_of(ctrl, struct mdio_gpio_info, ctrl);
37
38 if (dir)
39 gpio_direction_output(bitbang->mdio, 1);
40 else
41 gpio_direction_input(bitbang->mdio);
42}
43
44static int mdio_read(struct mdiobb_ctrl *ctrl)
45{
46 struct mdio_gpio_info *bitbang =
47 container_of(ctrl, struct mdio_gpio_info, ctrl);
48
49 return gpio_get_value(bitbang->mdio);
50}
51
52static void mdio(struct mdiobb_ctrl *ctrl, int what)
53{
54 struct mdio_gpio_info *bitbang =
55 container_of(ctrl, struct mdio_gpio_info, ctrl);
56
57 gpio_set_value(bitbang->mdio, what);
58}
59
60static void mdc(struct mdiobb_ctrl *ctrl, int what)
61{
62 struct mdio_gpio_info *bitbang =
63 container_of(ctrl, struct mdio_gpio_info, ctrl);
64
65 gpio_set_value(bitbang->mdc, what);
66}
67
68static struct mdiobb_ops mdio_gpio_ops = {
69 .owner = THIS_MODULE,
70 .set_mdc = mdc,
71 .set_mdio_dir = mdio_dir,
72 .set_mdio_data = mdio,
73 .get_mdio_data = mdio_read,
74};
75
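These four callbacks are all the mdio-bitbang core needs; it sequences them into MDIO frames. Roughly how the core clocks one bit out through them — the delay constant here is an assumption, the real core keeps its own timing:

#define EX_MDIO_DELAY_NS	250	/* assumed settle time */

static void example_clock_out_bit(struct mdiobb_ctrl *ctrl, int bit)
{
	ctrl->ops->set_mdio_data(ctrl, bit);
	ndelay(EX_MDIO_DELAY_NS);
	ctrl->ops->set_mdc(ctrl, 1);	/* PHY samples MDIO on this edge */
	ndelay(EX_MDIO_DELAY_NS);
	ctrl->ops->set_mdc(ctrl, 0);
}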
76static int __devinit mdio_ofgpio_bitbang_init(struct mii_bus *bus,
77 struct device_node *np)
78{
79 struct mdio_gpio_info *bitbang = bus->priv;
80
81 bitbang->mdc = of_get_gpio(np, 0);
82 bitbang->mdio = of_get_gpio(np, 1);
83
84 if (bitbang->mdc < 0 || bitbang->mdio < 0)
85 return -ENODEV;
86
87 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", bitbang->mdc);
88 return 0;
89}
90
91static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
92{
93 const u32 *data;
94 int len, id, irq;
95
96 data = of_get_property(np, "reg", &len);
97 if (!data || len != 4)
98 return;
99
100 id = *data;
101 bus->phy_mask &= ~(1 << id);
102
103 irq = of_irq_to_resource(np, 0, NULL);
104 if (irq != NO_IRQ)
105 bus->irq[id] = irq;
106}
107
108static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
109 const struct of_device_id *match)
110{
111 struct device_node *np = NULL;
112 struct mii_bus *new_bus;
113 struct mdio_gpio_info *bitbang;
114 int ret = -ENOMEM;
115 int i;
116
117 bitbang = kzalloc(sizeof(struct mdio_gpio_info), GFP_KERNEL);
118 if (!bitbang)
119 goto out;
120
121 bitbang->ctrl.ops = &mdio_gpio_ops;
122
123 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
124 if (!new_bus)
125 goto out_free_priv;
126
 127 new_bus->name = "GPIO Bitbanged MII";
128
129 ret = mdio_ofgpio_bitbang_init(new_bus, ofdev->node);
130 if (ret)
131 goto out_free_bus;
132
133 new_bus->phy_mask = ~0;
134 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
135 if (!new_bus->irq)
136 goto out_free_bus;
137
138 for (i = 0; i < PHY_MAX_ADDR; i++)
139 new_bus->irq[i] = -1;
140
141 while ((np = of_get_next_child(ofdev->node, np)))
142 if (!strcmp(np->type, "ethernet-phy"))
143 add_phy(new_bus, np);
144
145 new_bus->dev = &ofdev->dev;
146 dev_set_drvdata(&ofdev->dev, new_bus);
147
148 ret = mdiobus_register(new_bus);
149 if (ret)
150 goto out_free_irqs;
151
152 return 0;
153
154out_free_irqs:
155 dev_set_drvdata(&ofdev->dev, NULL);
156 kfree(new_bus->irq);
 157out_free_bus:
 158 free_mdio_bitbang(new_bus);
 159out_free_priv:
 160 kfree(bitbang);
161out:
162 return ret;
163}
164
165static int mdio_ofgpio_remove(struct of_device *ofdev)
166{
167 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
168 struct mdio_gpio_info *bitbang = bus->priv;
169
 170 mdiobus_unregister(bus);
 171 dev_set_drvdata(&ofdev->dev, NULL);
 172 kfree(bus->irq);
 173 free_mdio_bitbang(bus); /* also frees the bus itself */
 174 kfree(bitbang);
176
177 return 0;
178}
179
180static struct of_device_id mdio_ofgpio_match[] = {
181 {
182 .compatible = "virtual,mdio-gpio",
183 },
184 {},
185};
186
187static struct of_platform_driver mdio_ofgpio_driver = {
188 .name = "mdio-gpio",
189 .match_table = mdio_ofgpio_match,
190 .probe = mdio_ofgpio_probe,
191 .remove = mdio_ofgpio_remove,
192};
193
194static int mdio_ofgpio_init(void)
195{
196 return of_register_platform_driver(&mdio_ofgpio_driver);
197}
198
199static void mdio_ofgpio_exit(void)
200{
201 of_unregister_platform_driver(&mdio_ofgpio_driver);
202}
203
204module_init(mdio_ofgpio_init);
205module_exit(mdio_ofgpio_exit);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac3c01d28fdf..16a0e7de5888 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
207 207
208 return 0; 208 return 0;
209} 209}
210EXPORT_SYMBOL(get_phy_id);
210 211
211/** 212/**
212 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 213 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1f4ca2b54a73..c926bf0b190e 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -361,7 +361,7 @@ static int ppp_open(struct inode *inode, struct file *file)
361 return 0; 361 return 0;
362} 362}
363 363
364static int ppp_release(struct inode *inode, struct file *file) 364static int ppp_release(struct inode *unused, struct file *file)
365{ 365{
366 struct ppp_file *pf = file->private_data; 366 struct ppp_file *pf = file->private_data;
367 struct ppp *ppp; 367 struct ppp *ppp;
@@ -545,8 +545,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
545} 545}
546#endif /* CONFIG_PPP_FILTER */ 546#endif /* CONFIG_PPP_FILTER */
547 547
548static int ppp_ioctl(struct inode *inode, struct file *file, 548static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
549 unsigned int cmd, unsigned long arg)
550{ 549{
551 struct ppp_file *pf = file->private_data; 550 struct ppp_file *pf = file->private_data;
552 struct ppp *ppp; 551 struct ppp *ppp;
@@ -574,24 +573,29 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
574 * this fd and reopening /dev/ppp. 573 * this fd and reopening /dev/ppp.
575 */ 574 */
576 err = -EINVAL; 575 err = -EINVAL;
576 lock_kernel();
577 if (pf->kind == INTERFACE) { 577 if (pf->kind == INTERFACE) {
578 ppp = PF_TO_PPP(pf); 578 ppp = PF_TO_PPP(pf);
579 if (file == ppp->owner) 579 if (file == ppp->owner)
580 ppp_shutdown_interface(ppp); 580 ppp_shutdown_interface(ppp);
581 } 581 }
582 if (atomic_read(&file->f_count) <= 2) { 582 if (atomic_read(&file->f_count) <= 2) {
583 ppp_release(inode, file); 583 ppp_release(NULL, file);
584 err = 0; 584 err = 0;
585 } else 585 } else
586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
587 atomic_read(&file->f_count)); 587 atomic_read(&file->f_count));
588 unlock_kernel();
588 return err; 589 return err;
589 } 590 }
590 591
591 if (pf->kind == CHANNEL) { 592 if (pf->kind == CHANNEL) {
592 struct channel *pch = PF_TO_CHANNEL(pf); 593 struct channel *pch;
593 struct ppp_channel *chan; 594 struct ppp_channel *chan;
594 595
596 lock_kernel();
597 pch = PF_TO_CHANNEL(pf);
598
595 switch (cmd) { 599 switch (cmd) {
596 case PPPIOCCONNECT: 600 case PPPIOCCONNECT:
597 if (get_user(unit, p)) 601 if (get_user(unit, p))
@@ -611,6 +615,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
611 err = chan->ops->ioctl(chan, cmd, arg); 615 err = chan->ops->ioctl(chan, cmd, arg);
612 up_read(&pch->chan_sem); 616 up_read(&pch->chan_sem);
613 } 617 }
618 unlock_kernel();
614 return err; 619 return err;
615 } 620 }
616 621
@@ -620,6 +625,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
620 return -EINVAL; 625 return -EINVAL;
621 } 626 }
622 627
628 lock_kernel();
623 ppp = PF_TO_PPP(pf); 629 ppp = PF_TO_PPP(pf);
624 switch (cmd) { 630 switch (cmd) {
625 case PPPIOCSMRU: 631 case PPPIOCSMRU:
@@ -767,7 +773,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
767 default: 773 default:
768 err = -ENOTTY; 774 err = -ENOTTY;
769 } 775 }
770 776 unlock_kernel();
771 return err; 777 return err;
772} 778}
773 779
@@ -779,6 +785,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
779 struct channel *chan; 785 struct channel *chan;
780 int __user *p = (int __user *)arg; 786 int __user *p = (int __user *)arg;
781 787
788 lock_kernel();
782 switch (cmd) { 789 switch (cmd) {
783 case PPPIOCNEWUNIT: 790 case PPPIOCNEWUNIT:
784 /* Create a new ppp unit */ 791 /* Create a new ppp unit */
@@ -827,6 +834,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
827 default: 834 default:
828 err = -ENOTTY; 835 err = -ENOTTY;
829 } 836 }
837 unlock_kernel();
830 return err; 838 return err;
831} 839}
832 840
@@ -835,7 +843,7 @@ static const struct file_operations ppp_device_fops = {
835 .read = ppp_read, 843 .read = ppp_read,
836 .write = ppp_write, 844 .write = ppp_write,
837 .poll = ppp_poll, 845 .poll = ppp_poll,
838 .ioctl = ppp_ioctl, 846 .unlocked_ioctl = ppp_ioctl,
839 .open = ppp_open, 847 .open = ppp_open,
840 .release = ppp_release 848 .release = ppp_release
841}; 849};
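
The ppp_generic hunks above are a textbook push-down conversion from the old .ioctl file operation to .unlocked_ioctl: the inode parameter is dropped, the return type widens to long, and the Big Kernel Lock, formerly taken implicitly by the VFS, is now taken explicitly with lock_kernel()/unlock_kernel() around each section that still depends on it. A minimal sketch of the pattern, using hypothetical example_* names rather than the real PPP handlers:

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

static long example_do_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return -ENOTTY;		/* no commands handled in this sketch */
}

/* unlocked_ioctl: no inode argument, returns long, and the driver
 * itself is responsible for any BKL protection it still needs. */
static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	long err;

	lock_kernel();
	err = example_do_ioctl(file, cmd, arg);
	unlock_kernel();
	return err;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,	/* was .ioctl */
};
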
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 79359919335b..8db342f2fdc9 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -980,6 +980,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
980 __wsum csum = 0; 980 __wsum csum = 0;
981 struct udphdr *uh; 981 struct udphdr *uh;
982 unsigned int len; 982 unsigned int len;
983 int old_headroom;
984 int new_headroom;
983 985
984 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 986 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
985 goto abort; 987 goto abort;
@@ -1001,16 +1003,18 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1001 1003
1002 /* Check that there's enough headroom in the skb to insert IP, 1004 /* Check that there's enough headroom in the skb to insert IP,
1003 * UDP and L2TP and PPP headers. If not enough, expand it to 1005 * UDP and L2TP and PPP headers. If not enough, expand it to
1004 * make room. Note that a new skb (or a clone) is 1006 * make room. Adjust truesize.
1005 * allocated. If we return an error from this point on, make
1006 * sure we free the new skb but do not free the original skb
1007 * since that is done by the caller for the error case.
1008 */ 1007 */
1009 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1008 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1010 sizeof(struct udphdr) + hdr_len + sizeof(ppph); 1009 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
1010 old_headroom = skb_headroom(skb);
1011 if (skb_cow_head(skb, headroom)) 1011 if (skb_cow_head(skb, headroom))
1012 goto abort; 1012 goto abort;
1013 1013
1014 new_headroom = skb_headroom(skb);
1015 skb_orphan(skb);
1016 skb->truesize += new_headroom - old_headroom;
1017
1014 /* Setup PPP header */ 1018 /* Setup PPP header */
1015 __skb_push(skb, sizeof(ppph)); 1019 __skb_push(skb, sizeof(ppph));
1016 skb->data[0] = ppph[0]; 1020 skb->data[0] = ppph[0];
@@ -1065,7 +1069,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1065 /* Get routing info from the tunnel socket */ 1069 /* Get routing info from the tunnel socket */
1066 dst_release(skb->dst); 1070 dst_release(skb->dst);
1067 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1071 skb->dst = dst_clone(__sk_dst_get(sk_tun));
1068 skb_orphan(skb);
1069 skb->sk = sk_tun; 1072 skb->sk = sk_tun;
1070 1073
1071 /* Queue the packet to IP for output */ 1074 /* Queue the packet to IP for output */
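
The pppol2tp change fixes the skb accounting around header expansion: skb_cow_head() may reallocate the packet data to create headroom, so the skb is orphaned and its truesize grown by the added headroom immediately after the reallocation, before any headers are pushed (the later skb_orphan() call becomes redundant and is removed). The sequence in isolation, as a sketch with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Ensure "needed" bytes of headroom exist, charging any growth to
 * the skb itself rather than to the originating socket. */
static int example_expand_headroom(struct sk_buff *skb,
				   unsigned int needed)
{
	int old_headroom = skb_headroom(skb);

	if (skb_cow_head(skb, needed))	/* may reallocate skb->head */
		return -ENOMEM;

	skb_orphan(skb);	/* drop the old socket's accounting */
	skb->truesize += skb_headroom(skb) - old_headroom;
	return 0;
}
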
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 2109508c047a..f8274f8941ea 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -250,7 +250,7 @@ struct XENA_dev_config {
250 u64 tx_mat0_n[0x8]; 250 u64 tx_mat0_n[0x8];
251#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) 251#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
252 252
253 u8 unused_1[0x8]; 253 u64 xmsi_mask_reg;
254 u64 stat_byte_cnt; 254 u64 stat_byte_cnt;
255#define STAT_BC(n) vBIT(n,4,12) 255#define STAT_BC(n) vBIT(n,4,12)
256 256
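
Replacing the eight-byte unused_1 pad with u64 xmsi_mask_reg leaves the XENA_dev_config layout unchanged while naming the register the rest of this series uses to mask and unmask individual MSI-X vectors. The interrupt paths in the s2io.c hunks below touch one vector at a time by addressing a single byte inside that 64-bit register; roughly as follows (a sketch assuming the s2io driver headers, with the offset arithmetic and the 0x7f/0xff values taken from s2io_msix_ring_handle() below):

#include <linux/io.h>
#include <linux/types.h>
#include "s2io-regs.h"		/* struct XENA_dev_config */

/* Mask one rx ring's MSI-X vector by writing a single byte of
 * xmsi_mask_reg; ring 0 uses a different mask value, per the
 * handler below. */
static void example_mask_ring_vector(struct XENA_dev_config __iomem *bar0,
				     int ring_no)
{
	u8 __iomem *addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
	u8 val8 = (ring_no == 0) ? 0x7f : 0xff;

	writeb(val8, addr + (7 - ring_no));
	readb(addr + (7 - ring_no));	/* flush the posted write */
}
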
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 75bde6475832..dcc953e57ab1 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.23" 89#define DRV_VERSION "2.0.26.24"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1113 struct pci_dev *tdev = NULL; 1113 struct pci_dev *tdev = NULL;
1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116 if (tdev->bus == s2io_pdev->bus->parent) 1116 if (tdev->bus == s2io_pdev->bus->parent) {
1117 pci_dev_put(tdev); 1117 pci_dev_put(tdev);
1118 return 1; 1118 return 1;
1119 }
1119 } 1120 }
1120 } 1121 }
1121 return 0; 1122 return 0;
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link)
1219 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1220 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1220 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1221 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1221 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1222 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1222 1223 if (i == 0)
1223 if (use_continuous_tx_intrs && (link == LINK_UP)) 1224 if (use_continuous_tx_intrs && (link == LINK_UP))
1224 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1225 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1225 writeq(val64, &bar0->tti_data1_mem); 1226 writeq(val64, &bar0->tti_data1_mem);
1226 1227
1227 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1228 if (nic->config.intr_type == MSI_X) {
1228 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1229 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1229 TTI_DATA2_MEM_TX_UFC_C(0x40) | 1230 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1230 TTI_DATA2_MEM_TX_UFC_D(0x80); 1231 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1232 TTI_DATA2_MEM_TX_UFC_D(0x300);
1233 } else {
1234 if ((nic->config.tx_steering_type ==
1235 TX_DEFAULT_STEERING) &&
1236 (config->tx_fifo_num > 1) &&
1237 (i >= nic->udp_fifo_idx) &&
1238 (i < (nic->udp_fifo_idx +
1239 nic->total_udp_fifos)))
1240 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1241 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1242 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1243 TTI_DATA2_MEM_TX_UFC_D(0x120);
1244 else
1245 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1246 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1247 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1248 TTI_DATA2_MEM_TX_UFC_D(0x80);
1249 }
1231 1250
1232 writeq(val64, &bar0->tti_data2_mem); 1251 writeq(val64, &bar0->tti_data2_mem);
1233 1252
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp)
2813 } 2832 }
2814} 2833}
2815 2834
2835static int s2io_chk_rx_buffers(struct ring_info *ring)
2836{
2837 if (fill_rx_buffers(ring) == -ENOMEM) {
2838 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2839 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2840 }
2841 return 0;
2842}
2843
2816/** 2844/**
2817 * s2io_poll - Rx interrupt handler for NAPI support 2845 * s2io_poll - Rx interrupt handler for NAPI support
2818 * @napi : pointer to the napi structure. 2846 * @napi : pointer to the napi structure.
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp)
2826 * 0 on success and 1 if there are No Rx packets to be processed. 2854 * 0 on success and 1 if there are No Rx packets to be processed.
2827 */ 2855 */
2828 2856
2829static int s2io_poll(struct napi_struct *napi, int budget) 2857static int s2io_poll_msix(struct napi_struct *napi, int budget)
2830{ 2858{
2831 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2859 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2832 struct net_device *dev = nic->dev; 2860 struct net_device *dev = ring->dev;
2833 int pkt_cnt = 0, org_pkts_to_process;
2834 struct mac_info *mac_control;
2835 struct config_param *config; 2861 struct config_param *config;
2862 struct mac_info *mac_control;
2863 int pkts_processed = 0;
2864 u8 *addr = NULL, val8 = 0;
2865 struct s2io_nic *nic = dev->priv;
2836 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2837 int i; 2867 int budget_org = budget;
2838 2868
2839 mac_control = &nic->mac_control;
2840 config = &nic->config; 2869 config = &nic->config;
2870 mac_control = &nic->mac_control;
2841 2871
2842 nic->pkts_to_process = budget; 2872 if (unlikely(!is_s2io_card_up(nic)))
2843 org_pkts_to_process = nic->pkts_to_process; 2873 return 0;
2844 2874
2845 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 2875 pkts_processed = rx_intr_handler(ring, budget);
2846 readl(&bar0->rx_traffic_int); 2876 s2io_chk_rx_buffers(ring);
2847 2877
2848 for (i = 0; i < config->rx_ring_num; i++) { 2878 if (pkts_processed < budget_org) {
2849 rx_intr_handler(&mac_control->rings[i]); 2879 netif_rx_complete(dev, napi);
2850 pkt_cnt = org_pkts_to_process - nic->pkts_to_process; 2880 /*Re Enable MSI-Rx Vector*/
2851 if (!nic->pkts_to_process) { 2881 addr = (u8 *)&bar0->xmsi_mask_reg;
2852 /* Quota for the current iteration has been met */ 2882 addr += 7 - ring->ring_no;
2853 goto no_rx; 2883 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2854 } 2884 writeb(val8, addr);
2885 val8 = readb(addr);
2855 } 2886 }
2887 return pkts_processed;
2888}
2889static int s2io_poll_inta(struct napi_struct *napi, int budget)
2890{
2891 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2892 struct ring_info *ring;
2893 struct net_device *dev = nic->dev;
2894 struct config_param *config;
2895 struct mac_info *mac_control;
2896 int pkts_processed = 0;
2897 int ring_pkts_processed, i;
2898 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2899 int budget_org = budget;
2856 2900
2857 netif_rx_complete(dev, napi); 2901 config = &nic->config;
2902 mac_control = &nic->mac_control;
2858 2903
2859 for (i = 0; i < config->rx_ring_num; i++) { 2904 if (unlikely(!is_s2io_card_up(nic)))
2860 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2905 return 0;
2861 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2862 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2863 break;
2864 }
2865 }
2866 /* Re enable the Rx interrupts. */
2867 writeq(0x0, &bar0->rx_traffic_mask);
2868 readl(&bar0->rx_traffic_mask);
2869 return pkt_cnt;
2870 2906
2871no_rx:
2872 for (i = 0; i < config->rx_ring_num; i++) { 2907 for (i = 0; i < config->rx_ring_num; i++) {
2873 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2908 ring = &mac_control->rings[i];
2874 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2909 ring_pkts_processed = rx_intr_handler(ring, budget);
2875 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2910 s2io_chk_rx_buffers(ring);
2911 pkts_processed += ring_pkts_processed;
2912 budget -= ring_pkts_processed;
2913 if (budget <= 0)
2876 break; 2914 break;
2877 }
2878 } 2915 }
2879 return pkt_cnt; 2916 if (pkts_processed < budget_org) {
2917 netif_rx_complete(dev, napi);
2918 /* Re enable the Rx interrupts for the ring */
2919 writeq(0, &bar0->rx_traffic_mask);
2920 readl(&bar0->rx_traffic_mask);
2921 }
2922 return pkts_processed;
2880} 2923}
2881 2924
2882#ifdef CONFIG_NET_POLL_CONTROLLER 2925#ifdef CONFIG_NET_POLL_CONTROLLER
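
Both replacement poll routines above follow the standard NAPI contract: consume at most budget packets, and only when the ring drains below the budget call netif_rx_complete() and re-enable the interrupt source, so the hardware can schedule the context again. Reduced to its control flow (the example_* names are stand-ins for the real per-ring helpers):

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	struct net_device *dev;
};

static int example_rx_process(struct example_ring *ring, int budget)
{
	return 0;	/* stub: would reap up to "budget" packets */
}

static void example_unmask_irq(struct example_ring *ring)
{
	/* stub: would re-enable this ring's interrupt vector */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring =
		container_of(napi, struct example_ring, napi);
	int done = example_rx_process(ring, budget);

	if (done < budget) {
		/* Drained: leave polled mode and let the device
		 * interrupt us for the next burst. */
		netif_rx_complete(ring->dev, napi);
		example_unmask_irq(ring);
	}
	return done;
}
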
@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev)
2918 2961
2919 /* check for received packet and indicate up to network */ 2962 /* check for received packet and indicate up to network */
2920 for (i = 0; i < config->rx_ring_num; i++) 2963 for (i = 0; i < config->rx_ring_num; i++)
2921 rx_intr_handler(&mac_control->rings[i]); 2964 rx_intr_handler(&mac_control->rings[i], 0);
2922 2965
2923 for (i = 0; i < config->rx_ring_num; i++) { 2966 for (i = 0; i < config->rx_ring_num; i++) {
2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2967 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev)
2934 2977
2935/** 2978/**
2936 * rx_intr_handler - Rx interrupt handler 2979 * rx_intr_handler - Rx interrupt handler
2937 * @nic: device private variable. 2980 * @ring_info: per ring structure.
2981 * @budget: budget for napi processing.
2938 * Description: 2982 * Description:
2939 * If the interrupt is because of a received frame or if the 2983 * If the interrupt is because of a received frame or if the
2940 * receive ring contains fresh as yet un-processed frames,this function is 2984 * receive ring contains fresh as yet un-processed frames,this function is
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev)
2942 * stopped and sends the skb to the OSM's Rx handler and then increments 2986 * stopped and sends the skb to the OSM's Rx handler and then increments
2943 * the offset. 2987 * the offset.
2944 * Return Value: 2988 * Return Value:
2945 * NONE. 2989 * No. of napi packets processed.
2946 */ 2990 */
2947static void rx_intr_handler(struct ring_info *ring_data) 2991static int rx_intr_handler(struct ring_info *ring_data, int budget)
2948{ 2992{
2949 int get_block, put_block; 2993 int get_block, put_block;
2950 struct rx_curr_get_info get_info, put_info; 2994 struct rx_curr_get_info get_info, put_info;
2951 struct RxD_t *rxdp; 2995 struct RxD_t *rxdp;
2952 struct sk_buff *skb; 2996 struct sk_buff *skb;
2953 int pkt_cnt = 0; 2997 int pkt_cnt = 0, napi_pkts = 0;
2954 int i; 2998 int i;
2955 struct RxD1* rxdp1; 2999 struct RxD1* rxdp1;
2956 struct RxD3* rxdp3; 3000 struct RxD3* rxdp3;
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
2977 DBG_PRINT(ERR_DBG, "%s: The skb is ", 3021 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2978 ring_data->dev->name); 3022 ring_data->dev->name);
2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 3023 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2980 return; 3024 return 0;
2981 } 3025 }
2982 if (ring_data->rxd_mode == RXD_MODE_1) { 3026 if (ring_data->rxd_mode == RXD_MODE_1) {
2983 rxdp1 = (struct RxD1*)rxdp; 3027 rxdp1 = (struct RxD1*)rxdp;
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3058 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3015 } 3059 }
3016 3060
3017 if(ring_data->nic->config.napi){ 3061 if (ring_data->nic->config.napi) {
3018 ring_data->nic->pkts_to_process -= 1; 3062 budget--;
3019 if (!ring_data->nic->pkts_to_process) 3063 napi_pkts++;
3064 if (!budget)
3020 break; 3065 break;
3021 } 3066 }
3022 pkt_cnt++; 3067 pkt_cnt++;
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
3034 } 3079 }
3035 } 3080 }
3036 } 3081 }
3082 return(napi_pkts);
3037} 3083}
3038 3084
3039/** 3085/**
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic)
3730{ 3776{
3731 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3777 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3732 u64 val64; 3778 u64 val64;
3733 int i; 3779 int i, msix_index;
3780
3781
3782 if (nic->device_type == XFRAME_I_DEVICE)
3783 return;
3734 3784
3735 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3785 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3786 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3736 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3787 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3737 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3788 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3738 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); 3789 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3739 writeq(val64, &bar0->xmsi_access); 3790 writeq(val64, &bar0->xmsi_access);
3740 if (wait_for_msix_trans(nic, i)) { 3791 if (wait_for_msix_trans(nic, msix_index)) {
3741 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3792 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3742 continue; 3793 continue;
3743 } 3794 }
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic)
3748{ 3799{
3749 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3800 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3750 u64 val64, addr, data; 3801 u64 val64, addr, data;
3751 int i; 3802 int i, msix_index;
3803
3804 if (nic->device_type == XFRAME_I_DEVICE)
3805 return;
3752 3806
3753 /* Store and display */ 3807 /* Store and display */
3754 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3808 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3755 val64 = (s2BIT(15) | vBIT(i, 26, 6)); 3809 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3810 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3756 writeq(val64, &bar0->xmsi_access); 3811 writeq(val64, &bar0->xmsi_access);
3757 if (wait_for_msix_trans(nic, i)) { 3812 if (wait_for_msix_trans(nic, msix_index)) {
3758 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3813 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3759 continue; 3814 continue;
3760 } 3815 }
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic)
3770static int s2io_enable_msi_x(struct s2io_nic *nic) 3825static int s2io_enable_msi_x(struct s2io_nic *nic)
3771{ 3826{
3772 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3827 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3773 u64 tx_mat, rx_mat; 3828 u64 rx_mat;
3774 u16 msi_control; /* Temp variable */ 3829 u16 msi_control; /* Temp variable */
3775 int ret, i, j, msix_indx = 1; 3830 int ret, i, j, msix_indx = 1;
3776 3831
3777 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), 3832 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3778 GFP_KERNEL); 3833 GFP_KERNEL);
3779 if (!nic->entries) { 3834 if (!nic->entries) {
3780 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3835 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3783 return -ENOMEM; 3838 return -ENOMEM;
3784 } 3839 }
3785 nic->mac_control.stats_info->sw_stat.mem_allocated 3840 nic->mac_control.stats_info->sw_stat.mem_allocated
3786 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3841 += (nic->num_entries * sizeof(struct msix_entry));
3842
3843 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3787 3844
3788 nic->s2io_entries = 3845 nic->s2io_entries =
3789 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), 3846 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3790 GFP_KERNEL); 3847 GFP_KERNEL);
3791 if (!nic->s2io_entries) { 3848 if (!nic->s2io_entries) {
3792 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3849 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
@@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3851 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3795 kfree(nic->entries); 3852 kfree(nic->entries);
3796 nic->mac_control.stats_info->sw_stat.mem_freed 3853 nic->mac_control.stats_info->sw_stat.mem_freed
3797 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3854 += (nic->num_entries * sizeof(struct msix_entry));
3798 return -ENOMEM; 3855 return -ENOMEM;
3799 } 3856 }
3800 nic->mac_control.stats_info->sw_stat.mem_allocated 3857 nic->mac_control.stats_info->sw_stat.mem_allocated
3801 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3858 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3802 3859 memset(nic->s2io_entries, 0,
3803 for (i=0; i< MAX_REQUESTED_MSI_X; i++) { 3860 nic->num_entries * sizeof(struct s2io_msix_entry));
3804 nic->entries[i].entry = i; 3861
3805 nic->s2io_entries[i].entry = i; 3862 nic->entries[0].entry = 0;
3863 nic->s2io_entries[0].entry = 0;
3864 nic->s2io_entries[0].in_use = MSIX_FLG;
3865 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3866 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3867
3868 for (i = 1; i < nic->num_entries; i++) {
3869 nic->entries[i].entry = ((i - 1) * 8) + 1;
3870 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3806 nic->s2io_entries[i].arg = NULL; 3871 nic->s2io_entries[i].arg = NULL;
3807 nic->s2io_entries[i].in_use = 0; 3872 nic->s2io_entries[i].in_use = 0;
3808 } 3873 }
3809 3874
3810 tx_mat = readq(&bar0->tx_mat0_n[0]);
3811 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3812 tx_mat |= TX_MAT_SET(i, msix_indx);
3813 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3814 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3815 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3816 }
3817 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3818
3819 rx_mat = readq(&bar0->rx_mat); 3875 rx_mat = readq(&bar0->rx_mat);
3820 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { 3876 for (j = 0; j < nic->config.rx_ring_num; j++) {
3821 rx_mat |= RX_MAT_SET(j, msix_indx); 3877 rx_mat |= RX_MAT_SET(j, msix_indx);
3822 nic->s2io_entries[msix_indx].arg 3878 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3823 = &nic->mac_control.rings[j]; 3879 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3824 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; 3880 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3825 nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3881 msix_indx += 8;
3826 } 3882 }
3827 writeq(rx_mat, &bar0->rx_mat); 3883 writeq(rx_mat, &bar0->rx_mat);
3884 readq(&bar0->rx_mat);
3828 3885
3829 nic->avail_msix_vectors = 0; 3886 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3830 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3831 /* We fail init if error or we get less vectors than min required */ 3887 /* We fail init if error or we get less vectors than min required */
3832 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3833 nic->avail_msix_vectors = ret;
3834 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3835 }
3836 if (ret) { 3888 if (ret) {
3837 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3889 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3838 kfree(nic->entries); 3890 kfree(nic->entries);
3839 nic->mac_control.stats_info->sw_stat.mem_freed 3891 nic->mac_control.stats_info->sw_stat.mem_freed
3840 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3892 += (nic->num_entries * sizeof(struct msix_entry));
3841 kfree(nic->s2io_entries); 3893 kfree(nic->s2io_entries);
3842 nic->mac_control.stats_info->sw_stat.mem_freed 3894 nic->mac_control.stats_info->sw_stat.mem_freed
3843 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3895 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3844 nic->entries = NULL; 3896 nic->entries = NULL;
3845 nic->s2io_entries = NULL; 3897 nic->s2io_entries = NULL;
3846 nic->avail_msix_vectors = 0;
3847 return -ENOMEM; 3898 return -ENOMEM;
3848 } 3899 }
3849 if (!nic->avail_msix_vectors)
3850 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3851 3900
3852 /* 3901 /*
3853 * To enable MSI-X, MSI also needs to be enabled, due to a bug 3902 * To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
3919 int i; 3968 int i;
3920 u16 msi_control; 3969 u16 msi_control;
3921 3970
3922 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3971 for (i = 0; i < sp->num_entries; i++) {
3923 if (sp->s2io_entries[i].in_use == 3972 if (sp->s2io_entries[i].in_use ==
3924 MSIX_REGISTERED_SUCCESS) { 3973 MSIX_REGISTERED_SUCCESS) {
3925 int vector = sp->entries[i].vector; 3974 int vector = sp->entries[i].vector;
@@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev)
3975 netif_carrier_off(dev); 4024 netif_carrier_off(dev);
3976 sp->last_link_state = 0; 4025 sp->last_link_state = 0;
3977 4026
3978 if (sp->config.intr_type == MSI_X) {
3979 int ret = s2io_enable_msi_x(sp);
3980
3981 if (!ret) {
3982 ret = s2io_test_msi(sp);
3983 /* rollback MSI-X, will re-enable during add_isr() */
3984 remove_msix_isr(sp);
3985 }
3986 if (ret) {
3987
3988 DBG_PRINT(ERR_DBG,
3989 "%s: MSI-X requested but failed to enable\n",
3990 dev->name);
3991 sp->config.intr_type = INTA;
3992 }
3993 }
3994
3995 /* NAPI doesn't work well with MSI(X) */
3996 if (sp->config.intr_type != INTA) {
3997 if(sp->config.napi)
3998 sp->config.napi = 0;
3999 }
4000
4001 /* Initialize H/W and enable interrupts */ 4027 /* Initialize H/W and enable interrupts */
4002 err = s2io_card_up(sp); 4028 err = s2io_card_up(sp);
4003 if (err) { 4029 if (err) {
@@ -4020,12 +4046,12 @@ hw_init_failed:
4020 if (sp->entries) { 4046 if (sp->entries) {
4021 kfree(sp->entries); 4047 kfree(sp->entries);
4022 sp->mac_control.stats_info->sw_stat.mem_freed 4048 sp->mac_control.stats_info->sw_stat.mem_freed
4023 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 4049 += (sp->num_entries * sizeof(struct msix_entry));
4024 } 4050 }
4025 if (sp->s2io_entries) { 4051 if (sp->s2io_entries) {
4026 kfree(sp->s2io_entries); 4052 kfree(sp->s2io_entries);
4027 sp->mac_control.stats_info->sw_stat.mem_freed 4053 sp->mac_control.stats_info->sw_stat.mem_freed
4028 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 4054 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4029 } 4055 }
4030 } 4056 }
4031 return err; 4057 return err;
@@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data)
4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4353 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4328} 4354}
4329 4355
4330static int s2io_chk_rx_buffers(struct ring_info *ring)
4331{
4332 if (fill_rx_buffers(ring) == -ENOMEM) {
4333 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4334 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4335 }
4336 return 0;
4337}
4338
4339static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4356static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4340{ 4357{
4341 struct ring_info *ring = (struct ring_info *)dev_id; 4358 struct ring_info *ring = (struct ring_info *)dev_id;
4342 struct s2io_nic *sp = ring->nic; 4359 struct s2io_nic *sp = ring->nic;
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361 struct net_device *dev = sp->dev;
4343 4362
4344 if (!is_s2io_card_up(sp)) 4363 if (unlikely(!is_s2io_card_up(sp)))
4345 return IRQ_HANDLED; 4364 return IRQ_HANDLED;
4346 4365
4347 rx_intr_handler(ring); 4366 if (sp->config.napi) {
4348 s2io_chk_rx_buffers(ring); 4367 u8 *addr = NULL, val8 = 0;
4368
4369 addr = (u8 *)&bar0->xmsi_mask_reg;
4370 addr += (7 - ring->ring_no);
4371 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4372 writeb(val8, addr);
4373 val8 = readb(addr);
4374 netif_rx_schedule(dev, &ring->napi);
4375 } else {
4376 rx_intr_handler(ring, 0);
4377 s2io_chk_rx_buffers(ring);
4378 }
4349 4379
4350 return IRQ_HANDLED; 4380 return IRQ_HANDLED;
4351} 4381}
4352 4382
4353static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4383static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4354{ 4384{
4355 struct fifo_info *fifo = (struct fifo_info *)dev_id; 4385 int i;
4356 struct s2io_nic *sp = fifo->nic; 4386 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4387 struct s2io_nic *sp = fifos->nic;
4388 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4389 struct config_param *config = &sp->config;
4390 u64 reason;
4357 4391
4358 if (!is_s2io_card_up(sp)) 4392 if (unlikely(!is_s2io_card_up(sp)))
4393 return IRQ_NONE;
4394
4395 reason = readq(&bar0->general_int_status);
4396 if (unlikely(reason == S2IO_MINUS_ONE))
4397 /* Nothing much can be done. Get out */
4359 return IRQ_HANDLED; 4398 return IRQ_HANDLED;
4360 4399
4361 tx_intr_handler(fifo); 4400 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4401
4402 if (reason & GEN_INTR_TXTRAFFIC)
4403 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4404
4405 for (i = 0; i < config->tx_fifo_num; i++)
4406 tx_intr_handler(&fifos[i]);
4407
4408 writeq(sp->general_int_mask, &bar0->general_int_mask);
4409 readl(&bar0->general_int_status);
4410
4362 return IRQ_HANDLED; 4411 return IRQ_HANDLED;
4363} 4412}
4413
4364static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4414static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4365{ 4415{
4366 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4416 struct XENA_dev_config __iomem *bar0 = sp->bar0;
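
With one alarm/TX vector now shared by all fifos, the reworked s2io_msix_fifo_handle() above receives the base of the fifo array as dev_id: it latches general_int_status, bails out if the adapter reads back all-ones, masks further interrupts, services every configured tx fifo, then restores the saved mask. The same control flow with placeholder types and accessors (all example_* names are hypothetical):

#include <linux/interrupt.h>
#include <linux/types.h>

struct example_nic { int tx_fifo_num; };
struct example_fifo { struct example_nic *nic; };

static u64 example_read_status(struct example_nic *nic) { return 0; }
static void example_mask_all(struct example_nic *nic) { }
static void example_restore_mask(struct example_nic *nic) { }
static void example_tx_clean(struct example_fifo *fifo) { }

static irqreturn_t example_alarm_handler(int irq, void *dev_id)
{
	struct example_fifo *fifos = dev_id;	/* base of the array */
	struct example_nic *nic = fifos[0].nic;
	u64 reason = example_read_status(nic);
	int i;

	if (reason == (u64)~0ULL)
		return IRQ_HANDLED;	/* adapter is inaccessible */

	example_mask_all(nic);		/* quiesce while servicing */

	for (i = 0; i < nic->tx_fifo_num; i++)
		example_tx_clean(&fifos[i]);

	example_restore_mask(nic);	/* re-enable what was enabled */
	return IRQ_HANDLED;
}
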
@@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4762 4812
4763 if (config->napi) { 4813 if (config->napi) {
4764 if (reason & GEN_INTR_RXTRAFFIC) { 4814 if (reason & GEN_INTR_RXTRAFFIC) {
4765 if (likely(netif_rx_schedule_prep(dev, 4815 netif_rx_schedule(dev, &sp->napi);
4766 &sp->napi))) { 4816 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4767 __netif_rx_schedule(dev, &sp->napi); 4817 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4768 writeq(S2IO_MINUS_ONE, 4818 readl(&bar0->rx_traffic_int);
4769 &bar0->rx_traffic_mask);
4770 } else
4771 writeq(S2IO_MINUS_ONE,
4772 &bar0->rx_traffic_int);
4773 } 4819 }
4774 } else { 4820 } else {
4775 /* 4821 /*
@@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4781 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4827 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4782 4828
4783 for (i = 0; i < config->rx_ring_num; i++) 4829 for (i = 0; i < config->rx_ring_num; i++)
4784 rx_intr_handler(&mac_control->rings[i]); 4830 rx_intr_handler(&mac_control->rings[i], 0);
4785 } 4831 }
4786 4832
4787 /* 4833 /*
@@ -6951,7 +6997,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6951 &skb,(u64 *)&temp0_64, 6997 &skb,(u64 *)&temp0_64,
6952 (u64 *)&temp1_64, 6998 (u64 *)&temp1_64,
6953 (u64 *)&temp2_64, 6999 (u64 *)&temp2_64,
6954 size) == ENOMEM) { 7000 size) == -ENOMEM) {
6955 return 0; 7001 return 0;
6956 } 7002 }
6957 7003
@@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp)
6984 7030
6985 /* After proper initialization of H/W, register ISR */ 7031 /* After proper initialization of H/W, register ISR */
6986 if (sp->config.intr_type == MSI_X) { 7032 if (sp->config.intr_type == MSI_X) {
6987 int i, msix_tx_cnt=0,msix_rx_cnt=0; 7033 int i, msix_rx_cnt = 0;
6988 7034
6989 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 7035 for (i = 0; i < sp->num_entries; i++) {
6990 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 7036 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6991 sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7037 if (sp->s2io_entries[i].type ==
7038 MSIX_RING_TYPE) {
7039 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7040 dev->name, i);
7041 err = request_irq(sp->entries[i].vector,
7042 s2io_msix_ring_handle, 0,
7043 sp->desc[i],
7044 sp->s2io_entries[i].arg);
7045 } else if (sp->s2io_entries[i].type ==
7046 MSIX_ALARM_TYPE) {
7047 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6992 dev->name, i); 7048 dev->name, i);
6993 err = request_irq(sp->entries[i].vector, 7049 err = request_irq(sp->entries[i].vector,
6994 s2io_msix_fifo_handle, 0, sp->desc[i], 7050 s2io_msix_fifo_handle, 0,
6995 sp->s2io_entries[i].arg); 7051 sp->desc[i],
6996 /* If either data or addr is zero print it */ 7052 sp->s2io_entries[i].arg);
6997 if(!(sp->msix_info[i].addr && 7053
6998 sp->msix_info[i].data)) {
6999 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7000 "Data:0x%llx\n",sp->desc[i],
7001 (unsigned long long)
7002 sp->msix_info[i].addr,
7003 (unsigned long long)
7004 sp->msix_info[i].data);
7005 } else {
7006 msix_tx_cnt++;
7007 } 7054 }
7008 } else { 7055 /* if either data or addr is zero print it. */
7009 sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 7056 if (!(sp->msix_info[i].addr &&
7010 dev->name, i);
7011 err = request_irq(sp->entries[i].vector,
7012 s2io_msix_ring_handle, 0, sp->desc[i],
7013 sp->s2io_entries[i].arg);
7014 /* If either data or addr is zero print it */
7015 if(!(sp->msix_info[i].addr &&
7016 sp->msix_info[i].data)) { 7057 sp->msix_info[i].data)) {
7017 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7058 DBG_PRINT(ERR_DBG,
7018 "Data:0x%llx\n",sp->desc[i], 7059 "%s @Addr:0x%llx Data:0x%llx\n",
7060 sp->desc[i],
7019 (unsigned long long) 7061 (unsigned long long)
7020 sp->msix_info[i].addr, 7062 sp->msix_info[i].addr,
7021 (unsigned long long) 7063 (unsigned long long)
7022 sp->msix_info[i].data); 7064 ntohl(sp->msix_info[i].data));
7023 } else { 7065 } else
7024 msix_rx_cnt++; 7066 msix_rx_cnt++;
7067 if (err) {
7068 remove_msix_isr(sp);
7069
7070 DBG_PRINT(ERR_DBG,
7071 "%s:MSI-X-%d registration "
7072 "failed\n", dev->name, i);
7073
7074 DBG_PRINT(ERR_DBG,
7075 "%s: Defaulting to INTA\n",
7076 dev->name);
7077 sp->config.intr_type = INTA;
7078 break;
7025 } 7079 }
7080 sp->s2io_entries[i].in_use =
7081 MSIX_REGISTERED_SUCCESS;
7026 } 7082 }
7027 if (err) {
7028 remove_msix_isr(sp);
7029 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
7030 "failed\n", dev->name, i);
7031 DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
7032 dev->name);
7033 sp->config.intr_type = INTA;
7034 break;
7035 }
7036 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
7037 } 7083 }
7038 if (!err) { 7084 if (!err) {
7039 printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
7040 msix_tx_cnt);
7041 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 7085 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7042 msix_rx_cnt); 7086 --msix_rx_cnt);
7087 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7088 " through alarm vector\n");
7043 } 7089 }
7044 } 7090 }
7045 if (sp->config.intr_type == INTA) { 7091 if (sp->config.intr_type == INTA) {
@@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7080 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7126 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7081 7127
7082 /* Disable napi */ 7128 /* Disable napi */
7083 if (config->napi) 7129 if (sp->config.napi) {
7084 napi_disable(&sp->napi); 7130 int off = 0;
7131 if (config->intr_type == MSI_X) {
7132 for (; off < sp->config.rx_ring_num; off++)
7133 napi_disable(&sp->mac_control.rings[off].napi);
7134 }
7135 else
7136 napi_disable(&sp->napi);
7137 }
7085 7138
7086 /* disable Tx and Rx traffic on the NIC */ 7139 /* disable Tx and Rx traffic on the NIC */
7087 if (do_io) 7140 if (do_io)
@@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp)
7173 } 7226 }
7174 7227
7175 /* Initialise napi */ 7228 /* Initialise napi */
7176 if (config->napi) 7229 if (config->napi) {
7177 napi_enable(&sp->napi); 7230 int i;
7231 if (config->intr_type == MSI_X) {
7232 for (i = 0; i < sp->config.rx_ring_num; i++)
7233 napi_enable(&sp->mac_control.rings[i].napi);
7234 } else {
7235 napi_enable(&sp->napi);
7236 }
7237 }
7178 7238
7179 /* Maintain the state prior to the open */ 7239 /* Maintain the state prior to the open */
7180 if (sp->promisc_flg) 7240 if (sp->promisc_flg)
@@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp)
7217 /* Enable select interrupts */ 7277 /* Enable select interrupts */
7218 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7278 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7219 if (sp->config.intr_type != INTA) 7279 if (sp->config.intr_type != INTA)
7220 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); 7280 en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
7221 else { 7281 else {
7222 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7282 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7223 interruptible |= TX_PIC_INTR; 7283 interruptible |= TX_PIC_INTR;
@@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7615 rx_ring_num = MAX_RX_RINGS; 7675 rx_ring_num = MAX_RX_RINGS;
7616 } 7676 }
7617 7677
7618 if (*dev_intr_type != INTA)
7619 napi = 0;
7620
7621 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7678 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7622 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " 7679 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7623 "Defaulting to INTA\n"); 7680 "Defaulting to INTA\n");
@@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7918 * will use eth_mac_addr() for dev->set_mac_address 7975 * will use eth_mac_addr() for dev->set_mac_address
7919 * mac address will be set every time dev->open() is called 7976 * mac address will be set every time dev->open() is called
7920 */ 7977 */
7921 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7922
7923#ifdef CONFIG_NET_POLL_CONTROLLER 7978#ifdef CONFIG_NET_POLL_CONTROLLER
7924 dev->poll_controller = s2io_netpoll; 7979 dev->poll_controller = s2io_netpoll;
7925#endif 7980#endif
@@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7963 } 8018 }
7964 } 8019 }
7965 8020
8021 if (sp->config.intr_type == MSI_X) {
8022 sp->num_entries = config->rx_ring_num + 1;
8023 ret = s2io_enable_msi_x(sp);
8024
8025 if (!ret) {
8026 ret = s2io_test_msi(sp);
8027 /* rollback MSI-X, will re-enable during add_isr() */
8028 remove_msix_isr(sp);
8029 }
8030 if (ret) {
8031
8032 DBG_PRINT(ERR_DBG,
8033 "%s: MSI-X requested but failed to enable\n",
8034 dev->name);
8035 sp->config.intr_type = INTA;
8036 }
8037 }
8038
8039 if (config->intr_type == MSI_X) {
8040 for (i = 0; i < config->rx_ring_num ; i++)
8041 netif_napi_add(dev, &mac_control->rings[i].napi,
8042 s2io_poll_msix, 64);
8043 } else {
8044 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8045 }
8046
7966 /* Not needed for Herc */ 8047 /* Not needed for Herc */
7967 if (sp->device_type & XFRAME_I_DEVICE) { 8048 if (sp->device_type & XFRAME_I_DEVICE) {
7968 /* 8049 /*
@@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8013 /* store mac addresses from CAM to s2io_nic structure */ 8094 /* store mac addresses from CAM to s2io_nic structure */
8014 do_s2io_store_unicast_mc(sp); 8095 do_s2io_store_unicast_mc(sp);
8015 8096
8097 /* Configure MSIX vector for number of rings configured plus one */
8098 if ((sp->device_type == XFRAME_II_DEVICE) &&
8099 (config->intr_type == MSI_X))
8100 sp->num_entries = config->rx_ring_num + 1;
8101
8016 /* Store the values of the MSIX table in the s2io_nic structure */ 8102 /* Store the values of the MSIX table in the s2io_nic structure */
8017 store_xmsi_data(sp); 8103 store_xmsi_data(sp);
8018 /* reset Nic and bring it to known state */ 8104 /* reset Nic and bring it to known state */
@@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8078 break; 8164 break;
8079 } 8165 }
8080 8166
8081 if (napi) 8167 switch (sp->config.napi) {
8168 case 0:
8169 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8170 break;
8171 case 1:
8082 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8172 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8173 break;
8174 }
8083 8175
8084 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8176 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8085 sp->config.tx_fifo_num); 8177 sp->config.tx_fifo_num);
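
At probe time the same split shows up in NAPI registration: under MSI-X each rx ring gets its own napi_struct bound to s2io_poll_msix(), while INTA keeps a single device-wide context on s2io_poll_inta(), both with weight 64. The pairing in outline (hypothetical example_* names, stub poll routines):

#include <linux/netdevice.h>

struct example_ring { struct napi_struct napi; };

struct example_nic {
	struct napi_struct napi;	/* INTA: one shared context */
	struct example_ring *rings;	/* MSI-X: one context per ring */
	int rx_ring_num;
	int use_msix;
};

static int example_poll_msix(struct napi_struct *napi, int budget)
{ return 0; }	/* stub per-ring poll */

static int example_poll_inta(struct napi_struct *napi, int budget)
{ return 0; }	/* stub device-wide poll */

static void example_register_napi(struct net_device *dev,
				  struct example_nic *nic)
{
	int i;

	if (nic->use_msix)
		for (i = 0; i < nic->rx_ring_num; i++)
			netif_napi_add(dev, &nic->rings[i].napi,
				       example_poll_msix, 64);
	else
		netif_napi_add(dev, &nic->napi, example_poll_inta, 64);
}
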
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 58229dcbf5ec..d0a84ba887a5 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -706,7 +706,7 @@ struct ring_info {
706 /* per-ring buffer counter */ 706 /* per-ring buffer counter */
707 u32 rx_bufs_left; 707 u32 rx_bufs_left;
708 708
709 #define MAX_LRO_SESSIONS 32 709#define MAX_LRO_SESSIONS 32
710 struct lro lro0_n[MAX_LRO_SESSIONS]; 710 struct lro lro0_n[MAX_LRO_SESSIONS];
711 u8 lro; 711 u8 lro;
712 712
@@ -725,6 +725,11 @@ struct ring_info {
725 /* copy of sp->pdev pointer */ 725 /* copy of sp->pdev pointer */
726 struct pci_dev *pdev; 726 struct pci_dev *pdev;
727 727
728 /* Per ring napi struct */
729 struct napi_struct napi;
730
731 unsigned long interrupt_count;
732
728 /* 733 /*
729 * Place holders for the virtual and physical addresses of 734 * Place holders for the virtual and physical addresses of
730 * all the Rx Blocks 735 * all the Rx Blocks
@@ -841,7 +846,7 @@ struct usr_addr {
841 * Structure to keep track of the MSI-X vectors and the corresponding 846 * Structure to keep track of the MSI-X vectors and the corresponding
842 * argument registered against each vector 847 * argument registered against each vector
843 */ 848 */
844#define MAX_REQUESTED_MSI_X 17 849#define MAX_REQUESTED_MSI_X 9
845struct s2io_msix_entry 850struct s2io_msix_entry
846{ 851{
847 u16 vector; 852 u16 vector;
@@ -849,8 +854,8 @@ struct s2io_msix_entry
849 void *arg; 854 void *arg;
850 855
851 u8 type; 856 u8 type;
852#define MSIX_FIFO_TYPE 1 857#define MSIX_ALARM_TYPE 1
853#define MSIX_RING_TYPE 2 858#define MSIX_RING_TYPE 2
854 859
855 u8 in_use; 860 u8 in_use;
856#define MSIX_REGISTERED_SUCCESS 0xAA 861#define MSIX_REGISTERED_SUCCESS 0xAA
@@ -877,7 +882,6 @@ struct s2io_nic {
877 */ 882 */
878 int pkts_to_process; 883 int pkts_to_process;
879 struct net_device *dev; 884 struct net_device *dev;
880 struct napi_struct napi;
881 struct mac_info mac_control; 885 struct mac_info mac_control;
882 struct config_param config; 886 struct config_param config;
883 struct pci_dev *pdev; 887 struct pci_dev *pdev;
@@ -948,6 +952,7 @@ struct s2io_nic {
948 */ 952 */
949 u8 other_fifo_idx; 953 u8 other_fifo_idx;
950 954
955 struct napi_struct napi;
951 /* after blink, the adapter must be restored with original 956 /* after blink, the adapter must be restored with original
952 * values. 957 * values.
953 */ 958 */
@@ -962,6 +967,7 @@ struct s2io_nic {
962 unsigned long long start_time; 967 unsigned long long start_time;
963 struct vlan_group *vlgrp; 968 struct vlan_group *vlgrp;
964#define MSIX_FLG 0xA5 969#define MSIX_FLG 0xA5
970 int num_entries;
965 struct msix_entry *entries; 971 struct msix_entry *entries;
966 int msi_detected; 972 int msi_detected;
967 wait_queue_head_t msi_wait; 973 wait_queue_head_t msi_wait;
@@ -982,6 +988,7 @@ struct s2io_nic {
982 u16 lro_max_aggr_per_sess; 988 u16 lro_max_aggr_per_sess;
983 volatile unsigned long state; 989 volatile unsigned long state;
984 u64 general_int_mask; 990 u64 general_int_mask;
991
985#define VPD_STRING_LEN 80 992#define VPD_STRING_LEN 80
986 u8 product_name[VPD_STRING_LEN]; 993 u8 product_name[VPD_STRING_LEN];
987 u8 serial_num[VPD_STRING_LEN]; 994 u8 serial_num[VPD_STRING_LEN];
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
1103static int init_shared_mem(struct s2io_nic *sp); 1110static int init_shared_mem(struct s2io_nic *sp);
1104static void free_shared_mem(struct s2io_nic *sp); 1111static void free_shared_mem(struct s2io_nic *sp);
1105static int init_nic(struct s2io_nic *nic); 1112static int init_nic(struct s2io_nic *nic);
1106static void rx_intr_handler(struct ring_info *ring_data); 1113static int rx_intr_handler(struct ring_info *ring_data, int budget);
1107static void tx_intr_handler(struct fifo_info *fifo_data); 1114static void tx_intr_handler(struct fifo_info *fifo_data);
1108static void s2io_handle_errors(void * dev_id); 1115static void s2io_handle_errors(void * dev_id);
1109 1116
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev);
1114static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1121static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
1115static void s2io_link(struct s2io_nic * sp, int link); 1122static void s2io_link(struct s2io_nic * sp, int link);
1116static void s2io_reset(struct s2io_nic * sp); 1123static void s2io_reset(struct s2io_nic * sp);
1117static int s2io_poll(struct napi_struct *napi, int budget); 1124static int s2io_poll_msix(struct napi_struct *napi, int budget);
1125static int s2io_poll_inta(struct napi_struct *napi, int budget);
1118static void s2io_init_pci(struct s2io_nic * sp); 1126static void s2io_init_pci(struct s2io_nic * sp);
1119static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); 1127static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
1120static void s2io_alarm_handle(unsigned long data); 1128static void s2io_alarm_handle(unsigned long data);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 295199025173..fe41e4ec21ec 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -179,8 +179,7 @@ enum sbmac_state {
179#define SBMAC_MAX_TXDESCR 256 179#define SBMAC_MAX_TXDESCR 256
180#define SBMAC_MAX_RXDESCR 256 180#define SBMAC_MAX_RXDESCR 256
181 181
182#define ETHER_ALIGN 2 182#define ETHER_ADDR_LEN 6
183#define ETHER_ADDR_LEN 6
184#define ENET_PACKET_SIZE 1518 183#define ENET_PACKET_SIZE 1518
185/*#define ENET_PACKET_SIZE 9216 */ 184/*#define ENET_PACKET_SIZE 9216 */
186 185
@@ -262,8 +261,6 @@ struct sbmac_softc {
262 spinlock_t sbm_lock; /* spin lock */ 261 spinlock_t sbm_lock; /* spin lock */
263 int sbm_devflags; /* current device flags */ 262 int sbm_devflags; /* current device flags */
264 263
265 int sbm_buffersize;
266
267 /* 264 /*
268 * Controller-specific things 265 * Controller-specific things
269 */ 266 */
@@ -305,10 +302,11 @@ struct sbmac_softc {
305static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, 302static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
306 int txrx, int maxdescr); 303 int txrx, int maxdescr);
307static void sbdma_channel_start(struct sbmacdma *d, int rxtx); 304static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
308static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); 305static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
306 struct sk_buff *m);
309static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); 307static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
310static void sbdma_emptyring(struct sbmacdma *d); 308static void sbdma_emptyring(struct sbmacdma *d);
311static void sbdma_fillring(struct sbmacdma *d); 309static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
312static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, 310static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
313 int work_to_do, int poll); 311 int work_to_do, int poll);
314static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, 312static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
777 d->sbdma_remptr = NULL; 775 d->sbdma_remptr = NULL;
778} 776}
779 777
780static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) 778static inline void sbdma_align_skb(struct sk_buff *skb,
779 unsigned int power2, unsigned int offset)
781{ 780{
782 unsigned long addr; 781 unsigned char *addr = skb->data;
783 unsigned long newaddr; 782 unsigned char *newaddr = PTR_ALIGN(addr, power2);
784
785 addr = (unsigned long) skb->data;
786
787 newaddr = (addr + power2 - 1) & ~(power2 - 1);
788 783
789 skb_reserve(skb,newaddr-addr+offset); 784 skb_reserve(skb, newaddr - addr + offset);
790} 785}
791 786
792 787
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
797 * this queues a buffer for inbound packets. 792 * this queues a buffer for inbound packets.
798 * 793 *
799 * Input parameters: 794 * Input parameters:
800 * d - DMA channel descriptor 795 * sc - softc structure
796 * d - DMA channel descriptor
801 * sb - sk_buff to add, or NULL if we should allocate one 797 * sb - sk_buff to add, or NULL if we should allocate one
802 * 798 *
803 * Return value: 799 * Return value:
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
806 ********************************************************************* */ 802 ********************************************************************* */
807 803
808 804
809static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) 805static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
806 struct sk_buff *sb)
810{ 807{
808 struct net_device *dev = sc->sbm_dev;
811 struct sbdmadscr *dsc; 809 struct sbdmadscr *dsc;
812 struct sbdmadscr *nextdsc; 810 struct sbdmadscr *nextdsc;
813 struct sk_buff *sb_new = NULL; 811 struct sk_buff *sb_new = NULL;
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
848 */ 846 */
849 847
850 if (sb == NULL) { 848 if (sb == NULL) {
851 sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); 849 sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
850 SMP_CACHE_BYTES * 2 +
851 NET_IP_ALIGN);
852 if (sb_new == NULL) { 852 if (sb_new == NULL) {
853 pr_info("%s: sk_buff allocation failed\n", 853 pr_info("%s: sk_buff allocation failed\n",
854 d->sbdma_eth->sbm_dev->name); 854 d->sbdma_eth->sbm_dev->name);
855 return -ENOBUFS; 855 return -ENOBUFS;
856 } 856 }
857 857
858 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); 858 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
859 } 859 }
860 else { 860 else {
861 sb_new = sb; 861 sb_new = sb;
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
874 * Do not interrupt per DMA transfer. 874 * Do not interrupt per DMA transfer.
875 */ 875 */
876 dsc->dscr_a = virt_to_phys(sb_new->data) | 876 dsc->dscr_a = virt_to_phys(sb_new->data) |
877 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; 877 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
878#else 878#else
879 dsc->dscr_a = virt_to_phys(sb_new->data) | 879 dsc->dscr_a = virt_to_phys(sb_new->data) |
880 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 880 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
881 M_DMA_DSCRA_INTERRUPT; 881 M_DMA_DSCRA_INTERRUPT;
882#endif 882#endif
883 883
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
1032 * with sk_buffs 1032 * with sk_buffs
1033 * 1033 *
1034 * Input parameters: 1034 * Input parameters:
1035 * d - DMA channel 1035 * sc - softc structure
1036 * d - DMA channel
1036 * 1037 *
1037 * Return value: 1038 * Return value:
1038 * nothing 1039 * nothing
1039 ********************************************************************* */ 1040 ********************************************************************* */
1040 1041
1041static void sbdma_fillring(struct sbmacdma *d) 1042static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
1042{ 1043{
1043 int idx; 1044 int idx;
1044 1045
1045 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { 1046 for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
1046 if (sbdma_add_rcvbuffer(d,NULL) != 0) 1047 if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
1047 break; 1048 break;
1048 } 1049 }
1049} 1050}
@@ -1159,10 +1160,11 @@ again:
1159 * packet and put it right back on the receive ring. 1160 * packet and put it right back on the receive ring.
1160 */ 1161 */
1161 1162
1162 if (unlikely (sbdma_add_rcvbuffer(d,NULL) == 1163 if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
1163 -ENOBUFS)) { 1164 -ENOBUFS)) {
1164 dev->stats.rx_dropped++; 1165 dev->stats.rx_dropped++;
1165 sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ 1166 /* Re-add old buffer */
1167 sbdma_add_rcvbuffer(sc, d, sb);
1166 /* No point in continuing at the moment */ 1168 /* No point in continuing at the moment */
1167 printk(KERN_ERR "dropped packet (1)\n"); 1169 printk(KERN_ERR "dropped packet (1)\n");
1168 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); 1170 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
@@ -1212,7 +1214,7 @@ again:
1212 * put it back on the receive ring. 1214 * put it back on the receive ring.
1213 */ 1215 */
1214 dev->stats.rx_errors++; 1216 dev->stats.rx_errors++;
1215 sbdma_add_rcvbuffer(d,sb); 1217 sbdma_add_rcvbuffer(sc, d, sb);
1216 } 1218 }
1217 1219
1218 1220
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1570 * Fill the receive ring 1572 * Fill the receive ring
1571 */ 1573 */
1572 1574
1573 sbdma_fillring(&(s->sbm_rxdma)); 1575 sbdma_fillring(s, &(s->sbm_rxdma));
1574 1576
1575 /* 1577 /*
1576 * Turn on the rest of the bits in the enable register 1578 * Turn on the rest of the bits in the enable register
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2312 dev->dev_addr[i] = eaddr[i]; 2314 dev->dev_addr[i] = eaddr[i];
2313 } 2315 }
2314 2316
2315
2316 /*
2317 * Init packet size
2318 */
2319
2320 sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
2321
2322 /* 2317 /*
2323 * Initialize context (get pointers to registers and stuff), then 2318 * Initialize context (get pointers to registers and stuff), then
2324 * allocate the memory for the descriptor tables. 2319 * allocate the memory for the descriptor tables.
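
The sb1250 buffer changes replace the open-coded align-up arithmetic with PTR_ALIGN(), the driver-private ETHER_ALIGN constant with the generic NET_IP_ALIGN, and dev_alloc_skb() with netdev_alloc_skb() so receive buffers are associated with their device. Combined, the rx allocation reduces to something like the following sketch (example_alloc_rx_skb is a hypothetical name; ENET_PACKET_SIZE is the driver's 1518-byte constant):

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define ENET_PACKET_SIZE 1518

static struct sk_buff *example_alloc_rx_skb(struct net_device *dev)
{
	struct sk_buff *skb;

	/* Over-allocate so the data pointer can be slid up to a
	 * cache-line boundary plus NET_IP_ALIGN. */
	skb = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
				    SMP_CACHE_BYTES * 2 + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
			 - skb->data + NET_IP_ALIGN);
	return skb;
}
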
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index f64a860029b7..b4b63805ee8f 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
953 unsigned entry; 953 unsigned entry;
954 u32 tx_status; 954 u32 tx_status;
955 955
956 if (skb_padto(skb, ETH_ZLEN))
957 return NETDEV_TX_OK;
958
959 if (unlikely(skb->len > TX_BUF_SIZE)) { 956 if (unlikely(skb->len > TX_BUF_SIZE)) {
960 dev->stats.tx_dropped++; 957 dev->stats.tx_dropped++;
961 goto out; 958 goto out;
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
976 973
977 len = skb->len; 974 len = skb->len;
975 if (unlikely(len < ETH_ZLEN)) {
976 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
977 0, ETH_ZLEN - len);
978 len = ETH_ZLEN;
979 }
978 980
979 wmb(); 981 wmb();
980 982
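
The sc92031 transmit path always memcpy()s the frame into a fixed per-descriptor TX buffer, so instead of calling skb_padto() up front (which may reallocate the skb), the patch zero-fills the tail of the copy buffer up to ETH_ZLEN. The padding step in isolation, as a sketch with a hypothetical helper name:

#include <linux/if_ether.h>	/* ETH_ZLEN */
#include <linux/string.h>
#include <linux/types.h>

/* Pad a frame already copied into tx_buf; returns the length to
 * hand to the hardware. */
static unsigned int example_pad_copied_frame(u8 *tx_buf,
					     unsigned int len)
{
	if (len < ETH_ZLEN) {
		memset(tx_buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}
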
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index dbad95c295bd..3be13b592b4d 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -4,6 +4,8 @@ config SFC
4 select MII 4 select MII
5 select INET_LRO 5 select INET_LRO
6 select CRC32 6 select CRC32
7 select I2C
8 select I2C_ALGOBIT
7 help 9 help
8 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10-gigabit Ethernet cards based on
9 the Solarflare Communications Solarstorm SFC4000 controller. 11 the Solarflare Communications Solarstorm SFC4000 controller.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1d2daeec7ac1..c8f5704c8fb1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o selftest.o ethtool.o xfp_phy.o \ 2 selftest.o ethtool.o xfp_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o boards.o sfe4001.o
4 4
5obj-$(CONFIG_SFC) += sfc.o 5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..2c79d27404e0 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
483#endif 483#endif
484 484
485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ 485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
486 if (FALCON_REV(efx) >= FALCON_REV_B0) { \ 486 if (falcon_rev(efx) >= FALCON_REV_B0) { \
487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ 487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
488 } else { \ 488 } else { \
489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ 489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
491} while (0) 491} while (0)
492 492
493#define EFX_QWORD_FIELD_VER(efx, qword, field) \ 493#define EFX_QWORD_FIELD_VER(efx, qword, field) \
494 (FALCON_REV(efx) >= FALCON_REV_B0 ? \ 494 (falcon_rev(efx) >= FALCON_REV_B0 ? \
495 EFX_QWORD_FIELD((qword), field##_B0) : \ 495 EFX_QWORD_FIELD((qword), field##_B0) : \
496 EFX_QWORD_FIELD((qword), field##_A1)) 496 EFX_QWORD_FIELD((qword), field##_A1))
497 497
@@ -501,8 +501,5 @@ typedef union efx_oword {
501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) 501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
502#define EFX_DMA_TYPE_WIDTH(width) \ 502#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
505 ~((u64) 0) : ~((u32) 0))
506#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
507 504
508#endif /* EFX_BITFIELD_H */ 505#endif /* EFX_BITFIELD_H */
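The EFX_DMA_MAX_MASK/EFX_DMA_MASK helpers deleted here existed only to clamp a 64-bit address-mask constant to the width of the platform's dma_addr_t; the generic DMA_BIT_MASK() macro (used for FALCON_DMA_MASK in falcon.c below) makes them redundant. For reference, <linux/dma-mapping.h> defines it along these lines:

	/* Mask of the low n address bits; n == 64 is special-cased
	 * because shifting a 64-bit value by 64 is undefined in C.
	 */
	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))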
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index eecaa6d58584..d3d3dd0a1170 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
27 struct efx_blinker *bl = &efx->board_info.blinker; 27 struct efx_blinker *bl = &efx->board_info.blinker;
28 efx->board_info.set_fault_led(efx, bl->state); 28 efx->board_info.set_fault_led(efx, bl->state);
29 bl->state = !bl->state; 29 bl->state = !bl->state;
30 if (bl->resubmit) { 30 if (bl->resubmit)
31 bl->timer.expires = jiffies + BLINK_INTERVAL; 31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
32 add_timer(&bl->timer);
33 }
34} 32}
35 33
36static void board_blink(struct efx_nic *efx, int blink) 34static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
44 blinker->state = 0; 42 blinker->state = 0;
45 setup_timer(&blinker->timer, blink_led_timer, 43 setup_timer(&blinker->timer, blink_led_timer,
46 (unsigned long)efx); 44 (unsigned long)efx);
47 blinker->timer.expires = jiffies + BLINK_INTERVAL; 45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
48 add_timer(&blinker->timer);
49 } else { 46 } else {
50 blinker->resubmit = 0; 47 blinker->resubmit = 0;
51 if (blinker->timer.function) 48 if (blinker->timer.function)
@@ -112,7 +109,7 @@ static struct efx_board_data board_data[] = {
112 [EFX_BOARD_INVALID] = 109 [EFX_BOARD_INVALID] =
113 {NULL, NULL, dummy_init}, 110 {NULL, NULL, dummy_init},
114 [EFX_BOARD_SFE4001] = 111 [EFX_BOARD_SFE4001] =
115 {"SFE4001", "10GBASE-T adapter", sfe4001_poweron}, 112 {"SFE4001", "10GBASE-T adapter", sfe4001_init},
116 [EFX_BOARD_SFE4002] = 113 [EFX_BOARD_SFE4002] =
117 {"SFE4002", "XFP adapter", sfe4002_init}, 114 {"SFE4002", "XFP adapter", sfe4002_init},
118}; 115};
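The blink-timer rework replaces the open-coded expires + add_timer() sequence with mod_timer(). The two are not interchangeable in general: add_timer() must only be called on an inactive timer, while mod_timer() atomically re-arms a timer that may still be pending. Sketched side by side:

	/* Re-arming a timer: */
	bl->timer.expires = jiffies + BLINK_INTERVAL;
	add_timer(&bl->timer);		/* BUGs if the timer is already pending */

	/* is better written as the single call: */
	mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);	/* safe either way */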
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index 695764dc2e64..e5e844359ce7 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -20,8 +20,7 @@ enum efx_board_type {
20}; 20};
21 21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx); 23extern int sfe4001_init(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx);
25/* Are we putting the PHY into flash config mode */ 24/* Are we putting the PHY into flash config mode */
26extern unsigned int sfe4001_phy_flash_cfg; 25extern unsigned int sfe4001_phy_flash_cfg;
27 26
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 418f2e53a95b..74265d8553b8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
199 */ 199 */
200static inline void efx_channel_processed(struct efx_channel *channel) 200static inline void efx_channel_processed(struct efx_channel *channel)
201{ 201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race 202 /* The interrupt handler for this channel may set work_pending
203 * with finishing processing, a new interrupt will be raised. 203 * as soon as we acknowledge the events we've seen. Make sure
204 */ 204 * it's cleared before then. */
205 channel->work_pending = 0; 205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */ 206 smp_wmb();
207
207 falcon_eventq_read_ack(channel); 208 falcon_eventq_read_ack(channel);
208} 209}
209 210
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
265 napi_disable(&channel->napi_str); 266 napi_disable(&channel->napi_str);
266 267
267 /* Poll the channel */ 268 /* Poll the channel */
268 (void) efx_process_channel(channel, efx->type->evq_size); 269 efx_process_channel(channel, efx->type->evq_size);
269 270
270 /* Ack the eventq. This may cause an interrupt to be generated 271 /* Ack the eventq. This may cause an interrupt to be generated
271 * when they are reenabled */ 272 * when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
317 * 318 *
318 *************************************************************************/ 319 *************************************************************************/
319 320
320/* Setup per-NIC RX buffer parameters.
321 * Calculate the rx buffer allocation parameters required to support
322 * the current MTU, including padding for header alignment and overruns.
323 */
324static void efx_calc_rx_buffer_params(struct efx_nic *efx)
325{
326 unsigned int order, len;
327
328 len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
329 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
330 efx->type->rx_buffer_padding);
331
332 /* Calculate page-order */
333 for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
334 ;
335
336 efx->rx_buffer_len = len;
337 efx->rx_buffer_order = order;
338}
339
340static int efx_probe_channel(struct efx_channel *channel) 321static int efx_probe_channel(struct efx_channel *channel)
341{ 322{
342 struct efx_tx_queue *tx_queue; 323 struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
387 struct efx_channel *channel; 368 struct efx_channel *channel;
388 int rc = 0; 369 int rc = 0;
389 370
390 efx_calc_rx_buffer_params(efx); 371 /* Calculate the rx buffer allocation parameters required to
372 * support the current MTU, including padding for header
373 * alignment and overruns.
374 */
375 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
376 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
377 efx->type->rx_buffer_padding);
378 efx->rx_buffer_order = get_order(efx->rx_buffer_len);
391 379
392 /* Initialise the channels */ 380 /* Initialise the channels */
393 efx_for_each_channel(channel, efx) { 381 efx_for_each_channel(channel, efx) {
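Folding efx_calc_rx_buffer_params() into its only caller also replaces the hand-rolled page-order loop with the standard get_order() helper from <asm/page.h>. The two are equivalent:

	/* get_order(len) returns the smallest order such that
	 * (PAGE_SIZE << order) >= len -- exactly what the removed loop
	 *
	 *	for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
	 *		;
	 *
	 * computed. */
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);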
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
440 netif_napi_add(channel->napi_dev, &channel->napi_str, 428 netif_napi_add(channel->napi_dev, &channel->napi_str,
441 efx_poll, napi_weight); 429 efx_poll, napi_weight);
442 430
431 /* The interrupt handler for this channel may set work_pending
432 * as soon as we enable it. Make sure it's cleared before
433 * then. Similarly, make sure it sees the enabled flag set. */
443 channel->work_pending = 0; 434 channel->work_pending = 0;
444 channel->enabled = 1; 435 channel->enabled = 1;
445 smp_wmb(); /* ensure channel updated before first interrupt */ 436 smp_wmb();
446 437
447 napi_enable(&channel->napi_str); 438 napi_enable(&channel->napi_str);
448 439
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
704 mutex_unlock(&efx->mac_lock); 695 mutex_unlock(&efx->mac_lock);
705 696
706 /* Serialise against efx_set_multicast_list() */ 697 /* Serialise against efx_set_multicast_list() */
707 if (NET_DEV_REGISTERED(efx)) { 698 if (efx_dev_registered(efx)) {
708 netif_tx_lock_bh(efx->net_dev); 699 netif_tx_lock_bh(efx->net_dev);
709 netif_tx_unlock_bh(efx->net_dev); 700 netif_tx_unlock_bh(efx->net_dev);
710 } 701 }
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
791 efx->membase = ioremap_nocache(efx->membase_phys, 782 efx->membase = ioremap_nocache(efx->membase_phys,
792 efx->type->mem_map_size); 783 efx->type->mem_map_size);
793 if (!efx->membase) { 784 if (!efx->membase) {
794 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", 785 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
795 efx->type->mem_bar, efx->membase_phys, 786 efx->type->mem_bar,
787 (unsigned long long)efx->membase_phys,
796 efx->type->mem_map_size); 788 efx->type->mem_map_size);
797 rc = -ENOMEM; 789 rc = -ENOMEM;
798 goto fail4; 790 goto fail4;
799 } 791 }
800 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", 792 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
801 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, 793 efx->type->mem_bar, (unsigned long long)efx->membase_phys,
802 efx->membase); 794 efx->type->mem_map_size, efx->membase);
803 795
804 return 0; 796 return 0;
805 797
806 fail4: 798 fail4:
807 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 799 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
808 fail3: 800 fail3:
809 efx->membase_phys = 0UL; 801 efx->membase_phys = 0;
810 fail2: 802 fail2:
811 pci_disable_device(efx->pci_dev); 803 pci_disable_device(efx->pci_dev);
812 fail1: 804 fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
824 816
825 if (efx->membase_phys) { 817 if (efx->membase_phys) {
826 pci_release_region(efx->pci_dev, efx->type->mem_bar); 818 pci_release_region(efx->pci_dev, efx->type->mem_bar);
827 efx->membase_phys = 0UL; 819 efx->membase_phys = 0;
828 } 820 }
829 821
830 pci_disable_device(efx->pci_dev); 822 pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
1043 return; 1035 return;
1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1036 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1045 return; 1037 return;
1046 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) 1038 if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
1047 return; 1039 return;
1048 1040
1049 /* Mark the port as enabled so port reconfigurations can start, then 1041 /* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
1073 cancel_delayed_work_sync(&efx->monitor_work); 1065 cancel_delayed_work_sync(&efx->monitor_work);
1074 1066
1075 /* Ensure that all RX slow refills are complete. */ 1067 /* Ensure that all RX slow refills are complete. */
1076 efx_for_each_rx_queue(rx_queue, efx) { 1068 efx_for_each_rx_queue(rx_queue, efx)
1077 cancel_delayed_work_sync(&rx_queue->work); 1069 cancel_delayed_work_sync(&rx_queue->work);
1078 }
1079 1070
1080 /* Stop scheduled port reconfigurations */ 1071 /* Stop scheduled port reconfigurations */
1081 cancel_work_sync(&efx->reconfigure_work); 1072 cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
1101 falcon_disable_interrupts(efx); 1092 falcon_disable_interrupts(efx);
1102 if (efx->legacy_irq) 1093 if (efx->legacy_irq)
1103 synchronize_irq(efx->legacy_irq); 1094 synchronize_irq(efx->legacy_irq);
1104 efx_for_each_channel_with_interrupt(channel, efx) 1095 efx_for_each_channel_with_interrupt(channel, efx) {
1105 if (channel->irq) 1096 if (channel->irq)
1106 synchronize_irq(channel->irq); 1097 synchronize_irq(channel->irq);
1098 }
1107 1099
1108 /* Stop all NAPI processing and synchronous rx refills */ 1100 /* Stop all NAPI processing and synchronous rx refills */
1109 efx_for_each_channel(channel, efx) 1101 efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
1125 /* Stop the kernel transmit interface late, so the watchdog 1117 /* Stop the kernel transmit interface late, so the watchdog
1126 * timer isn't ticking over the flush */ 1118 * timer isn't ticking over the flush */
1127 efx_stop_queue(efx); 1119 efx_stop_queue(efx);
1128 if (NET_DEV_REGISTERED(efx)) { 1120 if (efx_dev_registered(efx)) {
1129 netif_tx_lock_bh(efx->net_dev); 1121 netif_tx_lock_bh(efx->net_dev);
1130 netif_tx_unlock_bh(efx->net_dev); 1122 netif_tx_unlock_bh(efx->net_dev);
1131 } 1123 }
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
1344 return 0; 1336 return 0;
1345} 1337}
1346 1338
1347/* Context: process, dev_base_lock held, non-blocking. */ 1339/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1348static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1340static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1349{ 1341{
1350 struct efx_nic *efx = net_dev->priv; 1342 struct efx_nic *efx = net_dev->priv;
1351 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1343 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1352 struct net_device_stats *stats = &net_dev->stats; 1344 struct net_device_stats *stats = &net_dev->stats;
1353 1345
1346 /* Update stats if possible, but do not wait if another thread
1347 * is updating them (or resetting the NIC); slightly stale
1348 * stats are acceptable.
1349 */
1354 if (!spin_trylock(&efx->stats_lock)) 1350 if (!spin_trylock(&efx->stats_lock))
1355 return stats; 1351 return stats;
1356 if (efx->state == STATE_RUNNING) { 1352 if (efx->state == STATE_RUNNING) {
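The new comment documents a common idiom for stats callbacks that run in non-blocking contexts: try the lock and fall back to the previous snapshot when it is contended. In isolation the pattern looks like this sketch:

	/* Opportunistic stats read: slightly stale counters are
	 * preferable to blocking while an update or NIC reset holds
	 * the lock.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;			/* last snapshot */
	if (efx->state == STATE_RUNNING) {
		/* ... refresh counters from the MAC ... */
	}
	spin_unlock(&efx->stats_lock);
	return stats;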
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1494static int efx_netdev_event(struct notifier_block *this, 1490static int efx_netdev_event(struct notifier_block *this,
1495 unsigned long event, void *ptr) 1491 unsigned long event, void *ptr)
1496{ 1492{
1497 struct net_device *net_dev = (struct net_device *)ptr; 1493 struct net_device *net_dev = ptr;
1498 1494
1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1495 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1500 struct efx_nic *efx = net_dev->priv; 1496 struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1563 efx_for_each_tx_queue(tx_queue, efx) 1559 efx_for_each_tx_queue(tx_queue, efx)
1564 efx_release_tx_buffers(tx_queue); 1560 efx_release_tx_buffers(tx_queue);
1565 1561
1566 if (NET_DEV_REGISTERED(efx)) { 1562 if (efx_dev_registered(efx)) {
1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1563 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1568 unregister_netdev(efx->net_dev); 1564 unregister_netdev(efx->net_dev);
1569 } 1565 }
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
1688 if (method == RESET_TYPE_DISABLE) { 1684 if (method == RESET_TYPE_DISABLE) {
1689 /* Reinitialise the device anyway so the driver unload sequence 1685 /* Reinitialise the device anyway so the driver unload sequence
1690 * can talk to the external SRAM */ 1686 * can talk to the external SRAM */
1691 (void) falcon_init_nic(efx); 1687 falcon_init_nic(efx);
1692 rc = -EIO; 1688 rc = -EIO;
1693 goto fail4; 1689 goto fail4;
1694 } 1690 }
@@ -1819,6 +1815,7 @@ static struct efx_board efx_dummy_board_info = {
1819 .init = efx_nic_dummy_op_int, 1815 .init = efx_nic_dummy_op_int,
1820 .init_leds = efx_port_dummy_op_int, 1816 .init_leds = efx_port_dummy_op_int,
1821 .set_fault_led = efx_port_dummy_op_blink, 1817 .set_fault_led = efx_port_dummy_op_blink,
1818 .fini = efx_port_dummy_op_void,
1822}; 1819};
1823 1820
1824/************************************************************************** 1821/**************************************************************************
@@ -1945,6 +1942,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
1945 efx_fini_port(efx); 1942 efx_fini_port(efx);
1946 1943
1947 /* Shutdown the board, then the NIC and board state */ 1944 /* Shutdown the board, then the NIC and board state */
1945 efx->board_info.fini(efx);
1948 falcon_fini_interrupt(efx); 1946 falcon_fini_interrupt(efx);
1949 1947
1950 efx_fini_napi(efx); 1948 efx_fini_napi(efx);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index b57cc68058c0..8cb57987905e 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -13,6 +13,8 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h>
16#include "net_driver.h" 18#include "net_driver.h"
17#include "bitfield.h" 19#include "bitfield.h"
18#include "efx.h" 20#include "efx.h"
@@ -36,10 +38,12 @@
36 * struct falcon_nic_data - Falcon NIC state 38 * struct falcon_nic_data - Falcon NIC state
37 * @next_buffer_table: First available buffer table id 39 * @next_buffer_table: First available buffer table id
38 * @pci_dev2: The secondary PCI device if present 40 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm
39 */ 42 */
40struct falcon_nic_data { 43struct falcon_nic_data {
41 unsigned next_buffer_table; 44 unsigned next_buffer_table;
42 struct pci_dev *pci_dev2; 45 struct pci_dev *pci_dev2;
46 struct i2c_algo_bit_data i2c_data;
43}; 47};
44 48
45/************************************************************************** 49/**************************************************************************
@@ -116,17 +120,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
116 ************************************************************************** 120 **************************************************************************
117 */ 121 */
118 122
119/* DMA address mask (up to 46-bit, avoiding compiler warnings) 123/* DMA address mask */
120 * 124#define FALCON_DMA_MASK DMA_BIT_MASK(46)
121 * Note that it is possible to have a platform with 64-bit longs and
122 * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
123 * platform DMA mask.
124 */
125#if BITS_PER_LONG == 64
126#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
127#else
128#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
129#endif
130 125
131/* TX DMA length mask (13-bit) */ 126/* TX DMA length mask (13-bit) */
132#define FALCON_TX_DMA_MASK (4096 - 1) 127#define FALCON_TX_DMA_MASK (4096 - 1)
@@ -145,7 +140,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
145#define PCI_EXP_LNKSTA_LNK_WID_LBN 4 140#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
146 141
147#define FALCON_IS_DUAL_FUNC(efx) \ 142#define FALCON_IS_DUAL_FUNC(efx) \
148 (FALCON_REV(efx) < FALCON_REV_B0) 143 (falcon_rev(efx) < FALCON_REV_B0)
149 144
150/************************************************************************** 145/**************************************************************************
151 * 146 *
@@ -184,39 +179,57 @@ static inline int falcon_event_present(efx_qword_t *event)
184 * 179 *
185 ************************************************************************** 180 **************************************************************************
186 */ 181 */
187static void falcon_setsdascl(struct efx_i2c_interface *i2c) 182static void falcon_setsda(void *data, int state)
183{
184 struct efx_nic *efx = (struct efx_nic *)data;
185 efx_oword_t reg;
186
187 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
188 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
189 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
190}
191
192static void falcon_setscl(void *data, int state)
188{ 193{
194 struct efx_nic *efx = (struct efx_nic *)data;
189 efx_oword_t reg; 195 efx_oword_t reg;
190 196
191 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 197 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
192 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1)); 198 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
193 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1)); 199 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
194 falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
195} 200}
196 201
197static int falcon_getsda(struct efx_i2c_interface *i2c) 202static int falcon_getsda(void *data)
198{ 203{
204 struct efx_nic *efx = (struct efx_nic *)data;
199 efx_oword_t reg; 205 efx_oword_t reg;
200 206
201 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 207 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
202 return EFX_OWORD_FIELD(reg, GPIO3_IN); 208 return EFX_OWORD_FIELD(reg, GPIO3_IN);
203} 209}
204 210
205static int falcon_getscl(struct efx_i2c_interface *i2c) 211static int falcon_getscl(void *data)
206{ 212{
213 struct efx_nic *efx = (struct efx_nic *)data;
207 efx_oword_t reg; 214 efx_oword_t reg;
208 215
209 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 216 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
210 return EFX_DWORD_FIELD(reg, GPIO0_IN); 217 return EFX_OWORD_FIELD(reg, GPIO0_IN);
211} 218}
212 219
213static struct efx_i2c_bit_operations falcon_i2c_bit_operations = { 220static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
214 .setsda = falcon_setsdascl, 221 .setsda = falcon_setsda,
215 .setscl = falcon_setsdascl, 222 .setscl = falcon_setscl,
216 .getsda = falcon_getsda, 223 .getsda = falcon_getsda,
217 .getscl = falcon_getscl, 224 .getscl = falcon_getscl,
218 .udelay = 100, 225 .udelay = 5,
219 .mdelay = 10, 226 /*
227 * This is the number of system clock ticks after which
228 * i2c-algo-bit gives up waiting for SCL to become high.
229 * It must be at least 2 since the first tick can happen
230 * immediately after it starts waiting.
231 */
232 .timeout = 2,
220}; 233};
221 234
222/************************************************************************** 235/**************************************************************************
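The rewritten callbacks follow the i2c-algo-bit contract: each takes an opaque void *data cookie, and open-drain signalling is emulated through the GPIO output-enable bit, so a line is actively driven only for logic 0 and floats high (via the external pull-up) for logic 1. With .udelay = 5 each half-period is 5 us, giving a bus clock of roughly 100 kHz. The open-drain idiom, restated as a sketch using the register names above:

	/* Open-drain emulation on a push-pull GPIO: enable the (low)
	 * output driver for 0, tristate the pin for 1.
	 */
	static void gpio_i2c_setsda(void *data, int state)
	{
		struct efx_nic *efx = data;
		efx_oword_t reg;

		falcon_read(efx, &reg, GPIO_CTL_REG_KER);
		EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
		falcon_write(efx, &reg, GPIO_CTL_REG_KER);
	}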
@@ -465,7 +478,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
465 TX_DESCQ_TYPE, 0, 478 TX_DESCQ_TYPE, 0,
466 TX_NON_IP_DROP_DIS_B0, 1); 479 TX_NON_IP_DROP_DIS_B0, 1);
467 480
468 if (FALCON_REV(efx) >= FALCON_REV_B0) { 481 if (falcon_rev(efx) >= FALCON_REV_B0) {
469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 482 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 483 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); 484 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +487,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 487 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
475 tx_queue->queue); 488 tx_queue->queue);
476 489
477 if (FALCON_REV(efx) < FALCON_REV_B0) { 490 if (falcon_rev(efx) < FALCON_REV_B0) {
478 efx_oword_t reg; 491 efx_oword_t reg;
479 492
480 BUG_ON(tx_queue->queue >= 128); /* HW limit */ 493 BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +648,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
635 efx_oword_t rx_desc_ptr; 648 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx; 649 struct efx_nic *efx = rx_queue->efx;
637 int rc; 650 int rc;
638 int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; 651 int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
639 int iscsi_digest_en = is_b0; 652 int iscsi_digest_en = is_b0;
640 653
641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 654 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -822,10 +835,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 835 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
823 tx_queue = &efx->tx_queue[tx_ev_q_label]; 836 tx_queue = &efx->tx_queue[tx_ev_q_label];
824 837
825 if (NET_DEV_REGISTERED(efx)) 838 if (efx_dev_registered(efx))
826 netif_tx_lock(efx->net_dev); 839 netif_tx_lock(efx->net_dev);
827 falcon_notify_tx_desc(tx_queue); 840 falcon_notify_tx_desc(tx_queue);
828 if (NET_DEV_REGISTERED(efx)) 841 if (efx_dev_registered(efx))
829 netif_tx_unlock(efx->net_dev); 842 netif_tx_unlock(efx->net_dev);
830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 843 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
831 EFX_WORKAROUND_10727(efx)) { 844 EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +897,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
884 RX_EV_TCP_UDP_CHKSUM_ERR); 897 RX_EV_TCP_UDP_CHKSUM_ERR);
885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 898 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 899 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
887 rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? 900 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 901 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 902 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
890 903
@@ -1065,7 +1078,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 1078 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1066 is_phy_event = 1; 1079 is_phy_event = 1;
1067 1080
1068 if ((FALCON_REV(efx) >= FALCON_REV_B0) && 1081 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1082 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
1070 is_phy_event = 1; 1083 is_phy_event = 1;
1071 1084
@@ -1405,7 +1418,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1405static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) 1418static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1406{ 1419{
1407 struct falcon_nic_data *nic_data = efx->nic_data; 1420 struct falcon_nic_data *nic_data = efx->nic_data;
1408 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1421 efx_oword_t *int_ker = efx->irq_status.addr;
1409 efx_oword_t fatal_intr; 1422 efx_oword_t fatal_intr;
1410 int error, mem_perr; 1423 int error, mem_perr;
1411 static int n_int_errors; 1424 static int n_int_errors;
@@ -1451,8 +1464,8 @@ out:
1451 */ 1464 */
1452static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) 1465static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1453{ 1466{
1454 struct efx_nic *efx = (struct efx_nic *)dev_id; 1467 struct efx_nic *efx = dev_id;
1455 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1468 efx_oword_t *int_ker = efx->irq_status.addr;
1456 struct efx_channel *channel; 1469 struct efx_channel *channel;
1457 efx_dword_t reg; 1470 efx_dword_t reg;
1458 u32 queues; 1471 u32 queues;
@@ -1489,8 +1502,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1489 1502
1490static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 1503static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491{ 1504{
1492 struct efx_nic *efx = (struct efx_nic *)dev_id; 1505 struct efx_nic *efx = dev_id;
1493 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1506 efx_oword_t *int_ker = efx->irq_status.addr;
1494 struct efx_channel *channel; 1507 struct efx_channel *channel;
1495 int syserr; 1508 int syserr;
1496 int queues; 1509 int queues;
@@ -1542,9 +1555,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1542 */ 1555 */
1543static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) 1556static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1544{ 1557{
1545 struct efx_channel *channel = (struct efx_channel *)dev_id; 1558 struct efx_channel *channel = dev_id;
1546 struct efx_nic *efx = channel->efx; 1559 struct efx_nic *efx = channel->efx;
1547 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1560 efx_oword_t *int_ker = efx->irq_status.addr;
1548 int syserr; 1561 int syserr;
1549 1562
1550 efx->last_irq_cpu = raw_smp_processor_id(); 1563 efx->last_irq_cpu = raw_smp_processor_id();
@@ -1572,7 +1585,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1572 unsigned long offset; 1585 unsigned long offset;
1573 efx_dword_t dword; 1586 efx_dword_t dword;
1574 1587
1575 if (FALCON_REV(efx) < FALCON_REV_B0) 1588 if (falcon_rev(efx) < FALCON_REV_B0)
1576 return; 1589 return;
1577 1590
1578 for (offset = RX_RSS_INDIR_TBL_B0; 1591 for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1608,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1595 1608
1596 if (!EFX_INT_MODE_USE_MSI(efx)) { 1609 if (!EFX_INT_MODE_USE_MSI(efx)) {
1597 irq_handler_t handler; 1610 irq_handler_t handler;
1598 if (FALCON_REV(efx) >= FALCON_REV_B0) 1611 if (falcon_rev(efx) >= FALCON_REV_B0)
1599 handler = falcon_legacy_interrupt_b0; 1612 handler = falcon_legacy_interrupt_b0;
1600 else 1613 else
1601 handler = falcon_legacy_interrupt_a1; 1614 handler = falcon_legacy_interrupt_a1;
@@ -1636,12 +1649,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1636 efx_oword_t reg; 1649 efx_oword_t reg;
1637 1650
1638 /* Disable MSI/MSI-X interrupts */ 1651 /* Disable MSI/MSI-X interrupts */
1639 efx_for_each_channel_with_interrupt(channel, efx) 1652 efx_for_each_channel_with_interrupt(channel, efx) {
1640 if (channel->irq) 1653 if (channel->irq)
1641 free_irq(channel->irq, channel); 1654 free_irq(channel->irq, channel);
1655 }
1642 1656
1643 /* ACK legacy interrupt */ 1657 /* ACK legacy interrupt */
1644 if (FALCON_REV(efx) >= FALCON_REV_B0) 1658 if (falcon_rev(efx) >= FALCON_REV_B0)
1645 falcon_read(efx, &reg, INT_ISR0_B0); 1659 falcon_read(efx, &reg, INT_ISR0_B0);
1646 else 1660 else
1647 falcon_irq_ack_a1(efx); 1661 falcon_irq_ack_a1(efx);
@@ -1732,7 +1746,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1732 efx_oword_t temp; 1746 efx_oword_t temp;
1733 int count; 1747 int count;
1734 1748
1735 if ((FALCON_REV(efx) < FALCON_REV_B0) || 1749 if ((falcon_rev(efx) < FALCON_REV_B0) ||
1736 (efx->loopback_mode != LOOPBACK_NONE)) 1750 (efx->loopback_mode != LOOPBACK_NONE))
1737 return; 1751 return;
1738 1752
@@ -1785,7 +1799,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1785{ 1799{
1786 efx_oword_t temp; 1800 efx_oword_t temp;
1787 1801
1788 if (FALCON_REV(efx) < FALCON_REV_B0) 1802 if (falcon_rev(efx) < FALCON_REV_B0)
1789 return; 1803 return;
1790 1804
1791 /* Isolate the MAC -> RX */ 1805 /* Isolate the MAC -> RX */
@@ -1823,7 +1837,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1823 MAC_SPEED, link_speed); 1837 MAC_SPEED, link_speed);
1824 /* On B0, MAC backpressure can be disabled and packets get 1838 /* On B0, MAC backpressure can be disabled and packets get
1825 * discarded. */ 1839 * discarded. */
1826 if (FALCON_REV(efx) >= FALCON_REV_B0) { 1840 if (falcon_rev(efx) >= FALCON_REV_B0) {
1827 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1841 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
1828 !efx->link_up); 1842 !efx->link_up);
1829 } 1843 }
@@ -1841,7 +1855,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1841 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1855 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1842 1856
1843 /* Unisolate the MAC -> RX */ 1857 /* Unisolate the MAC -> RX */
1844 if (FALCON_REV(efx) >= FALCON_REV_B0) 1858 if (falcon_rev(efx) >= FALCON_REV_B0)
1845 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1859 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
1846 falcon_write(efx, &reg, RX_CFG_REG_KER); 1860 falcon_write(efx, &reg, RX_CFG_REG_KER);
1847} 1861}
@@ -1856,7 +1870,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1856 return 0; 1870 return 0;
1857 1871
1858 /* Statistics fetch will fail if the MAC is in TX drain */ 1872 /* Statistics fetch will fail if the MAC is in TX drain */
1859 if (FALCON_REV(efx) >= FALCON_REV_B0) { 1873 if (falcon_rev(efx) >= FALCON_REV_B0) {
1860 efx_oword_t temp; 1874 efx_oword_t temp;
1861 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1875 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1862 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) 1876 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1940,7 +1954,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
1940static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 1954static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1941 int addr, int value) 1955 int addr, int value)
1942{ 1956{
1943 struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 1957 struct efx_nic *efx = net_dev->priv;
1944 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 1958 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1945 efx_oword_t reg; 1959 efx_oword_t reg;
1946 1960
@@ -2008,7 +2022,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
2008 * could be read, -1 will be returned. */ 2022 * could be read, -1 will be returned. */
2009static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2023static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2010{ 2024{
2011 struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 2025 struct efx_nic *efx = net_dev->priv;
2012 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2026 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2013 efx_oword_t reg; 2027 efx_oword_t reg;
2014 int value = -1; 2028 int value = -1;
@@ -2113,7 +2127,7 @@ int falcon_probe_port(struct efx_nic *efx)
2113 falcon_init_mdio(&efx->mii); 2127 falcon_init_mdio(&efx->mii);
2114 2128
2115 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2129 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2116 if (FALCON_REV(efx) >= FALCON_REV_B0) 2130 if (falcon_rev(efx) >= FALCON_REV_B0)
2117 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2131 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
2118 else 2132 else
2119 efx->flow_control = EFX_FC_RX; 2133 efx->flow_control = EFX_FC_RX;
@@ -2373,7 +2387,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2373 return -ENODEV; 2387 return -ENODEV;
2374 } 2388 }
2375 2389
2376 switch (FALCON_REV(efx)) { 2390 switch (falcon_rev(efx)) {
2377 case FALCON_REV_A0: 2391 case FALCON_REV_A0:
2378 case 0xff: 2392 case 0xff:
2379 EFX_ERR(efx, "Falcon rev A0 not supported\n"); 2393 EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2399,7 +2413,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2399 break; 2413 break;
2400 2414
2401 default: 2415 default:
2402 EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); 2416 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2403 return -ENODEV; 2417 return -ENODEV;
2404 } 2418 }
2405 2419
@@ -2411,15 +2425,9 @@ int falcon_probe_nic(struct efx_nic *efx)
2411 struct falcon_nic_data *nic_data; 2425 struct falcon_nic_data *nic_data;
2412 int rc; 2426 int rc;
2413 2427
2414 /* Initialise I2C interface state */
2415 efx->i2c.efx = efx;
2416 efx->i2c.op = &falcon_i2c_bit_operations;
2417 efx->i2c.sda = 1;
2418 efx->i2c.scl = 1;
2419
2420 /* Allocate storage for hardware specific data */ 2428 /* Allocate storage for hardware specific data */
2421 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2429 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2422 efx->nic_data = (void *) nic_data; 2430 efx->nic_data = nic_data;
2423 2431
2424 /* Determine number of ports etc. */ 2432 /* Determine number of ports etc. */
2425 rc = falcon_probe_nic_variant(efx); 2433 rc = falcon_probe_nic_variant(efx);
@@ -2467,6 +2475,18 @@ int falcon_probe_nic(struct efx_nic *efx)
2467 if (rc) 2475 if (rc)
2468 goto fail5; 2476 goto fail5;
2469 2477
2478 /* Initialise I2C adapter */
2479 efx->i2c_adap.owner = THIS_MODULE;
2480 efx->i2c_adap.class = I2C_CLASS_HWMON;
2481 nic_data->i2c_data = falcon_i2c_bit_operations;
2482 nic_data->i2c_data.data = efx;
2483 efx->i2c_adap.algo_data = &nic_data->i2c_data;
2484 efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
2485 strcpy(efx->i2c_adap.name, "SFC4000 GPIO");
2486 rc = i2c_bit_add_bus(&efx->i2c_adap);
2487 if (rc)
2488 goto fail5;
2489
2470 return 0; 2490 return 0;
2471 2491
2472 fail5: 2492 fail5:
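Once i2c_bit_add_bus() has attached the bit-banging algorithm, the bus behaves like any other kernel I2C adapter, so the driver no longer needs its private efx_i2c_* transfer helpers. A hedged usage sketch (the 0x50 device address and register offset are hypothetical):

	/* SMBus byte read through the newly registered adapter. */
	union i2c_smbus_data data;
	u8 offset = 0;			/* hypothetical device register */
	int rc;

	rc = i2c_smbus_xfer(&efx->i2c_adap, 0x50, 0, I2C_SMBUS_READ,
			    offset, I2C_SMBUS_BYTE_DATA, &data);
	if (rc == 0)
		/* data.byte now holds the value read */;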
@@ -2489,13 +2509,10 @@ int falcon_probe_nic(struct efx_nic *efx)
2489 */ 2509 */
2490int falcon_init_nic(struct efx_nic *efx) 2510int falcon_init_nic(struct efx_nic *efx)
2491{ 2511{
2492 struct falcon_nic_data *data;
2493 efx_oword_t temp; 2512 efx_oword_t temp;
2494 unsigned thresh; 2513 unsigned thresh;
2495 int rc; 2514 int rc;
2496 2515
2497 data = (struct falcon_nic_data *)efx->nic_data;
2498
2499 /* Set up the address region register. This is only needed 2516 /* Set up the address region register. This is only needed
2500 * for the B0 FPGA, but since we are just pushing in the 2517 * for the B0 FPGA, but since we are just pushing in the
2501 * reset defaults this may as well be unconditional. */ 2518 * reset defaults this may as well be unconditional. */
@@ -2562,7 +2579,7 @@ int falcon_init_nic(struct efx_nic *efx)
2562 2579
2563 /* Set number of RSS queues for receive path. */ 2580 /* Set number of RSS queues for receive path. */
2564 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 2581 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2565 if (FALCON_REV(efx) >= FALCON_REV_B0) 2582 if (falcon_rev(efx) >= FALCON_REV_B0)
2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); 2583 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2567 else 2584 else
2568 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); 2585 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2600,7 +2617,7 @@ int falcon_init_nic(struct efx_nic *efx)
2600 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 2617 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
2601 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 2618 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
2602 /* Squash TX of packets of 16 bytes or less */ 2619 /* Squash TX of packets of 16 bytes or less */
2603 if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2620 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
2604 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 2621 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
2605 falcon_write(efx, &temp, TX_CFG2_REG_KER); 2622 falcon_write(efx, &temp, TX_CFG2_REG_KER);
2606 2623
@@ -2617,7 +2634,7 @@ int falcon_init_nic(struct efx_nic *efx)
2617 if (EFX_WORKAROUND_7575(efx)) 2634 if (EFX_WORKAROUND_7575(efx))
2618 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, 2635 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
2619 (3 * 4096) / 32); 2636 (3 * 4096) / 32);
2620 if (FALCON_REV(efx) >= FALCON_REV_B0) 2637 if (falcon_rev(efx) >= FALCON_REV_B0)
2621 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); 2638 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
2622 2639
2623 /* RX FIFO flow control thresholds */ 2640 /* RX FIFO flow control thresholds */
@@ -2633,7 +2650,7 @@ int falcon_init_nic(struct efx_nic *efx)
2633 falcon_write(efx, &temp, RX_CFG_REG_KER); 2650 falcon_write(efx, &temp, RX_CFG_REG_KER);
2634 2651
2635 /* Set destination of both TX and RX Flush events */ 2652 /* Set destination of both TX and RX Flush events */
2636 if (FALCON_REV(efx) >= FALCON_REV_B0) { 2653 if (falcon_rev(efx) >= FALCON_REV_B0) {
2637 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 2654 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
2638 falcon_write(efx, &temp, DP_CTRL_REG); 2655 falcon_write(efx, &temp, DP_CTRL_REG);
2639 } 2656 }
@@ -2644,10 +2661,14 @@ int falcon_init_nic(struct efx_nic *efx)
2644void falcon_remove_nic(struct efx_nic *efx) 2661void falcon_remove_nic(struct efx_nic *efx)
2645{ 2662{
2646 struct falcon_nic_data *nic_data = efx->nic_data; 2663 struct falcon_nic_data *nic_data = efx->nic_data;
2664 int rc;
2665
2666 rc = i2c_del_adapter(&efx->i2c_adap);
2667 BUG_ON(rc);
2647 2668
2648 falcon_free_buffer(efx, &efx->irq_status); 2669 falcon_free_buffer(efx, &efx->irq_status);
2649 2670
2650 (void) falcon_reset_hw(efx, RESET_TYPE_ALL); 2671 falcon_reset_hw(efx, RESET_TYPE_ALL);
2651 2672
2652 /* Release the second function after the reset */ 2673 /* Release the second function after the reset */
2653 if (nic_data->pci_dev2) { 2674 if (nic_data->pci_dev2) {
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 6117403b0c03..492f9bc28840 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -23,7 +23,10 @@ enum falcon_revision {
23 FALCON_REV_B0 = 2, 23 FALCON_REV_B0 = 2,
24}; 24};
25 25
26#define FALCON_REV(efx) ((efx)->pci_dev->revision) 26static inline int falcon_rev(struct efx_nic *efx)
27{
28 return efx->pci_dev->revision;
29}
27 30
28extern struct efx_nic_type falcon_a_nic_type; 31extern struct efx_nic_type falcon_a_nic_type;
29extern struct efx_nic_type falcon_b_nic_type; 32extern struct efx_nic_type falcon_b_nic_type;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 06e2d68fc3d1..6d003114eeab 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
1125 u8 port1_phy_type; 1125 u8 port1_phy_type;
1126 __le16 asic_sub_revision; 1126 __le16 asic_sub_revision;
1127 __le16 board_revision; 1127 __le16 board_revision;
1128} __attribute__ ((packed)); 1128} __packed;
1129 1129
1130#define NVCONFIG_BASE 0x300 1130#define NVCONFIG_BASE 0x300
1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C 1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
1144 __le16 board_struct_ver; 1144 __le16 board_struct_ver;
1145 __le16 board_checksum; 1145 __le16 board_checksum;
1146 struct falcon_nvconfig_board_v2 board_v2; 1146 struct falcon_nvconfig_board_v2 board_v2;
1147} __attribute__ ((packed)); 1147} __packed;
1148 1148
1149#endif /* EFX_FALCON_HWDEFS_H */ 1149#endif /* EFX_FALCON_HWDEFS_H */
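__packed is the kernel's preferred spelling of the same GCC attribute, supplied by the compiler headers pulled in via <linux/compiler.h>:

	#define __packed	__attribute__((packed))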
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index ea08184ddfa9..6670cdfc41ab 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -56,14 +56,27 @@
56#define FALCON_USE_QWORD_IO 1 56#define FALCON_USE_QWORD_IO 1
57#endif 57#endif
58 58
59#define _falcon_writeq(efx, value, reg) \ 59#ifdef FALCON_USE_QWORD_IO
60 __raw_writeq((__force u64) (value), (efx)->membase + (reg)) 60static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
61#define _falcon_writel(efx, value, reg) \ 61 unsigned int reg)
62 __raw_writel((__force u32) (value), (efx)->membase + (reg)) 62{
63#define _falcon_readq(efx, reg) \ 63 __raw_writeq((__force u64)value, efx->membase + reg);
64 ((__force __le64) __raw_readq((efx)->membase + (reg))) 64}
65#define _falcon_readl(efx, reg) \ 65static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
66 ((__force __le32) __raw_readl((efx)->membase + (reg))) 66{
67 return (__force __le64)__raw_readq(efx->membase + reg);
68}
69#endif
70
71static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
72 unsigned int reg)
73{
74 __raw_writel((__force u32)value, efx->membase + reg);
75}
76static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
77{
78 return (__force __le32)__raw_readl(efx->membase + reg);
79}
67 80
68/* Writes to a normal 16-byte Falcon register, locking as appropriate. */ 81/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
69static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, 82static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
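Converting the accessor macros to static inline functions gives the values real __le64/__le32 types, so sparse can flag byte-order mistakes at build time; the __force casts are now confined to the raw __raw_read/__raw_write boundary. A hedged usage sketch (REG is a hypothetical register offset):

	__le64 raw = _falcon_readq(efx, REG);	/* little-endian wire value */
	u64 val = le64_to_cpu(raw);		/* conversion is explicit */

	/* Passing a plain cpu-order u64 here would now draw a sparse warning: */
	_falcon_writeq(efx, cpu_to_le64(val | 1), REG);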
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index a74b7931a3c4..dbdcee4b0f8d 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
221{ 221{
222 efx_dword_t reg; 222 efx_dword_t reg;
223 223
224 if (FALCON_REV(efx) < FALCON_REV_B0) 224 if (falcon_rev(efx) < FALCON_REV_B0)
225 return 1; 225 return 1;
226 226
227 /* The ISR latches, so clear it and re-read */ 227 /* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
241{ 241{
242 efx_dword_t reg; 242 efx_dword_t reg;
243 243
244 if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 244 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
245 return; 245 return;
246 246
247 /* Flush the ISR */ 247 /* Flush the ISR */
@@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
454 454
455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
456 __func__, tries); 456 __func__, tries);
457 (void) falcon_reset_xaui(efx); 457 falcon_reset_xaui(efx);
458 udelay(200); 458 udelay(200);
459 tries--; 459 tries--;
460 } 460 }
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
572 xaui_link_ok = falcon_xaui_link_ok(efx); 572 xaui_link_ok = falcon_xaui_link_ok(efx);
573 573
574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
575 (void) falcon_reset_xaui(efx); 575 falcon_reset_xaui(efx);
576 576
577 /* Call the PHY check_hw routine */ 577 /* Call the PHY check_hw routine */
578 rc = efx->phy_op->check_hw(efx); 578 rc = efx->phy_op->check_hw(efx);
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
639 reset = ((flow_control & EFX_FC_TX) && 639 reset = ((flow_control & EFX_FC_TX) &&
640 !(efx->flow_control & EFX_FC_TX)); 640 !(efx->flow_control & EFX_FC_TX));
641 if (EFX_WORKAROUND_11482(efx) && reset) { 641 if (EFX_WORKAROUND_11482(efx) && reset) {
642 if (FALCON_REV(efx) >= FALCON_REV_B0) { 642 if (falcon_rev(efx) >= FALCON_REV_B0) {
643 /* Recover by resetting the EM block */ 643 /* Recover by resetting the EM block */
644 if (efx->link_up) 644 if (efx->link_up)
645 falcon_drain_tx_fifo(efx); 645 falcon_drain_tx_fifo(efx);
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
deleted file mode 100644
index b6c62d0ed9c2..000000000000
--- a/drivers/net/sfc/i2c-direct.c
+++ /dev/null
@@ -1,381 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "i2c-direct.h"
14
15/*
16 * I2C data (SDA) and clock (SCL) line read/writes with appropriate
17 * delays.
18 */
19
20static inline void setsda(struct efx_i2c_interface *i2c, int state)
21{
22 udelay(i2c->op->udelay);
23 i2c->sda = state;
24 i2c->op->setsda(i2c);
25 udelay(i2c->op->udelay);
26}
27
28static inline void setscl(struct efx_i2c_interface *i2c, int state)
29{
30 udelay(i2c->op->udelay);
31 i2c->scl = state;
32 i2c->op->setscl(i2c);
33 udelay(i2c->op->udelay);
34}
35
36static inline int getsda(struct efx_i2c_interface *i2c)
37{
38 int sda;
39
40 udelay(i2c->op->udelay);
41 sda = i2c->op->getsda(i2c);
42 udelay(i2c->op->udelay);
43 return sda;
44}
45
46static inline int getscl(struct efx_i2c_interface *i2c)
47{
48 int scl;
49
50 udelay(i2c->op->udelay);
51 scl = i2c->op->getscl(i2c);
52 udelay(i2c->op->udelay);
53 return scl;
54}
55
56/*
57 * I2C low-level protocol operations
58 *
59 */
60
61static inline void i2c_release(struct efx_i2c_interface *i2c)
62{
63 EFX_WARN_ON_PARANOID(!i2c->scl);
64 EFX_WARN_ON_PARANOID(!i2c->sda);
65 /* Devices may time out if operations do not end */
66 setscl(i2c, 1);
67 setsda(i2c, 1);
68 EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
69 EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
70}
71
72static inline void i2c_start(struct efx_i2c_interface *i2c)
73{
74 /* We may be restarting immediately after a {send,recv}_bit,
75 * so SCL will not necessarily already be high.
76 */
77 EFX_WARN_ON_PARANOID(!i2c->sda);
78 setscl(i2c, 1);
79 setsda(i2c, 0);
80 setscl(i2c, 0);
81 setsda(i2c, 1);
82}
83
84static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
85{
86 EFX_WARN_ON_PARANOID(i2c->scl != 0);
87 setsda(i2c, bit);
88 setscl(i2c, 1);
89 setscl(i2c, 0);
90 setsda(i2c, 1);
91}
92
93static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
94{
95 int bit;
96
97 EFX_WARN_ON_PARANOID(i2c->scl != 0);
98 EFX_WARN_ON_PARANOID(!i2c->sda);
99 setscl(i2c, 1);
100 bit = getsda(i2c);
101 setscl(i2c, 0);
102 return bit;
103}
104
105static inline void i2c_stop(struct efx_i2c_interface *i2c)
106{
107 EFX_WARN_ON_PARANOID(i2c->scl != 0);
108 setsda(i2c, 0);
109 setscl(i2c, 1);
110 setsda(i2c, 1);
111}
112
113/*
114 * I2C mid-level protocol operations
115 *
116 */
117
118/* Sends a byte via the I2C bus and checks for an acknowledgement from
119 * the slave device.
120 */
121static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
122{
123 int i;
124
125 /* Send byte */
126 for (i = 0; i < 8; i++) {
127 i2c_send_bit(i2c, !!(byte & 0x80));
128 byte <<= 1;
129 }
130
131 /* Check for acknowledgement from slave */
132 return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
133}
134
135/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
136static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
137{
138 u8 value = 0;
139 int i;
140
141 /* Receive byte */
142 for (i = 0; i < 8; i++)
143 value = (value << 1) | i2c_recv_bit(i2c);
144
145 /* Send ACK/NACK */
146 i2c_send_bit(i2c, (ack ? 0 : 1));
147
148 return value;
149}
150
151/* Calculate command byte for a read operation */
152static inline u8 i2c_read_cmd(u8 device_id)
153{
154 return ((device_id << 1) | 1);
155}
156
157/* Calculate command byte for a write operation */
158static inline u8 i2c_write_cmd(u8 device_id)
159{
160 return ((device_id << 1) | 0);
161}
162
163int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
164{
165 int rc;
166
167 /* If someone is driving the bus low we just give up. */
168 if (getsda(i2c) == 0 || getscl(i2c) == 0) {
169 EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
170 " Giving up.\n", __func__);
171 return -EFAULT;
172 }
173
174 /* Pretend to initiate a device write */
175 i2c_start(i2c);
176 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
177 if (rc)
178 goto out;
179
180 out:
181 i2c_stop(i2c);
182 i2c_release(i2c);
183
184 return rc;
185}
186
187/* This performs a fast read of one or more consecutive bytes from an
188 * I2C device. Not all devices support consecutive reads of more than
189 * one byte; for these devices use efx_i2c_read() instead.
190 */
191int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
192 u8 device_id, u8 offset, u8 *data, unsigned int len)
193{
194 int i;
195 int rc;
196
197 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
198 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
199 EFX_WARN_ON_PARANOID(data == NULL);
200 EFX_WARN_ON_PARANOID(len < 1);
201
202 /* Select device and starting offset */
203 i2c_start(i2c);
204 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
205 if (rc)
206 goto out;
207 rc = i2c_send_byte(i2c, offset);
208 if (rc)
209 goto out;
210
211 /* Read data from device */
212 i2c_start(i2c);
213 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
214 if (rc)
215 goto out;
216 for (i = 0; i < (len - 1); i++)
217 /* Read and acknowledge all but the last byte */
218 data[i] = i2c_recv_byte(i2c, 1);
219 /* Read last byte with no acknowledgement */
220 data[i] = i2c_recv_byte(i2c, 0);
221
222 out:
223 i2c_stop(i2c);
224 i2c_release(i2c);
225
226 return rc;
227}
228
229/* This performs a fast write of one or more consecutive bytes to an
230 * I2C device. Not all devices support consecutive writes of more
231 * than one byte; for these devices use efx_i2c_write() instead.
232 */
233int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
234 u8 device_id, u8 offset,
235 const u8 *data, unsigned int len)
236{
237 int i;
238 int rc;
239
240 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
241 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
242 EFX_WARN_ON_PARANOID(len < 1);
243
244 /* Select device and starting offset */
245 i2c_start(i2c);
246 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
247 if (rc)
248 goto out;
249 rc = i2c_send_byte(i2c, offset);
250 if (rc)
251 goto out;
252
253 /* Write data to device */
254 for (i = 0; i < len; i++) {
255 rc = i2c_send_byte(i2c, data[i]);
256 if (rc)
257 goto out;
258 }
259
260 out:
261 i2c_stop(i2c);
262 i2c_release(i2c);
263
264 return rc;
265}
266
267/* I2C byte-by-byte read */
268int efx_i2c_read(struct efx_i2c_interface *i2c,
269 u8 device_id, u8 offset, u8 *data, unsigned int len)
270{
271 int rc;
272
273 /* i2c_fast_read with length 1 is a single byte read */
274 for (; len > 0; offset++, data++, len--) {
275 rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
276 if (rc)
277 return rc;
278 }
279
280 return 0;
281}
282
283/* I2C byte-by-byte write */
284int efx_i2c_write(struct efx_i2c_interface *i2c,
285 u8 device_id, u8 offset, const u8 *data, unsigned int len)
286{
287 int rc;
288
289 /* i2c_fast_write with length 1 is a single byte write */
290 for (; len > 0; offset++, data++, len--) {
291 rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
292 if (rc)
293 return rc;
294 mdelay(i2c->op->mdelay);
295 }
296
297 return 0;
298}
299
300
301/* This is just a slightly neater wrapper round efx_i2c_fast_write
302 * in the case where the target doesn't take an offset
303 */
304int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
305 u8 device_id, const u8 *data, unsigned int len)
306{
307 return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
308}
309
310/* I2C receiving of bytes - does not send an offset byte */
311int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
312 u8 *bytes, unsigned int len)
313{
314 int i;
315 int rc;
316
317 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
318 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
319 EFX_WARN_ON_PARANOID(len < 1);
320
321 /* Select device */
322 i2c_start(i2c);
323
324 /* Read data from device */
325 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
326 if (rc)
327 goto out;
328
329 for (i = 0; i < (len - 1); i++)
330 /* Read and acknowledge all but the last byte */
331 bytes[i] = i2c_recv_byte(i2c, 1);
332 /* Read last byte with no acknowledgement */
333 bytes[i] = i2c_recv_byte(i2c, 0);
334
335 out:
336 i2c_stop(i2c);
337 i2c_release(i2c);
338
339 return rc;
340}
341
342/* SMBus and some I2C devices will time out if the I2C clock is
343 * held low for too long. This is most likely to happen in virtualised
344 * systems (when the entire domain is descheduled) but could in
345 * principle happen due to preemption on any busy system (and given the
346 * potential length of an I2C operation turning preemption off is not
347 * a sensible option). The following functions deal with the failure by
348 * retrying up to a fixed number of times.
349 */
350
351#define I2C_MAX_RETRIES (10)
352
353/* The timeout problem will result in -EIO. If the wrapped function
354 * returns any other error, pass this up and do not retry. */
355#define RETRY_WRAPPER(_f) \
356 int retries = I2C_MAX_RETRIES; \
357 int rc; \
358 while (retries) { \
359 rc = _f; \
360 if (rc != -EIO) \
361 return rc; \
362 retries--; \
363 } \
364 return rc; \
365
366int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
367{
368 RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
369}
370
371int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
372 u8 device_id, u8 offset, u8 *data, unsigned int len)
373{
374 RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
375}
376
377int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
378 u8 device_id, u8 offset, const u8 *data, unsigned int len)
379{
380 RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
381}
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
deleted file mode 100644
index 291e561071f5..000000000000
--- a/drivers/net/sfc/i2c-direct.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_I2C_DIRECT_H
12#define EFX_I2C_DIRECT_H
13
14#include "net_driver.h"
15
16/*
17 * Direct control of an I2C bus
18 */
19
20struct efx_i2c_interface;
21
22/**
23 * struct efx_i2c_bit_operations - I2C bus direct control methods
24 *
25 * I2C bus direct control methods.
26 *
27 * @setsda: Set state of SDA line
28 * @setscl: Set state of SCL line
29 * @getsda: Get state of SDA line
30 * @getscl: Get state of SCL line
31 * @udelay: Delay between each bit operation
32 * @mdelay: Delay between each byte write
33 */
34struct efx_i2c_bit_operations {
35 void (*setsda) (struct efx_i2c_interface *i2c);
36 void (*setscl) (struct efx_i2c_interface *i2c);
37 int (*getsda) (struct efx_i2c_interface *i2c);
38 int (*getscl) (struct efx_i2c_interface *i2c);
39 unsigned int udelay;
40 unsigned int mdelay;
41};
42
43/**
44 * struct efx_i2c_interface - an I2C interface
45 *
46 * An I2C interface.
47 *
48 * @efx: Attached Efx NIC
49 * @op: I2C bus control methods
50 * @sda: Current output state of SDA line
51 * @scl: Current output state of SCL line
52 */
53struct efx_i2c_interface {
54 struct efx_nic *efx;
55 struct efx_i2c_bit_operations *op;
56 unsigned int sda:1;
57 unsigned int scl:1;
58};
59
60extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
61extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
62 u8 device_id, u8 offset,
63 u8 *data, unsigned int len);
64extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
65 u8 device_id, u8 offset,
66 const u8 *data, unsigned int len);
67extern int efx_i2c_read(struct efx_i2c_interface *i2c,
68 u8 device_id, u8 offset, u8 *data, unsigned int len);
69extern int efx_i2c_write(struct efx_i2c_interface *i2c,
70 u8 device_id, u8 offset,
71 const u8 *data, unsigned int len);
72
73extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
74 const u8 *bytes, unsigned int len);
75
76extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
77 u8 *bytes, unsigned int len);
78
79
80/* Versions of the API that retry on failure. */
81extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
82 u8 device_id);
83
84extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
85 u8 device_id, u8 offset, u8 *data, unsigned int len);
86
87extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
88 u8 device_id, u8 offset,
89 const u8 *data, unsigned int len);
90
91#endif /* EFX_I2C_DIRECT_H */
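[Editor's note] For context, a minimal sketch of how a board might have populated struct efx_i2c_bit_operations from the header above. The example_* GPIO helpers, pin numbers, and delay values are invented for illustration and are not part of this driver.

	/* Hypothetical GPIO helpers; not real driver functions */
	extern void example_gpio_set(int pin, int value);
	extern int example_gpio_get(int pin);

	#define EXAMPLE_SDA_PIN	0
	#define EXAMPLE_SCL_PIN	1

	static void example_setsda(struct efx_i2c_interface *i2c)
	{
		/* Push the cached output state onto the SDA line */
		example_gpio_set(EXAMPLE_SDA_PIN, i2c->sda);
	}

	static void example_setscl(struct efx_i2c_interface *i2c)
	{
		example_gpio_set(EXAMPLE_SCL_PIN, i2c->scl);
	}

	static int example_getsda(struct efx_i2c_interface *i2c)
	{
		return example_gpio_get(EXAMPLE_SDA_PIN);
	}

	static int example_getscl(struct efx_i2c_interface *i2c)
	{
		return example_gpio_get(EXAMPLE_SCL_PIN);
	}

	static struct efx_i2c_bit_operations example_i2c_ops = {
		.setsda	= example_setsda,
		.setscl	= example_setscl,
		.getsda	= example_getsda,
		.getscl	= example_getscl,
		.udelay	= 20,	/* per-bit delay in us; an assumption */
		.mdelay	= 10,	/* inter-byte write delay in ms; an assumption */
	};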
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 59f261b4171f..d803b86c647c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -26,10 +26,10 @@
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/inet_lro.h> 28#include <linux/inet_lro.h>
29#include <linux/i2c.h>
29 30
30#include "enum.h" 31#include "enum.h"
31#include "bitfield.h" 32#include "bitfield.h"
32#include "i2c-direct.h"
33 33
34#define EFX_MAX_LRO_DESCRIPTORS 8 34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS 35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
@@ -42,7 +42,7 @@
42#ifndef EFX_DRIVER_NAME 42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc" 43#define EFX_DRIVER_NAME "sfc"
44#endif 44#endif
45#define EFX_DRIVER_VERSION "2.2.0136" 45#define EFX_DRIVER_VERSION "2.2"
46 46
47#ifdef EFX_ENABLE_DEBUG 47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
52#define EFX_WARN_ON_PARANOID(x) do {} while (0) 52#define EFX_WARN_ON_PARANOID(x) do {} while (0)
53#endif 53#endif
54 54
55#define NET_DEV_REGISTERED(efx) \
56 ((efx)->net_dev->reg_state == NETREG_REGISTERED)
57
58/* Include net device name in log messages if it has been registered.
59 * Use efx->name not efx->net_dev->name so that races with (un)registration
60 * are harmless.
61 */
62#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
63
64/* Un-rate-limited logging */ 55/* Un-rate-limited logging */
65#define EFX_ERR(efx, fmt, args...) \ 56#define EFX_ERR(efx, fmt, args...) \
66dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) 57dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
67 58
68#define EFX_INFO(efx, fmt, args...) \ 59#define EFX_INFO(efx, fmt, args...) \
69dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) 60dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
70 61
71#ifdef EFX_ENABLE_DEBUG 62#ifdef EFX_ENABLE_DEBUG
72#define EFX_LOG(efx, fmt, args...) \ 63#define EFX_LOG(efx, fmt, args...) \
73dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 64dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
74#else 65#else
75#define EFX_LOG(efx, fmt, args...) \ 66#define EFX_LOG(efx, fmt, args...) \
76dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 67dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
77#endif 68#endif
78 69
79#define EFX_TRACE(efx, fmt, args...) do {} while (0) 70#define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
90#define EFX_LOG_RL(efx, fmt, args...) \ 81#define EFX_LOG_RL(efx, fmt, args...) \
91do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) 82do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
92 83
93/* Kernel headers may redefine inline anyway */
94#ifndef inline
95#define inline inline __attribute__ ((always_inline))
96#endif
97
98/************************************************************************** 84/**************************************************************************
99 * 85 *
100 * Efx data structures 86 * Efx data structures
@@ -432,7 +418,10 @@ struct efx_blinker {
432 * @init_leds: Sets up board LEDs 418 * @init_leds: Sets up board LEDs
433 * @set_fault_led: Turns the fault LED on or off 419 * @set_fault_led: Turns the fault LED on or off
434 * @blink: Starts/stops blinking 420 * @blink: Starts/stops blinking
421 * @fini: Cleanup function
435 * @blinker: used to blink LEDs in software 422 * @blinker: used to blink LEDs in software
423 * @hwmon_client: I2C client for hardware monitor
424 * @ioexp_client: I2C client for power/port control
436 */ 425 */
437struct efx_board { 426struct efx_board {
438 int type; 427 int type;
@@ -445,7 +434,9 @@ struct efx_board {
445 int (*init_leds)(struct efx_nic *efx); 434 int (*init_leds)(struct efx_nic *efx);
446 void (*set_fault_led) (struct efx_nic *efx, int state); 435 void (*set_fault_led) (struct efx_nic *efx, int state);
447 void (*blink) (struct efx_nic *efx, int start); 436 void (*blink) (struct efx_nic *efx, int start);
437 void (*fini) (struct efx_nic *nic);
448 struct efx_blinker blinker; 438 struct efx_blinker blinker;
439 struct i2c_client *hwmon_client, *ioexp_client;
449}; 440};
450 441
451#define STRING_TABLE_LOOKUP(val, member) \ 442#define STRING_TABLE_LOOKUP(val, member) \
@@ -632,7 +623,7 @@ union efx_multicast_hash {
632 * @membase: Memory BAR value 623 * @membase: Memory BAR value
633 * @biu_lock: BIU (bus interface unit) lock 624 * @biu_lock: BIU (bus interface unit) lock
634 * @interrupt_mode: Interrupt mode 625 * @interrupt_mode: Interrupt mode
635 * @i2c: I2C interface 626 * @i2c_adap: I2C adapter
636 * @board_info: Board-level information 627 * @board_info: Board-level information
637 * @state: Device state flag. Serialised by the rtnl_lock. 628 * @state: Device state flag. Serialised by the rtnl_lock.
638 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) 629 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
@@ -695,12 +686,12 @@ struct efx_nic {
695 struct workqueue_struct *workqueue; 686 struct workqueue_struct *workqueue;
696 struct work_struct reset_work; 687 struct work_struct reset_work;
697 struct delayed_work monitor_work; 688 struct delayed_work monitor_work;
698 unsigned long membase_phys; 689 resource_size_t membase_phys;
699 void __iomem *membase; 690 void __iomem *membase;
700 spinlock_t biu_lock; 691 spinlock_t biu_lock;
701 enum efx_int_mode interrupt_mode; 692 enum efx_int_mode interrupt_mode;
702 693
703 struct efx_i2c_interface i2c; 694 struct i2c_adapter i2c_adap;
704 struct efx_board board_info; 695 struct efx_board board_info;
705 696
706 enum nic_state state; 697 enum nic_state state;
@@ -719,7 +710,7 @@ struct efx_nic {
719 710
720 unsigned n_rx_nodesc_drop_cnt; 711 unsigned n_rx_nodesc_drop_cnt;
721 712
722 void *nic_data; 713 struct falcon_nic_data *nic_data;
723 714
724 struct mutex mac_lock; 715 struct mutex mac_lock;
725 int port_enabled; 716 int port_enabled;
@@ -760,6 +751,20 @@ struct efx_nic {
760 void *loopback_selftest; 751 void *loopback_selftest;
761}; 752};
762 753
754static inline int efx_dev_registered(struct efx_nic *efx)
755{
756 return efx->net_dev->reg_state == NETREG_REGISTERED;
757}
758
759/* Net device name, for inclusion in log messages if it has been registered.
760 * Use efx->name not efx->net_dev->name so that races with (un)registration
761 * are harmless.
762 */
763static inline const char *efx_dev_name(struct efx_nic *efx)
764{
765 return efx_dev_registered(efx) ? efx->name : "";
766}
767
763/** 768/**
764 * struct efx_nic_type - Efx device type definition 769 * struct efx_nic_type - Efx device type definition
765 * @mem_bar: Memory BAR number 770 * @mem_bar: Memory BAR number
@@ -795,7 +800,7 @@ struct efx_nic_type {
795 unsigned int txd_ring_mask; 800 unsigned int txd_ring_mask;
796 unsigned int rxd_ring_mask; 801 unsigned int rxd_ring_mask;
797 unsigned int evq_size; 802 unsigned int evq_size;
798 dma_addr_t max_dma_mask; 803 u64 max_dma_mask;
799 unsigned int tx_dma_mask; 804 unsigned int tx_dma_mask;
800 unsigned bug5391_mask; 805 unsigned bug5391_mask;
801 806
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
86 */ 86 */
87#define EFX_RXD_HEAD_ROOM 2 87#define EFX_RXD_HEAD_ROOM 2
88 88
89/* Macros for zero-order pages (potentially) containing multiple RX buffers */ 89static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
90#define RX_DATA_OFFSET(_data) \ 90{
91 (((unsigned long) (_data)) & (PAGE_SIZE-1)) 91 /* Offset is always within one page, so we don't need to consider
92#define RX_BUF_OFFSET(_rx_buf) \ 92 * the page order.
93 RX_DATA_OFFSET((_rx_buf)->data) 93 */
94 94 return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
95#define RX_PAGE_SIZE(_efx) \ 95}
96 (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) 96static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
97{
98 return PAGE_SIZE << efx->rx_buffer_order;
99}
97 100
98 101
99/************************************************************************** 102/**************************************************************************
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
106static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, 109static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
107 void **tcpudp_hdr, u64 *hdr_flags, void *priv) 110 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
108{ 111{
109 struct efx_channel *channel = (struct efx_channel *)priv; 112 struct efx_channel *channel = priv;
110 struct iphdr *iph; 113 struct iphdr *iph;
111 struct tcphdr *th; 114 struct tcphdr *th;
112 115
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
131 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, 134 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
132 void *priv) 135 void *priv)
133{ 136{
134 struct efx_channel *channel = (struct efx_channel *)priv; 137 struct efx_channel *channel = priv;
135 struct ethhdr *eh; 138 struct ethhdr *eh;
136 struct iphdr *iph; 139 struct iphdr *iph;
137 140
138 /* We support EtherII and VLAN encapsulated IPv4 */ 141 /* We support EtherII and VLAN encapsulated IPv4 */
139 eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); 142 eh = page_address(frag->page) + frag->page_offset;
140 *mac_hdr = eh; 143 *mac_hdr = eh;
141 144
142 if (eh->h_proto == htons(ETH_P_IP)) { 145 if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
269 return -ENOMEM; 272 return -ENOMEM;
270 273
271 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 274 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
272 0, RX_PAGE_SIZE(efx), 275 0, efx_rx_buf_size(efx),
273 PCI_DMA_FROMDEVICE); 276 PCI_DMA_FROMDEVICE);
274 277
275 if (unlikely(pci_dma_mapping_error(dma_addr))) { 278 if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
280 283
281 rx_queue->buf_page = rx_buf->page; 284 rx_queue->buf_page = rx_buf->page;
282 rx_queue->buf_dma_addr = dma_addr; 285 rx_queue->buf_dma_addr = dma_addr;
283 rx_queue->buf_data = ((char *) page_address(rx_buf->page) + 286 rx_queue->buf_data = (page_address(rx_buf->page) +
284 EFX_PAGE_IP_ALIGN); 287 EFX_PAGE_IP_ALIGN);
285 } 288 }
286 289
287 offset = RX_DATA_OFFSET(rx_queue->buf_data);
288 rx_buf->len = bytes; 290 rx_buf->len = bytes;
289 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
290 rx_buf->data = rx_queue->buf_data; 291 rx_buf->data = rx_queue->buf_data;
292 offset = efx_rx_buf_offset(rx_buf);
293 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
291 294
292 /* Try to pack multiple buffers per page */ 295 /* Try to pack multiple buffers per page */
293 if (efx->rx_buffer_order == 0) { 296 if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
295 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 298 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
296 offset += ((bytes + 0x1ff) & ~0x1ff); 299 offset += ((bytes + 0x1ff) & ~0x1ff);
297 300
298 space = RX_PAGE_SIZE(efx) - offset; 301 space = efx_rx_buf_size(efx) - offset;
299 if (space >= bytes) { 302 if (space >= bytes) {
300 /* Refs dropped on kernel releasing each skb */ 303 /* Refs dropped on kernel releasing each skb */
301 get_page(rx_queue->buf_page); 304 get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
344 EFX_BUG_ON_PARANOID(rx_buf->skb); 347 EFX_BUG_ON_PARANOID(rx_buf->skb);
345 if (rx_buf->unmap_addr) { 348 if (rx_buf->unmap_addr) {
346 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 349 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
347 RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); 350 efx_rx_buf_size(efx),
351 PCI_DMA_FROMDEVICE);
348 rx_buf->unmap_addr = 0; 352 rx_buf->unmap_addr = 0;
349 } 353 }
350 } else if (likely(rx_buf->skb)) { 354 } else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
400 return 0; 404 return 0;
401 405
402 /* Record minimum fill level */ 406 /* Record minimum fill level */
403 if (unlikely(fill_level < rx_queue->min_fill)) 407 if (unlikely(fill_level < rx_queue->min_fill)) {
404 if (fill_level) 408 if (fill_level)
405 rx_queue->min_fill = fill_level; 409 rx_queue->min_fill = fill_level;
410 }
406 411
407 /* Acquire RX add lock. If this lock is contended, then a fast 412 /* Acquire RX add lock. If this lock is contended, then a fast
408 * fill must already be in progress (e.g. in the refill 413 * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
552 struct skb_frag_struct frags; 557 struct skb_frag_struct frags;
553 558
554 frags.page = rx_buf->page; 559 frags.page = rx_buf->page;
555 frags.page_offset = RX_BUF_OFFSET(rx_buf); 560 frags.page_offset = efx_rx_buf_offset(rx_buf);
556 frags.size = rx_buf->len; 561 frags.size = rx_buf->len;
557 562
558 lro_receive_frags(lro_mgr, &frags, rx_buf->len, 563 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
597 if (unlikely(rx_buf->len > hdr_len)) { 602 if (unlikely(rx_buf->len > hdr_len)) {
598 struct skb_frag_struct *frag = skb_shinfo(skb)->frags; 603 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
599 frag->page = rx_buf->page; 604 frag->page = rx_buf->page;
600 frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; 605 frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
601 frag->size = skb->len - hdr_len; 606 frag->size = skb->len - hdr_len;
602 skb_shinfo(skb)->nr_frags = 1; 607 skb_shinfo(skb)->nr_frags = 1;
603 skb->data_len = frag->size; 608 skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
851 /* For a page that is part-way through splitting into RX buffers */ 856 /* For a page that is part-way through splitting into RX buffers */
852 if (rx_queue->buf_page != NULL) { 857 if (rx_queue->buf_page != NULL) {
853 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, 858 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
854 RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); 859 efx_rx_buf_size(rx_queue->efx),
860 PCI_DMA_FROMDEVICE);
855 __free_pages(rx_queue->buf_page, 861 __free_pages(rx_queue->buf_page,
856 rx_queue->efx->rx_buffer_order); 862 rx_queue->efx->rx_buffer_order);
857 rx_queue->buf_page = NULL; 863 rx_queue->buf_page = NULL;
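[Editor's note] To make the buffer-packing arithmetic in efx_init_rx_buffer_page() concrete, a worked example under assumed values (4 KiB pages, rx_buffer_order 0, 1600-byte RX buffers); the numbers are illustrative, not taken from the patch:

	/*
	 * stride               = (1600 + 0x1ff) & ~0x1ff = 2048
	 * efx_rx_buf_size(efx) = PAGE_SIZE << 0          = 4096
	 * first buffer offset  = EFX_PAGE_IP_ALIGN       = 2 (typically)
	 * second buffer offset = 2 + 2048                = 2050
	 * remaining space      = 4096 - 2050             = 2046 >= 1600
	 *
	 * so two 1600-byte buffers are packed into each zero-order page.
	 */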
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index cbda15946e8f..3b2de9fe7f27 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
290 290
291 payload = &state->payload; 291 payload = &state->payload;
292 292
293 received = (struct efx_loopback_payload *)(char *) buf_ptr; 293 received = (struct efx_loopback_payload *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr; 294 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check; 295 received->ip.check = payload->ip.check;
296 296
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
424 * interrupt handler. */ 424 * interrupt handler. */
425 smp_wmb(); 425 smp_wmb();
426 426
427 if (NET_DEV_REGISTERED(efx)) 427 if (efx_dev_registered(efx))
428 netif_tx_lock_bh(efx->net_dev); 428 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb); 429 rc = efx_xmit(efx, tx_queue, skb);
430 if (NET_DEV_REGISTERED(efx)) 430 if (efx_dev_registered(efx))
431 netif_tx_unlock_bh(efx->net_dev); 431 netif_tx_unlock_bh(efx->net_dev);
432 432
433 if (rc != NETDEV_TX_OK) { 433 if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
453 int tx_done = 0, rx_good, rx_bad; 453 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0; 454 int i, rc = 0;
455 455
456 if (NET_DEV_REGISTERED(efx)) 456 if (efx_dev_registered(efx))
457 netif_tx_lock_bh(efx->net_dev); 457 netif_tx_lock_bh(efx->net_dev);
458 458
459 /* Count the number of tx completions, and decrement the refcnt. Any 459 /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
465 dev_kfree_skb_any(skb); 465 dev_kfree_skb_any(skb);
466 } 466 }
467 467
468 if (NET_DEV_REGISTERED(efx)) 468 if (efx_dev_registered(efx))
469 netif_tx_unlock_bh(efx->net_dev); 469 netif_tx_unlock_bh(efx->net_dev);
470 470
471 /* Check TX completion and received packet counts */ 471 /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
517 state->packet_count = min(1 << (i << 2), state->packet_count); 517 state->packet_count = min(1 << (i << 2), state->packet_count);
518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 518 state->skbs = kzalloc(sizeof(state->skbs[0]) *
519 state->packet_count, GFP_KERNEL); 519 state->packet_count, GFP_KERNEL);
520 if (!state->skbs)
521 return -ENOMEM;
520 state->flush = 0; 522 state->flush = 0;
521 523
522 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
700 * "flushing" so all inflight packets are dropped */ 702 * "flushing" so all inflight packets are dropped */
701 BUG_ON(efx->loopback_selftest); 703 BUG_ON(efx->loopback_selftest);
702 state->flush = 1; 704 state->flush = 1;
703 efx->loopback_selftest = (void *)state; 705 efx->loopback_selftest = state;
704 706
705 rc = efx_test_loopbacks(efx, tests, loopback_modes); 707 rc = efx_test_loopbacks(efx, tests, loopback_modes);
706 708
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 725d1a539c49..b27849523990 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -106,28 +106,27 @@
106 106
107static const u8 xgphy_max_temperature = 90; 107static const u8 xgphy_max_temperature = 90;
108 108
109void sfe4001_poweroff(struct efx_nic *efx) 109static void sfe4001_poweroff(struct efx_nic *efx)
110{ 110{
111 struct efx_i2c_interface *i2c = &efx->i2c; 111 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
112 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
112 113
113 u8 cfg, out, in; 114 /* Turn off all power rails and disable outputs */
115 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
116 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
117 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
114 118
115 EFX_INFO(efx, "%s\n", __func__); 119 /* Clear any over-temperature alert */
116 120 i2c_smbus_read_byte_data(hwmon_client, RSL);
117 /* Turn off all power rails */ 121}
118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120
121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124 122
125 /* Disable port 0 outputs on IO expander */ 123static void sfe4001_fini(struct efx_nic *efx)
126 cfg = 0xff; 124{
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 125 EFX_INFO(efx, "%s\n", __func__);
128 126
129 /* Clear any over-temperature alert */ 127 sfe4001_poweroff(efx);
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 128 i2c_unregister_device(efx->board_info.ioexp_client);
129 i2c_unregister_device(efx->board_info.hwmon_client);
131} 130}
132 131
133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 132/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -143,14 +142,26 @@ MODULE_PARM_DESC(phy_flash_cfg,
143 * be turned on before the PHY can be used. 142 * be turned on before the PHY can be used.
144 * Context: Process context, rtnl lock held 143 * Context: Process context, rtnl lock held
145 */ 144 */
146int sfe4001_poweron(struct efx_nic *efx) 145int sfe4001_init(struct efx_nic *efx)
147{ 146{
148 struct efx_i2c_interface *i2c = &efx->i2c; 147 struct i2c_client *hwmon_client, *ioexp_client;
149 unsigned int count; 148 unsigned int count;
150 int rc; 149 int rc;
151 u8 out, in, cfg; 150 u8 out;
152 efx_dword_t reg; 151 efx_dword_t reg;
153 152
153 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
154 if (!hwmon_client)
155 return -EIO;
156 efx->board_info.hwmon_client = hwmon_client;
157
158 ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
159 if (!ioexp_client) {
160 rc = -EIO;
161 goto fail_hwmon;
162 }
163 efx->board_info.ioexp_client = ioexp_client;
164
154 /* 10Xpress has fixed-function LED pins, so there is no board-specific 165 /* 10Xpress has fixed-function LED pins, so there is no board-specific
155 * blink code. */ 166 * blink code. */
156 efx->board_info.blink = tenxpress_phy_blink; 167 efx->board_info.blink = tenxpress_phy_blink;
@@ -166,44 +177,45 @@ int sfe4001_poweron(struct efx_nic *efx)
166 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 177 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
167 udelay(10); 178 udelay(10);
168 179
180 efx->board_info.fini = sfe4001_fini;
181
169 /* Set DSP over-temperature alert threshold */ 182 /* Set DSP over-temperature alert threshold */
170 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); 183 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
171 rc = efx_i2c_write(i2c, MAX6647, WLHO, 184 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
172 &xgphy_max_temperature, 1); 185 xgphy_max_temperature);
173 if (rc) 186 if (rc)
174 goto fail1; 187 goto fail_ioexp;
175 188
176 /* Read it back and verify */ 189 /* Read it back and verify */
177 rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1); 190 rc = i2c_smbus_read_byte_data(hwmon_client, RLHN);
178 if (rc) 191 if (rc < 0)
179 goto fail1; 192 goto fail_ioexp;
180 if (in != xgphy_max_temperature) { 193 if (rc != xgphy_max_temperature) {
181 rc = -EFAULT; 194 rc = -EFAULT;
182 goto fail1; 195 goto fail_ioexp;
183 } 196 }
184 197
185 /* Clear any previous over-temperature alert */ 198 /* Clear any previous over-temperature alert */
186 rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 199 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
187 if (rc) 200 if (rc < 0)
188 goto fail1; 201 goto fail_ioexp;
189 202
190 /* Enable port 0 and port 1 outputs on IO expander */ 203 /* Enable port 0 and port 1 outputs on IO expander */
191 cfg = 0x00; 204 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
192 rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
193 if (rc) 205 if (rc)
194 goto fail1; 206 goto fail_ioexp;
195 cfg = 0xff & ~(1 << P1_SPARE_LBN); 207 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
196 rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 208 0xff & ~(1 << P1_SPARE_LBN));
197 if (rc) 209 if (rc)
198 goto fail2; 210 goto fail_on;
199 211
200 /* Turn all power off then wait 1 sec. This ensures PHY is reset */ 212 /* Turn all power off then wait 1 sec. This ensures PHY is reset */
201 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | 213 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
202 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | 214 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
203 (0 << P0_EN_1V0X_LBN)); 215 (0 << P0_EN_1V0X_LBN));
204 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 216 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
205 if (rc) 217 if (rc)
206 goto fail3; 218 goto fail_on;
207 219
208 schedule_timeout_uninterruptible(HZ); 220 schedule_timeout_uninterruptible(HZ);
209 count = 0; 221 count = 0;
@@ -215,26 +227,26 @@ int sfe4001_poweron(struct efx_nic *efx)
215 if (sfe4001_phy_flash_cfg) 227 if (sfe4001_phy_flash_cfg)
216 out |= 1 << P0_EN_3V3X_LBN; 228 out |= 1 << P0_EN_3V3X_LBN;
217 229
218 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 230 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
219 if (rc) 231 if (rc)
220 goto fail3; 232 goto fail_on;
221 msleep(10); 233 msleep(10);
222 234
223 /* Turn on 1V power rail */ 235 /* Turn on 1V power rail */
224 out &= ~(1 << P0_EN_1V0X_LBN); 236 out &= ~(1 << P0_EN_1V0X_LBN);
225 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 237 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
226 if (rc) 238 if (rc)
227 goto fail3; 239 goto fail_on;
228 240
229 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count); 241 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
230 242
231 schedule_timeout_uninterruptible(HZ); 243 schedule_timeout_uninterruptible(HZ);
232 244
233 /* Check DSP is powered */ 245 /* Check DSP is powered */
234 rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1); 246 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
235 if (rc) 247 if (rc < 0)
236 goto fail3; 248 goto fail_on;
237 if (in & (1 << P1_AFE_PWD_LBN)) 249 if (rc & (1 << P1_AFE_PWD_LBN))
238 goto done; 250 goto done;
239 251
240 /* DSP doesn't look powered in flash config mode */ 252 /* DSP doesn't look powered in flash config mode */
@@ -244,23 +256,17 @@ int sfe4001_poweron(struct efx_nic *efx)
244 256
245 EFX_INFO(efx, "timed out waiting for power\n"); 257 EFX_INFO(efx, "timed out waiting for power\n");
246 rc = -ETIMEDOUT; 258 rc = -ETIMEDOUT;
247 goto fail3; 259 goto fail_on;
248 260
249done: 261done:
250 EFX_INFO(efx, "PHY is powered on\n"); 262 EFX_INFO(efx, "PHY is powered on\n");
251 return 0; 263 return 0;
252 264
253fail3: 265fail_on:
254 /* Turn off all power rails */ 266 sfe4001_poweroff(efx);
255 out = 0xff; 267fail_ioexp:
256 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 268 i2c_unregister_device(ioexp_client);
257 /* Disable port 1 outputs on IO expander */ 269fail_hwmon:
258 out = 0xff; 270 i2c_unregister_device(hwmon_client);
259 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
260fail2:
261 /* Disable port 0 outputs on IO expander */
262 out = 0xff;
263 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
264fail1:
265 return rc; 271 return rc;
266} 272}
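[Editor's note] The conversion above replaces the driver's private bit-banging API with the kernel's SMBus helpers. A hedged sketch of the resulting calling convention, reusing the MAX6647 RSL register constant defined elsewhere in sfe4001.c: i2c_smbus_read_byte_data() returns the byte read or a negative errno, which is why the patch checks reads with "rc < 0" but writes with "rc".

	static int example_read_temp_alert(struct efx_nic *efx)
	{
		struct i2c_client *client = efx->board_info.hwmon_client;
		int rc;

		rc = i2c_smbus_read_byte_data(client, RSL);
		if (rc < 0)
			return rc;	/* I2C/SMBus failure */
		return rc & 0xff;	/* status byte read from the device */
	}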
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index b1cd6deec01f..c0146061c326 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
211 int rc = 0; 211 int rc = 0;
212 212
213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
214 if (!phy_data)
215 return -ENOMEM;
214 efx->phy_data = phy_data; 216 efx->phy_data = phy_data;
215 217
216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
376 * perform a special software reset */ 378 * perform a special software reset */
377 if ((phy_data->tx_disabled && !efx->tx_disabled) || 379 if ((phy_data->tx_disabled && !efx->tx_disabled) ||
378 loop_change) { 380 loop_change) {
379 (void) tenxpress_special_reset(efx); 381 tenxpress_special_reset(efx);
380 falcon_reset_xaui(efx); 382 falcon_reset_xaui(efx);
381 } 383 }
382 384
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 9b436f5b4888..5cdd082ab8f6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
387 if (unlikely(tx_queue->stopped)) { 387 if (unlikely(tx_queue->stopped)) {
388 fill_level = tx_queue->insert_count - tx_queue->read_count; 388 fill_level = tx_queue->insert_count - tx_queue->read_count;
389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
390 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); 390 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
391 391
392 /* Do this under netif_tx_lock(), to avoid racing 392 /* Do this under netif_tx_lock(), to avoid racing
393 * with efx_xmit(). */ 393 * with efx_xmit(). */
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
639 base_dma = tsoh->dma_addr & PAGE_MASK; 639 base_dma = tsoh->dma_addr & PAGE_MASK;
640 640
641 p = &tx_queue->tso_headers_free; 641 p = &tx_queue->tso_headers_free;
642 while (*p != NULL) 642 while (*p != NULL) {
643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 643 if (((unsigned long)*p & PAGE_MASK) == base_kva)
644 *p = (*p)->next; 644 *p = (*p)->next;
645 else 645 else
646 p = &(*p)->next; 646 p = &(*p)->next;
647 }
647 648
648 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 649 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
649} 650}
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
939 940
940 /* Allocate a DMA-mapped header buffer. */ 941 /* Allocate a DMA-mapped header buffer. */
941 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 942 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
942 if (tx_queue->tso_headers_free == NULL) 943 if (tx_queue->tso_headers_free == NULL) {
943 if (efx_tsoh_block_alloc(tx_queue)) 944 if (efx_tsoh_block_alloc(tx_queue))
944 return -1; 945 return -1;
946 }
945 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
946 tsoh = tx_queue->tso_headers_free; 948 tsoh = tx_queue->tso_headers_free;
947 tx_queue->tso_headers_free = tsoh->next; 949 tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1106{ 1108{
1107 unsigned i; 1109 unsigned i;
1108 1110
1109 if (tx_queue->buffer) 1111 if (tx_queue->buffer) {
1110 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1112 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
1111 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1113 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1114 }
1112 1115
1113 while (tx_queue->tso_headers_free != NULL) 1116 while (tx_queue->tso_headers_free != NULL)
1114 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1117 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
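[Editor's note] The efx_tsoh_block_free() hunk above relies on the classic pointer-to-pointer idiom for unlinking matching list nodes without special-casing the head. A standalone illustration with a hypothetical node type:

	struct example_node {
		struct example_node *next;
	};

	static void example_remove_matching(struct example_node **p,
					    int (*match)(struct example_node *))
	{
		while (*p != NULL) {
			if (match(*p))
				*p = (*p)->next;	/* unlink; stay in place */
			else
				p = &(*p)->next;	/* advance */
		}
	}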
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index dca62f190198..35ab19c27f8d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,7 @@
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
20 20
21/* XAUI resets if link not detected */ 21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 3b9f9ddbc372..f3684ad28887 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
85 int rc; 85 int rc;
86 86
87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
88 efx->phy_data = (void *) phy_data; 88 if (!phy_data)
89 return -ENOMEM;
90 efx->phy_data = phy_data;
89 91
90 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 92 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
91 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4b0f03358777..c83406f4f2a7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -284,6 +284,86 @@ static void sky2_power_aux(struct sky2_hw *hw)
284 PC_VAUX_ON | PC_VCC_OFF)); 284 PC_VAUX_ON | PC_VCC_OFF));
285} 285}
286 286
287static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
288{
289 u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
290 int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
291 u32 reg;
292
293 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
294
295 switch (state) {
296 case PCI_D0:
297 break;
298
299 case PCI_D1:
300 power_control |= 1;
301 break;
302
303 case PCI_D2:
304 power_control |= 2;
305 break;
306
307 case PCI_D3hot:
308 case PCI_D3cold:
309 power_control |= 3;
310 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
311 /* additional power saving measures */
312 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
313
314 /* set gating core clock for LTSSM in L1 state */
315 reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
316 /* auto clock gated scheme controlled by CLKREQ */
317 P_ASPM_A1_MODE_SELECT |
318 /* enable Gate Root Core Clock */
319 P_CLK_GATE_ROOT_COR_ENA;
320
321 if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
322 /* enable Clock Power Management (CLKREQ) */
323 u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
324
325 ctrl |= PCI_EXP_DEVCTL_AUX_PME;
326 sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
327 } else
328 /* force CLKREQ Enable in Our4 (A1b only) */
329 reg |= P_ASPM_FORCE_CLKREQ_ENA;
330
331 /* set Mask Register for Release/Gate Clock */
332 sky2_pci_write32(hw, PCI_DEV_REG5,
333 P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
334 P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
335 P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
336 } else
337 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
338
339 /* put CPU into reset state */
340 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
341 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
342 /* put CPU into halt state */
343 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
344
345 if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
346 reg = sky2_pci_read32(hw, PCI_DEV_REG1);
347 /* force to PCIe L1 */
348 reg |= PCI_FORCE_PEX_L1;
349 sky2_pci_write32(hw, PCI_DEV_REG1, reg);
350 }
351 break;
352
353 default:
354 dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d)\n",
355 state);
356 return;
357 }
358
359 power_control |= PCI_PM_CTRL_PME_ENABLE;
360 /* Finally, set the new power state. */
361 sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
362
363 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
364 sky2_pci_read32(hw, B0_CTST);
365}
366
287static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 367static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
288{ 368{
289 u16 reg; 369 u16 reg;
@@ -619,28 +699,71 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
619 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 699 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
620} 700}
621 701
622static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) 702static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
703static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
704
705static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
623{ 706{
624 u32 reg1; 707 u32 reg1;
625 static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
626 static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
627 708
628 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 709 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
629 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
630 /* Turn on/off phy power saving */ 711 reg1 &= ~phy_power[port];
631 if (onoff)
632 reg1 &= ~phy_power[port];
633 else
634 reg1 |= phy_power[port];
635 712
636 if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 713 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
637 reg1 |= coma_mode[port]; 714 reg1 |= coma_mode[port];
638 715
639 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 716 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
640 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 717 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
641 sky2_pci_read32(hw, PCI_DEV_REG1); 718 sky2_pci_read32(hw, PCI_DEV_REG1);
719}
720
721static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
722{
723 u32 reg1;
724 u16 ctrl;
725
726 /* release GPHY Control reset */
727 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
728
729 /* release GMAC reset */
730 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
642 731
643 udelay(100); 732 if (hw->flags & SKY2_HW_NEWER_PHY) {
733 /* select page 2 to access MAC control register */
734 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
735
736 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
737 /* allow GMII Power Down */
738 ctrl &= ~PHY_M_MAC_GMIF_PUP;
739 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
740
741 /* set page register back to 0 */
742 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
743 }
744
745 /* setup General Purpose Control Register */
746 gma_write16(hw, port, GM_GP_CTRL,
747 GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
748
749 if (hw->chip_id != CHIP_ID_YUKON_EC) {
750 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
751 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
752
753 /* enable Power Down */
754 ctrl |= PHY_M_PC_POW_D_ENA;
755 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
756 }
757
758 /* set IEEE compatible Power Down Mode (dev. #4.99) */
759 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
760 }
761
762 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
763 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
764 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
765 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
766 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
644} 767}
645 768
646/* Force a renegotiation */ 769/* Force a renegotiation */
@@ -675,8 +798,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
675 798
676 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); 799 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
677 sky2->flow_mode = FC_NONE; 800 sky2->flow_mode = FC_NONE;
678 sky2_phy_power(hw, port, 1); 801
679 sky2_phy_reinit(sky2); 802 spin_lock_bh(&sky2->phy_lock);
803 sky2_phy_power_up(hw, port);
804 sky2_phy_init(hw, port);
805 spin_unlock_bh(&sky2->phy_lock);
680 806
681 sky2->flow_mode = save_mode; 807 sky2->flow_mode = save_mode;
682 sky2->advertising = ctrl; 808 sky2->advertising = ctrl;
@@ -781,6 +907,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
781 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 907 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
782 908
783 spin_lock_bh(&sky2->phy_lock); 909 spin_lock_bh(&sky2->phy_lock);
910 sky2_phy_power_up(hw, port);
784 sky2_phy_init(hw, port); 911 sky2_phy_init(hw, port);
785 spin_unlock_bh(&sky2->phy_lock); 912 spin_unlock_bh(&sky2->phy_lock);
786 913
@@ -1159,17 +1286,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1159} 1286}
1160 1287
1161#ifdef SKY2_VLAN_TAG_USED 1288#ifdef SKY2_VLAN_TAG_USED
1162static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1289static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
1163{ 1290{
1164 struct sky2_port *sky2 = netdev_priv(dev); 1291 if (onoff) {
1165 struct sky2_hw *hw = sky2->hw;
1166 u16 port = sky2->port;
1167
1168 netif_tx_lock_bh(dev);
1169 napi_disable(&hw->napi);
1170
1171 sky2->vlgrp = grp;
1172 if (grp) {
1173 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1292 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1174 RX_VLAN_STRIP_ON); 1293 RX_VLAN_STRIP_ON);
1175 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1294 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
@@ -1180,6 +1299,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
1180 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1299 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1181 TX_VLAN_TAG_OFF); 1300 TX_VLAN_TAG_OFF);
1182 } 1301 }
1302}
1303
1304static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1305{
1306 struct sky2_port *sky2 = netdev_priv(dev);
1307 struct sky2_hw *hw = sky2->hw;
1308 u16 port = sky2->port;
1309
1310 netif_tx_lock_bh(dev);
1311 napi_disable(&hw->napi);
1312
1313 sky2->vlgrp = grp;
1314 sky2_set_vlan_mode(hw, port, grp != NULL);
1183 1315
1184 sky2_read32(hw, B0_Y2_SP_LISR); 1316 sky2_read32(hw, B0_Y2_SP_LISR);
1185 napi_enable(&hw->napi); 1317 napi_enable(&hw->napi);
@@ -1380,8 +1512,6 @@ static int sky2_up(struct net_device *dev)
1380 if (!sky2->rx_ring) 1512 if (!sky2->rx_ring)
1381 goto err_out; 1513 goto err_out;
1382 1514
1383 sky2_phy_power(hw, port, 1);
1384
1385 sky2_mac_init(hw, port); 1515 sky2_mac_init(hw, port);
1386 1516
1387 /* Register is number of 4K blocks on internal RAM buffer. */ 1517 /* Register is number of 4K blocks on internal RAM buffer. */
@@ -1418,6 +1548,10 @@ static int sky2_up(struct net_device *dev)
1418 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1548 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1419 TX_RING_SIZE - 1); 1549 TX_RING_SIZE - 1);
1420 1550
1551#ifdef SKY2_VLAN_TAG_USED
1552 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1553#endif
1554
1421 err = sky2_rx_start(sky2); 1555 err = sky2_rx_start(sky2);
1422 if (err) 1556 if (err)
1423 goto err_out; 1557 goto err_out;
@@ -1758,7 +1892,7 @@ static int sky2_down(struct net_device *dev)
1758 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1892 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1759 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1893 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1760 1894
1761 sky2_phy_power(hw, port, 0); 1895 sky2_phy_power_down(hw, port);
1762 1896
1763 netif_carrier_off(dev); 1897 netif_carrier_off(dev);
1764 1898
@@ -2732,6 +2866,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2732 hw->flags = SKY2_HW_GIGABIT 2866 hw->flags = SKY2_HW_GIGABIT
2733 | SKY2_HW_NEWER_PHY 2867 | SKY2_HW_NEWER_PHY
2734 | SKY2_HW_ADV_POWER_CTL; 2868 | SKY2_HW_ADV_POWER_CTL;
2869
2870 /* check for Rev. A1 dev 4200 */
2871 if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
2872 hw->flags |= SKY2_HW_CLK_POWER;
2735 break; 2873 break;
2736 2874
2737 case CHIP_ID_YUKON_EX: 2875 case CHIP_ID_YUKON_EX:
@@ -2782,6 +2920,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2782 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2920 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2783 hw->flags |= SKY2_HW_FIBRE_PHY; 2921 hw->flags |= SKY2_HW_FIBRE_PHY;
2784 2922
2923 hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
2924 if (hw->pm_cap == 0) {
2925 dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
2926 return -EIO;
2927 }
2785 2928
2786 hw->ports = 1; 2929 hw->ports = 1;
2787 t8 = sky2_read8(hw, B2_Y2_HW_RES); 2930 t8 = sky2_read8(hw, B2_Y2_HW_RES);
@@ -4353,7 +4496,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4353 4496
4354 pci_save_state(pdev); 4497 pci_save_state(pdev);
4355 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4498 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4356 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4499 sky2_power_state(hw, pci_choose_state(pdev, state));
4357 4500
4358 return 0; 4501 return 0;
4359} 4502}
@@ -4366,9 +4509,7 @@ static int sky2_resume(struct pci_dev *pdev)
4366 if (!hw) 4509 if (!hw)
4367 return 0; 4510 return 0;
4368 4511
4369 err = pci_set_power_state(pdev, PCI_D0); 4512 sky2_power_state(hw, PCI_D0);
4370 if (err)
4371 goto out;
4372 4513
4373 err = pci_restore_state(pdev); 4514 err = pci_restore_state(pdev);
4374 if (err) 4515 if (err)
@@ -4436,8 +4577,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
4436 pci_enable_wake(pdev, PCI_D3cold, wol); 4577 pci_enable_wake(pdev, PCI_D3cold, wol);
4437 4578
4438 pci_disable_device(pdev); 4579 pci_disable_device(pdev);
4439 pci_set_power_state(pdev, PCI_D3hot); 4580 sky2_power_state(hw, PCI_D3hot);
4440
4441} 4581}
4442 4582
4443static struct pci_driver sky2_driver = { 4583static struct pci_driver sky2_driver = {
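[Editor's note] For reference (standard PCI PM behaviour, not text from the patch), the D-state encoding that sky2_power_state() writes into the low two bits of PCI_PM_CTRL:

	/*
	 * PCI_D0                -> power_control |= 0 (no change)
	 * PCI_D1                -> power_control |= 1
	 * PCI_D2                -> power_control |= 2
	 * PCI_D3hot / PCI_D3cold-> power_control |= 3
	 *
	 * PCI_PM_CTRL_PME_ENABLE is then OR'd in unconditionally so PME
	 * generation stays armed, e.g. for wake-on-LAN.
	 */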
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index c0a5eea20007..1fa82bf029d9 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -28,6 +28,11 @@ enum pci_dev_reg_1 {
28 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 28 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
29 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ 29 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
30 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ 30 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
31
32 PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */
33 PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
34 PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
35 PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
31}; 36};
32 37
33enum pci_dev_reg_2 { 38enum pci_dev_reg_2 {
@@ -45,7 +50,11 @@ enum pci_dev_reg_2 {
45 50
46/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ 51/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
47enum pci_dev_reg_4 { 52enum pci_dev_reg_4 {
48 /* (Link Training & Status State Machine) */ 53 /* (Link Training & Status State Machine) */
54 P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */
55#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK)
56 P_PEX_LTSSM_L1_STAT = 0x34,
57 P_PEX_LTSSM_DET_STAT = 0x01,
49 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ 58 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
50 /* (Active State Power Management) */ 59 /* (Active State Power Management) */
51 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ 60 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
@@ -454,6 +463,9 @@ enum yukon_ex_rev {
454 CHIP_REV_YU_EX_A0 = 1, 463 CHIP_REV_YU_EX_A0 = 1,
455 CHIP_REV_YU_EX_B0 = 2, 464 CHIP_REV_YU_EX_B0 = 2,
456}; 465};
466enum yukon_supr_rev {
467 CHIP_REV_YU_SU_A0 = 0,
468};
457 469
458 470
459/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 471/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
@@ -1143,6 +1155,12 @@ enum {
1143 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ 1155 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1144}; 1156};
1145 1157
1158/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
1159enum {
1160 PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */
1161 PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */
1162};
1163
1146/* for 10/100 Fast Ethernet PHY (88E3082 only) */ 1164/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1147enum { 1165enum {
1148 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ 1166 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
@@ -1411,6 +1429,7 @@ enum {
1411/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ 1429/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1412enum { 1430enum {
1413 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */ 1431 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1432 PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */
1414 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */ 1433 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1415 PHY_M_MAC_MD_COPPER = 5,/* Copper only */ 1434 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1416 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */ 1435 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
@@ -2052,7 +2071,9 @@ struct sky2_hw {
2052#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2071#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2053#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2072#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2054#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2073#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2074#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
2055 2075
2076 int pm_cap;
2056 u8 chip_id; 2077 u8 chip_id;
2057 u8 chip_rev; 2078 u8 chip_rev;
2058 u8 pmd_type; 2079 u8 pmd_type;
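[Editor's note] A quick worked evaluation of the new LTSSM helper, as used by sky2_power_state() above (arithmetic only; illustrative):

	/*
	 * P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT)
	 *   = (0x34 << 25) & P_PEX_LTSSM_STAT_MSK
	 *   = 0x68000000
	 */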
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 07b3f77e7626..d9f248f23b97 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,8 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34#include <linux/mii.h> 34#include <linux/mii.h>
35#include <linux/phy.h>
36#include <linux/brcmphy.h>
35#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
36#include <linux/ip.h> 38#include <linux/ip.h>
37#include <linux/tcp.h> 39#include <linux/tcp.h>
@@ -64,8 +66,8 @@
64 66
65#define DRV_MODULE_NAME "tg3" 67#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 68#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.92" 69#define DRV_MODULE_VERSION "3.93"
68#define DRV_MODULE_RELDATE "May 2, 2008" 70#define DRV_MODULE_RELDATE "May 22, 2008"
69 71
70#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -203,6 +205,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 209 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 210 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -804,6 +807,569 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
804 return ret; 807 return ret;
805} 808}
806 809
810static int tg3_bmcr_reset(struct tg3 *tp)
811{
812 u32 phy_control;
813 int limit, err;
814
815 /* OK, reset it, and poll the BMCR_RESET bit until it
816 * clears or we time out.
817 */
818 phy_control = BMCR_RESET;
819 err = tg3_writephy(tp, MII_BMCR, phy_control);
820 if (err != 0)
821 return -EBUSY;
822
823 limit = 5000;
824 while (limit--) {
825 err = tg3_readphy(tp, MII_BMCR, &phy_control);
826 if (err != 0)
827 return -EBUSY;
828
829 if ((phy_control & BMCR_RESET) == 0) {
830 udelay(40);
831 break;
832 }
833 udelay(10);
834 }
835 if (limit <= 0)
836 return -EBUSY;
837
838 return 0;
839}
840
841static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
842{
843 struct tg3 *tp = (struct tg3 *)bp->priv;
844 u32 val;
845
846 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
847 return -EAGAIN;
848
849 if (tg3_readphy(tp, reg, &val))
850 return -EIO;
851
852 return val;
853}
854
855static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
856{
857 struct tg3 *tp = (struct tg3 *)bp->priv;
858
859 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860 return -EAGAIN;
861
862 if (tg3_writephy(tp, reg, val))
863 return -EIO;
864
865 return 0;
866}
867
868static int tg3_mdio_reset(struct mii_bus *bp)
869{
870 return 0;
871}
872
873static void tg3_mdio_config(struct tg3 *tp)
874{
875 u32 val;
876
877 if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
878 PHY_INTERFACE_MODE_RGMII)
879 return;
880
881 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
882 MAC_PHYCFG1_RGMII_SND_STAT_EN);
883 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
884 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
885 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
886 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
887 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
888 }
889 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
890
891 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
892 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
893 val |= MAC_PHYCFG2_INBAND_ENABLE;
894 tw32(MAC_PHYCFG2, val);
895
896 val = tr32(MAC_EXT_RGMII_MODE);
897 val &= ~(MAC_RGMII_MODE_RX_INT_B |
898 MAC_RGMII_MODE_RX_QUALITY |
899 MAC_RGMII_MODE_RX_ACTIVITY |
900 MAC_RGMII_MODE_RX_ENG_DET |
901 MAC_RGMII_MODE_TX_ENABLE |
902 MAC_RGMII_MODE_TX_LOWPWR |
903 MAC_RGMII_MODE_TX_RESET);
904 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
905 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
906 val |= MAC_RGMII_MODE_RX_INT_B |
907 MAC_RGMII_MODE_RX_QUALITY |
908 MAC_RGMII_MODE_RX_ACTIVITY |
909 MAC_RGMII_MODE_RX_ENG_DET;
910 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
911 val |= MAC_RGMII_MODE_TX_ENABLE |
912 MAC_RGMII_MODE_TX_LOWPWR |
913 MAC_RGMII_MODE_TX_RESET;
914 }
915 tw32(MAC_EXT_RGMII_MODE, val);
916}
917
918static void tg3_mdio_start(struct tg3 *tp)
919{
920 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
921 mutex_lock(&tp->mdio_bus.mdio_lock);
922 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
923 mutex_unlock(&tp->mdio_bus.mdio_lock);
924 }
925
926 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
927 tw32_f(MAC_MI_MODE, tp->mi_mode);
928 udelay(80);
929
930 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
931 tg3_mdio_config(tp);
932}
933
934static void tg3_mdio_stop(struct tg3 *tp)
935{
936 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
937 mutex_lock(&tp->mdio_bus.mdio_lock);
938 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
939 mutex_unlock(&tp->mdio_bus.mdio_lock);
940 }
941}
942
943static int tg3_mdio_init(struct tg3 *tp)
944{
945 int i;
946 u32 reg;
947 struct phy_device *phydev;
948 struct mii_bus *mdio_bus = &tp->mdio_bus;
949
950 tg3_mdio_start(tp);
951
952 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
953 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
954 return 0;
955
956 memset(mdio_bus, 0, sizeof(*mdio_bus));
957
958 mdio_bus->name = "tg3 mdio bus";
959 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
960 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
961 mdio_bus->priv = tp;
962 mdio_bus->dev = &tp->pdev->dev;
963 mdio_bus->read = &tg3_mdio_read;
964 mdio_bus->write = &tg3_mdio_write;
965 mdio_bus->reset = &tg3_mdio_reset;
966 mdio_bus->phy_mask = ~(1 << PHY_ADDR);
967 mdio_bus->irq = &tp->mdio_irq[0];
968
969 for (i = 0; i < PHY_MAX_ADDR; i++)
970 mdio_bus->irq[i] = PHY_POLL;
971
972 /* The bus registration will look for all the PHYs on the mdio bus.
973 * Unfortunately, it does not ensure the PHY is powered up before
974 * accessing the PHY ID registers. A chip reset is the
975 * quickest way to bring the device back to an operational state.
976 */
977 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
978 tg3_bmcr_reset(tp);
979
980 i = mdiobus_register(mdio_bus);
981 if (i) {
982 printk(KERN_WARNING "%s: mdiobus_register failed (0x%x)\n",
983 tp->dev->name, i);
984 return i;
985 }
986
987 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
988
989 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
990
991 switch (phydev->phy_id) {
992 case TG3_PHY_ID_BCM50610:
993 phydev->interface = PHY_INTERFACE_MODE_RGMII;
994 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
995 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
996 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
997 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
998 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
999 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1000 break;
1001 case TG3_PHY_ID_BCMAC131:
1002 phydev->interface = PHY_INTERFACE_MODE_MII;
1003 break;
1004 }
1005
1006 tg3_mdio_config(tp);
1007
1008 return 0;
1009}
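
One detail worth noting: phy_mask is an ignore mask, so ~(1 << PHY_ADDR) restricts probing to the single expected address. A simplified paraphrase of the core's probe loop in drivers/net/phy/mdio_bus.c (a sketch, not verbatim kernel code):

	for (i = 0; i < PHY_MAX_ADDR; i++) {
		if ((bus->phy_mask & (1 << i)) == 0) {
			/* read MII_PHYSID1/2 at address i and, if a PHY
			 * answers, allocate and register a phy_device */
		}
	}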
1010
1011static void tg3_mdio_fini(struct tg3 *tp)
1012{
1013 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1014 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1015 mdiobus_unregister(&tp->mdio_bus);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 }
1018}
1019
1020/* tp->lock is held. */
1021static void tg3_wait_for_event_ack(struct tg3 *tp)
1022{
1023 int i;
1024
1025 /* Wait for up to 2.5 seconds (250,000 polls of 10 usec each) */
1026 for (i = 0; i < 250000; i++) {
1027 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1028 break;
1029 udelay(10);
1030 }
1031}
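
The handshake this poll loop serializes, pieced together from this helper and tg3_ump_link_report() below:

/*
 * driver:   tg3_wait_for_event_ack()        - prior event consumed?
 * driver:   tg3_write_mem(FW_CMD_*_MBOX...) - command, length, payload
 * driver:   tw32_f(GRC_RX_CPU_EVENT, val | GRC_RX_CPU_DRIVER_EVENT)
 * firmware: handles the mailbox, then clears GRC_RX_CPU_DRIVER_EVENT
 */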
1032
1033/* tp->lock is held. */
1034static void tg3_ump_link_report(struct tg3 *tp)
1035{
1036 u32 reg;
1037 u32 val;
1038
1039 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1040 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1041 return;
1042
1043 tg3_wait_for_event_ack(tp);
1044
1045 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1046
1047 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1048
1049 val = 0;
1050 if (!tg3_readphy(tp, MII_BMCR, &reg))
1051 val = reg << 16;
1052 if (!tg3_readphy(tp, MII_BMSR, &reg))
1053 val |= (reg & 0xffff);
1054 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1055
1056 val = 0;
1057 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1058 val = reg << 16;
1059 if (!tg3_readphy(tp, MII_LPA, &reg))
1060 val |= (reg & 0xffff);
1061 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1062
1063 val = 0;
1064 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1065 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1066 val = reg << 16;
1067 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1068 val |= (reg & 0xffff);
1069 }
1070 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1071
1072 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1073 val = reg << 16;
1074 else
1075 val = 0;
1076 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1077
1078 val = tr32(GRC_RX_CPU_EVENT);
1079 val |= GRC_RX_CPU_DRIVER_EVENT;
1080 tw32_f(GRC_RX_CPU_EVENT, val);
1081}
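
The 14-byte payload written above packs PHY state as four words; that the final half-word is unused padding is an inference from the length value, not stated in the patch:

/*
 * NIC_SRAM_FW_CMD_DATA_MBOX + 0:  (BMCR      << 16) | BMSR
 * NIC_SRAM_FW_CMD_DATA_MBOX + 4:  (ADVERTISE << 16) | LPA
 * NIC_SRAM_FW_CMD_DATA_MBOX + 8:  (CTRL1000  << 16) | STAT1000  (copper only)
 * NIC_SRAM_FW_CMD_DATA_MBOX + 12: (MII_PHYADDR value << 16)
 */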
1082
1083static void tg3_link_report(struct tg3 *tp)
1084{
1085 if (!netif_carrier_ok(tp->dev)) {
1086 if (netif_msg_link(tp))
1087 printk(KERN_INFO PFX "%s: Link is down.\n",
1088 tp->dev->name);
1089 tg3_ump_link_report(tp);
1090 } else if (netif_msg_link(tp)) {
1091 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1092 tp->dev->name,
1093 (tp->link_config.active_speed == SPEED_1000 ?
1094 1000 :
1095 (tp->link_config.active_speed == SPEED_100 ?
1096 100 : 10)),
1097 (tp->link_config.active_duplex == DUPLEX_FULL ?
1098 "full" : "half"));
1099
1100 printk(KERN_INFO PFX
1101 "%s: Flow control is %s for TX and %s for RX.\n",
1102 tp->dev->name,
1103 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1104 "on" : "off",
1105 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1106 "on" : "off");
1107 tg3_ump_link_report(tp);
1108 }
1109}
1110
1111static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1112{
1113 u16 miireg;
1114
1115 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1116 miireg = ADVERTISE_PAUSE_CAP;
1117 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1118 miireg = ADVERTISE_PAUSE_ASYM;
1119 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1120 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1121 else
1122 miireg = 0;
1123
1124 return miireg;
1125}
1126
1127static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1128{
1129 u16 miireg;
1130
1131 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1132 miireg = ADVERTISE_1000XPAUSE;
1133 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1134 miireg = ADVERTISE_1000XPSE_ASYM;
1135 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1136 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1137 else
1138 miireg = 0;
1139
1140 return miireg;
1141}
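
Both helpers encode the same two-bit pause advertisement scheme (the standard 802.3 pairing of PAUSE and ASYM_PAUSE bits), just in the 1000BASE-T and 1000BASE-X register layouts; the mapping they implement is:

/*
 * want TX  want RX  ->  PAUSE  ASYM_PAUSE
 *    1        1           1        0
 *    1        0           0        1
 *    0        1           1        1
 *    0        0           0        0
 */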
1142
1143static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1144{
1145 u8 cap = 0;
1146
1147 if (lcladv & ADVERTISE_PAUSE_CAP) {
1148 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1149 if (rmtadv & LPA_PAUSE_CAP)
1150 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1151 else if (rmtadv & LPA_PAUSE_ASYM)
1152 cap = TG3_FLOW_CTRL_RX;
1153 } else {
1154 if (rmtadv & LPA_PAUSE_CAP)
1155 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1156 }
1157 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1158 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1159 cap = TG3_FLOW_CTRL_TX;
1160 }
1161
1162 return cap;
1163}
1164
1165static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1166{
1167 u8 cap = 0;
1168
1169 if (lcladv & ADVERTISE_1000XPAUSE) {
1170 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1171 if (rmtadv & LPA_1000XPAUSE)
1172 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1173 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1174 cap = TG3_FLOW_CTRL_RX;
1175 } else {
1176 if (rmtadv & LPA_1000XPAUSE)
1177 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1178 }
1179 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1180 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1181 cap = TG3_FLOW_CTRL_TX;
1182 }
1183
1184 return cap;
1185}
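
Likewise, the two resolver helpers above share one truth table, evaluated over the local and link-partner advertisements:

/*
 * local PAUSE  local ASYM  remote PAUSE  remote ASYM  ->  result
 *      1           x            1             x           TX | RX
 *      1           1            0             1           RX only
 *      0           1            1             1           TX only
 *      (any other combination)                            none
 */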
1186
1187static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1188{
1189 u8 autoneg;
1190 u8 flowctrl = 0;
1191 u32 old_rx_mode = tp->rx_mode;
1192 u32 old_tx_mode = tp->tx_mode;
1193
1194 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1195 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1196 else
1197 autoneg = tp->link_config.autoneg;
1198
1199 if (autoneg == AUTONEG_ENABLE &&
1200 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1201 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1202 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1203 else
1204 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1205 } else
1206 flowctrl = tp->link_config.flowctrl;
1207
1208 tp->link_config.active_flowctrl = flowctrl;
1209
1210 if (flowctrl & TG3_FLOW_CTRL_RX)
1211 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1212 else
1213 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1214
1215 if (old_rx_mode != tp->rx_mode)
1216 tw32_f(MAC_RX_MODE, tp->rx_mode);
1217
1218 if (flowctrl & TG3_FLOW_CTRL_TX)
1219 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1220 else
1221 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1222
1223 if (old_tx_mode != tp->tx_mode)
1224 tw32_f(MAC_TX_MODE, tp->tx_mode);
1225}
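
Both call styles that appear later in this patch are worth spelling out: the autoneg path feeds resolved advertisement words in, while the forced path passes zeros so the function falls through to tp->link_config.flowctrl:

	tg3_setup_flow_control(tp, lcl_adv, rmt_adv);	/* autoneg result */
	tg3_setup_flow_control(tp, 0, 0);		/* forced flow control */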
1226
1227static void tg3_adjust_link(struct net_device *dev)
1228{
1229 u8 oldflowctrl, linkmesg = 0;
1230 u32 mac_mode, lcl_adv, rmt_adv;
1231 struct tg3 *tp = netdev_priv(dev);
1232 struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1233
1234 spin_lock(&tp->lock);
1235
1236 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1237 MAC_MODE_HALF_DUPLEX);
1238
1239 oldflowctrl = tp->link_config.active_flowctrl;
1240
1241 if (phydev->link) {
1242 lcl_adv = 0;
1243 rmt_adv = 0;
1244
1245 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1246 mac_mode |= MAC_MODE_PORT_MODE_MII;
1247 else
1248 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1249
1250 if (phydev->duplex == DUPLEX_HALF)
1251 mac_mode |= MAC_MODE_HALF_DUPLEX;
1252 else {
1253 lcl_adv = tg3_advert_flowctrl_1000T(
1254 tp->link_config.flowctrl);
1255
1256 if (phydev->pause)
1257 rmt_adv = LPA_PAUSE_CAP;
1258 if (phydev->asym_pause)
1259 rmt_adv |= LPA_PAUSE_ASYM;
1260 }
1261
1262 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1263 } else
1264 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1265
1266 if (mac_mode != tp->mac_mode) {
1267 tp->mac_mode = mac_mode;
1268 tw32_f(MAC_MODE, tp->mac_mode);
1269 udelay(40);
1270 }
1271
1272 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1273 tw32(MAC_TX_LENGTHS,
1274 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1275 (6 << TX_LENGTHS_IPG_SHIFT) |
1276 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1277 else
1278 tw32(MAC_TX_LENGTHS,
1279 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1280 (6 << TX_LENGTHS_IPG_SHIFT) |
1281 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1282
1283 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1284 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1285 phydev->speed != tp->link_config.active_speed ||
1286 phydev->duplex != tp->link_config.active_duplex ||
1287 oldflowctrl != tp->link_config.active_flowctrl)
1288 linkmesg = 1;
1289
1290 tp->link_config.active_speed = phydev->speed;
1291 tp->link_config.active_duplex = phydev->duplex;
1292
1293 spin_unlock(&tp->lock);
1294
1295 if (linkmesg)
1296 tg3_link_report(tp);
1297}
1298
1299static int tg3_phy_init(struct tg3 *tp)
1300{
1301 struct phy_device *phydev;
1302
1303 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1304 return 0;
1305
1306 /* Bring the PHY back to a known state. */
1307 tg3_bmcr_reset(tp);
1308
1309 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1310
1311 /* Attach the MAC to the PHY. */
1312 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1313 phydev->dev_flags, phydev->interface);
1314 if (IS_ERR(phydev)) {
1315 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1316 return PTR_ERR(phydev);
1317 }
1318
1319 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1320
1321 /* Mask with MAC supported features. */
1322 phydev->supported &= (PHY_GBIT_FEATURES |
1323 SUPPORTED_Pause |
1324 SUPPORTED_Asym_Pause);
1325
1326 phydev->advertising = phydev->supported;
1327
1328 printk(KERN_INFO
1329 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1330 tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1331
1332 return 0;
1333}
1334
1335static void tg3_phy_start(struct tg3 *tp)
1336{
1337 struct phy_device *phydev;
1338
1339 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1340 return;
1341
1342 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1343
1344 if (tp->link_config.phy_is_low_power) {
1345 tp->link_config.phy_is_low_power = 0;
1346 phydev->speed = tp->link_config.orig_speed;
1347 phydev->duplex = tp->link_config.orig_duplex;
1348 phydev->autoneg = tp->link_config.orig_autoneg;
1349 phydev->advertising = tp->link_config.orig_advertising;
1350 }
1351
1352 phy_start(phydev);
1353
1354 phy_start_aneg(phydev);
1355}
1356
1357static void tg3_phy_stop(struct tg3 *tp)
1358{
1359 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1360 return;
1361
1362 phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1363}
1364
1365static void tg3_phy_fini(struct tg3 *tp)
1366{
1367 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1368 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1369 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1370 }
1371}
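
Taken together, these helpers give the driver a complete phylib lifecycle; the call sites appear further down in this patch:

/*
 * tg3_get_invariants() -> tg3_mdio_init()   (register the MDIO bus)
 * tg3_phy_probe()      -> tg3_phy_init()    (phy_connect to the PHY)
 * tg3_open()           -> tg3_phy_start()   (phy_start + renegotiate)
 * reset/reconfig paths -> tg3_phy_stop() ... tg3_phy_start()
 * failure/teardown     -> tg3_phy_fini() and tg3_mdio_fini()
 */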
1372
807 1373 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
808 1374 {
809 1375 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
@@ -861,37 +1427,6 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
861 1427 (val | (1 << 15) | (1 << 4)));
862 1428 }
863 1429
864static int tg3_bmcr_reset(struct tg3 *tp)
865{
866 u32 phy_control;
867 int limit, err;
868
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
871 */
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 limit = 5000;
878 while (limit--) {
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880 if (err != 0)
881 return -EBUSY;
882
883 if ((phy_control & BMCR_RESET) == 0) {
884 udelay(40);
885 break;
886 }
887 udelay(10);
888 }
889 if (limit <= 0)
890 return -EBUSY;
891
892 return 0;
893}
894
895 1430 static void tg3_phy_apply_otp(struct tg3 *tp)
896 1431 {
897 1432 u32 otp, phy;
@@ -1115,8 +1650,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1115 1650 return err;
1116 1651 }
1117 1652
1118static void tg3_link_report(struct tg3 *);
1119
1120 1653 /* This will reset the tigon3 PHY if there is no valid
1121 1654 * link unless the FORCE argument is non-zero.
1122 1655 */
@@ -1406,7 +1939,7 @@ static void tg3_power_down_phy(struct tg3 *tp)
1406 1939 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1407 1940 udelay(40);
1408 1941 return;
1409 } else {
1942 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1410 1943 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1411 1944 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1412 1945 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
@@ -1480,7 +2013,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1480 "requested.\n", 2013 "requested.\n",
1481 tp->dev->name, state); 2014 tp->dev->name, state);
1482 return -EINVAL; 2015 return -EINVAL;
1483 }; 2016 }
1484 2017
1485 power_control |= PCI_PM_CTRL_PME_ENABLE; 2018 power_control |= PCI_PM_CTRL_PME_ENABLE;
1486 2019
@@ -1488,18 +2021,55 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1488 2021 tw32(TG3PCI_MISC_HOST_CTRL,
1489 2022 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1490 2023
1491 if (tp->link_config.phy_is_low_power == 0) {
1492 tp->link_config.phy_is_low_power = 1;
1493 tp->link_config.orig_speed = tp->link_config.speed;
1494 tp->link_config.orig_duplex = tp->link_config.duplex;
1495 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1496 }
2024 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2025 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2026 !tp->link_config.phy_is_low_power) {
2027 struct phy_device *phydev;
2028 u32 advertising;
2029
2030 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2031
2032 tp->link_config.phy_is_low_power = 1;
2033
2034 tp->link_config.orig_speed = phydev->speed;
2035 tp->link_config.orig_duplex = phydev->duplex;
2036 tp->link_config.orig_autoneg = phydev->autoneg;
2037 tp->link_config.orig_advertising = phydev->advertising;
2038
2039 advertising = ADVERTISED_TP |
2040 ADVERTISED_Pause |
2041 ADVERTISED_Autoneg |
2042 ADVERTISED_10baseT_Half;
2043
2044 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2045 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2046 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2047 advertising |=
2048 ADVERTISED_100baseT_Half |
2049 ADVERTISED_100baseT_Full |
2050 ADVERTISED_10baseT_Full;
2051 else
2052 advertising |= ADVERTISED_10baseT_Full;
2053 }
1497 2054
1498 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499 tp->link_config.speed = SPEED_10;
1500 tp->link_config.duplex = DUPLEX_HALF;
1501 tp->link_config.autoneg = AUTONEG_ENABLE;
1502 tg3_setup_phy(tp, 0);
2055 phydev->advertising = advertising;
2056
2057 phy_start_aneg(phydev);
2058 }
2059 } else {
2060 if (tp->link_config.phy_is_low_power == 0) {
2061 tp->link_config.phy_is_low_power = 1;
2062 tp->link_config.orig_speed = tp->link_config.speed;
2063 tp->link_config.orig_duplex = tp->link_config.duplex;
2064 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2065 }
2066
2067 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2068 tp->link_config.speed = SPEED_10;
2069 tp->link_config.duplex = DUPLEX_HALF;
2070 tp->link_config.autoneg = AUTONEG_ENABLE;
2071 tg3_setup_phy(tp, 0);
2072 }
1503 2073 }
1504 2074
1505 2075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -1530,8 +2100,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1530 2100 u32 mac_mode;
1531 2101
1532 2102 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1534 udelay(40);
2103 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2105 udelay(40);
2106 }
1535 2107
1536 2108 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537 2109 mac_mode = MAC_MODE_PORT_MODE_GMII;
@@ -1656,212 +2228,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1656 2228 return 0;
1657 2229 }
1658 2230
1659/* tp->lock is held. */
1660static void tg3_wait_for_event_ack(struct tg3 *tp)
1661{
1662 int i;
1663
1664 /* Wait for up to 2.5 milliseconds */
1665 for (i = 0; i < 250000; i++) {
1666 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1667 break;
1668 udelay(10);
1669 }
1670}
1671
1672/* tp->lock is held. */
1673static void tg3_ump_link_report(struct tg3 *tp)
1674{
1675 u32 reg;
1676 u32 val;
1677
1678 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1679 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1680 return;
1681
1682 tg3_wait_for_event_ack(tp);
1683
1684 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1685
1686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1687
1688 val = 0;
1689 if (!tg3_readphy(tp, MII_BMCR, &reg))
1690 val = reg << 16;
1691 if (!tg3_readphy(tp, MII_BMSR, &reg))
1692 val |= (reg & 0xffff);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1694
1695 val = 0;
1696 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1697 val = reg << 16;
1698 if (!tg3_readphy(tp, MII_LPA, &reg))
1699 val |= (reg & 0xffff);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1701
1702 val = 0;
1703 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1704 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1705 val = reg << 16;
1706 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1707 val |= (reg & 0xffff);
1708 }
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1710
1711 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1712 val = reg << 16;
1713 else
1714 val = 0;
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1716
1717 val = tr32(GRC_RX_CPU_EVENT);
1718 val |= GRC_RX_CPU_DRIVER_EVENT;
1719 tw32_f(GRC_RX_CPU_EVENT, val);
1720}
1721
1722static void tg3_link_report(struct tg3 *tp)
1723{
1724 if (!netif_carrier_ok(tp->dev)) {
1725 if (netif_msg_link(tp))
1726 printk(KERN_INFO PFX "%s: Link is down.\n",
1727 tp->dev->name);
1728 tg3_ump_link_report(tp);
1729 } else if (netif_msg_link(tp)) {
1730 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1731 tp->dev->name,
1732 (tp->link_config.active_speed == SPEED_1000 ?
1733 1000 :
1734 (tp->link_config.active_speed == SPEED_100 ?
1735 100 : 10)),
1736 (tp->link_config.active_duplex == DUPLEX_FULL ?
1737 "full" : "half"));
1738
1739 printk(KERN_INFO PFX
1740 "%s: Flow control is %s for TX and %s for RX.\n",
1741 tp->dev->name,
1742 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1743 "on" : "off",
1744 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1745 "on" : "off");
1746 tg3_ump_link_report(tp);
1747 }
1748}
1749
1750static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1751{
1752 u16 miireg;
1753
1754 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1755 miireg = ADVERTISE_PAUSE_CAP;
1756 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1757 miireg = ADVERTISE_PAUSE_ASYM;
1758 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1759 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1760 else
1761 miireg = 0;
1762
1763 return miireg;
1764}
1765
1766static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1767{
1768 u16 miireg;
1769
1770 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1771 miireg = ADVERTISE_1000XPAUSE;
1772 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1773 miireg = ADVERTISE_1000XPSE_ASYM;
1774 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1775 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1776 else
1777 miireg = 0;
1778
1779 return miireg;
1780}
1781
1782static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1783{
1784 u8 cap = 0;
1785
1786 if (lcladv & ADVERTISE_PAUSE_CAP) {
1787 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1788 if (rmtadv & LPA_PAUSE_CAP)
1789 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1790 else if (rmtadv & LPA_PAUSE_ASYM)
1791 cap = TG3_FLOW_CTRL_RX;
1792 } else {
1793 if (rmtadv & LPA_PAUSE_CAP)
1794 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1795 }
1796 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1797 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1798 cap = TG3_FLOW_CTRL_TX;
1799 }
1800
1801 return cap;
1802}
1803
1804static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1805{
1806 u8 cap = 0;
1807
1808 if (lcladv & ADVERTISE_1000XPAUSE) {
1809 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1810 if (rmtadv & LPA_1000XPAUSE)
1811 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1812 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1813 cap = TG3_FLOW_CTRL_RX;
1814 } else {
1815 if (rmtadv & LPA_1000XPAUSE)
1816 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1817 }
1818 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1819 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1820 cap = TG3_FLOW_CTRL_TX;
1821 }
1822
1823 return cap;
1824}
1825
1826static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1827{
1828 u8 new_tg3_flags = 0;
1829 u32 old_rx_mode = tp->rx_mode;
1830 u32 old_tx_mode = tp->tx_mode;
1831
1832 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1833 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1834 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1835 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1836 remote_adv);
1837 else
1838 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1839 remote_adv);
1840 } else {
1841 new_tg3_flags = tp->link_config.flowctrl;
1842 }
1843
1844 tp->link_config.active_flowctrl = new_tg3_flags;
1845
1846 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1847 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1848 else
1849 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1850
1851 if (old_rx_mode != tp->rx_mode) {
1852 tw32_f(MAC_RX_MODE, tp->rx_mode);
1853 }
1854
1855 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1856 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1857 else
1858 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1859
1860 if (old_tx_mode != tp->tx_mode) {
1861 tw32_f(MAC_TX_MODE, tp->tx_mode);
1862 }
1863}
1864
1865 2231 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1866 2232 {
1867 2233 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
@@ -1906,7 +2272,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
1906 2272 *speed = SPEED_INVALID;
1907 2273 *duplex = DUPLEX_INVALID;
1908 2274 break;
1909 };
2275 }
1910 2276 }
1911 2277
1912 2278 static void tg3_phy_copper_begin(struct tg3 *tp)
@@ -2018,7 +2384,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2018 2384 case SPEED_1000:
2019 2385 bmcr |= TG3_BMCR_SPEED1000;
2020 2386 break;
2021 };
2387 }
2022 2388
2023 2389 if (tp->link_config.duplex == DUPLEX_FULL)
2024 2390 bmcr |= BMCR_FULLDPLX;
@@ -2716,7 +3082,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2716 3082 default:
2717 3083 ret = ANEG_FAILED;
2718 3084 break;
2719 };
3085 }
2720 3086
2721 3087 return ret;
2722 3088 }
@@ -3558,7 +3924,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3558 3924
3559 3925 default:
3560 3926 return -EINVAL;
3561 };
3927 }
3562 3928
3563 3929 /* Do not overwrite any of the map or rp information
3564 3930 * until we are sure we can commit to a new buffer.
@@ -3618,7 +3984,7 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3618 3984
3619 3985 default:
3620 3986 return;
3621 };
3987 }
3622 3988
3623 3989 dest_map->skb = src_map->skb;
3624 3990 pci_unmap_addr_set(dest_map, mapping,
@@ -3828,7 +4194,15 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3828 4194 sblk->status = SD_STATUS_UPDATED |
3829 4195 (sblk->status & ~SD_STATUS_LINK_CHG);
3830 4196 spin_lock(&tp->lock);
3831 tg3_setup_phy(tp, 0);
4197 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4198 tw32_f(MAC_STATUS,
4199 (MAC_STATUS_SYNC_CHANGED |
4200 MAC_STATUS_CFG_CHANGED |
4201 MAC_STATUS_MI_COMPLETION |
4202 MAC_STATUS_LNKSTATE_CHANGED));
4203 udelay(40);
4204 } else
4205 tg3_setup_phy(tp, 0);
3832 4206 spin_unlock(&tp->lock);
3833 4207 }
3834 4208 }
@@ -4116,6 +4490,7 @@ static void tg3_poll_controller(struct net_device *dev)
4116 4490 static void tg3_reset_task(struct work_struct *work)
4117 4491 {
4118 4492 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4493 int err;
4119 4494 unsigned int restart_timer;
4120 4495
4121 4496 tg3_full_lock(tp, 0);
@@ -4127,6 +4502,8 @@ static void tg3_reset_task(struct work_struct *work)
4127 4502
4128 4503 tg3_full_unlock(tp);
4129 4504
4505 tg3_phy_stop(tp);
4506
4130 4507 tg3_netif_stop(tp);
4131 4508
4132 4509 tg3_full_lock(tp, 1);
@@ -4142,7 +4519,8 @@ static void tg3_reset_task(struct work_struct *work)
4142 4519 }
4143 4520
4144 4521 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4145 if (tg3_init_hw(tp, 1))
4522 err = tg3_init_hw(tp, 1);
4523 if (err)
4146 4524 goto out;
4147 4525
4148 4526 tg3_netif_start(tp);
@@ -4152,6 +4530,9 @@ static void tg3_reset_task(struct work_struct *work)
4152 4530
4153 4531 out:
4154 4532 tg3_full_unlock(tp);
4533
4534 if (!err)
4535 tg3_phy_start(tp);
4155 4536 }
4156 4537
4157 4538 static void tg3_dump_short_state(struct tg3 *tp)
@@ -4655,6 +5036,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4655 5036 return 0;
4656 5037 }
4657 5038
5039 tg3_phy_stop(tp);
5040
4658 5041 tg3_netif_stop(tp);
4659 5042
4660 5043 tg3_full_lock(tp, 1);
@@ -4670,6 +5053,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4670 5053
4671 5054 tg3_full_unlock(tp);
4672 5055
5056 if (!err)
5057 tg3_phy_start(tp);
5058
4673 5059 return err;
4674 5060 }
4675 5061
@@ -4961,7 +5347,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
4961 5347
4962 5348 default:
4963 5349 break;
4964 };
5350 }
4965 5351 }
4966 5352
4967 5353 val = tr32(ofs);
@@ -5203,7 +5589,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5203 5589
5204 5590 default:
5205 5591 break;
5206 };
5592 }
5207 5593 }
5208 5594
5209 5595 if (kind == RESET_KIND_INIT ||
@@ -5228,7 +5614,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5228 5614
5229 5615 default:
5230 5616 break;
5231 };
5617 }
5232 5618 }
5233 5619
5234 5620 if (kind == RESET_KIND_SHUTDOWN)
@@ -5257,7 +5643,7 @@ static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5257 5643
5258 5644 default:
5259 5645 break;
5260 };
5646 }
5261 5647 }
5262 5648 }
5263 5649
@@ -5379,6 +5765,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5379 5765
5380 5766 tg3_nvram_lock(tp);
5381 5767
5768 tg3_mdio_stop(tp);
5769
5382 5770 /* No matching tg3_nvram_unlock() after this because
5383 5771 * chip reset below will undo the nvram lock.
5384 5772 */
@@ -5394,7 +5782,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5394 5782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5395 5783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5396 5784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5398 5787 tw32(GRC_FASTBOOT_PC, 0);
5399 5788
5400 5789 /*
@@ -5530,6 +5919,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5530 5919 tw32_f(MAC_MODE, 0);
5531 5920 udelay(40);
5532 5921
5922 tg3_mdio_start(tp);
5923
5533 5924 err = tg3_poll_fw(tp);
5534 5925 if (err)
5535 5926 return err;
@@ -6609,7 +7000,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6609 7000 tg3_abort_hw(tp, 1);
6610 7001 }
6611 7002
6612 if (reset_phy)
7003 if (reset_phy &&
7004 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6613 7005 tg3_phy_reset(tp);
6614 7006
6615 7007 err = tg3_chip_reset(tp);
@@ -6685,7 +7077,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6685 7077 return err;
6686 7078
6687 7079 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6688 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7080 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7081 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6689 7082 /* This value is determined during the probe time DMA
6690 7083 * engine test, tg3_test_dma.
6691 7084 */
@@ -6924,7 +7317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6924 7317 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6925 7318 RDMAC_MODE_LNGREAD_ENAB);
6926 7319
6927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
7320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
6928 7322 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6929 7323 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6930 7324 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
@@ -7092,8 +7486,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7092 7486 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7093 7487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7094 7488 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7095 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7096 val |= (1 << 29);
7489 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7490 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7491 val |= WDMAC_MODE_STATUS_TAG_FIX;
7097 7492
7098 7493 tw32_f(WDMAC_MODE, val);
7099 7494 udelay(40);
@@ -7154,23 +7549,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7154 7549
7155 7550 tp->rx_mode = RX_MODE_ENABLE;
7156 7551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7158 7555 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7159 7556
7160 7557 tw32_f(MAC_RX_MODE, tp->rx_mode);
7161 7558 udelay(10);
7162 7559
7163 if (tp->link_config.phy_is_low_power) {
7164 tp->link_config.phy_is_low_power = 0;
7165 tp->link_config.speed = tp->link_config.orig_speed;
7166 tp->link_config.duplex = tp->link_config.orig_duplex;
7167 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7168 }
7169
7170 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7171 tw32_f(MAC_MI_MODE, tp->mi_mode);
7172 udelay(80);
7173
7174 7560 tw32(MAC_LED_CTRL, tp->led_ctrl);
7175 7561
7176 7562 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
@@ -7217,19 +7603,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7217 7603 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7218 7604 }
7219 7605
7220 err = tg3_setup_phy(tp, 0);
7221 if (err)
7222 return err;
7223
7224 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7226 u32 tmp;
7227
7228 /* Clear CRC stats. */
7229 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7230 tg3_writephy(tp, MII_TG3_TEST1,
7231 tmp | MII_TG3_TEST1_CRC_EN);
7232 tg3_readphy(tp, 0x14, &tmp);
7606 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7607 if (tp->link_config.phy_is_low_power) {
7608 tp->link_config.phy_is_low_power = 0;
7609 tp->link_config.speed = tp->link_config.orig_speed;
7610 tp->link_config.duplex = tp->link_config.orig_duplex;
7611 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7612 }
7613
7614 err = tg3_setup_phy(tp, 0);
7615 if (err)
7616 return err;
7617
7618 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7620 u32 tmp;
7621
7622 /* Clear CRC stats. */
7623 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7624 tg3_writephy(tp, MII_TG3_TEST1,
7625 tmp | MII_TG3_TEST1_CRC_EN);
7626 tg3_readphy(tp, 0x14, &tmp);
7627 }
7233 7628 }
7234 7629 }
7235 7630
@@ -7282,7 +7677,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7282 7677
7283 7678 default:
7284 7679 break;
7285 };
7680 }
7286 7681
7287 7682 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7288 7683 /* Write our heartbeat update interval to APE. */
@@ -7744,6 +8139,8 @@ static int tg3_open(struct net_device *dev)
7744 8139 }
7745 8140 }
7746 8141
8142 tg3_phy_start(tp);
8143
7747 8144 tg3_full_lock(tp, 0);
7748 8145
7749 8146 add_timer(&tp->timer);
@@ -8545,7 +8942,13 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
8545 8942
8546 8943 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8547 8944 {
8548 8945 struct tg3 *tp = netdev_priv(dev);
8946
8947 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8948 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8949 return -EAGAIN;
8950 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8951 }
8549 8952
8550 8953 cmd->supported = (SUPPORTED_Autoneg);
8551 8954
@@ -8582,6 +8985,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8582{ 8985{
8583 struct tg3 *tp = netdev_priv(dev); 8986 struct tg3 *tp = netdev_priv(dev);
8584 8987
8988 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8989 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8990 return -EAGAIN;
8991 return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8992 }
8993
8585 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 8994 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8586 /* These are the only valid advertisement bits allowed. */ 8995 /* These are the only valid advertisement bits allowed. */
8587 if (cmd->autoneg == AUTONEG_ENABLE && 8996 if (cmd->autoneg == AUTONEG_ENABLE &&
@@ -8614,7 +9023,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8614 tp->link_config.advertising = 0; 9023 tp->link_config.advertising = 0;
8615 tp->link_config.speed = cmd->speed; 9024 tp->link_config.speed = cmd->speed;
8616 tp->link_config.duplex = cmd->duplex; 9025 tp->link_config.duplex = cmd->duplex;
8617 } 9026 }
8618 9027
8619 tp->link_config.orig_speed = tp->link_config.speed; 9028 tp->link_config.orig_speed = tp->link_config.speed;
8620 tp->link_config.orig_duplex = tp->link_config.duplex; 9029 tp->link_config.orig_duplex = tp->link_config.duplex;
@@ -8697,7 +9106,10 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8697 9106 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8698 9107 if (value) {
8699 9108 dev->features |= NETIF_F_TSO6;
8700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9110 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9111 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8701 9113 dev->features |= NETIF_F_TSO_ECN;
8702 9114 } else
8703 9115 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -8708,7 +9120,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8708 9120 static int tg3_nway_reset(struct net_device *dev)
8709 9121 {
8710 9122 struct tg3 *tp = netdev_priv(dev);
8711 u32 bmcr;
8712 9123 int r;
8713 9124
8714 9125 if (!netif_running(dev))
@@ -8717,17 +9128,25 @@ static int tg3_nway_reset(struct net_device *dev)
8717 9128 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8718 9129 return -EINVAL;
8719 9130
8720 spin_lock_bh(&tp->lock);
8721 r = -EINVAL;
8722 tg3_readphy(tp, MII_BMCR, &bmcr);
8723 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8724 ((bmcr & BMCR_ANENABLE) ||
8725 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8726 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8727 BMCR_ANENABLE);
8728 r = 0;
9131 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9132 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9133 return -EAGAIN;
9134 r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
9135 } else {
9136 u32 bmcr;
9137
9138 spin_lock_bh(&tp->lock);
9139 r = -EINVAL;
9140 tg3_readphy(tp, MII_BMCR, &bmcr);
9141 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9142 ((bmcr & BMCR_ANENABLE) ||
9143 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9144 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9145 BMCR_ANENABLE);
9146 r = 0;
9147 }
9148 spin_unlock_bh(&tp->lock);
8729 9149 }
8730 spin_unlock_bh(&tp->lock);
8731 9150
8732 9151 return r;
8733 9152 }
@@ -8769,6 +9188,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8769 9188 return -EINVAL;
8770 9189
8771 9190 if (netif_running(dev)) {
9191 tg3_phy_stop(tp);
8772 9192 tg3_netif_stop(tp);
8773 9193 irq_sync = 1;
8774 9194 }
@@ -8792,6 +9212,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8792 9212
8793 9213 tg3_full_unlock(tp);
8794 9214
9215 if (irq_sync && !err)
9216 tg3_phy_start(tp);
9217
8795 9218 return err;
8796 9219 }
8797 9220
@@ -8815,36 +9238,92 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8815 9238 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8816 9239 {
8817 9240 struct tg3 *tp = netdev_priv(dev);
8818 int irq_sync = 0, err = 0;
9241 int err = 0;
8819 9242
8820 if (netif_running(dev)) {
8821 tg3_netif_stop(tp);
8822 irq_sync = 1;
8823 }
8824
8825 tg3_full_lock(tp, irq_sync);
9243 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9244 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9245 return -EAGAIN;
9246
9247 if (epause->autoneg) {
9248 u32 newadv;
9249 struct phy_device *phydev;
8826
8827 if (epause->autoneg)
8828 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8829 else
8830 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8831 if (epause->rx_pause)
8832 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8833 else
8834 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8835 if (epause->tx_pause)
8836 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8837 else
8838 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8839
9250
9251 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
9252
8840 if (netif_running(dev)) {
8841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8842 err = tg3_restart_hw(tp, 1);
8843 if (!err)
8844 tg3_netif_start(tp);
8845 }
9253 if (epause->rx_pause) {
9254 if (epause->tx_pause)
9255 newadv = ADVERTISED_Pause;
9256 else
9257 newadv = ADVERTISED_Pause |
9258 ADVERTISED_Asym_Pause;
9259 } else if (epause->tx_pause) {
9260 newadv = ADVERTISED_Asym_Pause;
9261 } else
9262 newadv = 0;
9263
9264 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9265 u32 oldadv = phydev->advertising &
9266 (ADVERTISED_Pause |
9267 ADVERTISED_Asym_Pause);
9268 if (oldadv != newadv) {
9269 phydev->advertising &=
9270 ~(ADVERTISED_Pause |
9271 ADVERTISED_Asym_Pause);
9272 phydev->advertising |= newadv;
9273 err = phy_start_aneg(phydev);
9274 }
9275 } else {
9276 tp->link_config.advertising &=
9277 ~(ADVERTISED_Pause |
9278 ADVERTISED_Asym_Pause);
9279 tp->link_config.advertising |= newadv;
9280 }
9281 } else {
9282 if (epause->rx_pause)
9283 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9284 else
9285 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8846
8847 tg3_full_unlock(tp);
9286
9287 if (epause->tx_pause)
9288 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9289 else
9290 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9291
9292 if (netif_running(dev))
9293 tg3_setup_flow_control(tp, 0, 0);
9294 }
9295 } else {
9296 int irq_sync = 0;
9297
9298 if (netif_running(dev)) {
9299 tg3_netif_stop(tp);
9300 irq_sync = 1;
9301 }
9302
9303 tg3_full_lock(tp, irq_sync);
9304
9305 if (epause->autoneg)
9306 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9307 else
9308 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9309 if (epause->rx_pause)
9310 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9311 else
9312 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9313 if (epause->tx_pause)
9314 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9315 else
9316 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9317
9318 if (netif_running(dev)) {
9319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9320 err = tg3_restart_hw(tp, 1);
9321 if (!err)
9322 tg3_netif_start(tp);
9323 }
9324
9325 tg3_full_unlock(tp);
9326 }
8848 9327
8849 9328 return err;
8850 9329 }
@@ -8888,7 +9367,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8888 9367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8889 9368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8890 9369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8892 9372 ethtool_op_set_tx_ipv6_csum(dev, data);
8893 9373 else
8894 9374 ethtool_op_set_tx_csum(dev, data);
@@ -9409,7 +9889,8 @@ static int tg3_test_memory(struct tg3 *tp)
9409 9889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9410 9890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9411 9891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9412 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9413 9894 mem_tbl = mem_tbl_5755;
9414 9895 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9415 9896 mem_tbl = mem_tbl_5906;
@@ -9616,7 +10097,8 @@ static int tg3_test_loopback(struct tg3 *tp)
9616 10097 return TG3_LOOPBACK_FAILED;
9617 10098
9618 10099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
10100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9620 10102 int i;
9621 10103 u32 status;
9622 10104
@@ -9644,14 +10126,16 @@ static int tg3_test_loopback(struct tg3 *tp)
9644 10126 err |= TG3_MAC_LOOPBACK_FAILED;
9645 10127
9646 10128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
10129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9648 10131 tw32(TG3_CPMU_CTRL, cpmuctrl);
9649 10132
9650 10133 /* Release the mutex */
9651 10134 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9652 10135 }
9653 10136
9654 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10137 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10138 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9655 10139 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9656 10140 err |= TG3_PHY_LOOPBACK_FAILED;
9657 10141 }
@@ -9678,9 +10162,10 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9678 10162 data[1] = 1;
9679 10163 }
9680 10164 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9681 int err, irq_sync = 0;
10165 int err, err2 = 0, irq_sync = 0;
9682 10166
9683 10167 if (netif_running(dev)) {
10168 tg3_phy_stop(tp);
9684 10169 tg3_netif_stop(tp);
9685 10170 irq_sync = 1;
9686 10171 }
@@ -9721,11 +10206,15 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9721 10206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9722 10207 if (netif_running(dev)) {
9723 10208 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9724 if (!tg3_restart_hw(tp, 1))
10209 err2 = tg3_restart_hw(tp, 1);
10210 if (!err2)
9725 10211 tg3_netif_start(tp);
9726 10212 }
9727 10213
9728 10214 tg3_full_unlock(tp);
10215
10216 if (irq_sync && !err2)
10217 tg3_phy_start(tp);
9729 10218 }
9730 10219 if (tp->link_config.phy_is_low_power)
9731 10220 tg3_set_power_state(tp, PCI_D3hot);
@@ -9738,6 +10227,12 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9738 10227 struct tg3 *tp = netdev_priv(dev);
9739 10228 int err;
9740 10229
10230 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10231 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10232 return -EAGAIN;
10233 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10234 }
10235
9741 10236 switch(cmd) {
9742 10237 case SIOCGMIIPHY:
9743 10238 data->phy_id = PHY_ADDR;
@@ -10280,7 +10775,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
10280 10775 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10281 10776 tg3_get_5755_nvram_info(tp);
10282 10777 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10284 10780 tg3_get_5787_nvram_info(tp);
10285 10781 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10286 10782 tg3_get_5761_nvram_info(tp);
@@ -10611,6 +11107,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10611 11107 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10612 11108 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10613 11109 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11110 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
10614 11111 (tp->nvram_jedecnum == JEDEC_ST) &&
10615 11112 (nvram_cmd & NVRAM_CMD_FIRST)) {
10616 11113
@@ -10793,7 +11290,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10793 11290 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10794 11291 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10795 11292 u32 nic_cfg, led_cfg;
10796 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
11293 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
10797 11294 int eeprom_phy_serdes = 0;
10798 11295
10799 11296 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -10807,6 +11304,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10807 11304 (ver > 0) && (ver < 0x100))
10808 11305 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10809 11306
11307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11308 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11309
10810 11310 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10811 11311 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10812 11312 eeprom_phy_serdes = 1;
@@ -10879,7 +11379,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10879 11379 LED_CTRL_MODE_PHY_2);
10880 11380 break;
10881 11381
10882 };
11382 }
10883 11383
10884 11384 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10885 11385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
@@ -10931,6 +11431,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10931 11431 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10932 11432 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10933 11433 }
11434
11435 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11436 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11437 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11438 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11439 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11440 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
10934 11441 }
10935 11442 }
10936 11443
@@ -10989,6 +11496,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
10989 11496 u32 hw_phy_id, hw_phy_id_masked;
10990 11497 int err;
10991 11498
11499 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11500 return tg3_phy_init(tp);
11501
10992 11502 /* Reading the PHY ID register can conflict with ASF
10993 11503 * firmware access to the PHY hardware.
10994 11504 */
@@ -11511,6 +12021,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11511 12021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11512 12022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11513 12023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11514 12025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11515 12026 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11516 12027 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -11532,6 +12043,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11532 12043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11533 12044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11534 12045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11535 12047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11536 12048 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11537 12049 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
@@ -11544,14 +12056,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11544 12056 }
11545 12057 }
11546 12058
11547 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11550 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11551 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11552 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11553 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11554 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
12059 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12060 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11555 12061 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11556 12062
11557 12063 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
@@ -11740,7 +12246,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11740 12246 }
11741 12247
11742 12248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
11744 12251 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11745 12252
11746 12253 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
@@ -11824,7 +12331,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11824 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 12331 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11825 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 12332 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11826 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 12333 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11827 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) 12334 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
11828 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 12336 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11829 } 12337 }
11830 12338
@@ -11835,8 +12343,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11835 tp->phy_otp = TG3_OTP_DEFAULT; 12343 tp->phy_otp = TG3_OTP_DEFAULT;
11836 } 12344 }
11837 12345
11838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12346 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11840 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 12347 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11841 else 12348 else
11842 tp->mi_mode = MAC_MI_MODE_BASE; 12349 tp->mi_mode = MAC_MI_MODE_BASE;
@@ -11846,9 +12353,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11846 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 12353 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11847 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 12354 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11848 12355
11849 /* Initialize MAC MI mode, polling disabled. */ 12356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11850 tw32_f(MAC_MI_MODE, tp->mi_mode); 12357 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
11851 udelay(80); 12358
12359 err = tg3_mdio_init(tp);
12360 if (err)
12361 return err;
11852 12362
11853 /* Initialize data/descriptor byte/word swapping. */ 12363 /* Initialize data/descriptor byte/word swapping. */
11854 val = tr32(GRC_MODE); 12364 val = tr32(GRC_MODE);
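On the 5785 the driver now hands PHY management to phylib: tg3_get_invariants() sets TG3_FLG3_USE_PHYLIB and calls the new tg3_mdio_init(), which absorbs the MAC_MI_MODE programming that used to sit inline here. tg3_mdio_init() itself is not part of this hunk; the following is only a rough sketch of the 2.6.26-era mii_bus registration pattern it would follow, with illustrative helper names:

static int example_mdio_init(struct tg3 *tp)
{
	int i;

	tw32_f(MAC_MI_MODE, tp->mi_mode);	/* write moved out of the caller */
	udelay(80);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
		return 0;

	tp->mdio_bus.name  = "tg3 mdio bus";
	tp->mdio_bus.priv  = tp;
	tp->mdio_bus.read  = &example_mdio_read;	/* (bus, phy addr, reg) */
	tp->mdio_bus.write = &example_mdio_write;	/* (bus, phy addr, reg, val) */
	tp->mdio_bus.irq   = &tp->mdio_irq[0];
	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus.irq[i] = PHY_POLL;

	return mdiobus_register(&tp->mdio_bus);	/* paired with tg3_mdio_fini() */
}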
@@ -11929,6 +12439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11929 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", 12439 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11930 pci_name(tp->pdev), err); 12440 pci_name(tp->pdev), err);
11931 /* ... but do not return immediately ... */ 12441 /* ... but do not return immediately ... */
12442 tg3_mdio_fini(tp);
11932 } 12443 }
11933 12444
11934 tg3_read_partno(tp); 12445 tg3_read_partno(tp);
@@ -11976,6 +12487,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11980 tp->dev->hard_start_xmit = tg3_start_xmit; 12492 tp->dev->hard_start_xmit = tg3_start_xmit;
11981 else 12493 else
@@ -12178,7 +12690,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12178 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 12690 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12179 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 12691 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12180 break; 12692 break;
12181 }; 12693 }
12182 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 12694 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12183 switch (cacheline_size) { 12695 switch (cacheline_size) {
12184 case 16: 12696 case 16:
@@ -12195,7 +12707,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12195 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 12707 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12196 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 12708 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12197 break; 12709 break;
12198 }; 12710 }
12199 } else { 12711 } else {
12200 switch (cacheline_size) { 12712 switch (cacheline_size) {
12201 case 16: 12713 case 16:
@@ -12239,7 +12751,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12239 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 12751 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12240 DMA_RWCTRL_WRITE_BNDRY_1024); 12752 DMA_RWCTRL_WRITE_BNDRY_1024);
12241 break; 12753 break;
12242 }; 12754 }
12243 } 12755 }
12244 12756
12245out: 12757out:
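The `};` changes in tg3_calc_dma_bndry() (and in tg3_phy_string() below) drop a stray semicolon after a switch's closing brace. It is harmless, parsing as an empty statement, but it is not idiomatic and tools such as checkpatch flag it:

int classify(int n)
{
	switch (n) {
	case 0:
		return 0;
	default:
		return 1;
	}	/* writing "};" here would add a useless null statement */
}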
@@ -12599,7 +13111,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
12599 case PHY_ID_BCM8002: return "8002/serdes"; 13111 case PHY_ID_BCM8002: return "8002/serdes";
12600 case 0: return "serdes"; 13112 case 0: return "serdes";
12601 default: return "unknown"; 13113 default: return "unknown";
12602 }; 13114 }
12603} 13115}
12604 13116
12605static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 13117static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
@@ -12900,7 +13412,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12900 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && 13412 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12901 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) 13413 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12902 dev->features |= NETIF_F_TSO6; 13414 dev->features |= NETIF_F_TSO6;
12903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13416 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13417 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12904 dev->features |= NETIF_F_TSO_ECN; 13419 dev->features |= NETIF_F_TSO_ECN;
12905 } 13420 }
12906 13421
@@ -12966,7 +13481,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12970 dev->features |= NETIF_F_IPV6_CSUM; 13486 dev->features |= NETIF_F_IPV6_CSUM;
12971 13487
12972 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 13488 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
@@ -13048,6 +13564,12 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
13048 struct tg3 *tp = netdev_priv(dev); 13564 struct tg3 *tp = netdev_priv(dev);
13049 13565
13050 flush_scheduled_work(); 13566 flush_scheduled_work();
13567
13568 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13569 tg3_phy_fini(tp);
13570 tg3_mdio_fini(tp);
13571 }
13572
13051 unregister_netdev(dev); 13573 unregister_netdev(dev);
13052 if (tp->aperegs) { 13574 if (tp->aperegs) {
13053 iounmap(tp->aperegs); 13575 iounmap(tp->aperegs);
@@ -13080,6 +13602,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13080 return 0; 13602 return 0;
13081 13603
13082 flush_scheduled_work(); 13604 flush_scheduled_work();
13605 tg3_phy_stop(tp);
13083 tg3_netif_stop(tp); 13606 tg3_netif_stop(tp);
13084 13607
13085 del_timer_sync(&tp->timer); 13608 del_timer_sync(&tp->timer);
@@ -13097,10 +13620,13 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13097 13620
13098 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 13621 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13099 if (err) { 13622 if (err) {
13623 int err2;
13624
13100 tg3_full_lock(tp, 0); 13625 tg3_full_lock(tp, 0);
13101 13626
13102 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 13627 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13103 if (tg3_restart_hw(tp, 1)) 13628 err2 = tg3_restart_hw(tp, 1);
13629 if (err2)
13104 goto out; 13630 goto out;
13105 13631
13106 tp->timer.expires = jiffies + tp->timer_offset; 13632 tp->timer.expires = jiffies + tp->timer_offset;
@@ -13111,6 +13637,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13111 13637
13112out: 13638out:
13113 tg3_full_unlock(tp); 13639 tg3_full_unlock(tp);
13640
13641 if (!err2)
13642 tg3_phy_start(tp);
13114 } 13643 }
13115 13644
13116 return err; 13645 return err;
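tg3_suspend() now brackets the power transition with tg3_phy_stop() and tg3_phy_start(), and when setting the power state fails it restarts the hardware, restarting the PHY only if that recovery itself succeeded; the new err2 keeps the recovery result separate from the error being returned. The control flow, reduced to a compilable sketch with illustrative stubs:

#include <stdio.h>

static int set_power_state(void) { return -1; }	/* pretend the primary op failed */
static int restart_hw(void)      { return 0; }
static void phy_start(void)      { puts("phy restarted"); }

static int example_suspend(void)
{
	int err = set_power_state();		/* primary operation */

	if (err) {
		int err2 = restart_hw();	/* recovery attempt */

		if (!err2)
			phy_start();		/* only after a good recovery */
	}
	return err;				/* primary error still reported */
}

int main(void)
{
	return example_suspend() ? 1 : 0;
}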
@@ -13148,6 +13677,9 @@ static int tg3_resume(struct pci_dev *pdev)
13148out: 13677out:
13149 tg3_full_unlock(tp); 13678 tg3_full_unlock(tp);
13150 13679
13680 if (!err)
13681 tg3_phy_start(tp);
13682
13151 return err; 13683 return err;
13152} 13684}
13153 13685
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0404f93baa29..df07842172b7 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -128,6 +128,7 @@
128#define ASIC_REV_USE_PROD_ID_REG 0x0f 128#define ASIC_REV_USE_PROD_ID_REG 0x0f
129#define ASIC_REV_5784 0x5784 129#define ASIC_REV_5784 0x5784
130#define ASIC_REV_5761 0x5761 130#define ASIC_REV_5761 0x5761
131#define ASIC_REV_5785 0x5785
131#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 132#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
132#define CHIPREV_5700_AX 0x70 133#define CHIPREV_5700_AX 0x70
133#define CHIPREV_5700_BX 0x71 134#define CHIPREV_5700_BX 0x71
@@ -528,7 +529,23 @@
528#define MAC_SERDES_CFG 0x00000590 529#define MAC_SERDES_CFG 0x00000590
529#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 530#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
530#define MAC_SERDES_STAT 0x00000594 531#define MAC_SERDES_STAT 0x00000594
531/* 0x598 --> 0x5b0 unused */ 532/* 0x598 --> 0x5a0 unused */
533#define MAC_PHYCFG1 0x000005a0
534#define MAC_PHYCFG1_RGMII_INT 0x00000001
535#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000
536#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000
537#define MAC_PHYCFG1_TXC_DRV 0x20000000
538#define MAC_PHYCFG2 0x000005a4
539#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
540#define MAC_EXT_RGMII_MODE 0x000005a8
541#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
542#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
543#define MAC_RGMII_MODE_TX_RESET 0x00000004
544#define MAC_RGMII_MODE_RX_INT_B 0x00000100
545#define MAC_RGMII_MODE_RX_QUALITY 0x00000200
546#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400
547#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800
548/* 0x5ac --> 0x5b0 unused */
532#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ 549#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
533#define SERDES_RX_SIG_DETECT 0x00000400 550#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 551#define SG_DIG_CTRL 0x000005b0
@@ -1109,6 +1126,7 @@
1109#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1126#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1110#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1127#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1111#define WDMAC_MODE_RX_ACCEL 0x00000400 1128#define WDMAC_MODE_RX_ACCEL 0x00000400
1129#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1112#define WDMAC_STATUS 0x00004c04 1130#define WDMAC_STATUS 0x00004c04
1113#define WDMAC_STATUS_TGTABORT 0x00000004 1131#define WDMAC_STATUS_TGTABORT 0x00000004
1114#define WDMAC_STATUS_MSTABORT 0x00000008 1132#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1713,6 +1731,12 @@
1713#define NIC_SRAM_DATA_CFG_3 0x00000d3c 1731#define NIC_SRAM_DATA_CFG_3 0x00000d3c
1714#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 1732#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
1715 1733
1734#define NIC_SRAM_DATA_CFG_4 0x00000d60
1735#define NIC_SRAM_GMII_MODE 0x00000002
1736#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004
1737#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
1738#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
1739
1716#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 1740#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
1717 1741
1718#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 1742#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2204,6 +2228,7 @@ struct tg3_link_config {
2204 u16 orig_speed; 2228 u16 orig_speed;
2205 u8 orig_duplex; 2229 u8 orig_duplex;
2206 u8 orig_autoneg; 2230 u8 orig_autoneg;
2231 u32 orig_advertising;
2207}; 2232};
2208 2233
2209struct tg3_bufmgr_config { 2234struct tg3_bufmgr_config {
@@ -2479,6 +2504,13 @@ struct tg3 {
2479#define TG3_FLG3_ENABLE_APE 0x00000002 2504#define TG3_FLG3_ENABLE_APE 0x00000002
2480#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004 2505#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
2481#define TG3_FLG3_5701_DMA_BUG 0x00000008 2506#define TG3_FLG3_5701_DMA_BUG 0x00000008
2507#define TG3_FLG3_USE_PHYLIB 0x00000010
2508#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2509#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2510#define TG3_FLG3_PHY_CONNECTED 0x00000080
2511#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2512#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2513#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2482 2514
2483 struct timer_list timer; 2515 struct timer_list timer;
2484 u16 timer_counter; 2516 u16 timer_counter;
@@ -2519,6 +2551,9 @@ struct tg3 {
2519 int msi_cap; 2551 int msi_cap;
2520 int pcix_cap; 2552 int pcix_cap;
2521 2553
2554 struct mii_bus mdio_bus;
2555 int mdio_irq[PHY_MAX_ADDR];
2556
2522 /* PHY info */ 2557 /* PHY info */
2523 u32 phy_id; 2558 u32 phy_id;
2524#define PHY_ID_MASK 0xfffffff0 2559#define PHY_ID_MASK 0xfffffff0
@@ -2546,6 +2581,9 @@ struct tg3 {
2546#define PHY_REV_BCM5401_B2 0x3 2581#define PHY_REV_BCM5401_B2 0x3
2547#define PHY_REV_BCM5401_C0 0x6 2582#define PHY_REV_BCM5401_C0 0x6
2548#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2583#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2584#define TG3_PHY_ID_BCM50610 0x143bd60
2585#define TG3_PHY_ID_BCMAC131 0x143bc70
2586
2549 2587
2550 u32 led_ctrl; 2588 u32 led_ctrl;
2551 u32 phy_otp; 2589 u32 phy_otp;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 0166407d7061..85246ed7cb9c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -13,8 +13,6 @@
13 * This software may be used and distributed according to the terms 13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference. 14 * of the GNU General Public License, incorporated herein by reference.
15 * 15 *
16 ** This file is best viewed/edited with columns>=132.
17 *
18 ** Useful (if not required) reading: 16 ** Useful (if not required) reading:
19 * 17 *
20 * Texas Instruments, ThunderLAN Programmer's Guide, 18 * Texas Instruments, ThunderLAN Programmer's Guide,
@@ -218,9 +216,7 @@ static int bbuf;
218module_param(bbuf, int, 0); 216module_param(bbuf, int, 0);
219MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)"); 217MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
220 218
221static u8 *TLanPadBuffer; 219static const char TLanSignature[] = "TLAN";
222static dma_addr_t TLanPadBufferDMA;
223static char TLanSignature[] = "TLAN";
224static const char tlan_banner[] = "ThunderLAN driver v1.15\n"; 220static const char tlan_banner[] = "ThunderLAN driver v1.15\n";
225static int tlan_have_pci; 221static int tlan_have_pci;
226static int tlan_have_eisa; 222static int tlan_have_eisa;
@@ -238,9 +234,11 @@ static struct board {
238 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 234 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
239 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 235 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
240 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 236 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 237 { "Compaq NetFlex-3/P",
238 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
242 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 239 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
243 { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 240 { "Compaq Netelligent Integrated 10/100 TX UTP",
241 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 242 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 243 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 244 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
@@ -248,8 +246,9 @@ static struct board {
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 246 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 247 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 248 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 249 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 250 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
251 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
253 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 252 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
254}; 253};
255 254
@@ -294,12 +293,12 @@ static int TLan_Close( struct net_device *);
294static struct net_device_stats *TLan_GetStats( struct net_device *); 293static struct net_device_stats *TLan_GetStats( struct net_device *);
295static void TLan_SetMulticastList( struct net_device *); 294static void TLan_SetMulticastList( struct net_device *);
296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 295static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 296static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
297 int irq, int rev, const struct pci_device_id *ent);
298static void TLan_tx_timeout( struct net_device *dev); 298static void TLan_tx_timeout( struct net_device *dev);
299static void TLan_tx_timeout_work(struct work_struct *work); 299static void TLan_tx_timeout_work(struct work_struct *work);
300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
301 301
302static u32 TLan_HandleInvalid( struct net_device *, u16 );
303static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 302static u32 TLan_HandleTxEOF( struct net_device *, u16 );
304static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 303static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
305static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 304static u32 TLan_HandleRxEOF( struct net_device *, u16 );
@@ -348,29 +347,27 @@ static void TLan_EeReceiveByte( u16, u8 *, int );
348static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 347static int TLan_EeReadByte( struct net_device *, u8, u8 * );
349 348
350 349
351static void 350static inline void
352TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 351TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
353{ 352{
354 unsigned long addr = (unsigned long)skb; 353 unsigned long addr = (unsigned long)skb;
355 tag->buffer[9].address = (u32)addr; 354 tag->buffer[9].address = addr;
356 addr >>= 31; /* >>= 32 is undefined for 32bit arch, stupid C */ 355 tag->buffer[8].address = upper_32_bits(addr);
357 addr >>= 1;
358 tag->buffer[8].address = (u32)addr;
359} 356}
360 357
361static struct sk_buff * 358static inline struct sk_buff *
362TLan_GetSKB( struct tlan_list_tag *tag) 359TLan_GetSKB( const struct tlan_list_tag *tag)
363{ 360{
364 unsigned long addr = tag->buffer[8].address; 361 unsigned long addr;
365 addr <<= 31; 362
366 addr <<= 1; 363 addr = tag->buffer[9].address;
367 addr |= tag->buffer[9].address; 364 addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
368 return (struct sk_buff *) addr; 365 return (struct sk_buff *) addr;
369} 366}
370 367
371 368
372static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 369static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
373 TLan_HandleInvalid, 370 NULL,
374 TLan_HandleTxEOF, 371 TLan_HandleTxEOF,
375 TLan_HandleStatOverflow, 372 TLan_HandleStatOverflow,
376 TLan_HandleRxEOF, 373 TLan_HandleRxEOF,
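TLan_StoreSKB() and TLan_GetSKB() stash the skb pointer, which may be 64 bits wide, across two spare 32-bit descriptor words. The shifts are split into two 16-bit steps because shifting a 32-bit long by 32 is undefined in C; upper_32_bits() does the same splitting on the store side. A self-contained sketch of the technique, with illustrative struct and field names:

#include <assert.h>
#include <stdint.h>

struct two_word_slot {
	uint32_t lo;	/* plays the role of buffer[9].address */
	uint32_t hi;	/* plays the role of buffer[8].address */
};

static void store_ptr(struct two_word_slot *s, void *p)
{
	unsigned long addr = (unsigned long)p;

	s->lo = (uint32_t)addr;
	s->hi = (uint32_t)((addr >> 16) >> 16);	/* upper_32_bits(), safely */
}

static void *load_ptr(const struct two_word_slot *s)
{
	unsigned long addr = s->lo;

	/* on a 32-bit long the double shift collapses to zero, which is
	 * exactly right; on 64 bits it restores the high word */
	addr |= ((unsigned long)s->hi << 16) << 16;
	return (void *)addr;
}

int main(void)
{
	struct two_word_slot s;
	int x;

	store_ptr(&s, &x);
	assert(load_ptr(&s) == &x);
	return 0;
}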
@@ -444,7 +441,9 @@ static void __devexit tlan_remove_one( struct pci_dev *pdev)
444 unregister_netdev( dev ); 441 unregister_netdev( dev );
445 442
446 if ( priv->dmaStorage ) { 443 if ( priv->dmaStorage ) {
447 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 444 pci_free_consistent(priv->pciDev,
445 priv->dmaSize, priv->dmaStorage,
446 priv->dmaStorageDMA );
448 } 447 }
449 448
450#ifdef CONFIG_PCI 449#ifdef CONFIG_PCI
@@ -469,16 +468,6 @@ static int __init tlan_probe(void)
469 468
470 printk(KERN_INFO "%s", tlan_banner); 469 printk(KERN_INFO "%s", tlan_banner);
471 470
472 TLanPadBuffer = (u8 *) pci_alloc_consistent(NULL, TLAN_MIN_FRAME_SIZE, &TLanPadBufferDMA);
473
474 if (TLanPadBuffer == NULL) {
475 printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
476 rc = -ENOMEM;
477 goto err_out;
478 }
479
480 memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
481
482 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 471 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
483 472
484 /* Use new style PCI probing. Now the kernel will 473 /* Use new style PCI probing. Now the kernel will
@@ -506,8 +495,6 @@ static int __init tlan_probe(void)
506err_out_pci_unreg: 495err_out_pci_unreg:
507 pci_unregister_driver(&tlan_driver); 496 pci_unregister_driver(&tlan_driver);
508err_out_pci_free: 497err_out_pci_free:
509 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
510err_out:
511 return rc; 498 return rc;
512} 499}
513 500
@@ -539,7 +526,8 @@ static int __devinit tlan_init_one( struct pci_dev *pdev,
539 **************************************************************/ 526 **************************************************************/
540 527
541static int __devinit TLan_probe1(struct pci_dev *pdev, 528static int __devinit TLan_probe1(struct pci_dev *pdev,
542 long ioaddr, int irq, int rev, const struct pci_device_id *ent ) 529 long ioaddr, int irq, int rev,
530 const struct pci_device_id *ent )
543{ 531{
544 532
545 struct net_device *dev; 533 struct net_device *dev;
@@ -625,8 +613,10 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
625 /* Kernel parameters */ 613 /* Kernel parameters */
626 if (dev->mem_start) { 614 if (dev->mem_start) {
627 priv->aui = dev->mem_start & 0x01; 615 priv->aui = dev->mem_start & 0x01;
628 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 : (dev->mem_start & 0x06) >> 1; 616 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
629 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3; 617 : (dev->mem_start & 0x06) >> 1;
618 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
619 : (dev->mem_start & 0x18) >> 3;
630 620
631 if (priv->speed == 0x1) { 621 if (priv->speed == 0x1) {
632 priv->speed = TLAN_SPEED_10; 622 priv->speed = TLAN_SPEED_10;
@@ -706,7 +696,8 @@ static void TLan_Eisa_Cleanup(void)
706 dev = TLan_Eisa_Devices; 696 dev = TLan_Eisa_Devices;
707 priv = netdev_priv(dev); 697 priv = netdev_priv(dev);
708 if (priv->dmaStorage) { 698 if (priv->dmaStorage) {
709 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 699 pci_free_consistent(priv->pciDev, priv->dmaSize,
700 priv->dmaStorage, priv->dmaStorageDMA );
710 } 701 }
711 release_region( dev->base_addr, 0x10); 702 release_region( dev->base_addr, 0x10);
712 unregister_netdev( dev ); 703 unregister_netdev( dev );
@@ -724,8 +715,6 @@ static void __exit tlan_exit(void)
724 if (tlan_have_eisa) 715 if (tlan_have_eisa)
725 TLan_Eisa_Cleanup(); 716 TLan_Eisa_Cleanup();
726 717
727 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
728
729} 718}
730 719
731 720
@@ -763,8 +752,10 @@ static void __init TLan_EisaProbe (void)
763 /* Loop through all slots of the EISA bus */ 752 /* Loop through all slots of the EISA bus */
764 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 753 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
765 754
766 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 755 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
767 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 756 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
768 759
769 760
770 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 761 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
@@ -874,7 +865,8 @@ static int TLan_Init( struct net_device *dev )
874 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 865 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
875 * ( sizeof(TLanList) ); 866 * ( sizeof(TLanList) );
876 } 867 }
877 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size, &priv->dmaStorageDMA); 868 priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
869 dma_size, &priv->dmaStorageDMA);
878 priv->dmaSize = dma_size; 870 priv->dmaSize = dma_size;
879 871
880 if ( priv->dmaStorage == NULL ) { 872 if ( priv->dmaStorage == NULL ) {
@@ -883,16 +875,19 @@ static int TLan_Init( struct net_device *dev )
883 return -ENOMEM; 875 return -ENOMEM;
884 } 876 }
885 memset( priv->dmaStorage, 0, dma_size ); 877 memset( priv->dmaStorage, 0, dma_size );
886 priv->rxList = (TLanList *) 878 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
887 ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 ); 879 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
888 priv->rxListDMA = ( ( ( (u32) priv->dmaStorageDMA ) + 7 ) & 0xFFFFFFF8 );
889 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 880 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
890 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 881 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
882
891 if ( bbuf ) { 883 if ( bbuf ) {
892 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS ); 884 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
893 priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS; 885 priv->rxBufferDMA =priv->txListDMA
894 priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 886 + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
895 priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 887 priv->txBuffer = priv->rxBuffer
888 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
889 priv->txBufferDMA = priv->rxBufferDMA
890 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
896 } 891 }
897 892
898 err = 0; 893 err = 0;
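The ring setup above switches from the open-coded `( ( (u32)x + 7 ) & 0xFFFFFFF8 )` to the kernel's ALIGN() macro. Besides being shorter, the macro masks at full pointer width, so 64-bit addresses and dma_addr_t values are no longer truncated to their low 32 bits. A user-space equivalent:

#include <assert.h>
#include <stdint.h>

/* round x up to the next multiple of a, a power of two */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	assert(ALIGN_UP((uintptr_t)0x1003, 8) == 0x1008);
	assert(ALIGN_UP((uintptr_t)0x1008, 8) == 0x1008);
	/* the mask is ~7 at full width, so the high bits of a 64-bit
	 * address survive, unlike with the old 0xFFFFFFF8 constant */
	return 0;
}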
@@ -952,10 +947,12 @@ static int TLan_Open( struct net_device *dev )
952 int err; 947 int err;
953 948
954 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 949 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
955 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, TLanSignature, dev ); 950 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
951 dev->name, dev );
956 952
957 if ( err ) { 953 if ( err ) {
958 printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq ); 954 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
955 dev->name, dev->irq );
959 return err; 956 return err;
960 } 957 }
961 958
@@ -969,7 +966,8 @@ static int TLan_Open( struct net_device *dev )
969 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 966 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
970 TLan_ResetAdapter( dev ); 967 TLan_ResetAdapter( dev );
971 968
972 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev ); 969 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
970 dev->name, priv->tlanRev );
973 971
974 return 0; 972 return 0;
975 973
@@ -1007,14 +1005,16 @@ static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1007 1005
1008 1006
1009 case SIOCGMIIREG: /* Read MII PHY register. */ 1007 case SIOCGMIIREG: /* Read MII PHY register. */
1010 TLan_MiiReadReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, &data->val_out); 1008 TLan_MiiReadReg(dev, data->phy_id & 0x1f,
1009 data->reg_num & 0x1f, &data->val_out);
1011 return 0; 1010 return 0;
1012 1011
1013 1012
1014 case SIOCSMIIREG: /* Write MII PHY register. */ 1013 case SIOCSMIIREG: /* Write MII PHY register. */
1015 if (!capable(CAP_NET_ADMIN)) 1014 if (!capable(CAP_NET_ADMIN))
1016 return -EPERM; 1015 return -EPERM;
1017 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); 1016 TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
1017 data->reg_num & 0x1f, data->val_in);
1018 return 0; 1018 return 0;
1019 default: 1019 default:
1020 return -EOPNOTSUPP; 1020 return -EOPNOTSUPP;
@@ -1096,20 +1096,25 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1096 TLanList *tail_list; 1096 TLanList *tail_list;
1097 dma_addr_t tail_list_phys; 1097 dma_addr_t tail_list_phys;
1098 u8 *tail_buffer; 1098 u8 *tail_buffer;
1099 int pad;
1100 unsigned long flags; 1099 unsigned long flags;
1101 1100
1102 if ( ! priv->phyOnline ) { 1101 if ( ! priv->phyOnline ) {
1103 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", dev->name ); 1102 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1103 dev->name );
1104 dev_kfree_skb_any(skb); 1104 dev_kfree_skb_any(skb);
1105 return 0; 1105 return 0;
1106 } 1106 }
1107 1107
1108 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
1109 return 0;
1110
1108 tail_list = priv->txList + priv->txTail; 1111 tail_list = priv->txList + priv->txTail;
1109 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1112 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
1110 1113
1111 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1114 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
1112 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail ); 1115 TLAN_DBG( TLAN_DEBUG_TX,
1116 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1117 dev->name, priv->txHead, priv->txTail );
1113 netif_stop_queue(dev); 1118 netif_stop_queue(dev);
1114 priv->txBusyCount++; 1119 priv->txBusyCount++;
1115 return 1; 1120 return 1;
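TLan_StartTx() used to pad short frames by chaining a second, shared DMA pad buffer as an extra fragment; it now calls skb_padto() up front, which zero-extends the skb's buffer (freeing the skb and returning nonzero on failure), so every frame is described by a single fragment. One wrinkle: skb_padto() grows the buffer but not skb->len, so the descriptor below still records the unpadded length for sub-minimum frames; upstream later addressed this by computing the padded length first and using it throughout. The basic idea in plain C, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame, header included, FCS excluded */

/* Hypothetical helper: zero-pad a linear tx buffer up to ETH_ZLEN.
 * buf must have at least ETH_ZLEN bytes of capacity; returns the
 * length to hand to the DMA engine. */
static size_t pad_to_min_frame(uint8_t *buf, size_t len)
{
	if (len < ETH_ZLEN) {
		memset(buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}

int main(void)
{
	uint8_t frame[ETH_ZLEN] = { 0xff, 0xff };	/* two real bytes */

	printf("dma length: %zu\n", pad_to_min_frame(frame, 2));
	return 0;
}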
@@ -1121,37 +1126,34 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1121 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1126 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1122 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1127 skb_copy_from_linear_data(skb, tail_buffer, skb->len);
1123 } else { 1128 } else {
1124 tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); 1129 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1130 skb->data, skb->len,
1131 PCI_DMA_TODEVICE);
1125 TLan_StoreSKB(tail_list, skb); 1132 TLan_StoreSKB(tail_list, skb);
1126 } 1133 }
1127 1134
1128 pad = TLAN_MIN_FRAME_SIZE - skb->len; 1135 tail_list->frameSize = (u16) skb->len;
1129 1136 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1130 if ( pad > 0 ) { 1137 tail_list->buffer[1].count = 0;
1131 tail_list->frameSize = (u16) skb->len + pad; 1138 tail_list->buffer[1].address = 0;
1132 tail_list->buffer[0].count = (u32) skb->len;
1133 tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
1134 tail_list->buffer[1].address = TLanPadBufferDMA;
1135 } else {
1136 tail_list->frameSize = (u16) skb->len;
1137 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1138 tail_list->buffer[1].count = 0;
1139 tail_list->buffer[1].address = 0;
1140 }
1141 1139
1142 spin_lock_irqsave(&priv->lock, flags); 1140 spin_lock_irqsave(&priv->lock, flags);
1143 tail_list->cStat = TLAN_CSTAT_READY; 1141 tail_list->cStat = TLAN_CSTAT_READY;
1144 if ( ! priv->txInProgress ) { 1142 if ( ! priv->txInProgress ) {
1145 priv->txInProgress = 1; 1143 priv->txInProgress = 1;
1146 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1144 TLAN_DBG( TLAN_DEBUG_TX,
1145 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
1147 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1146 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
1148 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1147 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
1149 } else { 1148 } else {
1150 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail ); 1149 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
1150 priv->txTail );
1151 if ( priv->txTail == 0 ) { 1151 if ( priv->txTail == 0 ) {
1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; 1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
1153 = tail_list_phys;
1153 } else { 1154 } else {
1154 ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; 1155 ( priv->txList + ( priv->txTail - 1 ) )->forward
1156 = tail_list_phys;
1155 } 1157 }
1156 } 1158 }
1157 spin_unlock_irqrestore(&priv->lock, flags); 1159 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1191,33 +1193,31 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1191 1193
1192static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1194static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
1193{ 1195{
1194 u32 ack; 1196 struct net_device *dev = dev_id;
1195 struct net_device *dev; 1197 TLanPrivateInfo *priv = netdev_priv(dev);
1196 u32 host_cmd;
1197 u16 host_int; 1198 u16 host_int;
1198 int type; 1199 u16 type;
1199 TLanPrivateInfo *priv;
1200
1201 dev = dev_id;
1202 priv = netdev_priv(dev);
1203 1200
1204 spin_lock(&priv->lock); 1201 spin_lock(&priv->lock);
1205 1202
1206 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1203 host_int = inw( dev->base_addr + TLAN_HOST_INT );
1207 outw( host_int, dev->base_addr + TLAN_HOST_INT );
1208
1209 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1204 type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
1205 if ( type ) {
1206 u32 ack;
1207 u32 host_cmd;
1210 1208
1211 ack = TLanIntVector[type]( dev, host_int ); 1209 outw( host_int, dev->base_addr + TLAN_HOST_INT );
1210 ack = TLanIntVector[type]( dev, host_int );
1212 1211
1213 if ( ack ) { 1212 if ( ack ) {
1214 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1213 host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
1215 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1214 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
1215 }
1216 } 1216 }
1217 1217
1218 spin_unlock(&priv->lock); 1218 spin_unlock(&priv->lock);
1219 1219
1220 return IRQ_HANDLED; 1220 return IRQ_RETVAL(type);
1221} /* TLan_HandleInterrupts */ 1221} /* TLan_HandleInterrupts */
1222 1222
1223 1223
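The reworked handler acks and dispatches only when the interrupt-type field is nonzero, and reports the outcome with IRQ_RETVAL(type). On a shared line this matters: returning IRQ_NONE for interrupts raised by another device keeps the kernel's spurious-IRQ detection honest. In outline (the helper names are illustrative, not part of the driver):

static irqreturn_t example_isr(int irq, void *dev_id)
{
	u16 type = example_read_int_type(dev_id);	/* 0 if not ours */

	if (type)
		example_ack_and_dispatch(dev_id, type);

	return IRQ_RETVAL(type);	/* IRQ_HANDLED iff type != 0 */
}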
@@ -1286,8 +1286,10 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1286 /* Should only read stats if open ? */ 1286 /* Should only read stats if open ? */
1287 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1287 TLan_ReadAndClearStats( dev, TLAN_RECORD );
1288 1288
1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount ); 1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1290 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount ); 1290 priv->rxEocCount );
1291 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1292 priv->txBusyCount );
1291 if ( debug & TLAN_DEBUG_GNRL ) { 1293 if ( debug & TLAN_DEBUG_GNRL ) {
1292 TLan_PrintDio( dev->base_addr ); 1294 TLan_PrintDio( dev->base_addr );
1293 TLan_PhyPrint( dev ); 1295 TLan_PhyPrint( dev );
@@ -1299,7 +1301,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1299 TLan_PrintList( priv->txList + i, "TX", i ); 1301 TLan_PrintList( priv->txList + i, "TX", i );
1300 } 1302 }
1301 1303
1302 return ( &( (TLanPrivateInfo *) netdev_priv(dev) )->stats ); 1304 return &dev->stats;
1303 1305
1304} /* TLan_GetStats */ 1306} /* TLan_GetStats */
1305 1307
@@ -1337,10 +1339,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
1337 1339
1338 if ( dev->flags & IFF_PROMISC ) { 1340 if ( dev->flags & IFF_PROMISC ) {
1339 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1341 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1340 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1342 TLan_DioWrite8( dev->base_addr,
1343 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
1341 } else { 1344 } else {
1342 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1345 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1343 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1346 TLan_DioWrite8( dev->base_addr,
1347 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
1344 if ( dev->flags & IFF_ALLMULTI ) { 1348 if ( dev->flags & IFF_ALLMULTI ) {
1345 for ( i = 0; i < 3; i++ ) 1349 for ( i = 0; i < 3; i++ )
1346 TLan_SetMac( dev, i + 1, NULL ); 1350 TLan_SetMac( dev, i + 1, NULL );
@@ -1349,7 +1353,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
1349 } else { 1353 } else {
1350 for ( i = 0; i < dev->mc_count; i++ ) { 1354 for ( i = 0; i < dev->mc_count; i++ ) {
1351 if ( i < 3 ) { 1355 if ( i < 3 ) {
1352 TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr ); 1356 TLan_SetMac( dev, i + 1,
1357 (char *) &dmi->dmi_addr );
1353 } else { 1358 } else {
1354 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1359 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
1355 if ( offset < 32 ) 1360 if ( offset < 32 )
@@ -1383,31 +1388,6 @@ static void TLan_SetMulticastList( struct net_device *dev )
1383*****************************************************************************/ 1388*****************************************************************************/
1384 1389
1385 1390
1386 /***************************************************************
1387 * TLan_HandleInvalid
1388 *
1389 * Returns:
1390 * 0
1391 * Parms:
1392 * dev Device assigned the IRQ that was
1393 * raised.
1394 * host_int The contents of the HOST_INT
1395 * port.
1396 *
1397 * This function handles invalid interrupts. This should
1398 * never happen unless some other adapter is trying to use
1399 * the IRQ line assigned to the device.
1400 *
1401 **************************************************************/
1402
1403static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
1404{
1405 /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
1406 return 0;
1407
1408} /* TLan_HandleInvalid */
1409
1410
1411 1391
1412 1392
1413 /*************************************************************** 1393 /***************************************************************
@@ -1441,14 +1421,16 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1441 u32 ack = 0; 1421 u32 ack = 0;
1442 u16 tmpCStat; 1422 u16 tmpCStat;
1443 1423
1444 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1424 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1425 priv->txHead, priv->txTail );
1445 head_list = priv->txList + priv->txHead; 1426 head_list = priv->txList + priv->txHead;
1446 1427
1447 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1428 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1448 ack++; 1429 ack++;
1449 if ( ! bbuf ) { 1430 if ( ! bbuf ) {
1450 struct sk_buff *skb = TLan_GetSKB(head_list); 1431 struct sk_buff *skb = TLan_GetSKB(head_list);
1451 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 1432 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1433 skb->len, PCI_DMA_TODEVICE);
1452 dev_kfree_skb_any(skb); 1434 dev_kfree_skb_any(skb);
1453 head_list->buffer[8].address = 0; 1435 head_list->buffer[8].address = 0;
1454 head_list->buffer[9].address = 0; 1436 head_list->buffer[9].address = 0;
@@ -1457,7 +1439,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1457 if ( tmpCStat & TLAN_CSTAT_EOC ) 1439 if ( tmpCStat & TLAN_CSTAT_EOC )
1458 eoc = 1; 1440 eoc = 1;
1459 1441
1460 priv->stats.tx_bytes += head_list->frameSize; 1442 dev->stats.tx_bytes += head_list->frameSize;
1461 1443
1462 head_list->cStat = TLAN_CSTAT_UNUSED; 1444 head_list->cStat = TLAN_CSTAT_UNUSED;
1463 netif_start_queue(dev); 1445 netif_start_queue(dev);
@@ -1469,7 +1451,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1469 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1451 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
1470 1452
1471 if ( eoc ) { 1453 if ( eoc ) {
1472 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1454 TLAN_DBG( TLAN_DEBUG_TX,
1455 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
1456 priv->txHead, priv->txTail );
1473 head_list = priv->txList + priv->txHead; 1457 head_list = priv->txList + priv->txHead;
1474 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1458 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1475 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1459 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1481,7 +1465,8 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1481 } 1465 }
1482 1466
1483 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1467 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1484 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1468 TLan_DioWrite8( dev->base_addr,
1469 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1485 if ( priv->timer.function == NULL ) { 1470 if ( priv->timer.function == NULL ) {
1486 priv->timer.function = &TLan_Timer; 1471 priv->timer.function = &TLan_Timer;
1487 priv->timer.data = (unsigned long) dev; 1472 priv->timer.data = (unsigned long) dev;
@@ -1563,66 +1548,65 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1563 TLanList *head_list; 1548 TLanList *head_list;
1564 struct sk_buff *skb; 1549 struct sk_buff *skb;
1565 TLanList *tail_list; 1550 TLanList *tail_list;
1566 void *t;
1567 u32 frameSize;
1568 u16 tmpCStat; 1551 u16 tmpCStat;
1569 dma_addr_t head_list_phys; 1552 dma_addr_t head_list_phys;
1570 1553
1571 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1554 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
1555 priv->rxHead, priv->rxTail );
1572 head_list = priv->rxList + priv->rxHead; 1556 head_list = priv->rxList + priv->rxHead;
1573 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1557 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1574 1558
1575 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1559 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1576 frameSize = head_list->frameSize; 1560 dma_addr_t frameDma = head_list->buffer[0].address;
1561 u32 frameSize = head_list->frameSize;
1577 ack++; 1562 ack++;
1578 if (tmpCStat & TLAN_CSTAT_EOC) 1563 if (tmpCStat & TLAN_CSTAT_EOC)
1579 eoc = 1; 1564 eoc = 1;
1580 1565
1581 if (bbuf) { 1566 if (bbuf) {
1582 skb = dev_alloc_skb(frameSize + 7); 1567 skb = netdev_alloc_skb(dev, frameSize + 7);
1583 if (skb == NULL) 1568 if ( !skb )
1584 printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); 1569 goto drop_and_reuse;
1585 else { 1570
1586 head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1571 head_buffer = priv->rxBuffer
1587 skb_reserve(skb, 2); 1572 + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
1588 t = (void *) skb_put(skb, frameSize); 1573 skb_reserve(skb, 2);
1589 1574 pci_dma_sync_single_for_cpu(priv->pciDev,
1590 priv->stats.rx_bytes += head_list->frameSize; 1575 frameDma, frameSize,
1591 1576 PCI_DMA_FROMDEVICE);
1592 memcpy( t, head_buffer, frameSize ); 1577 skb_copy_to_linear_data(skb, head_buffer, frameSize);
1593 skb->protocol = eth_type_trans( skb, dev ); 1578 skb_put(skb, frameSize);
1594 netif_rx( skb ); 1579 dev->stats.rx_bytes += frameSize;
1595 } 1580
1581 skb->protocol = eth_type_trans( skb, dev );
1582 netif_rx( skb );
1596 } else { 1583 } else {
1597 struct sk_buff *new_skb; 1584 struct sk_buff *new_skb;
1598 1585
1599 /* 1586 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
1600 * I changed the algorithm here. What we now do 1587 if ( !new_skb )
1601 * is allocate the new frame. If this fails we 1588 goto drop_and_reuse;
1602 * simply recycle the frame.
1603 */
1604 1589
1605 new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 1590 skb = TLan_GetSKB(head_list);
1591 pci_unmap_single(priv->pciDev, frameDma,
1592 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1593 skb_put( skb, frameSize );
1606 1594
1607 if ( new_skb != NULL ) { 1595 dev->stats.rx_bytes += frameSize;
1608 skb = TLan_GetSKB(head_list);
1609 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1610 skb_trim( skb, frameSize );
1611 1596
1612 priv->stats.rx_bytes += frameSize; 1597 skb->protocol = eth_type_trans( skb, dev );
1598 netif_rx( skb );
1613 1599
1614 skb->protocol = eth_type_trans( skb, dev ); 1600 skb_reserve( new_skb, NET_IP_ALIGN );
1615 netif_rx( skb ); 1601 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1602 new_skb->data,
1603 TLAN_MAX_FRAME_SIZE,
1604 PCI_DMA_FROMDEVICE);
1616 1605
1617 skb_reserve( new_skb, 2 ); 1606 TLan_StoreSKB(head_list, new_skb);
1618 t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
1619 head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1620 head_list->buffer[8].address = (u32) t;
1621 TLan_StoreSKB(head_list, new_skb);
1622 } else
1623 printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
1624 }
1625 1607
1608 }
1609drop_and_reuse:
1626 head_list->forward = 0; 1610 head_list->forward = 0;
1627 head_list->cStat = 0; 1611 head_list->cStat = 0;
1628 tail_list = priv->rxList + priv->rxTail; 1612 tail_list = priv->rxList + priv->rxTail;
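The receive completion path is restructured around a drop-and-reuse policy: the replacement skb is allocated before the received one is handed to the stack, and if allocation fails the code jumps to drop_and_reuse, leaving the old buffer in the ring and dropping the frame, so a descriptor is never left without a buffer under memory pressure. The shape of it in a self-contained user-space sketch, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct ring_slot {
	unsigned char *buf;
	size_t len;
};

static void deliver(unsigned char *data, size_t len)
{
	printf("delivered %zu bytes\n", len);	/* stand-in for netif_rx() */
	free(data);				/* the stack would own it */
}

/* Allocate the replacement first; on failure keep the old buffer in
 * the ring and drop the frame. */
static void rx_one(struct ring_slot *slot, size_t bufsz)
{
	unsigned char *fresh = malloc(bufsz);

	if (!fresh)
		return;			/* drop_and_reuse: recycle old buffer */

	deliver(slot->buf, slot->len);
	slot->buf = fresh;		/* install the replacement */
	slot->len = 0;
}

int main(void)
{
	struct ring_slot slot = { malloc(1536), 60 };

	rx_one(&slot, 1536);
	free(slot.buf);
	return 0;
}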
@@ -1638,10 +1622,10 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1638 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1622 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
1639 1623
1640 1624
1641
1642
1643 if ( eoc ) { 1625 if ( eoc ) {
1644 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1626 TLAN_DBG( TLAN_DEBUG_RX,
1627 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
1628 priv->rxHead, priv->rxTail );
1645 head_list = priv->rxList + priv->rxHead; 1629 head_list = priv->rxList + priv->rxHead;
1646 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1630 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1647 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1631 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
@@ -1650,7 +1634,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1650 } 1634 }
1651 1635
1652 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1636 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1653 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1637 TLan_DioWrite8( dev->base_addr,
1638 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1654 if ( priv->timer.function == NULL ) { 1639 if ( priv->timer.function == NULL ) {
1655 priv->timer.function = &TLan_Timer; 1640 priv->timer.function = &TLan_Timer;
1656 priv->timer.data = (unsigned long) dev; 1641 priv->timer.data = (unsigned long) dev;
@@ -1728,7 +1713,9 @@ static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
1728 1713
1729 host_int = 0; 1714 host_int = 0;
1730 if ( priv->tlanRev < 0x30 ) { 1715 if ( priv->tlanRev < 0x30 ) {
1731 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail ); 1716 TLAN_DBG( TLAN_DEBUG_TX,
1717 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1718 priv->txHead, priv->txTail );
1732 head_list = priv->txList + priv->txHead; 1719 head_list = priv->txList + priv->txHead;
1733 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1720 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1734 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1721 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1796,15 +1783,18 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1796 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1783 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
1797 if ( net_sts ) { 1784 if ( net_sts ) {
1798 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1785 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
1799 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", dev->name, (unsigned) net_sts ); 1786 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1787 dev->name, (unsigned) net_sts );
1800 } 1788 }
1801 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1789 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
1802 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1790 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
1803 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1791 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
1804 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1792 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
1793 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1805 tlphy_ctl |= TLAN_TC_SWAPOL; 1794 tlphy_ctl |= TLAN_TC_SWAPOL;
1806 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1795 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1807 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1796 } else if ( ( tlphy_sts & TLAN_TS_POLOK )
1797 && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1808 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1798 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1809 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1799 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1810 } 1800 }
@@ -1849,7 +1839,9 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1849 u32 ack = 1; 1839 u32 ack = 1;
1850 1840
1851 if ( priv->tlanRev < 0x30 ) { 1841 if ( priv->tlanRev < 0x30 ) {
1852 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail ); 1842 TLAN_DBG( TLAN_DEBUG_RX,
1843 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
1844 priv->rxHead, priv->rxTail );
1853 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1845 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1854 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1846 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
1855 ack |= TLAN_HC_GO | TLAN_HC_RT; 1847 ack |= TLAN_HC_GO | TLAN_HC_RT;
@@ -1940,10 +1932,12 @@ static void TLan_Timer( unsigned long data )
1940 if ( priv->timer.function == NULL ) { 1932 if ( priv->timer.function == NULL ) {
1941 elapsed = jiffies - priv->timerSetAt; 1933 elapsed = jiffies - priv->timerSetAt;
1942 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1934 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
1943 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 1935 TLan_DioWrite8( dev->base_addr,
1936 TLAN_LED_REG, TLAN_LED_LINK );
1944 } else { 1937 } else {
1945 priv->timer.function = &TLan_Timer; 1938 priv->timer.function = &TLan_Timer;
1946 priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY; 1939 priv->timer.expires = priv->timerSetAt
1940 + TLAN_TIMER_ACT_DELAY;
1947 spin_unlock_irqrestore(&priv->lock, flags); 1941 spin_unlock_irqrestore(&priv->lock, flags);
1948 add_timer( &priv->timer ); 1942 add_timer( &priv->timer );
1949 break; 1943 break;
@@ -1998,7 +1992,8 @@ static void TLan_ResetLists( struct net_device *dev )
1998 list = priv->txList + i; 1992 list = priv->txList + i;
1999 list->cStat = TLAN_CSTAT_UNUSED; 1993 list->cStat = TLAN_CSTAT_UNUSED;
2000 if ( bbuf ) { 1994 if ( bbuf ) {
2001 list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 1995 list->buffer[0].address = priv->txBufferDMA
1996 + ( i * TLAN_MAX_FRAME_SIZE );
2002 } else { 1997 } else {
2003 list->buffer[0].address = 0; 1998 list->buffer[0].address = 0;
2004 } 1999 }
@@ -2017,28 +2012,32 @@ static void TLan_ResetLists( struct net_device *dev )
2017 list->frameSize = TLAN_MAX_FRAME_SIZE; 2012 list->frameSize = TLAN_MAX_FRAME_SIZE;
2018 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 2013 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
2019 if ( bbuf ) { 2014 if ( bbuf ) {
2020 list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 2015 list->buffer[0].address = priv->rxBufferDMA
2016 + ( i * TLAN_MAX_FRAME_SIZE );
2021 } else { 2017 } else {
2022 skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 2018 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
2023 if ( skb == NULL ) { 2019 if ( !skb ) {
2024 printk( "TLAN: Couldn't allocate memory for received data.\n" ); 2020 pr_err("TLAN: out of memory for received data.\n" );
2025 /* If this ever happened it would be a problem */ 2021 break;
2026 } else {
2027 skb->dev = dev;
2028 skb_reserve( skb, 2 );
2029 t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
2030 } 2022 }
2031 list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2023
2032 list->buffer[8].address = (u32) t; 2024 skb_reserve( skb, NET_IP_ALIGN );
2025 list->buffer[0].address = pci_map_single(priv->pciDev, skb->data,
2026 TLAN_MAX_FRAME_SIZE,
2027 PCI_DMA_FROMDEVICE);
2033 TLan_StoreSKB(list, skb); 2028 TLan_StoreSKB(list, skb);
2034 } 2029 }
2035 list->buffer[1].count = 0; 2030 list->buffer[1].count = 0;
2036 list->buffer[1].address = 0; 2031 list->buffer[1].address = 0;
2037 if ( i < TLAN_NUM_RX_LISTS - 1 ) 2032 list->forward = list_phys + sizeof(TLanList);
2038 list->forward = list_phys + sizeof(TLanList); 2033 }
2039 else 2034
2040 list->forward = 0; 2035 /* if allocation ran out early, clear the remaining slots */
2036 while (i < TLAN_NUM_RX_LISTS) {
2037 TLan_StoreSKB(priv->rxList + i, NULL);
2038 ++i;
2041 } 2039 }
2040 list->forward = 0;
2042 2041
2043} /* TLan_ResetLists */ 2042} /* TLan_ResetLists */
2044 2043
@@ -2055,7 +2054,9 @@ static void TLan_FreeLists( struct net_device *dev )
2055 list = priv->txList + i; 2054 list = priv->txList + i;
2056 skb = TLan_GetSKB(list); 2055 skb = TLan_GetSKB(list);
2057 if ( skb ) { 2056 if ( skb ) {
2058 pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 2057 pci_unmap_single(priv->pciDev,
2058 list->buffer[0].address, skb->len,
2059 PCI_DMA_TODEVICE);
2059 dev_kfree_skb_any( skb ); 2060 dev_kfree_skb_any( skb );
2060 list->buffer[8].address = 0; 2061 list->buffer[8].address = 0;
2061 list->buffer[9].address = 0; 2062 list->buffer[9].address = 0;
@@ -2066,7 +2067,10 @@ static void TLan_FreeLists( struct net_device *dev )
2066 list = priv->rxList + i; 2067 list = priv->rxList + i;
2067 skb = TLan_GetSKB(list); 2068 skb = TLan_GetSKB(list);
2068 if ( skb ) { 2069 if ( skb ) {
2069 pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2070 pci_unmap_single(priv->pciDev,
2071 list->buffer[0].address,
2072 TLAN_MAX_FRAME_SIZE,
2073 PCI_DMA_FROMDEVICE);
2070 dev_kfree_skb_any( skb ); 2074 dev_kfree_skb_any( skb );
2071 list->buffer[8].address = 0; 2075 list->buffer[8].address = 0;
2072 list->buffer[9].address = 0; 2076 list->buffer[9].address = 0;
@@ -2097,7 +2101,8 @@ static void TLan_PrintDio( u16 io_base )
 	u32 data0, data1;
 	int i;
 
-	printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base );
+	printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
+		io_base );
 	printk( "TLAN: Off.  +0        +4\n" );
 	for ( i = 0; i < 0x4C; i+= 8 ) {
 		data0 = TLan_DioRead32( io_base, i );
@@ -2131,13 +2136,14 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
 {
 	int i;
 
-	printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list );
+	printk( "TLAN: %s List %d at %p\n", type, num, list );
 	printk( "TLAN:   Forward    = 0x%08x\n", list->forward );
 	printk( "TLAN:   CSTAT      = 0x%04hx\n", list->cStat );
 	printk( "TLAN:   Frame Size = 0x%04hx\n", list->frameSize );
 	/* for ( i = 0; i < 10; i++ ) { */
 	for ( i = 0; i < 2; i++ ) {
-		printk( "TLAN:   Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address );
+		printk( "TLAN:   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+			i, list->buffer[i].count, list->buffer[i].address );
 	}
 
 } /* TLan_PrintList */
@@ -2165,7 +2171,6 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
 
 static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
 	u32		tx_good, tx_under;
 	u32		rx_good, rx_over;
 	u32		def_tx, crc, code;
@@ -2202,18 +2207,18 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 	loss     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
 
 	if ( record ) {
-		priv->stats.rx_packets += rx_good;
-		priv->stats.rx_errors  += rx_over + crc + code;
-		priv->stats.tx_packets += tx_good;
-		priv->stats.tx_errors  += tx_under + loss;
-		priv->stats.collisions += multi_col + single_col + excess_col + late_col;
+		dev->stats.rx_packets += rx_good;
+		dev->stats.rx_errors  += rx_over + crc + code;
+		dev->stats.tx_packets += tx_good;
+		dev->stats.tx_errors  += tx_under + loss;
+		dev->stats.collisions += multi_col + single_col + excess_col + late_col;
 
-		priv->stats.rx_over_errors  += rx_over;
-		priv->stats.rx_crc_errors   += crc;
-		priv->stats.rx_frame_errors += code;
+		dev->stats.rx_over_errors  += rx_over;
+		dev->stats.rx_crc_errors   += crc;
+		dev->stats.rx_frame_errors += code;
 
-		priv->stats.tx_aborted_errors += tx_under;
-		priv->stats.tx_carrier_errors += loss;
+		dev->stats.tx_aborted_errors += tx_under;
+		dev->stats.tx_carrier_errors += loss;
 	}
 
 } /* TLan_ReadAndClearStats */
@@ -2354,14 +2359,16 @@ TLan_FinishReset( struct net_device *dev )
 	TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
 	TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
 
-	if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) {
+	if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
+	     ( priv->aui ) ) {
 		status = MII_GS_LINK;
 		printk( "TLAN:  %s: Link forced.\n", dev->name );
 	} else {
 		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
 		udelay( 1000 );
 		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-		if ( (status & MII_GS_LINK) &&	/* We only support link info on Nat.Sem. PHY's */
+		if ( (status & MII_GS_LINK) &&
+		     /* We only support link info on Nat.Sem. PHY's */
 		     (tlphy_id1 == NAT_SEM_ID1) &&
 		     (tlphy_id2 == NAT_SEM_ID2) ) {
 			TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
@@ -2370,12 +2377,12 @@ TLan_FinishReset( struct net_device *dev )
 			printk( "TLAN: %s: Link active with ", dev->name );
 			if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
 				printk( "forced 10%sMbps %s-Duplex\n",
-				tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-				tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+					tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
+					tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
 			} else {
-				printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
-				tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-				tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+				printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
+					tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
+					tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
 				printk("TLAN: Partner capability: ");
 				for (i = 5; i <= 10; i++)
 					if (partner & (1<<i))
@@ -2416,7 +2423,8 @@ TLan_FinishReset( struct net_device *dev )
 		outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
 		netif_carrier_on(dev);
 	} else {
-		printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
+		printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
+			dev->name );
 		TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
 		return;
 	}
@@ -2456,10 +2464,12 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
 
 	if ( mac != NULL ) {
 		for ( i = 0; i < 6; i++ )
-			TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] );
+			TLan_DioWrite8( dev->base_addr,
+					TLAN_AREG_0 + areg + i, mac[i] );
 	} else {
 		for ( i = 0; i < 6; i++ )
-			TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 );
+			TLan_DioWrite8( dev->base_addr,
+					TLAN_AREG_0 + areg + i, 0 );
 	}
 
 } /* TLan_SetMac */
@@ -2565,9 +2575,13 @@ static void TLan_PhyDetect( struct net_device *dev )
 		TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
 		TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
 		TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
-		if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
-			TLAN_DBG( TLAN_DEBUG_GNRL, "PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo );
-			if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) {
+		if ( ( control != 0xFFFF ) ||
+		     ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
+			TLAN_DBG( TLAN_DEBUG_GNRL,
+				  "PHY found at %02x %04x %04x %04x\n",
+				  phy, control, hi, lo );
+			if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
+			     ( phy != TLAN_PHY_MAX_ADDR ) ) {
 				priv->phy[1] = phy;
 			}
 		}
@@ -2595,7 +2609,9 @@ static void TLan_PhyPowerDown( struct net_device *dev )
 	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
 	TLan_MiiSync( dev->base_addr );
 	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-	if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
+	if ( ( priv->phyNum == 0 ) &&
+	     ( priv->phy[1] != TLAN_PHY_NONE ) &&
+	     ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
 		TLan_MiiSync( dev->base_addr );
 		TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
 	}
@@ -2768,10 +2784,10 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
 	 * more time.  Perhaps we should fail after a while.
 	 */
 	if (!priv->neg_be_verbose++) {
-		printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n");
-		printk(KERN_INFO "TLAN: Please check that your adapter has\n");
-		printk(KERN_INFO "TLAN: been properly connected to a HUB or Switch.\n");
-		printk(KERN_INFO "TLAN: Trying to establish link in the background...\n");
+		pr_info("TLAN: Giving autonegotiation more time.\n");
+		pr_info("TLAN: Please check that your adapter has\n");
+		pr_info("TLAN: been properly connected to a HUB or Switch.\n");
+		pr_info("TLAN: Trying to establish link in the background...\n");
 	}
 	TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
 	return;
@@ -2787,7 +2803,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
 		priv->tlanFullDuplex = TRUE;
 	}
 
-	if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) {
+	if ( ( ! ( mode & 0x0180 ) ) &&
+	     ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
+	     ( priv->phyNum != 0 ) ) {
 		priv->phyNum = 0;
 		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
 		TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
@@ -2796,12 +2814,14 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
 	}
 
 	if ( priv->phyNum == 0 ) {
-		if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) {
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX );
-			printk( "TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
+		if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
+		     ( an_adv & an_lpa & 0x0040 ) ) {
+			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
+					  MII_GC_AUTOENB | MII_GC_DUPLEX );
+			pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
 		} else {
 			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
-			printk( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
+			pr_info( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
 		}
 	}
 
@@ -3209,7 +3229,8 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
 	TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
 
 	if ( ( ! err ) && stop ) {
-		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );	/* STOP, raise data while clock is high */
+		/* STOP, raise data while clock is high */
+		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
 		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
 		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
 	}
@@ -3272,7 +3293,8 @@ static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
 		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );	/* No ack = 1 (?) */
 		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
 		TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );	/* STOP, raise data while clock is high */
+		/* STOP, raise data while clock is high */
+		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
 		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
 		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
 	}
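
Note: the tlan.c hunks above combine two conversions that recur throughout this series: the driver's private struct net_device_stats copy is dropped in favour of the dev->stats block that the core already embeds in struct net_device, and RX buffers come from netdev_alloc_skb(), which ties the skb to its device at allocation time. A minimal sketch of the resulting refill shape, assuming a driver-local rx_slot with skb/dma fields (illustrative names, not from the patch):

    struct rx_slot {
            struct sk_buff *skb;
            dma_addr_t dma;
    };

    /* Sketch: allocate and map one RX buffer the post-patch way. */
    static int refill_rx_slot(struct net_device *dev, struct pci_dev *pdev,
                              struct rx_slot *slot, unsigned int size)
    {
            struct sk_buff *skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);

            if (!skb) {
                    dev->stats.rx_dropped++;   /* shared stats, no priv copy */
                    return -ENOMEM;
            }
            skb_reserve(skb, NET_IP_ALIGN);    /* keep the IP header aligned */
            slot->skb = skb;
            slot->dma = pci_map_single(pdev, skb->data, size,
                                       PCI_DMA_FROMDEVICE);
            return 0;
    }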
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 41ce0b665937..4b82f283e985 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -13,8 +13,6 @@
  * This software may be used and distributed according to the terms
  * of the GNU General Public License, incorporated herein by reference.
  *
- ** This file is best viewed/edited with tabstop=4, colums>=132
- *
  *
  * Dec 10, 1999	Torben Mathiasen <torben.mathiasen@compaq.com>
  *		New Maintainer
@@ -45,7 +43,9 @@
 #define TLAN_IGNORE		0
 #define TLAN_RECORD		1
 
-#define TLAN_DBG(lvl, format, args...)	if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args );
+#define TLAN_DBG(lvl, format, args...) \
+	do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+
 #define TLAN_DEBUG_GNRL		0x0001
 #define TLAN_DEBUG_TX		0x0002
 #define TLAN_DEBUG_RX		0x0004
@@ -194,7 +194,6 @@ typedef struct tlan_private_tag {
 	u32			timerSetAt;
 	u32			timerType;
 	struct timer_list	timer;
-	struct net_device_stats	stats;
 	struct board		*adapter;
 	u32			adapterRev;
 	u32			aui;
@@ -205,7 +204,6 @@ typedef struct tlan_private_tag {
 	u32			speed;
 	u8			tlanRev;
 	u8			tlanFullDuplex;
-	char			devName[8];
 	spinlock_t		lock;
 	u8			link;
 	u8			is_eisa;
@@ -517,12 +515,18 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
  *	xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
  * #define DA( a, bit )	( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
  *
- *	hash  = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) );
- *	hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1;
- *	hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2;
- *	hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3;
- *	hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4;
- *	hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ *	hash  = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ *		      DA(a,30), DA(a,36), DA(a,42) );
+ *	hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ *		      DA(a,31), DA(a,37), DA(a,43) ) << 1;
+ *	hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ *		      DA(a,32), DA(a,38), DA(a,44) ) << 2;
+ *	hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ *		      DA(a,33), DA(a,39), DA(a,45) ) << 3;
+ *	hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ *		      DA(a,34), DA(a,40), DA(a,46) ) << 4;
+ *	hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ *		      DA(a,35), DA(a,41), DA(a,47) ) << 5;
  *
  */
 static inline u32 TLan_HashFunc( const u8 *a )
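
Note: the TLAN_DBG rewrite is the standard do { ... } while (0) idiom: a bare-if macro mis-binds whenever the caller wraps it in an if/else. A short illustration (the caller and restart_autoneg() are hypothetical, not from the patch):

    #define TLAN_DBG_OLD(lvl, format, args...) \
            if (debug & (lvl)) printk(KERN_DEBUG "TLAN: " format, ##args);

    /* With the old form, the caller's else binds to the macro's hidden if,
     * so restart_autoneg() runs exactly when debugging is OFF: */
    if (link_up)
            TLAN_DBG_OLD(TLAN_DEBUG_GNRL, "link up\n")
    else
            restart_autoneg(dev);

Wrapped in do { ... } while (0), the macro expands to a single statement that still takes a trailing semicolon, and the else binds to the outer if as intended.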
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 0f8b4ec8695a..66b1ff603234 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -264,7 +264,7 @@ struct xl_private {
 	u16 asb;
 
 	u8 __iomem *xl_mmio;
-	char *xl_card_name;
+	const char *xl_card_name;
 	struct pci_dev *pdev ;
 
 	spinlock_t xl_lock ;
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
index c91956310fb2..10fbba08978f 100644
--- a/drivers/net/tokenring/olympic.h
+++ b/drivers/net/tokenring/olympic.h
@@ -254,7 +254,7 @@ struct olympic_private {
 	u8 __iomem *olympic_mmio;
 	u8 __iomem *olympic_lap;
 	struct pci_dev *pdev ;
-	char *olympic_card_name ;
+	const char *olympic_card_name;
 
 	spinlock_t olympic_lock ;
 
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index c028facd9346..febfaee44fe9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -803,7 +803,8 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
 		int rx = data->rxhead;
 		struct sk_buff *skb;
 
-		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
+							  TSI108_RXBUF_SIZE + 2);
 		if (!skb)
 			break;
 
@@ -1352,8 +1353,9 @@ static int tsi108_open(struct net_device *dev)
 	data->rxhead = 0;
 
 	for (i = 0; i < TSI108_RXRING_LEN; i++) {
-		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+		struct sk_buff *skb;
 
+		skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
 		if (!skb) {
 			/* Bah.  No memory for now, but maybe we'll get
 			 * some more later.
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 2511ca7a12aa..e9e628621639 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(long, int);
 static irqreturn_t uli526x_interrupt(int, void *);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev);
+#endif
 static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
 static void allocate_rx_buffer(struct uli526x_board_info *);
 static void update_cr6(u32, unsigned long);
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
 	dev->get_stats = &uli526x_get_stats;
 	dev->set_multicast_list = &uli526x_set_filter_mode;
 	dev->ethtool_ops = &netdev_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = &uli526x_poll;
+#endif
 	spin_lock_init(&db->lock);
 
 
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 	db->cr5_data = inl(ioaddr + DCR5);
 	outl(db->cr5_data, ioaddr + DCR5);
 	if ( !(db->cr5_data & 0x180c1) ) {
-		spin_unlock_irqrestore(&db->lock, flags);
+		/* Restore CR7 to enable interrupt mask */
 		outl(db->cr7_data, ioaddr + DCR7);
+		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
 
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev)
+{
+	/* ISR grabs the irqsave lock, so this should be safe */
+	uli526x_interrupt(dev->irq, dev);
+}
+#endif
 
 /*
  *	Free TX resource after TX complete
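
Note: the new uli526x_poll() is the usual poll_controller hook for netpoll clients (netconsole, kgdb over ethernet): it reruns the interrupt handler so RX/TX completions are processed even when the IRQ is not being delivered. uli526x can call its ISR directly because the handler takes its lock with spin_lock_irqsave(); drivers whose handlers assume hard-IRQ context usually bracket the call instead, roughly like this sketch (mydrv_* names are illustrative):

    #ifdef CONFIG_NET_POLL_CONTROLLER
    /* Sketch of the more defensive variant of the same hook. */
    static void mydrv_netpoll(struct net_device *dev)
    {
            disable_irq(dev->irq);             /* keep the real ISR out */
            mydrv_interrupt(dev->irq, dev);    /* run the handler by hand */
            enable_irq(dev->irq);
    }
    #endif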
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index ca0bdac07a78..fb0b918e5ccb 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 	skb->dev = ugeth->dev;
 
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		      dma_map_single(NULL,
+		      dma_map_single(&ugeth->dev->dev,
 				     skb->data,
 				     ugeth->ug_info->uf_info.max_rx_buf_length +
 				     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 			continue;
 		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
 			if (ugeth->tx_skbuff[i][j]) {
-				dma_unmap_single(NULL,
+				dma_unmap_single(&ugeth->dev->dev,
 						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
 						 (in_be32((u32 __iomem *)bd) &
 						  BD_LENGTH_MASK),
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		bd = ugeth->p_rx_bd_ring[i];
 		for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
 			if (ugeth->rx_skbuff[i][j]) {
-				dma_unmap_single(NULL,
+				dma_unmap_single(&ugeth->dev->dev,
 						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
 						 ugeth->ug_info->
 						 uf_info.max_rx_buf_length +
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set up the buffer descriptor */
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		      dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
+		      dma_map_single(&ugeth->dev->dev, skb->data,
+				     skb->len, DMA_TO_DEVICE));
 
 	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
 
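
Note: the ucc_geth hunks replace dma_map_single(NULL, ...) with the net_device's underlying struct device. The device argument is what lets the DMA API select the correct per-bus operations and bounce-buffering policy; a NULL device worked on some platforms only by accident. The pattern, reduced to its core (field names as in the driver above):

    /* Sketch: always map against the real device, never NULL. */
    dma_addr_t addr = dma_map_single(&ugeth->dev->dev, skb->data,
                                     skb->len, DMA_TO_DEVICE);
    out_be32(&((struct qe_bd __iomem *)bd)->buf, addr);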
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index dc6f097062df..37ecf845edfe 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = {
 	// Belkin F5D5055
 	USB_DEVICE(0x050d, 0x5055),
 	.driver_info = (unsigned long) &ax88178_info,
+}, {
+	// Apple USB Ethernet Adapter
+	USB_DEVICE(0x05ac, 0x1402),
+	.driver_info = (unsigned long) &ax88772_info,
 },
 	{ },		// END
 };
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 76752d84a30f..22c17bbacb69 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -423,7 +423,10 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
 	tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
-	*((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len);
+	if (catc->is_f5u011)
+		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
+	else
+		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
 	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
 	catc->tx_ptr += skb->len + 2;
 
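
Note: the catc change is about sparse-checkable endianness. cpu_to_be16() returns a __be16 and cpu_to_le16() a __le16; folding both into one ternary and storing through a plain u16 pointer hides the byte order from the type system. Splitting the branches lets each store go through a correctly annotated pointer, so a mixed-endian store like this sketch would draw a sparse complaint about differing restricted types:

    u16 *len = (u16 *)tx_buf;
    /* sparse flags this: __be16 and __le16 are distinct restricted
     * types, and neither silently converts to a plain u16 store. */
    *len = big_endian ? cpu_to_be16(n) : cpu_to_le16(n);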
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 21a7785cb8b6..ae467f182c40 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
 			dev_dbg(&info->control->dev,
 				"rndis response error, code %d\n", retval);
 		}
-		msleep(2);
+		msleep(20);
 	}
 	dev_dbg(&info->control->dev, "rndis response timeout\n");
 	return -ETIMEDOUT;
@@ -283,8 +283,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
 		struct rndis_set_c	*set_c;
 		struct rndis_halt	*halt;
 	} u;
-	u32			tmp, phym_unspec;
-	__le32			*phym;
+	u32			tmp;
+	__le32			phym_unspec, *phym;
 	int			reply_len;
 	unsigned char		*bp;
 
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6b8d882d197b..bcbf2fa9b94a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1495,24 +1495,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  *	enough. This function returns a negative value if the received
  *	packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
-				   struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1533,12 +1527,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 static inline void velocity_iph_realign(struct velocity_info *vptr,
 					struct sk_buff *skb, int pkt_size)
 {
-	/* FIXME - memmove ? */
 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
-		int i;
-
-		for (i = pkt_size; i >= 0; i--)
-			*(skb->data + i + 2) = *(skb->data + i);
+		memmove(skb->data + 2, skb->data, pkt_size);
 		skb_reserve(skb, 2);
 	}
 }
@@ -1629,7 +1619,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1638,7 +1628,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
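
Note: velocity_rx_copy() above implements the common rx_copybreak optimisation: frames smaller than a threshold are copied into a fresh, right-sized skb so the large DMA-mapped ring buffer can be reused instead of travelling up the stack. A self-contained sketch of the idea (helper name and the copybreak parameter are illustrative):

    /* Returns a small copy of ring_skb, or NULL if the caller should
     * hand the ring buffer itself up the stack. */
    static struct sk_buff *rx_copybreak_clone(struct net_device *dev,
                                              const struct sk_buff *ring_skb,
                                              int len, int copybreak)
    {
            struct sk_buff *skb;

            if (len >= copybreak)
                    return NULL;
            skb = netdev_alloc_skb(dev, len + 2);
            if (!skb)
                    return NULL;
            skb_reserve(skb, 2);               /* align the IP header */
            skb_copy_from_linear_data(ring_skb, skb->data, len);
            skb_put(skb, len);
            return skb;                        /* ring buffer stays mapped */
    }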
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f926b5ab3d09..fe7cdf2a2a23 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 		kfree_skb(skb);
 		vi->num--;
 	}
-	while ((skb = __skb_dequeue(&vi->send)) != NULL)
-		kfree_skb(skb);
+	__skb_queue_purge(&vi->send);
 
 	BUG_ON(vi->num != 0);
 
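
Note: __skb_queue_purge() is exactly the removed open-coded loop, dequeueing and kfree_skb()ing every buffer; the leading underscores mark the unlocked variant, so the caller must already own the queue (here the device is being torn down). For reference:

    /* __skb_queue_purge(&q) is shorthand for: */
    struct sk_buff *skb;
    while ((skb = __skb_dequeue(&q)) != NULL)
            kfree_skb(skb);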
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9a83c9d5b8cf..7f984895b0d5 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22";
 
 #undef DEBUG_LINK
 
-static struct hdlc_proto *first_proto = NULL;
-
+static struct hdlc_proto *first_proto;
 
 static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev)
 
 void register_hdlc_protocol(struct hdlc_proto *proto)
 {
+	rtnl_lock();
 	proto->next = first_proto;
 	first_proto = proto;
+	rtnl_unlock();
 }
 
 
 void unregister_hdlc_protocol(struct hdlc_proto *proto)
 {
-	struct hdlc_proto **p = &first_proto;
-	while (*p) {
-		if (*p == proto) {
-			*p = proto->next;
-			return;
-		}
+	struct hdlc_proto **p;
+
+	rtnl_lock();
+	p = &first_proto;
+	while (*p != proto) {
+		BUG_ON(!*p);
 		p = &((*p)->next);
 	}
+	*p = proto->next;
+	rtnl_unlock();
 }
 
 
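
Note: two things change in hdlc.c: the protocol list is now mutated only under the RTNL, and the unregister walk uses a pointer-to-pointer cursor, which removes the special case for unlinking the list head and turns "protocol not found" into a loud BUG_ON() instead of a silent return. The unlink idiom in isolation (names mirror the code above):

    struct hdlc_proto **p;

    rtnl_lock();
    for (p = &first_proto; *p != proto; p = &(*p)->next)
            BUG_ON(!*p);          /* proto must be on the list */
    *p = proto->next;             /* works for head and middle alike */
    rtnl_unlock();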
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 7133c688cf20..762d21c1c703 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -56,6 +56,7 @@ struct cisco_state {
 	cisco_proto settings;
 
 	struct timer_list timer;
+	spinlock_t lock;
 	unsigned long last_poll;
 	int up;
 	int request_sent;
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
 	struct cisco_packet *cisco_data;
 	struct in_device *in_dev;
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb)
 		goto rx_error;
 
 	case CISCO_KEEPALIVE_REQ:
-		state(hdlc)->rxseq = ntohl(cisco_data->par1);
-		if (state(hdlc)->request_sent &&
-		    ntohl(cisco_data->par2) == state(hdlc)->txseq) {
-			state(hdlc)->last_poll = jiffies;
-			if (!state(hdlc)->up) {
+		spin_lock(&st->lock);
+		st->rxseq = ntohl(cisco_data->par1);
+		if (st->request_sent &&
+		    ntohl(cisco_data->par2) == st->txseq) {
+			st->last_poll = jiffies;
+			if (!st->up) {
 				u32 sec, min, hrs, days;
 				sec = ntohl(cisco_data->time) / 1000;
 				min = sec / 60; sec -= min * 60;
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb)
 				days = hrs / 24; hrs -= days * 24;
 				printk(KERN_INFO "%s: Link up (peer "
 				       "uptime %ud%uh%um%us)\n",
-				       dev->name, days, hrs,
-				       min, sec);
+				       dev->name, days, hrs, min, sec);
 				netif_dormant_off(dev);
-				state(hdlc)->up = 1;
+				st->up = 1;
 			}
 		}
+		spin_unlock(&st->lock);
 
 		dev_kfree_skb_any(skb);
 		return NET_RX_SUCCESS;
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
 
-	if (state(hdlc)->up &&
-	    time_after(jiffies, state(hdlc)->last_poll +
-		       state(hdlc)->settings.timeout * HZ)) {
-		state(hdlc)->up = 0;
+	spin_lock(&st->lock);
+	if (st->up &&
+	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
+		st->up = 0;
 		printk(KERN_INFO "%s: Link down\n", dev->name);
 		netif_dormant_on(dev);
 	}
 
-	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
-			     htonl(++state(hdlc)->txseq),
-			     htonl(state(hdlc)->rxseq));
-	state(hdlc)->request_sent = 1;
-	state(hdlc)->timer.expires = jiffies +
-		state(hdlc)->settings.interval * HZ;
-	state(hdlc)->timer.function = cisco_timer;
-	state(hdlc)->timer.data = arg;
-	add_timer(&state(hdlc)->timer);
+	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
+			     htonl(st->rxseq));
+	st->request_sent = 1;
+	spin_unlock(&st->lock);
+
+	st->timer.expires = jiffies + st->settings.interval * HZ;
+	st->timer.function = cisco_timer;
+	st->timer.data = arg;
+	add_timer(&st->timer);
 }
 
 
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg)
 static void cisco_start(struct net_device *dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
-	state(hdlc)->up = 0;
-	state(hdlc)->request_sent = 0;
-	state(hdlc)->txseq = state(hdlc)->rxseq = 0;
-
-	init_timer(&state(hdlc)->timer);
-	state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/
-	state(hdlc)->timer.function = cisco_timer;
-	state(hdlc)->timer.data = (unsigned long)dev;
-	add_timer(&state(hdlc)->timer);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&st->lock, flags);
+	st->up = 0;
+	st->request_sent = 0;
+	st->txseq = st->rxseq = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	init_timer(&st->timer);
+	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
+	st->timer.function = cisco_timer;
+	st->timer.data = (unsigned long)dev;
+	add_timer(&st->timer);
 }
 
 
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev)
 static void cisco_stop(struct net_device *dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
-	del_timer_sync(&state(hdlc)->timer);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	del_timer_sync(&st->timer);
+
+	spin_lock_irqsave(&st->lock, flags);
 	netif_dormant_on(dev);
-	state(hdlc)->up = 0;
-	state(hdlc)->request_sent = 0;
+	st->up = 0;
+	st->request_sent = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
 }
 
 
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 			return result;
 
 		memcpy(&state(hdlc)->settings, &new_settings, size);
+		spin_lock_init(&state(hdlc)->lock);
 		dev->hard_start_xmit = hdlc->xmit;
 		dev->header_ops = &cisco_header_ops;
 		dev->type = ARPHRD_CISCO;
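
Note: the cisco keepalive state (txseq, rxseq, up, request_sent) is now shared between cisco_rx(), which runs in softirq context, and cisco_timer(), so both take the new st->lock with plain spin_lock(); cisco_start()/cisco_stop() run in process context and use the irqsave form. The lock is also dropped before the timer is re-armed, so add_timer() is never called under it. A compressed view of the convention (sketch, not the full functions):

    spin_lock(&st->lock);                   /* softirq / timer side */
    st->rxseq = ntohl(cisco_data->par1);
    spin_unlock(&st->lock);

    spin_lock_irqsave(&st->lock, flags);    /* process-context side */
    st->up = 0;
    spin_unlock_irqrestore(&st->lock, flags);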
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 8a78283e8607..17ced37e55ed 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2669,6 +2669,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
 	dev->irq = ethdev->irq;
 	dev->base_addr = ethdev->base_addr;
 	dev->wireless_data = ethdev->wireless_data;
+	SET_NETDEV_DEV(dev, ethdev->dev.parent);
 	memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
 	err = register_netdev(dev);
 	if (err<0) {
@@ -2905,7 +2906,7 @@ EXPORT_SYMBOL(init_airo_card);
 
 static int waitbusy (struct airo_info *ai) {
 	int delay = 0;
-	while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) {
+	while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
 		udelay (10);
 		if ((++delay % 20) == 0)
 			OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
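
Note: the waitbusy() change is a one-character bug fix: & is a bitwise AND, so the old condition combined the busy bits with the 0-or-1 result of (delay < 10000), and the loop behaved correctly only if the busy mask happened to occupy bit 0. && tests the two conditions logically. A self-contained illustration:

    int busy = 0x8000;              /* e.g. a busy flag in a high bit */
    int in_time = (5 < 10000);      /* comparisons evaluate to 0 or 1 */

    int wrong = busy & in_time;     /* 0x8000 & 1 == 0: never waits */
    int right = busy && in_time;    /* 1: keeps polling until timeout */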
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 437a9bcc9bd3..ed4317a17cbb 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -833,6 +833,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
 	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
 /*	PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000),    conflict with pcnet_cs */
+	PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
 	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
 	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
 	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 7be68db6f300..cdf90c40f11b 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3276,11 +3276,6 @@ while (0)
 	}
 	printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
 
-#ifndef PRISM2_NO_PROCFS_DEBUG
-	create_proc_read_entry("registers", 0, local->proc,
-			       prism2_registers_proc_read, local);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
 	hostap_init_data(local);
 	return dev;
 
@@ -3307,6 +3302,10 @@ static int hostap_hw_ready(struct net_device *dev)
 		netif_carrier_off(local->ddev);
 	}
 	hostap_init_proc(local);
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	create_proc_read_entry("registers", 0, local->proc,
+			       prism2_registers_proc_read, local);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
 	hostap_init_ap_proc(local);
 	return 0;
 }
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index fa87c5c2ae0b..d74c061994ae 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -11584,6 +11584,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
 	priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
 
 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
+	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
 
 	rc = register_netdev(priv->prom_net_dev);
 	if (rc) {
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index dcfdb404678b..688d60de55cb 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -73,8 +73,8 @@ out:
 	return ret;
 }
 
-static void lbs_ethtool_get_stats(struct net_device * dev,
-				struct ethtool_stats * stats, u64 * data)
+static void lbs_ethtool_get_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, uint64_t *data)
 {
 	struct lbs_private *priv = dev->priv;
 	struct cmd_ds_mesh_access mesh_access;
@@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
 	lbs_deb_enter(LBS_DEB_ETHTOOL);
 
 	/* Get Mesh Statistics */
-	ret = lbs_prepare_and_send_command(priv,
-			CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS,
-			CMD_OPTION_WAITFORRSP, 0, &mesh_access);
+	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
 
-	if (ret)
+	if (ret) {
+		memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t)));
 		return;
+	}
 
 	priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
 	priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
@@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
 	lbs_deb_enter(LBS_DEB_ETHTOOL);
 }
 
-static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset)
+static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
-	switch (sset) {
-	case ETH_SS_STATS:
+	struct lbs_private *priv = dev->priv;
+
+	if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
 		return MESH_STATS_NUM;
-	default:
-		return -EOPNOTSUPP;
-	}
+
+	return -EOPNOTSUPP;
 }
 
 static void lbs_ethtool_get_strings(struct net_device *dev,
-				    u32 stringset,
-				    u8 * s)
+				    uint32_t stringset, uint8_t *s)
 {
 	int i;
 
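
Note: the ethtool changes above tighten the driver's side of the get_sset_count()/get_stats() contract: the stats count is advertised only for the mesh interface, and on a command failure the output buffer is zeroed rather than left uninitialised, since ethtool still prints MESH_STATS_NUM values from it. The failure path, as in the hunk:

    ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
    if (ret) {
            /* ethtool displays the buffer anyway; don't leak garbage */
            memset(data, 0, MESH_STATS_NUM * sizeof(uint64_t));
            return;
    }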
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 039e09a8b024..b7ab3590b586 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -792,6 +792,7 @@ static int lbs_thread(void *data)
 			priv->reset_card(priv);
 		} else {
 			priv->cur_cmd = NULL;
+			priv->dnld_sent = DNLD_RES_RECEIVED;
 			lbs_pr_info("requeueing command 0x%04x due "
 				"to timeout (#%d)\n",
 				le16_to_cpu(cmdnode->cmdbuf->command),
@@ -1602,6 +1603,7 @@ static int lbs_add_rtap(struct lbs_private *priv)
 	rtap_dev->get_stats = lbs_rtap_get_stats;
 	rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
 	rtap_dev->priv = priv;
+	SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
 
 	ret = register_netdev(rtap_dev);
 	if (ret) {
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 8b7f5768a103..1c216e015f64 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
 	PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
 	PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
+	PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
 	PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
 	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
 	PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index b581ef8a6377..0078c7e9918c 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
 		u8 data[4];
 		struct usb_ctrlrequest dr;
 	} *buf;
+	int rc;
 
 	buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
 	if (!buf)
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
 	usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0),
 			     (unsigned char *)dr, buf, len,
 			     rtl8187_iowrite_async_cb, buf);
-	usb_submit_urb(urb, GFP_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
+	if (rc < 0) {
+		kfree(buf);
+		usb_free_urb(urb);
+	}
 }
 
 static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
@@ -164,6 +169,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	struct urb *urb;
 	__le16 rts_dur = 0;
 	u32 flags;
+	int rc;
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
@@ -197,7 +203,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	info->driver_data[1] = urb;
 	usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
 			  hdr, skb->len, rtl8187_tx_cb, skb);
-	usb_submit_urb(urb, GFP_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
+	if (rc < 0) {
+		usb_free_urb(urb);
+		kfree_skb(skb);
+	}
 
 	return 0;
 }
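
Note: both rtl8187 hunks apply the same rule: if usb_submit_urb() fails, the completion callback will never run, so whatever that callback would have released must be released on the spot, and the driver's urb reference dropped with usb_free_urb(). The pattern in isolation:

    rc = usb_submit_urb(urb, GFP_ATOMIC);
    if (rc < 0) {
            usb_free_urb(urb);      /* drop our reference */
            kfree_skb(skb);         /* the tx callback won't do it now */
    }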
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index edb1aefb4add..d2378d083a35 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -735,7 +735,7 @@ void zd_process_intr(struct work_struct *work)
 	u16 int_status;
 	struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
 
-	int_status = le16_to_cpu(*(u16 *)(mac->intr_buffer+4));
+	int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
 	if (int_status & INT_CFG_NEXT_BCN) {
 		if (net_ratelimit())
 			dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 6a51ae419e6f..1ccff240bf97 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -353,7 +353,7 @@ static inline void handle_regs_int(struct urb *urb)
 	ZD_ASSERT(in_interrupt());
 	spin_lock(&intr->lock);
 
-	int_num = le16_to_cpu(*(u16 *)(urb->transfer_buffer+2));
+	int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
 	if (int_num == CR_INTERRUPT) {
 		struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
 		memcpy(&mac->intr_buffer, urb->transfer_buffer,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8bddff150c70..d26f69b0184f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -946,8 +946,7 @@ err:
 		work_done++;
 	}
 
-	while ((skb = __skb_dequeue(&errq)))
-		kfree_skb(skb);
+	__skb_queue_purge(&errq);
 
 	work_done -= handle_incoming_queue(dev, &rxq);
 
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 		}
 	}
 
-	while ((skb = __skb_dequeue(&free_list)) != NULL)
-		dev_kfree_skb(skb);
+	__skb_queue_purge(&free_list);
 
 	spin_unlock_bh(&np->rx_lock);
 }