aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/atm/fore200e.h1
-rw-r--r--drivers/atm/fore200e_mkfirm.c2
-rw-r--r--drivers/atm/he.h2
-rw-r--r--drivers/atm/idt77252.c7
-rw-r--r--drivers/atm/idt77252.h4
-rw-r--r--drivers/atm/nicstarmac.copyright2
-rw-r--r--drivers/net/3c509.c2
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/au1000_eth.c7
-rw-r--r--drivers/net/bfin_mac.c1
-rw-r--r--drivers/net/cassini.c11
-rw-r--r--drivers/net/cpmac.c234
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e1000e/netdev.c4
-rw-r--r--drivers/net/ehea/ehea_main.c5
-rw-r--r--drivers/net/forcedeth.c1
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/hamradio/scc.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c4
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c12
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/phy_device.c1
-rw-r--r--drivers/net/ppp_generic.c22
-rw-r--r--drivers/net/pppol2tp.c13
-rw-r--r--drivers/net/s2io-regs.h2
-rw-r--r--drivers/net/s2io.c494
-rw-r--r--drivers/net/s2io.h22
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/sfc/bitfield.h7
-rw-r--r--drivers/net/sfc/boards.c9
-rw-r--r--drivers/net/sfc/efx.c84
-rw-r--r--drivers/net/sfc/falcon.c87
-rw-r--r--drivers/net/sfc/falcon.h5
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h4
-rw-r--r--drivers/net/sfc/falcon_io.h29
-rw-r--r--drivers/net/sfc/falcon_xmac.c10
-rw-r--r--drivers/net/sfc/net_driver.h44
-rw-r--r--drivers/net/sfc/rx.c48
-rw-r--r--drivers/net/sfc/selftest.c14
-rw-r--r--drivers/net/sfc/sfe4001.c14
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c11
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sfc/xfp_phy.c4
-rw-r--r--drivers/net/sky2.c29
-rw-r--r--drivers/net/tg3.c1236
-rw-r--r--drivers/net/tg3.h40
-rw-r--r--drivers/net/tokenring/3c359.h2
-rw-r--r--drivers/net/tokenring/olympic.h2
-rw-r--r--drivers/net/tulip/uli526x.c16
-rw-r--r--drivers/net/ucc_geth.c9
-rw-r--r--drivers/net/usb/asix.c4
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/wan/hdlc.c19
-rw-r--r--drivers/net/wan/hdlc_cisco.c82
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/ipw2200.c1
-rw-r--r--drivers/net/wireless/libertas/ethtool.c27
-rw-r--r--drivers/net/wireless/libertas/main.c2
-rw-r--r--drivers/net/wireless/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/rtl8187_dev.c14
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/brcmphy.h6
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter_arp/arp_tables.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h2
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/tcp.h52
-rw-r--r--include/linux/wanrouter.h2
-rw-r--r--include/net/mac80211.h25
-rw-r--r--include/net/ndisc.h4
-rw-r--r--include/net/netlink.h11
-rw-r--r--net/8021q/vlan.c28
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/ipv4/arp.c5
-rw-r--r--net/ipv4/ip_gre.c146
-rw-r--r--net/ipv4/ipip.c130
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp_output.c10
-rw-r--r--net/ipv6/addrconf.c75
-rw-r--r--net/ipv6/ndisc.c8
-rw-r--r--net/ipv6/route.c12
-rw-r--r--net/ipv6/sit.c89
-rw-r--r--net/irda/irnet/irnet_ppp.c54
-rw-r--r--net/irda/irnet/irnet_ppp.h7
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/mac80211/mlme.c29
-rw-r--r--net/mac80211/util.c37
-rw-r--r--net/mac80211/wext.c1
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/wanrouter/wanmain.c6
-rw-r--r--net/wanrouter/wanproc.c2
-rw-r--r--net/xfrm/xfrm_user.c11
103 files changed, 2086 insertions, 1453 deletions
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index 183841cc8fdf..8dd4aa76c3bd 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -1,4 +1,3 @@
1/* $Id: fore200e.h,v 1.4 2000/04/14 10:10:34 davem Exp $ */
2#ifndef _FORE200E_H 1#ifndef _FORE200E_H
3#define _FORE200E_H 2#define _FORE200E_H
4 3
diff --git a/drivers/atm/fore200e_mkfirm.c b/drivers/atm/fore200e_mkfirm.c
index 2ebe1a1e6f8b..520e14b488ff 100644
--- a/drivers/atm/fore200e_mkfirm.c
+++ b/drivers/atm/fore200e_mkfirm.c
@@ -1,6 +1,4 @@
1/* 1/*
2 $Id: fore200e_mkfirm.c,v 1.1 2000/02/21 16:04:32 davem Exp $
3
4 mkfirm.c: generates a C readable file from a binary firmware image 2 mkfirm.c: generates a C readable file from a binary firmware image
5 3
6 Christophe Lizzi (lizzi@{csti.fr, cnam.fr}), June 1999. 4 Christophe Lizzi (lizzi@{csti.fr, cnam.fr}), June 1999.
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index 1dc277547a73..fe6cd15a78a4 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -1,5 +1,3 @@
1/* $Id: he.h,v 1.4 2003/05/06 22:48:00 chas Exp $ */
2
3/* 1/*
4 2
5 he.h 3 he.h
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 28d77b5195de..3a504e94a4d9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1,8 +1,4 @@
1/******************************************************************* 1/*******************************************************************
2 * ident "$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $"
3 *
4 * $Author: ecd $
5 * $Date: 2001/11/11 08:13:54 $
6 * 2 *
7 * Copyright (c) 2000 ATecoM GmbH 3 * Copyright (c) 2000 ATecoM GmbH
8 * 4 *
@@ -29,9 +25,6 @@
29 * 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 675 Mass Ave, Cambridge, MA 02139, USA.
30 * 26 *
31 *******************************************************************/ 27 *******************************************************************/
32static char const rcsid[] =
33"$Id: idt77252.c,v 1.2 2001/11/11 08:13:54 ecd Exp $";
34
35 28
36#include <linux/module.h> 29#include <linux/module.h>
37#include <linux/pci.h> 30#include <linux/pci.h>
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 6f2b4a5875fb..e83eaf120da0 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -1,8 +1,4 @@
1/******************************************************************* 1/*******************************************************************
2 * ident "$Id: idt77252.h,v 1.2 2001/11/11 08:13:54 ecd Exp $"
3 *
4 * $Author: ecd $
5 * $Date: 2001/11/11 08:13:54 $
6 * 2 *
7 * Copyright (c) 2000 ATecoM GmbH 3 * Copyright (c) 2000 ATecoM GmbH
8 * 4 *
diff --git a/drivers/atm/nicstarmac.copyright b/drivers/atm/nicstarmac.copyright
index 2e15b39fac4f..180531a83c62 100644
--- a/drivers/atm/nicstarmac.copyright
+++ b/drivers/atm/nicstarmac.copyright
@@ -13,7 +13,7 @@
13 * 13 *
14 * Modified to work with the IDT7721 nicstar -- AAL5 (tested) only. 14 * Modified to work with the IDT7721 nicstar -- AAL5 (tested) only.
15 * 15 *
16 * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997 $Revision: 1.1 $ $Date: 1999/08/20 11:00:11 $ 16 * R. D. Rechenmacher <ron@fnal.gov>, Aug. 6, 1997
17 * 17 *
18 * Linux driver for the IDT77201 NICStAR PCI ATM controller. 18 * Linux driver for the IDT77201 NICStAR PCI ATM controller.
19 * PHY component is expected to be 155 Mbps S/UNI-Lite or IDT 77155; 19 * PHY component is expected to be 155 Mbps S/UNI-Lite or IDT 77155;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 87d8795823d7..b9d097c9f6bb 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1062,7 +1062,6 @@ el3_rx(struct net_device *dev)
1062 struct sk_buff *skb; 1062 struct sk_buff *skb;
1063 1063
1064 skb = dev_alloc_skb(pkt_len+5); 1064 skb = dev_alloc_skb(pkt_len+5);
1065 dev->stats.rx_bytes += pkt_len;
1066 if (el3_debug > 4) 1065 if (el3_debug > 4)
1067 printk("Receiving packet size %d status %4.4x.\n", 1066 printk("Receiving packet size %d status %4.4x.\n",
1068 pkt_len, rx_status); 1067 pkt_len, rx_status);
@@ -1077,6 +1076,7 @@ el3_rx(struct net_device *dev)
1077 skb->protocol = eth_type_trans(skb,dev); 1076 skb->protocol = eth_type_trans(skb,dev);
1078 netif_rx(skb); 1077 netif_rx(skb);
1079 dev->last_rx = jiffies; 1078 dev->last_rx = jiffies;
1079 dev->stats.rx_bytes += pkt_len;
1080 dev->stats.rx_packets++; 1080 dev->stats.rx_packets++;
1081 continue; 1081 continue;
1082 } 1082 }
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index daeb23d7fec1..8178a4dfd09c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2228,6 +2228,7 @@ config VIA_VELOCITY
2228config TIGON3 2228config TIGON3
2229 tristate "Broadcom Tigon3 support" 2229 tristate "Broadcom Tigon3 support"
2230 depends on PCI 2230 depends on PCI
2231 select PHYLIB
2231 help 2232 help
2232 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 2233 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
2233 2234
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3634b5fd7919..7023d77bf380 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
1239 */ 1239 */
1240static irqreturn_t au1000_interrupt(int irq, void *dev_id) 1240static irqreturn_t au1000_interrupt(int irq, void *dev_id)
1241{ 1241{
1242 struct net_device *dev = (struct net_device *) dev_id; 1242 struct net_device *dev = dev_id;
1243
1244 if (dev == NULL) {
1245 printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
1246 return IRQ_RETVAL(1);
1247 }
1248 1243
1249 /* Handle RX interrupts first to minimize chance of overrun */ 1244 /* Handle RX interrupts first to minimize chance of overrun */
1250 1245
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 89c0018132ec..41443435ab1c 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -22,7 +22,6 @@
22#include <linux/crc32.h> 22#include <linux/crc32.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/ethtool.h>
26#include <linux/mii.h> 25#include <linux/mii.h>
27#include <linux/phy.h> 26#include <linux/phy.h>
28#include <linux/netdevice.h> 27#include <linux/netdevice.h>
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 93e13636f8dd..83768df27806 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -142,8 +142,8 @@
142 142
143#define DRV_MODULE_NAME "cassini" 143#define DRV_MODULE_NAME "cassini"
144#define PFX DRV_MODULE_NAME ": " 144#define PFX DRV_MODULE_NAME ": "
145#define DRV_MODULE_VERSION "1.5" 145#define DRV_MODULE_VERSION "1.6"
146#define DRV_MODULE_RELDATE "4 Jan 2008" 146#define DRV_MODULE_RELDATE "21 May 2008"
147 147
148#define CAS_DEF_MSG_ENABLE \ 148#define CAS_DEF_MSG_ENABLE \
149 (NETIF_MSG_DRV | \ 149 (NETIF_MSG_DRV | \
@@ -2136,9 +2136,12 @@ end_copy_pkt:
2136 if (addr) 2136 if (addr)
2137 cas_page_unmap(addr); 2137 cas_page_unmap(addr);
2138 } 2138 }
2139 skb->csum = csum_unfold(~csum);
2140 skb->ip_summed = CHECKSUM_COMPLETE;
2141 skb->protocol = eth_type_trans(skb, cp->dev); 2139 skb->protocol = eth_type_trans(skb, cp->dev);
2140 if (skb->protocol == htons(ETH_P_IP)) {
2141 skb->csum = csum_unfold(~csum);
2142 skb->ip_summed = CHECKSUM_COMPLETE;
2143 } else
2144 skb->ip_summed = CHECKSUM_NONE;
2142 return len; 2145 return len;
2143} 2146}
2144 2147
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ae07100bb935..7f3f62e1b113 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,6 +38,7 @@
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <asm/gpio.h> 40#include <asm/gpio.h>
41#include <asm/atomic.h>
41 42
42MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 43MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
43MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 44MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
187#define CPMAC_EOQ 0x1000 188#define CPMAC_EOQ 0x1000
188 struct sk_buff *skb; 189 struct sk_buff *skb;
189 struct cpmac_desc *next; 190 struct cpmac_desc *next;
191 struct cpmac_desc *prev;
190 dma_addr_t mapping; 192 dma_addr_t mapping;
191 dma_addr_t data_mapping; 193 dma_addr_t data_mapping;
192}; 194};
@@ -208,6 +210,7 @@ struct cpmac_priv {
208 struct work_struct reset_work; 210 struct work_struct reset_work;
209 struct platform_device *pdev; 211 struct platform_device *pdev;
210 struct napi_struct napi; 212 struct napi_struct napi;
213 atomic_t reset_pending;
211}; 214};
212 215
213static irqreturn_t cpmac_irq(int, void *); 216static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
241 printk("\n"); 244 printk("\n");
242} 245}
243 246
247static void cpmac_dump_all_desc(struct net_device *dev)
248{
249 struct cpmac_priv *priv = netdev_priv(dev);
250 struct cpmac_desc *dump = priv->rx_head;
251 do {
252 cpmac_dump_desc(dev, dump);
253 dump = dump->next;
254 } while (dump != priv->rx_head);
255}
256
244static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) 257static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
245{ 258{
246 int i; 259 int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
412static int cpmac_poll(struct napi_struct *napi, int budget) 425static int cpmac_poll(struct napi_struct *napi, int budget)
413{ 426{
414 struct sk_buff *skb; 427 struct sk_buff *skb;
415 struct cpmac_desc *desc; 428 struct cpmac_desc *desc, *restart;
416 int received = 0;
417 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 429 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
430 int received = 0, processed = 0;
418 431
419 spin_lock(&priv->rx_lock); 432 spin_lock(&priv->rx_lock);
420 if (unlikely(!priv->rx_head)) { 433 if (unlikely(!priv->rx_head)) {
421 if (netif_msg_rx_err(priv) && net_ratelimit()) 434 if (netif_msg_rx_err(priv) && net_ratelimit())
422 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 435 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
423 priv->dev->name); 436 priv->dev->name);
437 spin_unlock(&priv->rx_lock);
424 netif_rx_complete(priv->dev, napi); 438 netif_rx_complete(priv->dev, napi);
425 return 0; 439 return 0;
426 } 440 }
427 441
428 desc = priv->rx_head; 442 desc = priv->rx_head;
443 restart = NULL;
429 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 444 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
445 processed++;
446
447 if ((desc->dataflags & CPMAC_EOQ) != 0) {
448 /* The last update to eoq->hw_next didn't happen
449 * soon enough, and the receiver stopped here.
450 *Remember this descriptor so we can restart
451 * the receiver after freeing some space.
452 */
453 if (unlikely(restart)) {
454 if (netif_msg_rx_err(priv))
455 printk(KERN_ERR "%s: poll found a"
456 " duplicate EOQ: %p and %p\n",
457 priv->dev->name, restart, desc);
458 goto fatal_error;
459 }
460
461 restart = desc->next;
462 }
463
430 skb = cpmac_rx_one(priv, desc); 464 skb = cpmac_rx_one(priv, desc);
431 if (likely(skb)) { 465 if (likely(skb)) {
432 netif_receive_skb(skb); 466 netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
435 desc = desc->next; 469 desc = desc->next;
436 } 470 }
437 471
472 if (desc != priv->rx_head) {
473 /* We freed some buffers, but not the whole ring,
474 * add what we did free to the rx list */
475 desc->prev->hw_next = (u32)0;
476 priv->rx_head->prev->hw_next = priv->rx_head->mapping;
477 }
478
479 /* Optimization: If we did not actually process an EOQ (perhaps because
480 * of quota limits), check to see if the tail of the queue has EOQ set.
481 * We should immediately restart in that case so that the receiver can
482 * restart and run in parallel with more packet processing.
483 * This lets us handle slightly larger bursts before running
484 * out of ring space (assuming dev->weight < ring_size) */
485
486 if (!restart &&
487 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
488 == CPMAC_EOQ &&
489 (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
490 /* reset EOQ so the poll loop (above) doesn't try to
491 * restart this when it eventually gets to this descriptor.
492 */
493 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
494 restart = priv->rx_head;
495 }
496
497 if (restart) {
498 priv->dev->stats.rx_errors++;
499 priv->dev->stats.rx_fifo_errors++;
500 if (netif_msg_rx_err(priv) && net_ratelimit())
501 printk(KERN_WARNING "%s: rx dma ring overrun\n",
502 priv->dev->name);
503
504 if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
505 if (netif_msg_drv(priv))
506 printk(KERN_ERR "%s: cpmac_poll is trying to "
507 "restart rx from a descriptor that's "
508 "not free: %p\n",
509 priv->dev->name, restart);
510 goto fatal_error;
511 }
512
513 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
514 }
515
438 priv->rx_head = desc; 516 priv->rx_head = desc;
439 spin_unlock(&priv->rx_lock); 517 spin_unlock(&priv->rx_lock);
440 if (unlikely(netif_msg_rx_status(priv))) 518 if (unlikely(netif_msg_rx_status(priv)))
441 printk(KERN_DEBUG "%s: poll processed %d packets\n", 519 printk(KERN_DEBUG "%s: poll processed %d packets\n",
442 priv->dev->name, received); 520 priv->dev->name, received);
443 if (desc->dataflags & CPMAC_OWN) { 521 if (processed == 0) {
522 /* we ran out of packets to read,
523 * revert to interrupt-driven mode */
444 netif_rx_complete(priv->dev, napi); 524 netif_rx_complete(priv->dev, napi);
445 cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
446 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 525 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
447 return 0; 526 return 0;
448 } 527 }
449 528
450 return 1; 529 return 1;
530
531fatal_error:
532 /* Something went horribly wrong.
533 * Reset hardware to try to recover rather than wedging. */
534
535 if (netif_msg_drv(priv)) {
536 printk(KERN_ERR "%s: cpmac_poll is confused. "
537 "Resetting hardware\n", priv->dev->name);
538 cpmac_dump_all_desc(priv->dev);
539 printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
540 priv->dev->name,
541 cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
542 cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
543 }
544
545 spin_unlock(&priv->rx_lock);
546 netif_rx_complete(priv->dev, napi);
547 netif_stop_queue(priv->dev);
548 napi_disable(&priv->napi);
549
550 atomic_inc(&priv->reset_pending);
551 cpmac_hw_stop(priv->dev);
552 if (!schedule_work(&priv->reset_work))
553 atomic_dec(&priv->reset_pending);
554 return 0;
555
451} 556}
452 557
453static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 558static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
456 struct cpmac_desc *desc; 561 struct cpmac_desc *desc;
457 struct cpmac_priv *priv = netdev_priv(dev); 562 struct cpmac_priv *priv = netdev_priv(dev);
458 563
564 if (unlikely(atomic_read(&priv->reset_pending)))
565 return NETDEV_TX_BUSY;
566
459 if (unlikely(skb_padto(skb, ETH_ZLEN))) 567 if (unlikely(skb_padto(skb, ETH_ZLEN)))
460 return NETDEV_TX_OK; 568 return NETDEV_TX_OK;
461 569
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
621 desc->dataflags = CPMAC_OWN; 729 desc->dataflags = CPMAC_OWN;
622 dev->stats.rx_dropped++; 730 dev->stats.rx_dropped++;
623 } 731 }
732 desc->hw_next = desc->next->mapping;
624 desc = desc->next; 733 desc = desc->next;
625 } 734 }
735 priv->rx_head->prev->hw_next = 0;
626} 736}
627 737
628static void cpmac_clear_tx(struct net_device *dev) 738static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
635 priv->desc_ring[i].dataflags = 0; 745 priv->desc_ring[i].dataflags = 0;
636 if (priv->desc_ring[i].skb) { 746 if (priv->desc_ring[i].skb) {
637 dev_kfree_skb_any(priv->desc_ring[i].skb); 747 dev_kfree_skb_any(priv->desc_ring[i].skb);
638 if (netif_subqueue_stopped(dev, i)) 748 priv->desc_ring[i].skb = NULL;
639 netif_wake_subqueue(dev, i);
640 } 749 }
641 } 750 }
642} 751}
643 752
644static void cpmac_hw_error(struct work_struct *work) 753static void cpmac_hw_error(struct work_struct *work)
645{ 754{
755 int i;
646 struct cpmac_priv *priv = 756 struct cpmac_priv *priv =
647 container_of(work, struct cpmac_priv, reset_work); 757 container_of(work, struct cpmac_priv, reset_work);
648 758
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
651 spin_unlock(&priv->rx_lock); 761 spin_unlock(&priv->rx_lock);
652 cpmac_clear_tx(priv->dev); 762 cpmac_clear_tx(priv->dev);
653 cpmac_hw_start(priv->dev); 763 cpmac_hw_start(priv->dev);
654 napi_enable(&priv->napi); 764 barrier();
655 netif_start_queue(priv->dev); 765 atomic_dec(&priv->reset_pending);
766
767 for (i = 0; i < CPMAC_QUEUES; i++)
768 netif_wake_subqueue(priv->dev, i);
769 netif_wake_queue(priv->dev);
770 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
771}
772
773static void cpmac_check_status(struct net_device *dev)
774{
775 struct cpmac_priv *priv = netdev_priv(dev);
776
777 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
778 int rx_channel = (macstatus >> 8) & 7;
779 int rx_code = (macstatus >> 12) & 15;
780 int tx_channel = (macstatus >> 16) & 7;
781 int tx_code = (macstatus >> 20) & 15;
782
783 if (rx_code || tx_code) {
784 if (netif_msg_drv(priv) && net_ratelimit()) {
785 /* Can't find any documentation on what these
786 *error codes actually are. So just log them and hope..
787 */
788 if (rx_code)
789 printk(KERN_WARNING "%s: host error %d on rx "
790 "channel %d (macstatus %08x), resetting\n",
791 dev->name, rx_code, rx_channel, macstatus);
792 if (tx_code)
793 printk(KERN_WARNING "%s: host error %d on tx "
794 "channel %d (macstatus %08x), resetting\n",
795 dev->name, tx_code, tx_channel, macstatus);
796 }
797
798 netif_stop_queue(dev);
799 cpmac_hw_stop(dev);
800 if (schedule_work(&priv->reset_work))
801 atomic_inc(&priv->reset_pending);
802 if (unlikely(netif_msg_hw(priv)))
803 cpmac_dump_regs(dev);
804 }
805 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
656} 806}
657 807
658static irqreturn_t cpmac_irq(int irq, void *dev_id) 808static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
683 833
684 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 834 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
685 835
686 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { 836 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
687 if (netif_msg_drv(priv) && net_ratelimit()) 837 cpmac_check_status(dev);
688 printk(KERN_ERR "%s: hw error, resetting...\n",
689 dev->name);
690 netif_stop_queue(dev);
691 napi_disable(&priv->napi);
692 cpmac_hw_stop(dev);
693 schedule_work(&priv->reset_work);
694 if (unlikely(netif_msg_hw(priv)))
695 cpmac_dump_regs(dev);
696 }
697 838
698 return IRQ_HANDLED; 839 return IRQ_HANDLED;
699} 840}
700 841
701static void cpmac_tx_timeout(struct net_device *dev) 842static void cpmac_tx_timeout(struct net_device *dev)
702{ 843{
703 struct cpmac_priv *priv = netdev_priv(dev);
704 int i; 844 int i;
845 struct cpmac_priv *priv = netdev_priv(dev);
705 846
706 spin_lock(&priv->lock); 847 spin_lock(&priv->lock);
707 dev->stats.tx_errors++; 848 dev->stats.tx_errors++;
708 spin_unlock(&priv->lock); 849 spin_unlock(&priv->lock);
709 if (netif_msg_tx_err(priv) && net_ratelimit()) 850 if (netif_msg_tx_err(priv) && net_ratelimit())
710 printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 851 printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
711 /* 852
712 * FIXME: waking up random queue is not the best thing to 853 atomic_inc(&priv->reset_pending);
713 * do... on the other hand why we got here at all? 854 barrier();
714 */ 855 cpmac_clear_tx(dev);
715#ifdef CONFIG_NETDEVICES_MULTIQUEUE 856 barrier();
857 atomic_dec(&priv->reset_pending);
858
859 netif_wake_queue(priv->dev);
716 for (i = 0; i < CPMAC_QUEUES; i++) 860 for (i = 0; i < CPMAC_QUEUES; i++)
717 if (priv->desc_ring[i].skb) { 861 netif_wake_subqueue(dev, i);
718 priv->desc_ring[i].dataflags = 0;
719 dev_kfree_skb_any(priv->desc_ring[i].skb);
720 netif_wake_subqueue(dev, i);
721 break;
722 }
723#else
724 priv->desc_ring[0].dataflags = 0;
725 if (priv->desc_ring[0].skb)
726 dev_kfree_skb_any(priv->desc_ring[0].skb);
727 netif_wake_queue(dev);
728#endif
729} 862}
730 863
731static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 864static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
901 desc->buflen = CPMAC_SKB_SIZE; 1034 desc->buflen = CPMAC_SKB_SIZE;
902 desc->dataflags = CPMAC_OWN; 1035 desc->dataflags = CPMAC_OWN;
903 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 1036 desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
1037 desc->next->prev = desc;
904 desc->hw_next = (u32)desc->next->mapping; 1038 desc->hw_next = (u32)desc->next->mapping;
905 } 1039 }
906 1040
1041 priv->rx_head->prev->hw_next = (u32)0;
1042
907 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1043 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
908 dev->name, dev))) { 1044 dev->name, dev))) {
909 if (netif_msg_drv(priv)) 1045 if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
912 goto fail_irq; 1048 goto fail_irq;
913 } 1049 }
914 1050
1051 atomic_set(&priv->reset_pending, 0);
915 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1052 INIT_WORK(&priv->reset_work, cpmac_hw_error);
916 cpmac_hw_start(dev); 1053 cpmac_hw_start(dev);
917 1054
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1007 1144
1008 if (phy_id == PHY_MAX_ADDR) { 1145 if (phy_id == PHY_MAX_ADDR) {
1009 if (external_switch || dumb_switch) { 1146 if (external_switch || dumb_switch) {
1010 struct fixed_phy_status status = {}; 1147 mdio_bus_id = 0; /* fixed phys bus */
1011 1148 phy_id = pdev->id;
1012 /*
1013 * FIXME: this should be in the platform code!
1014 * Since there is not platform code at all (that is,
1015 * no mainline users of that driver), place it here
1016 * for now.
1017 */
1018 phy_id = 0;
1019 status.link = 1;
1020 status.duplex = 1;
1021 status.speed = 100;
1022 fixed_phy_add(PHY_POLL, phy_id, &status);
1023 } else { 1149 } else {
1024 printk(KERN_ERR "cpmac: no PHY present\n"); 1150 dev_err(&pdev->dev, "no PHY present\n");
1025 return -ENODEV; 1151 return -ENODEV;
1026 } 1152 }
1027 } 1153 }
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1064 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1190 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1065 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1191 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
1066 1192
1067 snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1193 priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
1068 1194 &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1069 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
1070 PHY_INTERFACE_MODE_MII);
1071 if (IS_ERR(priv->phy)) { 1195 if (IS_ERR(priv->phy)) {
1072 if (netif_msg_drv(priv)) 1196 if (netif_msg_drv(priv))
1073 printk(KERN_ERR "%s: Could not attach to PHY\n", 1197 printk(KERN_ERR "%s: Could not attach to PHY\n",
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 32a9a922f153..08a7365a7d10 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
903 if (netif_msg_ifdown(db)) 903 if (netif_msg_ifdown(db))
904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
905 905
906 cancel_delayed_work(&db->phy_poll); 906 cancel_delayed_work_sync(&db->phy_poll);
907 907
908 netif_stop_queue(ndev); 908 netif_stop_queue(ndev);
909 netif_carrier_off(ndev); 909 netif_carrier_off(ndev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8cbb40f3a506..cab1835173cd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4201 struct e1000_adapter *adapter; 4201 struct e1000_adapter *adapter;
4202 struct e1000_hw *hw; 4202 struct e1000_hw *hw;
4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
4204 unsigned long mmio_start, mmio_len; 4204 resource_size_t mmio_start, mmio_len;
4205 unsigned long flash_start, flash_len; 4205 resource_size_t flash_start, flash_len;
4206 4206
4207 static int cards_found; 4207 static int cards_found;
4208 int i, err, pci_using_dac; 4208 int i, err, pci_using_dac;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index d1b6d4e7495d..287a61918739 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2213 goto out; 2213 goto out;
2214 } 2214 }
2215 2215
2216 memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
2217
2218 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2216 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2219 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2217 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2220 if (hret != H_SUCCESS) 2218 if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ out_err:
3178 3176
3179static void ehea_shutdown_single_port(struct ehea_port *port) 3177static void ehea_shutdown_single_port(struct ehea_port *port)
3180{ 3178{
3179 struct ehea_adapter *adapter = port->adapter;
3181 unregister_netdev(port->netdev); 3180 unregister_netdev(port->netdev);
3182 ehea_unregister_port(port); 3181 ehea_unregister_port(port);
3183 kfree(port->mc_list); 3182 kfree(port->mc_list);
3184 free_netdev(port->netdev); 3183 free_netdev(port->netdev);
3185 port->adapter->active_ports--; 3184 adapter->active_ports--;
3186} 3185}
3187 3186
3188static int ehea_setup_ports(struct ehea_adapter *adapter) 3187static int ehea_setup_ports(struct ehea_adapter *adapter)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 35f66d4a4595..9eca97fb0a54 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev)
5823 writel(txreg, base + NvRegTransmitPoll); 5823 writel(txreg, base + NvRegTransmitPoll);
5824 5824
5825 rc = nv_open(dev); 5825 rc = nv_open(dev);
5826 nv_set_multicast(dev);
5826out: 5827out:
5827 return rc; 5828 return rc;
5828} 5829}
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 67b4b0728fce..a5baaf59ff66 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1093,7 +1093,7 @@ err:
1093 if (registered) 1093 if (registered)
1094 unregister_netdev(ndev); 1094 unregister_netdev(ndev);
1095 1095
1096 if (fep != NULL) { 1096 if (fep && fep->ops) {
1097 (*fep->ops->free_bd)(ndev); 1097 (*fep->ops->free_bd)(ndev);
1098 (*fep->ops->cleanup_data)(ndev); 1098 (*fep->ops->cleanup_data)(ndev);
1099 } 1099 }
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f90515935833..45ae9d1191d7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns
1340 case PARAM_RTS: 1340 case PARAM_RTS:
1341 if ( !(scc->wreg[R5] & RTS) ) 1341 if ( !(scc->wreg[R5] & RTS) )
1342 { 1342 {
1343 if (arg != TX_OFF) 1343 if (arg != TX_OFF) {
1344 scc_key_trx(scc, TX_ON); 1344 scc_key_trx(scc, TX_ON);
1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); 1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1346 }
1346 } else { 1347 } else {
1347 if (arg == TX_OFF) 1348 if (arg == TX_OFF)
1348 { 1349 {
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c91b12ea26ad..36be6efc6398 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
631 return status; 631 return status;
632} 632}
633 633
634int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 634static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
635{ 635{
636 struct myri10ge_cmd cmd; 636 struct myri10ge_cmd cmd;
637 int status; 637 int status;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 8f328a03847b..a550c9bd126f 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
391 cardtype = CONTEC; 391 cardtype = CONTEC;
392 break; 392 break;
393 case MANFID_FUJITSU: 393 case MANFID_FUJITSU:
394 if (link->card_id == PRODID_FUJITSU_MBH10302) 394 if (link->conf.ConfigBase == 0x0fe0)
395 cardtype = MBH10302;
396 else if (link->card_id == PRODID_FUJITSU_MBH10302)
395 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 397 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
396 but these are MBH10304 based card. */ 398 but these are MBH10304 based card. */
397 cardtype = MBH10304; 399 cardtype = MBH10304;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index d041f831a18d..f6c4698ce738 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1461,22 +1461,25 @@ static void
1461set_multicast_list(struct net_device *dev) 1461set_multicast_list(struct net_device *dev)
1462{ 1462{
1463 unsigned int ioaddr = dev->base_addr; 1463 unsigned int ioaddr = dev->base_addr;
1464 unsigned value;
1464 1465
1465 SelectPage(0x42); 1466 SelectPage(0x42);
1467 value = GetByte(XIRCREG42_SWC1) & 0xC0;
1468
1466 if (dev->flags & IFF_PROMISC) { /* snoop */ 1469 if (dev->flags & IFF_PROMISC) { /* snoop */
1467 PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ 1470 PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
1468 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1471 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
1469 PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ 1472 PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
1470 } else if (dev->mc_count) { 1473 } else if (dev->mc_count) {
1471 /* the chip can filter 9 addresses perfectly */ 1474 /* the chip can filter 9 addresses perfectly */
1472 PutByte(XIRCREG42_SWC1, 0x01); 1475 PutByte(XIRCREG42_SWC1, value | 0x01);
1473 SelectPage(0x40); 1476 SelectPage(0x40);
1474 PutByte(XIRCREG40_CMD0, Offline); 1477 PutByte(XIRCREG40_CMD0, Offline);
1475 set_addresses(dev); 1478 set_addresses(dev);
1476 SelectPage(0x40); 1479 SelectPage(0x40);
1477 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1480 PutByte(XIRCREG40_CMD0, EnableRecv | Online);
1478 } else { /* standard usage */ 1481 } else { /* standard usage */
1479 PutByte(XIRCREG42_SWC1, 0x00); 1482 PutByte(XIRCREG42_SWC1, value | 0x00);
1480 } 1483 }
1481 SelectPage(0); 1484 SelectPage(0);
1482} 1485}
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)
1722 1725
1723 /* enable receiver and put the mac online */ 1726 /* enable receiver and put the mac online */
1724 if (full) { 1727 if (full) {
1728 set_multicast_list(dev);
1725 SelectPage(0x40); 1729 SelectPage(0x40);
1726 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1730 PutByte(XIRCREG40_CMD0, EnableRecv | Online);
1727 } 1731 }
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 81fd85214b98..ca8c0e037400 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
325static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 325static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
326 void *ptr); 326 void *ptr);
327static void pcnet32_purge_tx_ring(struct net_device *dev); 327static void pcnet32_purge_tx_ring(struct net_device *dev);
328static int pcnet32_alloc_ring(struct net_device *dev, char *name); 328static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
329static void pcnet32_free_ring(struct net_device *dev); 329static void pcnet32_free_ring(struct net_device *dev);
330static void pcnet32_check_media(struct net_device *dev, int verbose); 330static void pcnet32_check_media(struct net_device *dev, int verbose);
331 331
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1983} 1983}
1984 1984
1985/* if any allocation fails, caller must also call pcnet32_free_ring */ 1985/* if any allocation fails, caller must also call pcnet32_free_ring */
1986static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1986static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1987{ 1987{
1988 struct pcnet32_private *lp = netdev_priv(dev); 1988 struct pcnet32_private *lp = netdev_priv(dev);
1989 1989
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35b431991cc4..284217c12839 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
5menuconfig PHYLIB 5menuconfig PHYLIB
6 tristate "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390 7 depends on !S390
8 depends on NET_ETHERNET && (BROKEN || !S390) 8 depends on NET_ETHERNET
9 help 9 help
10 Ethernet controllers are usually attached to PHY 10 Ethernet controllers are usually attached to PHY
11 devices. This option provides infrastructure for 11 devices. This option provides infrastructure for
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac3c01d28fdf..16a0e7de5888 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
207 207
208 return 0; 208 return 0;
209} 209}
210EXPORT_SYMBOL(get_phy_id);
210 211
211/** 212/**
212 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 213 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1f4ca2b54a73..c926bf0b190e 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -361,7 +361,7 @@ static int ppp_open(struct inode *inode, struct file *file)
361 return 0; 361 return 0;
362} 362}
363 363
364static int ppp_release(struct inode *inode, struct file *file) 364static int ppp_release(struct inode *unused, struct file *file)
365{ 365{
366 struct ppp_file *pf = file->private_data; 366 struct ppp_file *pf = file->private_data;
367 struct ppp *ppp; 367 struct ppp *ppp;
@@ -545,8 +545,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
545} 545}
546#endif /* CONFIG_PPP_FILTER */ 546#endif /* CONFIG_PPP_FILTER */
547 547
548static int ppp_ioctl(struct inode *inode, struct file *file, 548static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
549 unsigned int cmd, unsigned long arg)
550{ 549{
551 struct ppp_file *pf = file->private_data; 550 struct ppp_file *pf = file->private_data;
552 struct ppp *ppp; 551 struct ppp *ppp;
@@ -574,24 +573,29 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
574 * this fd and reopening /dev/ppp. 573 * this fd and reopening /dev/ppp.
575 */ 574 */
576 err = -EINVAL; 575 err = -EINVAL;
576 lock_kernel();
577 if (pf->kind == INTERFACE) { 577 if (pf->kind == INTERFACE) {
578 ppp = PF_TO_PPP(pf); 578 ppp = PF_TO_PPP(pf);
579 if (file == ppp->owner) 579 if (file == ppp->owner)
580 ppp_shutdown_interface(ppp); 580 ppp_shutdown_interface(ppp);
581 } 581 }
582 if (atomic_read(&file->f_count) <= 2) { 582 if (atomic_read(&file->f_count) <= 2) {
583 ppp_release(inode, file); 583 ppp_release(NULL, file);
584 err = 0; 584 err = 0;
585 } else 585 } else
586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
587 atomic_read(&file->f_count)); 587 atomic_read(&file->f_count));
588 unlock_kernel();
588 return err; 589 return err;
589 } 590 }
590 591
591 if (pf->kind == CHANNEL) { 592 if (pf->kind == CHANNEL) {
592 struct channel *pch = PF_TO_CHANNEL(pf); 593 struct channel *pch;
593 struct ppp_channel *chan; 594 struct ppp_channel *chan;
594 595
596 lock_kernel();
597 pch = PF_TO_CHANNEL(pf);
598
595 switch (cmd) { 599 switch (cmd) {
596 case PPPIOCCONNECT: 600 case PPPIOCCONNECT:
597 if (get_user(unit, p)) 601 if (get_user(unit, p))
@@ -611,6 +615,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
611 err = chan->ops->ioctl(chan, cmd, arg); 615 err = chan->ops->ioctl(chan, cmd, arg);
612 up_read(&pch->chan_sem); 616 up_read(&pch->chan_sem);
613 } 617 }
618 unlock_kernel();
614 return err; 619 return err;
615 } 620 }
616 621
@@ -620,6 +625,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
620 return -EINVAL; 625 return -EINVAL;
621 } 626 }
622 627
628 lock_kernel();
623 ppp = PF_TO_PPP(pf); 629 ppp = PF_TO_PPP(pf);
624 switch (cmd) { 630 switch (cmd) {
625 case PPPIOCSMRU: 631 case PPPIOCSMRU:
@@ -767,7 +773,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
767 default: 773 default:
768 err = -ENOTTY; 774 err = -ENOTTY;
769 } 775 }
770 776 unlock_kernel();
771 return err; 777 return err;
772} 778}
773 779
@@ -779,6 +785,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
779 struct channel *chan; 785 struct channel *chan;
780 int __user *p = (int __user *)arg; 786 int __user *p = (int __user *)arg;
781 787
788 lock_kernel();
782 switch (cmd) { 789 switch (cmd) {
783 case PPPIOCNEWUNIT: 790 case PPPIOCNEWUNIT:
784 /* Create a new ppp unit */ 791 /* Create a new ppp unit */
@@ -827,6 +834,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
827 default: 834 default:
828 err = -ENOTTY; 835 err = -ENOTTY;
829 } 836 }
837 unlock_kernel();
830 return err; 838 return err;
831} 839}
832 840
@@ -835,7 +843,7 @@ static const struct file_operations ppp_device_fops = {
835 .read = ppp_read, 843 .read = ppp_read,
836 .write = ppp_write, 844 .write = ppp_write,
837 .poll = ppp_poll, 845 .poll = ppp_poll,
838 .ioctl = ppp_ioctl, 846 .unlocked_ioctl = ppp_ioctl,
839 .open = ppp_open, 847 .open = ppp_open,
840 .release = ppp_release 848 .release = ppp_release
841}; 849};
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 79359919335b..8db342f2fdc9 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -980,6 +980,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
980 __wsum csum = 0; 980 __wsum csum = 0;
981 struct udphdr *uh; 981 struct udphdr *uh;
982 unsigned int len; 982 unsigned int len;
983 int old_headroom;
984 int new_headroom;
983 985
984 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 986 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
985 goto abort; 987 goto abort;
@@ -1001,16 +1003,18 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1001 1003
1002 /* Check that there's enough headroom in the skb to insert IP, 1004 /* Check that there's enough headroom in the skb to insert IP,
1003 * UDP and L2TP and PPP headers. If not enough, expand it to 1005 * UDP and L2TP and PPP headers. If not enough, expand it to
1004 * make room. Note that a new skb (or a clone) is 1006 * make room. Adjust truesize.
1005 * allocated. If we return an error from this point on, make
1006 * sure we free the new skb but do not free the original skb
1007 * since that is done by the caller for the error case.
1008 */ 1007 */
1009 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1008 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1010 sizeof(struct udphdr) + hdr_len + sizeof(ppph); 1009 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
1010 old_headroom = skb_headroom(skb);
1011 if (skb_cow_head(skb, headroom)) 1011 if (skb_cow_head(skb, headroom))
1012 goto abort; 1012 goto abort;
1013 1013
1014 new_headroom = skb_headroom(skb);
1015 skb_orphan(skb);
1016 skb->truesize += new_headroom - old_headroom;
1017
1014 /* Setup PPP header */ 1018 /* Setup PPP header */
1015 __skb_push(skb, sizeof(ppph)); 1019 __skb_push(skb, sizeof(ppph));
1016 skb->data[0] = ppph[0]; 1020 skb->data[0] = ppph[0];
@@ -1065,7 +1069,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1065 /* Get routing info from the tunnel socket */ 1069 /* Get routing info from the tunnel socket */
1066 dst_release(skb->dst); 1070 dst_release(skb->dst);
1067 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1071 skb->dst = dst_clone(__sk_dst_get(sk_tun));
1068 skb_orphan(skb);
1069 skb->sk = sk_tun; 1072 skb->sk = sk_tun;
1070 1073
1071 /* Queue the packet to IP for output */ 1074 /* Queue the packet to IP for output */
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 2109508c047a..f8274f8941ea 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -250,7 +250,7 @@ struct XENA_dev_config {
250 u64 tx_mat0_n[0x8]; 250 u64 tx_mat0_n[0x8];
251#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) 251#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
252 252
253 u8 unused_1[0x8]; 253 u64 xmsi_mask_reg;
254 u64 stat_byte_cnt; 254 u64 stat_byte_cnt;
255#define STAT_BC(n) vBIT(n,4,12) 255#define STAT_BC(n) vBIT(n,4,12)
256 256
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 75bde6475832..326c94122b26 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.23" 89#define DRV_VERSION "2.0.26.24"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1113 struct pci_dev *tdev = NULL; 1113 struct pci_dev *tdev = NULL;
1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116 if (tdev->bus == s2io_pdev->bus->parent) 1116 if (tdev->bus == s2io_pdev->bus->parent) {
1117 pci_dev_put(tdev); 1117 pci_dev_put(tdev);
1118 return 1; 1118 return 1;
1119 }
1119 } 1120 }
1120 } 1121 }
1121 return 0; 1122 return 0;
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link)
1219 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1220 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1220 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1221 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1221 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1222 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1222 1223 if (i == 0)
1223 if (use_continuous_tx_intrs && (link == LINK_UP)) 1224 if (use_continuous_tx_intrs && (link == LINK_UP))
1224 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1225 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1225 writeq(val64, &bar0->tti_data1_mem); 1226 writeq(val64, &bar0->tti_data1_mem);
1226 1227
1227 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1228 if (nic->config.intr_type == MSI_X) {
1228 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1229 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1229 TTI_DATA2_MEM_TX_UFC_C(0x40) | 1230 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1230 TTI_DATA2_MEM_TX_UFC_D(0x80); 1231 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1232 TTI_DATA2_MEM_TX_UFC_D(0x300);
1233 } else {
1234 if ((nic->config.tx_steering_type ==
1235 TX_DEFAULT_STEERING) &&
1236 (config->tx_fifo_num > 1) &&
1237 (i >= nic->udp_fifo_idx) &&
1238 (i < (nic->udp_fifo_idx +
1239 nic->total_udp_fifos)))
1240 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1241 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1242 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1243 TTI_DATA2_MEM_TX_UFC_D(0x120);
1244 else
1245 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1246 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1247 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1248 TTI_DATA2_MEM_TX_UFC_D(0x80);
1249 }
1231 1250
1232 writeq(val64, &bar0->tti_data2_mem); 1251 writeq(val64, &bar0->tti_data2_mem);
1233 1252
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp)
2813 } 2832 }
2814} 2833}
2815 2834
2835static int s2io_chk_rx_buffers(struct ring_info *ring)
2836{
2837 if (fill_rx_buffers(ring) == -ENOMEM) {
2838 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2839 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2840 }
2841 return 0;
2842}
2843
2816/** 2844/**
2817 * s2io_poll - Rx interrupt handler for NAPI support 2845 * s2io_poll - Rx interrupt handler for NAPI support
2818 * @napi : pointer to the napi structure. 2846 * @napi : pointer to the napi structure.
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp)
2826 * 0 on success and 1 if there are No Rx packets to be processed. 2854 * 0 on success and 1 if there are No Rx packets to be processed.
2827 */ 2855 */
2828 2856
2829static int s2io_poll(struct napi_struct *napi, int budget) 2857static int s2io_poll_msix(struct napi_struct *napi, int budget)
2830{ 2858{
2831 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2859 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2832 struct net_device *dev = nic->dev; 2860 struct net_device *dev = ring->dev;
2833 int pkt_cnt = 0, org_pkts_to_process;
2834 struct mac_info *mac_control;
2835 struct config_param *config; 2861 struct config_param *config;
2862 struct mac_info *mac_control;
2863 int pkts_processed = 0;
2864 u8 *addr = NULL, val8 = 0;
2865 struct s2io_nic *nic = dev->priv;
2836 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2837 int i; 2867 int budget_org = budget;
2838 2868
2839 mac_control = &nic->mac_control;
2840 config = &nic->config; 2869 config = &nic->config;
2870 mac_control = &nic->mac_control;
2841 2871
2842 nic->pkts_to_process = budget; 2872 if (unlikely(!is_s2io_card_up(nic)))
2843 org_pkts_to_process = nic->pkts_to_process; 2873 return 0;
2844 2874
2845 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 2875 pkts_processed = rx_intr_handler(ring, budget);
2846 readl(&bar0->rx_traffic_int); 2876 s2io_chk_rx_buffers(ring);
2847 2877
2848 for (i = 0; i < config->rx_ring_num; i++) { 2878 if (pkts_processed < budget_org) {
2849 rx_intr_handler(&mac_control->rings[i]); 2879 netif_rx_complete(dev, napi);
2850 pkt_cnt = org_pkts_to_process - nic->pkts_to_process; 2880 /*Re Enable MSI-Rx Vector*/
2851 if (!nic->pkts_to_process) { 2881 addr = (u8 *)&bar0->xmsi_mask_reg;
2852 /* Quota for the current iteration has been met */ 2882 addr += 7 - ring->ring_no;
2853 goto no_rx; 2883 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2854 } 2884 writeb(val8, addr);
2885 val8 = readb(addr);
2855 } 2886 }
2887 return pkts_processed;
2888}
2889static int s2io_poll_inta(struct napi_struct *napi, int budget)
2890{
2891 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2892 struct ring_info *ring;
2893 struct net_device *dev = nic->dev;
2894 struct config_param *config;
2895 struct mac_info *mac_control;
2896 int pkts_processed = 0;
2897 int ring_pkts_processed, i;
2898 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2899 int budget_org = budget;
2856 2900
2857 netif_rx_complete(dev, napi); 2901 config = &nic->config;
2902 mac_control = &nic->mac_control;
2858 2903
2859 for (i = 0; i < config->rx_ring_num; i++) { 2904 if (unlikely(!is_s2io_card_up(nic)))
2860 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2905 return 0;
2861 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2862 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2863 break;
2864 }
2865 }
2866 /* Re enable the Rx interrupts. */
2867 writeq(0x0, &bar0->rx_traffic_mask);
2868 readl(&bar0->rx_traffic_mask);
2869 return pkt_cnt;
2870 2906
2871no_rx:
2872 for (i = 0; i < config->rx_ring_num; i++) { 2907 for (i = 0; i < config->rx_ring_num; i++) {
2873 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2908 ring = &mac_control->rings[i];
2874 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2909 ring_pkts_processed = rx_intr_handler(ring, budget);
2875 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2910 s2io_chk_rx_buffers(ring);
2911 pkts_processed += ring_pkts_processed;
2912 budget -= ring_pkts_processed;
2913 if (budget <= 0)
2876 break; 2914 break;
2877 }
2878 } 2915 }
2879 return pkt_cnt; 2916 if (pkts_processed < budget_org) {
2917 netif_rx_complete(dev, napi);
2918 /* Re enable the Rx interrupts for the ring */
2919 writeq(0, &bar0->rx_traffic_mask);
2920 readl(&bar0->rx_traffic_mask);
2921 }
2922 return pkts_processed;
2880} 2923}
2881 2924
2882#ifdef CONFIG_NET_POLL_CONTROLLER 2925#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev)
2918 2961
2919 /* check for received packet and indicate up to network */ 2962 /* check for received packet and indicate up to network */
2920 for (i = 0; i < config->rx_ring_num; i++) 2963 for (i = 0; i < config->rx_ring_num; i++)
2921 rx_intr_handler(&mac_control->rings[i]); 2964 rx_intr_handler(&mac_control->rings[i], 0);
2922 2965
2923 for (i = 0; i < config->rx_ring_num; i++) { 2966 for (i = 0; i < config->rx_ring_num; i++) {
2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2967 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev)
2934 2977
2935/** 2978/**
2936 * rx_intr_handler - Rx interrupt handler 2979 * rx_intr_handler - Rx interrupt handler
2937 * @nic: device private variable. 2980 * @ring_info: per ring structure.
2981 * @budget: budget for napi processing.
2938 * Description: 2982 * Description:
2939 * If the interrupt is because of a received frame or if the 2983 * If the interrupt is because of a received frame or if the
2940 * receive ring contains fresh as yet un-processed frames,this function is 2984 * receive ring contains fresh as yet un-processed frames,this function is
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev)
2942 * stopped and sends the skb to the OSM's Rx handler and then increments 2986 * stopped and sends the skb to the OSM's Rx handler and then increments
2943 * the offset. 2987 * the offset.
2944 * Return Value: 2988 * Return Value:
2945 * NONE. 2989 * No. of napi packets processed.
2946 */ 2990 */
2947static void rx_intr_handler(struct ring_info *ring_data) 2991static int rx_intr_handler(struct ring_info *ring_data, int budget)
2948{ 2992{
2949 int get_block, put_block; 2993 int get_block, put_block;
2950 struct rx_curr_get_info get_info, put_info; 2994 struct rx_curr_get_info get_info, put_info;
2951 struct RxD_t *rxdp; 2995 struct RxD_t *rxdp;
2952 struct sk_buff *skb; 2996 struct sk_buff *skb;
2953 int pkt_cnt = 0; 2997 int pkt_cnt = 0, napi_pkts = 0;
2954 int i; 2998 int i;
2955 struct RxD1* rxdp1; 2999 struct RxD1* rxdp1;
2956 struct RxD3* rxdp3; 3000 struct RxD3* rxdp3;
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
2977 DBG_PRINT(ERR_DBG, "%s: The skb is ", 3021 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2978 ring_data->dev->name); 3022 ring_data->dev->name);
2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 3023 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2980 return; 3024 return 0;
2981 } 3025 }
2982 if (ring_data->rxd_mode == RXD_MODE_1) { 3026 if (ring_data->rxd_mode == RXD_MODE_1) {
2983 rxdp1 = (struct RxD1*)rxdp; 3027 rxdp1 = (struct RxD1*)rxdp;
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3058 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3015 } 3059 }
3016 3060
3017 if(ring_data->nic->config.napi){ 3061 if (ring_data->nic->config.napi) {
3018 ring_data->nic->pkts_to_process -= 1; 3062 budget--;
3019 if (!ring_data->nic->pkts_to_process) 3063 napi_pkts++;
3064 if (!budget)
3020 break; 3065 break;
3021 } 3066 }
3022 pkt_cnt++; 3067 pkt_cnt++;
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
3034 } 3079 }
3035 } 3080 }
3036 } 3081 }
3082 return(napi_pkts);
3037} 3083}
3038 3084
3039/** 3085/**
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic)
3730{ 3776{
3731 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3777 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3732 u64 val64; 3778 u64 val64;
3733 int i; 3779 int i, msix_index;
3780
3781
3782 if (nic->device_type == XFRAME_I_DEVICE)
3783 return;
3734 3784
3735 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3785 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3786 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3736 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3787 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3737 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3788 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3738 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); 3789 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3739 writeq(val64, &bar0->xmsi_access); 3790 writeq(val64, &bar0->xmsi_access);
3740 if (wait_for_msix_trans(nic, i)) { 3791 if (wait_for_msix_trans(nic, msix_index)) {
3741 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3792 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3742 continue; 3793 continue;
3743 } 3794 }
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic)
3748{ 3799{
3749 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3800 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3750 u64 val64, addr, data; 3801 u64 val64, addr, data;
3751 int i; 3802 int i, msix_index;
3803
3804 if (nic->device_type == XFRAME_I_DEVICE)
3805 return;
3752 3806
3753 /* Store and display */ 3807 /* Store and display */
3754 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3808 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3755 val64 = (s2BIT(15) | vBIT(i, 26, 6)); 3809 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3810 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3756 writeq(val64, &bar0->xmsi_access); 3811 writeq(val64, &bar0->xmsi_access);
3757 if (wait_for_msix_trans(nic, i)) { 3812 if (wait_for_msix_trans(nic, msix_index)) {
3758 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3813 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3759 continue; 3814 continue;
3760 } 3815 }
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic)
3770static int s2io_enable_msi_x(struct s2io_nic *nic) 3825static int s2io_enable_msi_x(struct s2io_nic *nic)
3771{ 3826{
3772 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3827 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3773 u64 tx_mat, rx_mat; 3828 u64 rx_mat;
3774 u16 msi_control; /* Temp variable */ 3829 u16 msi_control; /* Temp variable */
3775 int ret, i, j, msix_indx = 1; 3830 int ret, i, j, msix_indx = 1;
3776 3831
3777 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), 3832 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3778 GFP_KERNEL); 3833 GFP_KERNEL);
3779 if (!nic->entries) { 3834 if (!nic->entries) {
3780 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3835 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3783 return -ENOMEM; 3838 return -ENOMEM;
3784 } 3839 }
3785 nic->mac_control.stats_info->sw_stat.mem_allocated 3840 nic->mac_control.stats_info->sw_stat.mem_allocated
3786 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3841 += (nic->num_entries * sizeof(struct msix_entry));
3842
3843 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3787 3844
3788 nic->s2io_entries = 3845 nic->s2io_entries =
3789 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), 3846 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3790 GFP_KERNEL); 3847 GFP_KERNEL);
3791 if (!nic->s2io_entries) { 3848 if (!nic->s2io_entries) {
3792 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3849 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
@@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3851 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3795 kfree(nic->entries); 3852 kfree(nic->entries);
3796 nic->mac_control.stats_info->sw_stat.mem_freed 3853 nic->mac_control.stats_info->sw_stat.mem_freed
3797 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3854 += (nic->num_entries * sizeof(struct msix_entry));
3798 return -ENOMEM; 3855 return -ENOMEM;
3799 } 3856 }
3800 nic->mac_control.stats_info->sw_stat.mem_allocated 3857 nic->mac_control.stats_info->sw_stat.mem_allocated
3801 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3858 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3802 3859 memset(nic->s2io_entries, 0,
3803 for (i=0; i< MAX_REQUESTED_MSI_X; i++) { 3860 nic->num_entries * sizeof(struct s2io_msix_entry));
3804 nic->entries[i].entry = i; 3861
3805 nic->s2io_entries[i].entry = i; 3862 nic->entries[0].entry = 0;
3863 nic->s2io_entries[0].entry = 0;
3864 nic->s2io_entries[0].in_use = MSIX_FLG;
3865 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3866 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3867
3868 for (i = 1; i < nic->num_entries; i++) {
3869 nic->entries[i].entry = ((i - 1) * 8) + 1;
3870 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3806 nic->s2io_entries[i].arg = NULL; 3871 nic->s2io_entries[i].arg = NULL;
3807 nic->s2io_entries[i].in_use = 0; 3872 nic->s2io_entries[i].in_use = 0;
3808 } 3873 }
3809 3874
3810 tx_mat = readq(&bar0->tx_mat0_n[0]);
3811 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3812 tx_mat |= TX_MAT_SET(i, msix_indx);
3813 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3814 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3815 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3816 }
3817 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3818
3819 rx_mat = readq(&bar0->rx_mat); 3875 rx_mat = readq(&bar0->rx_mat);
3820 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { 3876 for (j = 0; j < nic->config.rx_ring_num; j++) {
3821 rx_mat |= RX_MAT_SET(j, msix_indx); 3877 rx_mat |= RX_MAT_SET(j, msix_indx);
3822 nic->s2io_entries[msix_indx].arg 3878 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3823 = &nic->mac_control.rings[j]; 3879 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3824 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; 3880 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3825 nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3881 msix_indx += 8;
3826 } 3882 }
3827 writeq(rx_mat, &bar0->rx_mat); 3883 writeq(rx_mat, &bar0->rx_mat);
3884 readq(&bar0->rx_mat);
3828 3885
3829 nic->avail_msix_vectors = 0; 3886 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3830 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3831 /* We fail init if error or we get less vectors than min required */ 3887 /* We fail init if error or we get less vectors than min required */
3832 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3833 nic->avail_msix_vectors = ret;
3834 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3835 }
3836 if (ret) { 3888 if (ret) {
3837 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3889 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3838 kfree(nic->entries); 3890 kfree(nic->entries);
3839 nic->mac_control.stats_info->sw_stat.mem_freed 3891 nic->mac_control.stats_info->sw_stat.mem_freed
3840 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3892 += (nic->num_entries * sizeof(struct msix_entry));
3841 kfree(nic->s2io_entries); 3893 kfree(nic->s2io_entries);
3842 nic->mac_control.stats_info->sw_stat.mem_freed 3894 nic->mac_control.stats_info->sw_stat.mem_freed
3843 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3895 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3844 nic->entries = NULL; 3896 nic->entries = NULL;
3845 nic->s2io_entries = NULL; 3897 nic->s2io_entries = NULL;
3846 nic->avail_msix_vectors = 0;
3847 return -ENOMEM; 3898 return -ENOMEM;
3848 } 3899 }
3849 if (!nic->avail_msix_vectors)
3850 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3851 3900
3852 /* 3901 /*
3853 * To enable MSI-X, MSI also needs to be enabled, due to a bug 3902 * To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
3919 int i; 3968 int i;
3920 u16 msi_control; 3969 u16 msi_control;
3921 3970
3922 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3971 for (i = 0; i < sp->num_entries; i++) {
3923 if (sp->s2io_entries[i].in_use == 3972 if (sp->s2io_entries[i].in_use ==
3924 MSIX_REGISTERED_SUCCESS) { 3973 MSIX_REGISTERED_SUCCESS) {
3925 int vector = sp->entries[i].vector; 3974 int vector = sp->entries[i].vector;
@@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev)
3975 netif_carrier_off(dev); 4024 netif_carrier_off(dev);
3976 sp->last_link_state = 0; 4025 sp->last_link_state = 0;
3977 4026
3978 if (sp->config.intr_type == MSI_X) {
3979 int ret = s2io_enable_msi_x(sp);
3980
3981 if (!ret) {
3982 ret = s2io_test_msi(sp);
3983 /* rollback MSI-X, will re-enable during add_isr() */
3984 remove_msix_isr(sp);
3985 }
3986 if (ret) {
3987
3988 DBG_PRINT(ERR_DBG,
3989 "%s: MSI-X requested but failed to enable\n",
3990 dev->name);
3991 sp->config.intr_type = INTA;
3992 }
3993 }
3994
3995 /* NAPI doesn't work well with MSI(X) */
3996 if (sp->config.intr_type != INTA) {
3997 if(sp->config.napi)
3998 sp->config.napi = 0;
3999 }
4000
4001 /* Initialize H/W and enable interrupts */ 4027 /* Initialize H/W and enable interrupts */
4002 err = s2io_card_up(sp); 4028 err = s2io_card_up(sp);
4003 if (err) { 4029 if (err) {
@@ -4020,12 +4046,12 @@ hw_init_failed:
4020 if (sp->entries) { 4046 if (sp->entries) {
4021 kfree(sp->entries); 4047 kfree(sp->entries);
4022 sp->mac_control.stats_info->sw_stat.mem_freed 4048 sp->mac_control.stats_info->sw_stat.mem_freed
4023 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 4049 += (sp->num_entries * sizeof(struct msix_entry));
4024 } 4050 }
4025 if (sp->s2io_entries) { 4051 if (sp->s2io_entries) {
4026 kfree(sp->s2io_entries); 4052 kfree(sp->s2io_entries);
4027 sp->mac_control.stats_info->sw_stat.mem_freed 4053 sp->mac_control.stats_info->sw_stat.mem_freed
4028 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 4054 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4029 } 4055 }
4030 } 4056 }
4031 return err; 4057 return err;
@@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data)
4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4353 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4328} 4354}
4329 4355
4330static int s2io_chk_rx_buffers(struct ring_info *ring)
4331{
4332 if (fill_rx_buffers(ring) == -ENOMEM) {
4333 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4334 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4335 }
4336 return 0;
4337}
4338
4339static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4356static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4340{ 4357{
4341 struct ring_info *ring = (struct ring_info *)dev_id; 4358 struct ring_info *ring = (struct ring_info *)dev_id;
4342 struct s2io_nic *sp = ring->nic; 4359 struct s2io_nic *sp = ring->nic;
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361 struct net_device *dev = sp->dev;
4343 4362
4344 if (!is_s2io_card_up(sp)) 4363 if (unlikely(!is_s2io_card_up(sp)))
4345 return IRQ_HANDLED; 4364 return IRQ_HANDLED;
4346 4365
4347 rx_intr_handler(ring); 4366 if (sp->config.napi) {
4348 s2io_chk_rx_buffers(ring); 4367 u8 *addr = NULL, val8 = 0;
4368
4369 addr = (u8 *)&bar0->xmsi_mask_reg;
4370 addr += (7 - ring->ring_no);
4371 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4372 writeb(val8, addr);
4373 val8 = readb(addr);
4374 netif_rx_schedule(dev, &ring->napi);
4375 } else {
4376 rx_intr_handler(ring, 0);
4377 s2io_chk_rx_buffers(ring);
4378 }
4349 4379
4350 return IRQ_HANDLED; 4380 return IRQ_HANDLED;
4351} 4381}
4352 4382
4353static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4383static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4354{ 4384{
4355 struct fifo_info *fifo = (struct fifo_info *)dev_id; 4385 int i;
4356 struct s2io_nic *sp = fifo->nic; 4386 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4387 struct s2io_nic *sp = fifos->nic;
4388 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4389 struct config_param *config = &sp->config;
4390 u64 reason;
4357 4391
4358 if (!is_s2io_card_up(sp)) 4392 if (unlikely(!is_s2io_card_up(sp)))
4393 return IRQ_NONE;
4394
4395 reason = readq(&bar0->general_int_status);
4396 if (unlikely(reason == S2IO_MINUS_ONE))
4397 /* Nothing much can be done. Get out */
4359 return IRQ_HANDLED; 4398 return IRQ_HANDLED;
4360 4399
4361 tx_intr_handler(fifo); 4400 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4401
4402 if (reason & GEN_INTR_TXTRAFFIC)
4403 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4404
4405 for (i = 0; i < config->tx_fifo_num; i++)
4406 tx_intr_handler(&fifos[i]);
4407
4408 writeq(sp->general_int_mask, &bar0->general_int_mask);
4409 readl(&bar0->general_int_status);
4410
4362 return IRQ_HANDLED; 4411 return IRQ_HANDLED;
4363} 4412}
4413
4364static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4414static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4365{ 4415{
4366 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4416 struct XENA_dev_config __iomem *bar0 = sp->bar0;
@@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4762 4812
4763 if (config->napi) { 4813 if (config->napi) {
4764 if (reason & GEN_INTR_RXTRAFFIC) { 4814 if (reason & GEN_INTR_RXTRAFFIC) {
4765 if (likely(netif_rx_schedule_prep(dev, 4815 netif_rx_schedule(dev, &sp->napi);
4766 &sp->napi))) { 4816 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4767 __netif_rx_schedule(dev, &sp->napi); 4817 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4768 writeq(S2IO_MINUS_ONE, 4818 readl(&bar0->rx_traffic_int);
4769 &bar0->rx_traffic_mask);
4770 } else
4771 writeq(S2IO_MINUS_ONE,
4772 &bar0->rx_traffic_int);
4773 } 4819 }
4774 } else { 4820 } else {
4775 /* 4821 /*
@@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4781 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4827 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4782 4828
4783 for (i = 0; i < config->rx_ring_num; i++) 4829 for (i = 0; i < config->rx_ring_num; i++)
4784 rx_intr_handler(&mac_control->rings[i]); 4830 rx_intr_handler(&mac_control->rings[i], 0);
4785 } 4831 }
4786 4832
4787 /* 4833 /*
@@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp)
6984 7030
6985 /* After proper initialization of H/W, register ISR */ 7031 /* After proper initialization of H/W, register ISR */
6986 if (sp->config.intr_type == MSI_X) { 7032 if (sp->config.intr_type == MSI_X) {
6987 int i, msix_tx_cnt=0,msix_rx_cnt=0; 7033 int i, msix_rx_cnt = 0;
6988 7034
6989 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 7035 for (i = 0; i < sp->num_entries; i++) {
6990 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 7036 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6991 sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7037 if (sp->s2io_entries[i].type ==
7038 MSIX_RING_TYPE) {
7039 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7040 dev->name, i);
7041 err = request_irq(sp->entries[i].vector,
7042 s2io_msix_ring_handle, 0,
7043 sp->desc[i],
7044 sp->s2io_entries[i].arg);
7045 } else if (sp->s2io_entries[i].type ==
7046 MSIX_ALARM_TYPE) {
7047 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6992 dev->name, i); 7048 dev->name, i);
6993 err = request_irq(sp->entries[i].vector, 7049 err = request_irq(sp->entries[i].vector,
6994 s2io_msix_fifo_handle, 0, sp->desc[i], 7050 s2io_msix_fifo_handle, 0,
6995 sp->s2io_entries[i].arg); 7051 sp->desc[i],
6996 /* If either data or addr is zero print it */ 7052 sp->s2io_entries[i].arg);
6997 if(!(sp->msix_info[i].addr && 7053
6998 sp->msix_info[i].data)) {
6999 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7000 "Data:0x%llx\n",sp->desc[i],
7001 (unsigned long long)
7002 sp->msix_info[i].addr,
7003 (unsigned long long)
7004 sp->msix_info[i].data);
7005 } else {
7006 msix_tx_cnt++;
7007 } 7054 }
7008 } else { 7055 /* if either data or addr is zero print it. */
7009 sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 7056 if (!(sp->msix_info[i].addr &&
7010 dev->name, i);
7011 err = request_irq(sp->entries[i].vector,
7012 s2io_msix_ring_handle, 0, sp->desc[i],
7013 sp->s2io_entries[i].arg);
7014 /* If either data or addr is zero print it */
7015 if(!(sp->msix_info[i].addr &&
7016 sp->msix_info[i].data)) { 7057 sp->msix_info[i].data)) {
7017 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7058 DBG_PRINT(ERR_DBG,
7018 "Data:0x%llx\n",sp->desc[i], 7059 "%s @Addr:0x%llx Data:0x%llx\n",
7060 sp->desc[i],
7019 (unsigned long long) 7061 (unsigned long long)
7020 sp->msix_info[i].addr, 7062 sp->msix_info[i].addr,
7021 (unsigned long long) 7063 (unsigned long long)
7022 sp->msix_info[i].data); 7064 ntohl(sp->msix_info[i].data));
7023 } else { 7065 } else
7024 msix_rx_cnt++; 7066 msix_rx_cnt++;
7067 if (err) {
7068 remove_msix_isr(sp);
7069
7070 DBG_PRINT(ERR_DBG,
7071 "%s:MSI-X-%d registration "
7072 "failed\n", dev->name, i);
7073
7074 DBG_PRINT(ERR_DBG,
7075 "%s: Defaulting to INTA\n",
7076 dev->name);
7077 sp->config.intr_type = INTA;
7078 break;
7025 } 7079 }
7080 sp->s2io_entries[i].in_use =
7081 MSIX_REGISTERED_SUCCESS;
7026 } 7082 }
7027 if (err) {
7028 remove_msix_isr(sp);
7029 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
7030 "failed\n", dev->name, i);
7031 DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
7032 dev->name);
7033 sp->config.intr_type = INTA;
7034 break;
7035 }
7036 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
7037 } 7083 }
7038 if (!err) { 7084 if (!err) {
7039 printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
7040 msix_tx_cnt);
7041 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 7085 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7042 msix_rx_cnt); 7086 --msix_rx_cnt);
7087 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7088 " through alarm vector\n");
7043 } 7089 }
7044 } 7090 }
7045 if (sp->config.intr_type == INTA) { 7091 if (sp->config.intr_type == INTA) {
@@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7080 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7126 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7081 7127
7082 /* Disable napi */ 7128 /* Disable napi */
7083 if (config->napi) 7129 if (sp->config.napi) {
7084 napi_disable(&sp->napi); 7130 int off = 0;
7131 if (config->intr_type == MSI_X) {
7132 for (; off < sp->config.rx_ring_num; off++)
7133 napi_disable(&sp->mac_control.rings[off].napi);
7134 }
7135 else
7136 napi_disable(&sp->napi);
7137 }
7085 7138
7086 /* disable Tx and Rx traffic on the NIC */ 7139 /* disable Tx and Rx traffic on the NIC */
7087 if (do_io) 7140 if (do_io)
@@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp)
7173 } 7226 }
7174 7227
7175 /* Initialise napi */ 7228 /* Initialise napi */
7176 if (config->napi) 7229 if (config->napi) {
7177 napi_enable(&sp->napi); 7230 int i;
7231 if (config->intr_type == MSI_X) {
7232 for (i = 0; i < sp->config.rx_ring_num; i++)
7233 napi_enable(&sp->mac_control.rings[i].napi);
7234 } else {
7235 napi_enable(&sp->napi);
7236 }
7237 }
7178 7238
7179 /* Maintain the state prior to the open */ 7239 /* Maintain the state prior to the open */
7180 if (sp->promisc_flg) 7240 if (sp->promisc_flg)
@@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp)
7217 /* Enable select interrupts */ 7277 /* Enable select interrupts */
7218 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7278 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7219 if (sp->config.intr_type != INTA) 7279 if (sp->config.intr_type != INTA)
7220 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); 7280 en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
7221 else { 7281 else {
7222 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7282 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7223 interruptible |= TX_PIC_INTR; 7283 interruptible |= TX_PIC_INTR;
@@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7615 rx_ring_num = MAX_RX_RINGS; 7675 rx_ring_num = MAX_RX_RINGS;
7616 } 7676 }
7617 7677
7618 if (*dev_intr_type != INTA)
7619 napi = 0;
7620
7621 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7678 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7622 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " 7679 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7623 "Defaulting to INTA\n"); 7680 "Defaulting to INTA\n");
@@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7918 * will use eth_mac_addr() for dev->set_mac_address 7975 * will use eth_mac_addr() for dev->set_mac_address
7919 * mac address will be set every time dev->open() is called 7976 * mac address will be set every time dev->open() is called
7920 */ 7977 */
7921 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7922
7923#ifdef CONFIG_NET_POLL_CONTROLLER 7978#ifdef CONFIG_NET_POLL_CONTROLLER
7924 dev->poll_controller = s2io_netpoll; 7979 dev->poll_controller = s2io_netpoll;
7925#endif 7980#endif
@@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7963 } 8018 }
7964 } 8019 }
7965 8020
8021 if (sp->config.intr_type == MSI_X) {
8022 sp->num_entries = config->rx_ring_num + 1;
8023 ret = s2io_enable_msi_x(sp);
8024
8025 if (!ret) {
8026 ret = s2io_test_msi(sp);
8027 /* rollback MSI-X, will re-enable during add_isr() */
8028 remove_msix_isr(sp);
8029 }
8030 if (ret) {
8031
8032 DBG_PRINT(ERR_DBG,
8033 "%s: MSI-X requested but failed to enable\n",
8034 dev->name);
8035 sp->config.intr_type = INTA;
8036 }
8037 }
8038
8039 if (config->intr_type == MSI_X) {
8040 for (i = 0; i < config->rx_ring_num ; i++)
8041 netif_napi_add(dev, &mac_control->rings[i].napi,
8042 s2io_poll_msix, 64);
8043 } else {
8044 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8045 }
8046
7966 /* Not needed for Herc */ 8047 /* Not needed for Herc */
7967 if (sp->device_type & XFRAME_I_DEVICE) { 8048 if (sp->device_type & XFRAME_I_DEVICE) {
7968 /* 8049 /*
@@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8013 /* store mac addresses from CAM to s2io_nic structure */ 8094 /* store mac addresses from CAM to s2io_nic structure */
8014 do_s2io_store_unicast_mc(sp); 8095 do_s2io_store_unicast_mc(sp);
8015 8096
8097 /* Configure MSIX vector for number of rings configured plus one */
8098 if ((sp->device_type == XFRAME_II_DEVICE) &&
8099 (config->intr_type == MSI_X))
8100 sp->num_entries = config->rx_ring_num + 1;
8101
8016 /* Store the values of the MSIX table in the s2io_nic structure */ 8102 /* Store the values of the MSIX table in the s2io_nic structure */
8017 store_xmsi_data(sp); 8103 store_xmsi_data(sp);
8018 /* reset Nic and bring it to known state */ 8104 /* reset Nic and bring it to known state */
@@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8078 break; 8164 break;
8079 } 8165 }
8080 8166
8081 if (napi) 8167 switch (sp->config.napi) {
8168 case 0:
8169 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8170 break;
8171 case 1:
8082 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8172 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8173 break;
8174 }
8083 8175
8084 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8176 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8085 sp->config.tx_fifo_num); 8177 sp->config.tx_fifo_num);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 58229dcbf5ec..d0a84ba887a5 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -706,7 +706,7 @@ struct ring_info {
706 /* per-ring buffer counter */ 706 /* per-ring buffer counter */
707 u32 rx_bufs_left; 707 u32 rx_bufs_left;
708 708
709 #define MAX_LRO_SESSIONS 32 709#define MAX_LRO_SESSIONS 32
710 struct lro lro0_n[MAX_LRO_SESSIONS]; 710 struct lro lro0_n[MAX_LRO_SESSIONS];
711 u8 lro; 711 u8 lro;
712 712
@@ -725,6 +725,11 @@ struct ring_info {
725 /* copy of sp->pdev pointer */ 725 /* copy of sp->pdev pointer */
726 struct pci_dev *pdev; 726 struct pci_dev *pdev;
727 727
728 /* Per ring napi struct */
729 struct napi_struct napi;
730
731 unsigned long interrupt_count;
732
728 /* 733 /*
729 * Place holders for the virtual and physical addresses of 734 * Place holders for the virtual and physical addresses of
730 * all the Rx Blocks 735 * all the Rx Blocks
@@ -841,7 +846,7 @@ struct usr_addr {
841 * Structure to keep track of the MSI-X vectors and the corresponding 846 * Structure to keep track of the MSI-X vectors and the corresponding
842 * argument registered against each vector 847 * argument registered against each vector
843 */ 848 */
844#define MAX_REQUESTED_MSI_X 17 849#define MAX_REQUESTED_MSI_X 9
845struct s2io_msix_entry 850struct s2io_msix_entry
846{ 851{
847 u16 vector; 852 u16 vector;
@@ -849,8 +854,8 @@ struct s2io_msix_entry
849 void *arg; 854 void *arg;
850 855
851 u8 type; 856 u8 type;
852#define MSIX_FIFO_TYPE 1 857#define MSIX_ALARM_TYPE 1
853#define MSIX_RING_TYPE 2 858#define MSIX_RING_TYPE 2
854 859
855 u8 in_use; 860 u8 in_use;
856#define MSIX_REGISTERED_SUCCESS 0xAA 861#define MSIX_REGISTERED_SUCCESS 0xAA
@@ -877,7 +882,6 @@ struct s2io_nic {
877 */ 882 */
878 int pkts_to_process; 883 int pkts_to_process;
879 struct net_device *dev; 884 struct net_device *dev;
880 struct napi_struct napi;
881 struct mac_info mac_control; 885 struct mac_info mac_control;
882 struct config_param config; 886 struct config_param config;
883 struct pci_dev *pdev; 887 struct pci_dev *pdev;
@@ -948,6 +952,7 @@ struct s2io_nic {
948 */ 952 */
949 u8 other_fifo_idx; 953 u8 other_fifo_idx;
950 954
955 struct napi_struct napi;
951 /* after blink, the adapter must be restored with original 956 /* after blink, the adapter must be restored with original
952 * values. 957 * values.
953 */ 958 */
@@ -962,6 +967,7 @@ struct s2io_nic {
962 unsigned long long start_time; 967 unsigned long long start_time;
963 struct vlan_group *vlgrp; 968 struct vlan_group *vlgrp;
964#define MSIX_FLG 0xA5 969#define MSIX_FLG 0xA5
970 int num_entries;
965 struct msix_entry *entries; 971 struct msix_entry *entries;
966 int msi_detected; 972 int msi_detected;
967 wait_queue_head_t msi_wait; 973 wait_queue_head_t msi_wait;
@@ -982,6 +988,7 @@ struct s2io_nic {
982 u16 lro_max_aggr_per_sess; 988 u16 lro_max_aggr_per_sess;
983 volatile unsigned long state; 989 volatile unsigned long state;
984 u64 general_int_mask; 990 u64 general_int_mask;
991
985#define VPD_STRING_LEN 80 992#define VPD_STRING_LEN 80
986 u8 product_name[VPD_STRING_LEN]; 993 u8 product_name[VPD_STRING_LEN];
987 u8 serial_num[VPD_STRING_LEN]; 994 u8 serial_num[VPD_STRING_LEN];
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
1103static int init_shared_mem(struct s2io_nic *sp); 1110static int init_shared_mem(struct s2io_nic *sp);
1104static void free_shared_mem(struct s2io_nic *sp); 1111static void free_shared_mem(struct s2io_nic *sp);
1105static int init_nic(struct s2io_nic *nic); 1112static int init_nic(struct s2io_nic *nic);
1106static void rx_intr_handler(struct ring_info *ring_data); 1113static int rx_intr_handler(struct ring_info *ring_data, int budget);
1107static void tx_intr_handler(struct fifo_info *fifo_data); 1114static void tx_intr_handler(struct fifo_info *fifo_data);
1108static void s2io_handle_errors(void * dev_id); 1115static void s2io_handle_errors(void * dev_id);
1109 1116
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev);
1114static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1121static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
1115static void s2io_link(struct s2io_nic * sp, int link); 1122static void s2io_link(struct s2io_nic * sp, int link);
1116static void s2io_reset(struct s2io_nic * sp); 1123static void s2io_reset(struct s2io_nic * sp);
1117static int s2io_poll(struct napi_struct *napi, int budget); 1124static int s2io_poll_msix(struct napi_struct *napi, int budget);
1125static int s2io_poll_inta(struct napi_struct *napi, int budget);
1118static void s2io_init_pci(struct s2io_nic * sp); 1126static void s2io_init_pci(struct s2io_nic * sp);
1119static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); 1127static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
1120static void s2io_alarm_handle(unsigned long data); 1128static void s2io_alarm_handle(unsigned long data);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index f64a860029b7..b4b63805ee8f 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
953 unsigned entry; 953 unsigned entry;
954 u32 tx_status; 954 u32 tx_status;
955 955
956 if (skb_padto(skb, ETH_ZLEN))
957 return NETDEV_TX_OK;
958
959 if (unlikely(skb->len > TX_BUF_SIZE)) { 956 if (unlikely(skb->len > TX_BUF_SIZE)) {
960 dev->stats.tx_dropped++; 957 dev->stats.tx_dropped++;
961 goto out; 958 goto out;
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
976 973
977 len = skb->len; 974 len = skb->len;
975 if (unlikely(len < ETH_ZLEN)) {
976 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
977 0, ETH_ZLEN - len);
978 len = ETH_ZLEN;
979 }
978 980
979 wmb(); 981 wmb();
980 982
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..2c79d27404e0 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
483#endif 483#endif
484 484
485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ 485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
486 if (FALCON_REV(efx) >= FALCON_REV_B0) { \ 486 if (falcon_rev(efx) >= FALCON_REV_B0) { \
487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ 487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
488 } else { \ 488 } else { \
489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ 489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
491} while (0) 491} while (0)
492 492
493#define EFX_QWORD_FIELD_VER(efx, qword, field) \ 493#define EFX_QWORD_FIELD_VER(efx, qword, field) \
494 (FALCON_REV(efx) >= FALCON_REV_B0 ? \ 494 (falcon_rev(efx) >= FALCON_REV_B0 ? \
495 EFX_QWORD_FIELD((qword), field##_B0) : \ 495 EFX_QWORD_FIELD((qword), field##_B0) : \
496 EFX_QWORD_FIELD((qword), field##_A1)) 496 EFX_QWORD_FIELD((qword), field##_A1))
497 497
@@ -501,8 +501,5 @@ typedef union efx_oword {
501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) 501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
502#define EFX_DMA_TYPE_WIDTH(width) \ 502#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
505 ~((u64) 0) : ~((u32) 0))
506#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
507 504
508#endif /* EFX_BITFIELD_H */ 505#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index eecaa6d58584..7fc0328dc055 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
27 struct efx_blinker *bl = &efx->board_info.blinker; 27 struct efx_blinker *bl = &efx->board_info.blinker;
28 efx->board_info.set_fault_led(efx, bl->state); 28 efx->board_info.set_fault_led(efx, bl->state);
29 bl->state = !bl->state; 29 bl->state = !bl->state;
30 if (bl->resubmit) { 30 if (bl->resubmit)
31 bl->timer.expires = jiffies + BLINK_INTERVAL; 31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
32 add_timer(&bl->timer);
33 }
34} 32}
35 33
36static void board_blink(struct efx_nic *efx, int blink) 34static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
44 blinker->state = 0; 42 blinker->state = 0;
45 setup_timer(&blinker->timer, blink_led_timer, 43 setup_timer(&blinker->timer, blink_led_timer,
46 (unsigned long)efx); 44 (unsigned long)efx);
47 blinker->timer.expires = jiffies + BLINK_INTERVAL; 45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
48 add_timer(&blinker->timer);
49 } else { 46 } else {
50 blinker->resubmit = 0; 47 blinker->resubmit = 0;
51 if (blinker->timer.function) 48 if (blinker->timer.function)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 418f2e53a95b..449760642e31 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
199 */ 199 */
200static inline void efx_channel_processed(struct efx_channel *channel) 200static inline void efx_channel_processed(struct efx_channel *channel)
201{ 201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race 202 /* The interrupt handler for this channel may set work_pending
203 * with finishing processing, a new interrupt will be raised. 203 * as soon as we acknowledge the events we've seen. Make sure
204 */ 204 * it's cleared before then. */
205 channel->work_pending = 0; 205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */ 206 smp_wmb();
207
207 falcon_eventq_read_ack(channel); 208 falcon_eventq_read_ack(channel);
208} 209}
209 210
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
265 napi_disable(&channel->napi_str); 266 napi_disable(&channel->napi_str);
266 267
267 /* Poll the channel */ 268 /* Poll the channel */
268 (void) efx_process_channel(channel, efx->type->evq_size); 269 efx_process_channel(channel, efx->type->evq_size);
269 270
270 /* Ack the eventq. This may cause an interrupt to be generated 271 /* Ack the eventq. This may cause an interrupt to be generated
271 * when they are reenabled */ 272 * when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
317 * 318 *
318 *************************************************************************/ 319 *************************************************************************/
319 320
320/* Setup per-NIC RX buffer parameters.
321 * Calculate the rx buffer allocation parameters required to support
322 * the current MTU, including padding for header alignment and overruns.
323 */
324static void efx_calc_rx_buffer_params(struct efx_nic *efx)
325{
326 unsigned int order, len;
327
328 len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
329 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
330 efx->type->rx_buffer_padding);
331
332 /* Calculate page-order */
333 for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
334 ;
335
336 efx->rx_buffer_len = len;
337 efx->rx_buffer_order = order;
338}
339
340static int efx_probe_channel(struct efx_channel *channel) 321static int efx_probe_channel(struct efx_channel *channel)
341{ 322{
342 struct efx_tx_queue *tx_queue; 323 struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
387 struct efx_channel *channel; 368 struct efx_channel *channel;
388 int rc = 0; 369 int rc = 0;
389 370
390 efx_calc_rx_buffer_params(efx); 371 /* Calculate the rx buffer allocation parameters required to
372 * support the current MTU, including padding for header
373 * alignment and overruns.
374 */
375 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
376 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
377 efx->type->rx_buffer_padding);
378 efx->rx_buffer_order = get_order(efx->rx_buffer_len);
391 379
392 /* Initialise the channels */ 380 /* Initialise the channels */
393 efx_for_each_channel(channel, efx) { 381 efx_for_each_channel(channel, efx) {
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
440 netif_napi_add(channel->napi_dev, &channel->napi_str, 428 netif_napi_add(channel->napi_dev, &channel->napi_str,
441 efx_poll, napi_weight); 429 efx_poll, napi_weight);
442 430
431 /* The interrupt handler for this channel may set work_pending
432 * as soon as we enable it. Make sure it's cleared before
433 * then. Similarly, make sure it sees the enabled flag set. */
443 channel->work_pending = 0; 434 channel->work_pending = 0;
444 channel->enabled = 1; 435 channel->enabled = 1;
445 smp_wmb(); /* ensure channel updated before first interrupt */ 436 smp_wmb();
446 437
447 napi_enable(&channel->napi_str); 438 napi_enable(&channel->napi_str);
448 439
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
704 mutex_unlock(&efx->mac_lock); 695 mutex_unlock(&efx->mac_lock);
705 696
706 /* Serialise against efx_set_multicast_list() */ 697 /* Serialise against efx_set_multicast_list() */
707 if (NET_DEV_REGISTERED(efx)) { 698 if (efx_dev_registered(efx)) {
708 netif_tx_lock_bh(efx->net_dev); 699 netif_tx_lock_bh(efx->net_dev);
709 netif_tx_unlock_bh(efx->net_dev); 700 netif_tx_unlock_bh(efx->net_dev);
710 } 701 }
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
791 efx->membase = ioremap_nocache(efx->membase_phys, 782 efx->membase = ioremap_nocache(efx->membase_phys,
792 efx->type->mem_map_size); 783 efx->type->mem_map_size);
793 if (!efx->membase) { 784 if (!efx->membase) {
794 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", 785 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
795 efx->type->mem_bar, efx->membase_phys, 786 efx->type->mem_bar,
787 (unsigned long long)efx->membase_phys,
796 efx->type->mem_map_size); 788 efx->type->mem_map_size);
797 rc = -ENOMEM; 789 rc = -ENOMEM;
798 goto fail4; 790 goto fail4;
799 } 791 }
800 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", 792 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
801 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, 793 efx->type->mem_bar, (unsigned long long)efx->membase_phys,
802 efx->membase); 794 efx->type->mem_map_size, efx->membase);
803 795
804 return 0; 796 return 0;
805 797
806 fail4: 798 fail4:
807 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 799 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
808 fail3: 800 fail3:
809 efx->membase_phys = 0UL; 801 efx->membase_phys = 0;
810 fail2: 802 fail2:
811 pci_disable_device(efx->pci_dev); 803 pci_disable_device(efx->pci_dev);
812 fail1: 804 fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
824 816
825 if (efx->membase_phys) { 817 if (efx->membase_phys) {
826 pci_release_region(efx->pci_dev, efx->type->mem_bar); 818 pci_release_region(efx->pci_dev, efx->type->mem_bar);
827 efx->membase_phys = 0UL; 819 efx->membase_phys = 0;
828 } 820 }
829 821
830 pci_disable_device(efx->pci_dev); 822 pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
1043 return; 1035 return;
1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1036 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1045 return; 1037 return;
1046 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) 1038 if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
1047 return; 1039 return;
1048 1040
1049 /* Mark the port as enabled so port reconfigurations can start, then 1041 /* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
1073 cancel_delayed_work_sync(&efx->monitor_work); 1065 cancel_delayed_work_sync(&efx->monitor_work);
1074 1066
1075 /* Ensure that all RX slow refills are complete. */ 1067 /* Ensure that all RX slow refills are complete. */
1076 efx_for_each_rx_queue(rx_queue, efx) { 1068 efx_for_each_rx_queue(rx_queue, efx)
1077 cancel_delayed_work_sync(&rx_queue->work); 1069 cancel_delayed_work_sync(&rx_queue->work);
1078 }
1079 1070
1080 /* Stop scheduled port reconfigurations */ 1071 /* Stop scheduled port reconfigurations */
1081 cancel_work_sync(&efx->reconfigure_work); 1072 cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
1101 falcon_disable_interrupts(efx); 1092 falcon_disable_interrupts(efx);
1102 if (efx->legacy_irq) 1093 if (efx->legacy_irq)
1103 synchronize_irq(efx->legacy_irq); 1094 synchronize_irq(efx->legacy_irq);
1104 efx_for_each_channel_with_interrupt(channel, efx) 1095 efx_for_each_channel_with_interrupt(channel, efx) {
1105 if (channel->irq) 1096 if (channel->irq)
1106 synchronize_irq(channel->irq); 1097 synchronize_irq(channel->irq);
1098 }
1107 1099
1108 /* Stop all NAPI processing and synchronous rx refills */ 1100 /* Stop all NAPI processing and synchronous rx refills */
1109 efx_for_each_channel(channel, efx) 1101 efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
1125 /* Stop the kernel transmit interface late, so the watchdog 1117 /* Stop the kernel transmit interface late, so the watchdog
1126 * timer isn't ticking over the flush */ 1118 * timer isn't ticking over the flush */
1127 efx_stop_queue(efx); 1119 efx_stop_queue(efx);
1128 if (NET_DEV_REGISTERED(efx)) { 1120 if (efx_dev_registered(efx)) {
1129 netif_tx_lock_bh(efx->net_dev); 1121 netif_tx_lock_bh(efx->net_dev);
1130 netif_tx_unlock_bh(efx->net_dev); 1122 netif_tx_unlock_bh(efx->net_dev);
1131 } 1123 }
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
1344 return 0; 1336 return 0;
1345} 1337}
1346 1338
1347/* Context: process, dev_base_lock held, non-blocking. */ 1339/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1348static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1340static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1349{ 1341{
1350 struct efx_nic *efx = net_dev->priv; 1342 struct efx_nic *efx = net_dev->priv;
1351 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1343 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1352 struct net_device_stats *stats = &net_dev->stats; 1344 struct net_device_stats *stats = &net_dev->stats;
1353 1345
1346 /* Update stats if possible, but do not wait if another thread
1347 * is updating them (or resetting the NIC); slightly stale
1348 * stats are acceptable.
1349 */
1354 if (!spin_trylock(&efx->stats_lock)) 1350 if (!spin_trylock(&efx->stats_lock))
1355 return stats; 1351 return stats;
1356 if (efx->state == STATE_RUNNING) { 1352 if (efx->state == STATE_RUNNING) {
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1494static int efx_netdev_event(struct notifier_block *this, 1490static int efx_netdev_event(struct notifier_block *this,
1495 unsigned long event, void *ptr) 1491 unsigned long event, void *ptr)
1496{ 1492{
1497 struct net_device *net_dev = (struct net_device *)ptr; 1493 struct net_device *net_dev = ptr;
1498 1494
1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1495 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1500 struct efx_nic *efx = net_dev->priv; 1496 struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1563 efx_for_each_tx_queue(tx_queue, efx) 1559 efx_for_each_tx_queue(tx_queue, efx)
1564 efx_release_tx_buffers(tx_queue); 1560 efx_release_tx_buffers(tx_queue);
1565 1561
1566 if (NET_DEV_REGISTERED(efx)) { 1562 if (efx_dev_registered(efx)) {
1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1563 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1568 unregister_netdev(efx->net_dev); 1564 unregister_netdev(efx->net_dev);
1569 } 1565 }
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
1688 if (method == RESET_TYPE_DISABLE) { 1684 if (method == RESET_TYPE_DISABLE) {
1689 /* Reinitialise the device anyway so the driver unload sequence 1685 /* Reinitialise the device anyway so the driver unload sequence
1690 * can talk to the external SRAM */ 1686 * can talk to the external SRAM */
1691 (void) falcon_init_nic(efx); 1687 falcon_init_nic(efx);
1692 rc = -EIO; 1688 rc = -EIO;
1693 goto fail4; 1689 goto fail4;
1694 } 1690 }
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index b57cc68058c0..d3f749c72d41 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
116 ************************************************************************** 116 **************************************************************************
117 */ 117 */
118 118
119/* DMA address mask (up to 46-bit, avoiding compiler warnings) 119/* DMA address mask */
120 * 120#define FALCON_DMA_MASK DMA_BIT_MASK(46)
121 * Note that it is possible to have a platform with 64-bit longs and
122 * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
123 * platform DMA mask.
124 */
125#if BITS_PER_LONG == 64
126#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
127#else
128#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
129#endif
130 121
131/* TX DMA length mask (13-bit) */ 122/* TX DMA length mask (13-bit) */
132#define FALCON_TX_DMA_MASK (4096 - 1) 123#define FALCON_TX_DMA_MASK (4096 - 1)
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
145#define PCI_EXP_LNKSTA_LNK_WID_LBN 4 136#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
146 137
147#define FALCON_IS_DUAL_FUNC(efx) \ 138#define FALCON_IS_DUAL_FUNC(efx) \
148 (FALCON_REV(efx) < FALCON_REV_B0) 139 (falcon_rev(efx) < FALCON_REV_B0)
149 140
150/************************************************************************** 141/**************************************************************************
151 * 142 *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
465 TX_DESCQ_TYPE, 0, 456 TX_DESCQ_TYPE, 0,
466 TX_NON_IP_DROP_DIS_B0, 1); 457 TX_NON_IP_DROP_DIS_B0, 1);
467 458
468 if (FALCON_REV(efx) >= FALCON_REV_B0) { 459 if (falcon_rev(efx) >= FALCON_REV_B0) {
469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 460 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 461 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); 462 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 465 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
475 tx_queue->queue); 466 tx_queue->queue);
476 467
477 if (FALCON_REV(efx) < FALCON_REV_B0) { 468 if (falcon_rev(efx) < FALCON_REV_B0) {
478 efx_oword_t reg; 469 efx_oword_t reg;
479 470
480 BUG_ON(tx_queue->queue >= 128); /* HW limit */ 471 BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
635 efx_oword_t rx_desc_ptr; 626 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx; 627 struct efx_nic *efx = rx_queue->efx;
637 int rc; 628 int rc;
638 int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; 629 int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
639 int iscsi_digest_en = is_b0; 630 int iscsi_digest_en = is_b0;
640 631
641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 632 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 813 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
823 tx_queue = &efx->tx_queue[tx_ev_q_label]; 814 tx_queue = &efx->tx_queue[tx_ev_q_label];
824 815
825 if (NET_DEV_REGISTERED(efx)) 816 if (efx_dev_registered(efx))
826 netif_tx_lock(efx->net_dev); 817 netif_tx_lock(efx->net_dev);
827 falcon_notify_tx_desc(tx_queue); 818 falcon_notify_tx_desc(tx_queue);
828 if (NET_DEV_REGISTERED(efx)) 819 if (efx_dev_registered(efx))
829 netif_tx_unlock(efx->net_dev); 820 netif_tx_unlock(efx->net_dev);
830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 821 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
831 EFX_WORKAROUND_10727(efx)) { 822 EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
884 RX_EV_TCP_UDP_CHKSUM_ERR); 875 RX_EV_TCP_UDP_CHKSUM_ERR);
885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 876 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 877 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
887 rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? 878 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 879 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 880 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
890 881
@@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 1056 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1066 is_phy_event = 1; 1057 is_phy_event = 1;
1067 1058
1068 if ((FALCON_REV(efx) >= FALCON_REV_B0) && 1059 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1060 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
1070 is_phy_event = 1; 1061 is_phy_event = 1;
1071 1062
@@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1405static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) 1396static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1406{ 1397{
1407 struct falcon_nic_data *nic_data = efx->nic_data; 1398 struct falcon_nic_data *nic_data = efx->nic_data;
1408 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1399 efx_oword_t *int_ker = efx->irq_status.addr;
1409 efx_oword_t fatal_intr; 1400 efx_oword_t fatal_intr;
1410 int error, mem_perr; 1401 int error, mem_perr;
1411 static int n_int_errors; 1402 static int n_int_errors;
@@ -1451,8 +1442,8 @@ out:
1451 */ 1442 */
1452static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) 1443static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1453{ 1444{
1454 struct efx_nic *efx = (struct efx_nic *)dev_id; 1445 struct efx_nic *efx = dev_id;
1455 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1446 efx_oword_t *int_ker = efx->irq_status.addr;
1456 struct efx_channel *channel; 1447 struct efx_channel *channel;
1457 efx_dword_t reg; 1448 efx_dword_t reg;
1458 u32 queues; 1449 u32 queues;
@@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1489 1480
1490static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 1481static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491{ 1482{
1492 struct efx_nic *efx = (struct efx_nic *)dev_id; 1483 struct efx_nic *efx = dev_id;
1493 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1484 efx_oword_t *int_ker = efx->irq_status.addr;
1494 struct efx_channel *channel; 1485 struct efx_channel *channel;
1495 int syserr; 1486 int syserr;
1496 int queues; 1487 int queues;
@@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1542 */ 1533 */
1543static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) 1534static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1544{ 1535{
1545 struct efx_channel *channel = (struct efx_channel *)dev_id; 1536 struct efx_channel *channel = dev_id;
1546 struct efx_nic *efx = channel->efx; 1537 struct efx_nic *efx = channel->efx;
1547 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1538 efx_oword_t *int_ker = efx->irq_status.addr;
1548 int syserr; 1539 int syserr;
1549 1540
1550 efx->last_irq_cpu = raw_smp_processor_id(); 1541 efx->last_irq_cpu = raw_smp_processor_id();
@@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1572 unsigned long offset; 1563 unsigned long offset;
1573 efx_dword_t dword; 1564 efx_dword_t dword;
1574 1565
1575 if (FALCON_REV(efx) < FALCON_REV_B0) 1566 if (falcon_rev(efx) < FALCON_REV_B0)
1576 return; 1567 return;
1577 1568
1578 for (offset = RX_RSS_INDIR_TBL_B0; 1569 for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1595 1586
1596 if (!EFX_INT_MODE_USE_MSI(efx)) { 1587 if (!EFX_INT_MODE_USE_MSI(efx)) {
1597 irq_handler_t handler; 1588 irq_handler_t handler;
1598 if (FALCON_REV(efx) >= FALCON_REV_B0) 1589 if (falcon_rev(efx) >= FALCON_REV_B0)
1599 handler = falcon_legacy_interrupt_b0; 1590 handler = falcon_legacy_interrupt_b0;
1600 else 1591 else
1601 handler = falcon_legacy_interrupt_a1; 1592 handler = falcon_legacy_interrupt_a1;
@@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1636 efx_oword_t reg; 1627 efx_oword_t reg;
1637 1628
1638 /* Disable MSI/MSI-X interrupts */ 1629 /* Disable MSI/MSI-X interrupts */
1639 efx_for_each_channel_with_interrupt(channel, efx) 1630 efx_for_each_channel_with_interrupt(channel, efx) {
1640 if (channel->irq) 1631 if (channel->irq)
1641 free_irq(channel->irq, channel); 1632 free_irq(channel->irq, channel);
1633 }
1642 1634
1643 /* ACK legacy interrupt */ 1635 /* ACK legacy interrupt */
1644 if (FALCON_REV(efx) >= FALCON_REV_B0) 1636 if (falcon_rev(efx) >= FALCON_REV_B0)
1645 falcon_read(efx, &reg, INT_ISR0_B0); 1637 falcon_read(efx, &reg, INT_ISR0_B0);
1646 else 1638 else
1647 falcon_irq_ack_a1(efx); 1639 falcon_irq_ack_a1(efx);
@@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1732 efx_oword_t temp; 1724 efx_oword_t temp;
1733 int count; 1725 int count;
1734 1726
1735 if ((FALCON_REV(efx) < FALCON_REV_B0) || 1727 if ((falcon_rev(efx) < FALCON_REV_B0) ||
1736 (efx->loopback_mode != LOOPBACK_NONE)) 1728 (efx->loopback_mode != LOOPBACK_NONE))
1737 return; 1729 return;
1738 1730
@@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1785{ 1777{
1786 efx_oword_t temp; 1778 efx_oword_t temp;
1787 1779
1788 if (FALCON_REV(efx) < FALCON_REV_B0) 1780 if (falcon_rev(efx) < FALCON_REV_B0)
1789 return; 1781 return;
1790 1782
1791 /* Isolate the MAC -> RX */ 1783 /* Isolate the MAC -> RX */
@@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1823 MAC_SPEED, link_speed); 1815 MAC_SPEED, link_speed);
1824 /* On B0, MAC backpressure can be disabled and packets get 1816 /* On B0, MAC backpressure can be disabled and packets get
1825 * discarded. */ 1817 * discarded. */
1826 if (FALCON_REV(efx) >= FALCON_REV_B0) { 1818 if (falcon_rev(efx) >= FALCON_REV_B0) {
1827 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1819 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
1828 !efx->link_up); 1820 !efx->link_up);
1829 } 1821 }
@@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1841 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1833 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1842 1834
1843 /* Unisolate the MAC -> RX */ 1835 /* Unisolate the MAC -> RX */
1844 if (FALCON_REV(efx) >= FALCON_REV_B0) 1836 if (falcon_rev(efx) >= FALCON_REV_B0)
1845 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1837 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
1846 falcon_write(efx, &reg, RX_CFG_REG_KER); 1838 falcon_write(efx, &reg, RX_CFG_REG_KER);
1847} 1839}
@@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1856 return 0; 1848 return 0;
1857 1849
1858 /* Statistics fetch will fail if the MAC is in TX drain */ 1850 /* Statistics fetch will fail if the MAC is in TX drain */
1859 if (FALCON_REV(efx) >= FALCON_REV_B0) { 1851 if (falcon_rev(efx) >= FALCON_REV_B0) {
1860 efx_oword_t temp; 1852 efx_oword_t temp;
1861 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1853 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1862 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) 1854 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
1940static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 1932static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1941 int addr, int value) 1933 int addr, int value)
1942{ 1934{
1943 struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 1935 struct efx_nic *efx = net_dev->priv;
1944 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 1936 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1945 efx_oword_t reg; 1937 efx_oword_t reg;
1946 1938
@@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
2008 * could be read, -1 will be returned. */ 2000 * could be read, -1 will be returned. */
2009static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2001static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2010{ 2002{
2011 struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 2003 struct efx_nic *efx = net_dev->priv;
2012 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2004 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2013 efx_oword_t reg; 2005 efx_oword_t reg;
2014 int value = -1; 2006 int value = -1;
@@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx)
2113 falcon_init_mdio(&efx->mii); 2105 falcon_init_mdio(&efx->mii);
2114 2106
2115 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2107 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2116 if (FALCON_REV(efx) >= FALCON_REV_B0) 2108 if (falcon_rev(efx) >= FALCON_REV_B0)
2117 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2109 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
2118 else 2110 else
2119 efx->flow_control = EFX_FC_RX; 2111 efx->flow_control = EFX_FC_RX;
@@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2373 return -ENODEV; 2365 return -ENODEV;
2374 } 2366 }
2375 2367
2376 switch (FALCON_REV(efx)) { 2368 switch (falcon_rev(efx)) {
2377 case FALCON_REV_A0: 2369 case FALCON_REV_A0:
2378 case 0xff: 2370 case 0xff:
2379 EFX_ERR(efx, "Falcon rev A0 not supported\n"); 2371 EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2399 break; 2391 break;
2400 2392
2401 default: 2393 default:
2402 EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); 2394 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2403 return -ENODEV; 2395 return -ENODEV;
2404 } 2396 }
2405 2397
@@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx)
2419 2411
2420 /* Allocate storage for hardware specific data */ 2412 /* Allocate storage for hardware specific data */
2421 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2413 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2422 efx->nic_data = (void *) nic_data; 2414 efx->nic_data = nic_data;
2423 2415
2424 /* Determine number of ports etc. */ 2416 /* Determine number of ports etc. */
2425 rc = falcon_probe_nic_variant(efx); 2417 rc = falcon_probe_nic_variant(efx);
@@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx)
2489 */ 2481 */
2490int falcon_init_nic(struct efx_nic *efx) 2482int falcon_init_nic(struct efx_nic *efx)
2491{ 2483{
2492 struct falcon_nic_data *data;
2493 efx_oword_t temp; 2484 efx_oword_t temp;
2494 unsigned thresh; 2485 unsigned thresh;
2495 int rc; 2486 int rc;
2496 2487
2497 data = (struct falcon_nic_data *)efx->nic_data;
2498
2499 /* Set up the address region register. This is only needed 2488 /* Set up the address region register. This is only needed
2500 * for the B0 FPGA, but since we are just pushing in the 2489 * for the B0 FPGA, but since we are just pushing in the
2501 * reset defaults this may as well be unconditional. */ 2490 * reset defaults this may as well be unconditional. */
@@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx)
2562 2551
2563 /* Set number of RSS queues for receive path. */ 2552 /* Set number of RSS queues for receive path. */
2564 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 2553 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2565 if (FALCON_REV(efx) >= FALCON_REV_B0) 2554 if (falcon_rev(efx) >= FALCON_REV_B0)
2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); 2555 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2567 else 2556 else
2568 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); 2557 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx)
2600 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 2589 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
2601 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 2590 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
2602 /* Squash TX of packets of 16 bytes or less */ 2591 /* Squash TX of packets of 16 bytes or less */
2603 if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2592 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
2604 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 2593 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
2605 falcon_write(efx, &temp, TX_CFG2_REG_KER); 2594 falcon_write(efx, &temp, TX_CFG2_REG_KER);
2606 2595
@@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx)
2617 if (EFX_WORKAROUND_7575(efx)) 2606 if (EFX_WORKAROUND_7575(efx))
2618 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, 2607 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
2619 (3 * 4096) / 32); 2608 (3 * 4096) / 32);
2620 if (FALCON_REV(efx) >= FALCON_REV_B0) 2609 if (falcon_rev(efx) >= FALCON_REV_B0)
2621 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); 2610 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
2622 2611
2623 /* RX FIFO flow control thresholds */ 2612 /* RX FIFO flow control thresholds */
@@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx)
2633 falcon_write(efx, &temp, RX_CFG_REG_KER); 2622 falcon_write(efx, &temp, RX_CFG_REG_KER);
2634 2623
2635 /* Set destination of both TX and RX Flush events */ 2624 /* Set destination of both TX and RX Flush events */
2636 if (FALCON_REV(efx) >= FALCON_REV_B0) { 2625 if (falcon_rev(efx) >= FALCON_REV_B0) {
2637 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 2626 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
2638 falcon_write(efx, &temp, DP_CTRL_REG); 2627 falcon_write(efx, &temp, DP_CTRL_REG);
2639 } 2628 }
@@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx)
2647 2636
2648 falcon_free_buffer(efx, &efx->irq_status); 2637 falcon_free_buffer(efx, &efx->irq_status);
2649 2638
2650 (void) falcon_reset_hw(efx, RESET_TYPE_ALL); 2639 falcon_reset_hw(efx, RESET_TYPE_ALL);
2651 2640
2652 /* Release the second function after the reset */ 2641 /* Release the second function after the reset */
2653 if (nic_data->pci_dev2) { 2642 if (nic_data->pci_dev2) {
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 6117403b0c03..492f9bc28840 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -23,7 +23,10 @@ enum falcon_revision {
23 FALCON_REV_B0 = 2, 23 FALCON_REV_B0 = 2,
24}; 24};
25 25
26#define FALCON_REV(efx) ((efx)->pci_dev->revision) 26static inline int falcon_rev(struct efx_nic *efx)
27{
28 return efx->pci_dev->revision;
29}
27 30
28extern struct efx_nic_type falcon_a_nic_type; 31extern struct efx_nic_type falcon_a_nic_type;
29extern struct efx_nic_type falcon_b_nic_type; 32extern struct efx_nic_type falcon_b_nic_type;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 06e2d68fc3d1..6d003114eeab 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
1125 u8 port1_phy_type; 1125 u8 port1_phy_type;
1126 __le16 asic_sub_revision; 1126 __le16 asic_sub_revision;
1127 __le16 board_revision; 1127 __le16 board_revision;
1128} __attribute__ ((packed)); 1128} __packed;
1129 1129
1130#define NVCONFIG_BASE 0x300 1130#define NVCONFIG_BASE 0x300
1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C 1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
1144 __le16 board_struct_ver; 1144 __le16 board_struct_ver;
1145 __le16 board_checksum; 1145 __le16 board_checksum;
1146 struct falcon_nvconfig_board_v2 board_v2; 1146 struct falcon_nvconfig_board_v2 board_v2;
1147} __attribute__ ((packed)); 1147} __packed;
1148 1148
1149#endif /* EFX_FALCON_HWDEFS_H */ 1149#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index ea08184ddfa9..6670cdfc41ab 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -56,14 +56,27 @@
56#define FALCON_USE_QWORD_IO 1 56#define FALCON_USE_QWORD_IO 1
57#endif 57#endif
58 58
59#define _falcon_writeq(efx, value, reg) \ 59#ifdef FALCON_USE_QWORD_IO
60 __raw_writeq((__force u64) (value), (efx)->membase + (reg)) 60static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
61#define _falcon_writel(efx, value, reg) \ 61 unsigned int reg)
62 __raw_writel((__force u32) (value), (efx)->membase + (reg)) 62{
63#define _falcon_readq(efx, reg) \ 63 __raw_writeq((__force u64)value, efx->membase + reg);
64 ((__force __le64) __raw_readq((efx)->membase + (reg))) 64}
65#define _falcon_readl(efx, reg) \ 65static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
66 ((__force __le32) __raw_readl((efx)->membase + (reg))) 66{
67 return (__force __le64)__raw_readq(efx->membase + reg);
68}
69#endif
70
71static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
72 unsigned int reg)
73{
74 __raw_writel((__force u32)value, efx->membase + reg);
75}
76static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
77{
78 return (__force __le32)__raw_readl(efx->membase + reg);
79}
67 80
68/* Writes to a normal 16-byte Falcon register, locking as appropriate. */ 81/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
69static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, 82static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index a74b7931a3c4..dbdcee4b0f8d 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
221{ 221{
222 efx_dword_t reg; 222 efx_dword_t reg;
223 223
224 if (FALCON_REV(efx) < FALCON_REV_B0) 224 if (falcon_rev(efx) < FALCON_REV_B0)
225 return 1; 225 return 1;
226 226
227 /* The ISR latches, so clear it and re-read */ 227 /* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
241{ 241{
242 efx_dword_t reg; 242 efx_dword_t reg;
243 243
244 if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 244 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
245 return; 245 return;
246 246
247 /* Flush the ISR */ 247 /* Flush the ISR */
@@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
454 454
455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
456 __func__, tries); 456 __func__, tries);
457 (void) falcon_reset_xaui(efx); 457 falcon_reset_xaui(efx);
458 udelay(200); 458 udelay(200);
459 tries--; 459 tries--;
460 } 460 }
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
572 xaui_link_ok = falcon_xaui_link_ok(efx); 572 xaui_link_ok = falcon_xaui_link_ok(efx);
573 573
574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
575 (void) falcon_reset_xaui(efx); 575 falcon_reset_xaui(efx);
576 576
577 /* Call the PHY check_hw routine */ 577 /* Call the PHY check_hw routine */
578 rc = efx->phy_op->check_hw(efx); 578 rc = efx->phy_op->check_hw(efx);
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
639 reset = ((flow_control & EFX_FC_TX) && 639 reset = ((flow_control & EFX_FC_TX) &&
640 !(efx->flow_control & EFX_FC_TX)); 640 !(efx->flow_control & EFX_FC_TX));
641 if (EFX_WORKAROUND_11482(efx) && reset) { 641 if (EFX_WORKAROUND_11482(efx) && reset) {
642 if (FALCON_REV(efx) >= FALCON_REV_B0) { 642 if (falcon_rev(efx) >= FALCON_REV_B0) {
643 /* Recover by resetting the EM block */ 643 /* Recover by resetting the EM block */
644 if (efx->link_up) 644 if (efx->link_up)
645 falcon_drain_tx_fifo(efx); 645 falcon_drain_tx_fifo(efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 59f261b4171f..5e20e7551dae 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -42,7 +42,7 @@
42#ifndef EFX_DRIVER_NAME 42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc" 43#define EFX_DRIVER_NAME "sfc"
44#endif 44#endif
45#define EFX_DRIVER_VERSION "2.2.0136" 45#define EFX_DRIVER_VERSION "2.2"
46 46
47#ifdef EFX_ENABLE_DEBUG 47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
52#define EFX_WARN_ON_PARANOID(x) do {} while (0) 52#define EFX_WARN_ON_PARANOID(x) do {} while (0)
53#endif 53#endif
54 54
55#define NET_DEV_REGISTERED(efx) \
56 ((efx)->net_dev->reg_state == NETREG_REGISTERED)
57
58/* Include net device name in log messages if it has been registered.
59 * Use efx->name not efx->net_dev->name so that races with (un)registration
60 * are harmless.
61 */
62#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
63
64/* Un-rate-limited logging */ 55/* Un-rate-limited logging */
65#define EFX_ERR(efx, fmt, args...) \ 56#define EFX_ERR(efx, fmt, args...) \
66dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) 57dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
67 58
68#define EFX_INFO(efx, fmt, args...) \ 59#define EFX_INFO(efx, fmt, args...) \
69dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) 60dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
70 61
71#ifdef EFX_ENABLE_DEBUG 62#ifdef EFX_ENABLE_DEBUG
72#define EFX_LOG(efx, fmt, args...) \ 63#define EFX_LOG(efx, fmt, args...) \
73dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 64dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
74#else 65#else
75#define EFX_LOG(efx, fmt, args...) \ 66#define EFX_LOG(efx, fmt, args...) \
76dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 67dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
77#endif 68#endif
78 69
79#define EFX_TRACE(efx, fmt, args...) do {} while (0) 70#define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
90#define EFX_LOG_RL(efx, fmt, args...) \ 81#define EFX_LOG_RL(efx, fmt, args...) \
91do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) 82do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
92 83
93/* Kernel headers may redefine inline anyway */
94#ifndef inline
95#define inline inline __attribute__ ((always_inline))
96#endif
97
98/************************************************************************** 84/**************************************************************************
99 * 85 *
100 * Efx data structures 86 * Efx data structures
@@ -695,7 +681,7 @@ struct efx_nic {
695 struct workqueue_struct *workqueue; 681 struct workqueue_struct *workqueue;
696 struct work_struct reset_work; 682 struct work_struct reset_work;
697 struct delayed_work monitor_work; 683 struct delayed_work monitor_work;
698 unsigned long membase_phys; 684 resource_size_t membase_phys;
699 void __iomem *membase; 685 void __iomem *membase;
700 spinlock_t biu_lock; 686 spinlock_t biu_lock;
701 enum efx_int_mode interrupt_mode; 687 enum efx_int_mode interrupt_mode;
@@ -719,7 +705,7 @@ struct efx_nic {
719 705
720 unsigned n_rx_nodesc_drop_cnt; 706 unsigned n_rx_nodesc_drop_cnt;
721 707
722 void *nic_data; 708 struct falcon_nic_data *nic_data;
723 709
724 struct mutex mac_lock; 710 struct mutex mac_lock;
725 int port_enabled; 711 int port_enabled;
@@ -760,6 +746,20 @@ struct efx_nic {
760 void *loopback_selftest; 746 void *loopback_selftest;
761}; 747};
762 748
749static inline int efx_dev_registered(struct efx_nic *efx)
750{
751 return efx->net_dev->reg_state == NETREG_REGISTERED;
752}
753
754/* Net device name, for inclusion in log messages if it has been registered.
755 * Use efx->name not efx->net_dev->name so that races with (un)registration
756 * are harmless.
757 */
758static inline const char *efx_dev_name(struct efx_nic *efx)
759{
760 return efx_dev_registered(efx) ? efx->name : "";
761}
762
763/** 763/**
764 * struct efx_nic_type - Efx device type definition 764 * struct efx_nic_type - Efx device type definition
765 * @mem_bar: Memory BAR number 765 * @mem_bar: Memory BAR number
@@ -795,7 +795,7 @@ struct efx_nic_type {
795 unsigned int txd_ring_mask; 795 unsigned int txd_ring_mask;
796 unsigned int rxd_ring_mask; 796 unsigned int rxd_ring_mask;
797 unsigned int evq_size; 797 unsigned int evq_size;
798 dma_addr_t max_dma_mask; 798 u64 max_dma_mask;
799 unsigned int tx_dma_mask; 799 unsigned int tx_dma_mask;
800 unsigned bug5391_mask; 800 unsigned bug5391_mask;
801 801
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
86 */ 86 */
87#define EFX_RXD_HEAD_ROOM 2 87#define EFX_RXD_HEAD_ROOM 2
88 88
89/* Macros for zero-order pages (potentially) containing multiple RX buffers */ 89static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
90#define RX_DATA_OFFSET(_data) \ 90{
91 (((unsigned long) (_data)) & (PAGE_SIZE-1)) 91 /* Offset is always within one page, so we don't need to consider
92#define RX_BUF_OFFSET(_rx_buf) \ 92 * the page order.
93 RX_DATA_OFFSET((_rx_buf)->data) 93 */
94 94 return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
95#define RX_PAGE_SIZE(_efx) \ 95}
96 (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) 96static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
97{
98 return PAGE_SIZE << efx->rx_buffer_order;
99}
97 100
98 101
99/************************************************************************** 102/**************************************************************************
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
106static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, 109static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
107 void **tcpudp_hdr, u64 *hdr_flags, void *priv) 110 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
108{ 111{
109 struct efx_channel *channel = (struct efx_channel *)priv; 112 struct efx_channel *channel = priv;
110 struct iphdr *iph; 113 struct iphdr *iph;
111 struct tcphdr *th; 114 struct tcphdr *th;
112 115
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
131 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, 134 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
132 void *priv) 135 void *priv)
133{ 136{
134 struct efx_channel *channel = (struct efx_channel *)priv; 137 struct efx_channel *channel = priv;
135 struct ethhdr *eh; 138 struct ethhdr *eh;
136 struct iphdr *iph; 139 struct iphdr *iph;
137 140
138 /* We support EtherII and VLAN encapsulated IPv4 */ 141 /* We support EtherII and VLAN encapsulated IPv4 */
139 eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); 142 eh = page_address(frag->page) + frag->page_offset;
140 *mac_hdr = eh; 143 *mac_hdr = eh;
141 144
142 if (eh->h_proto == htons(ETH_P_IP)) { 145 if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
269 return -ENOMEM; 272 return -ENOMEM;
270 273
271 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 274 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
272 0, RX_PAGE_SIZE(efx), 275 0, efx_rx_buf_size(efx),
273 PCI_DMA_FROMDEVICE); 276 PCI_DMA_FROMDEVICE);
274 277
275 if (unlikely(pci_dma_mapping_error(dma_addr))) { 278 if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
280 283
281 rx_queue->buf_page = rx_buf->page; 284 rx_queue->buf_page = rx_buf->page;
282 rx_queue->buf_dma_addr = dma_addr; 285 rx_queue->buf_dma_addr = dma_addr;
283 rx_queue->buf_data = ((char *) page_address(rx_buf->page) + 286 rx_queue->buf_data = (page_address(rx_buf->page) +
284 EFX_PAGE_IP_ALIGN); 287 EFX_PAGE_IP_ALIGN);
285 } 288 }
286 289
287 offset = RX_DATA_OFFSET(rx_queue->buf_data);
288 rx_buf->len = bytes; 290 rx_buf->len = bytes;
289 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
290 rx_buf->data = rx_queue->buf_data; 291 rx_buf->data = rx_queue->buf_data;
292 offset = efx_rx_buf_offset(rx_buf);
293 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
291 294
292 /* Try to pack multiple buffers per page */ 295 /* Try to pack multiple buffers per page */
293 if (efx->rx_buffer_order == 0) { 296 if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
295 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 298 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
296 offset += ((bytes + 0x1ff) & ~0x1ff); 299 offset += ((bytes + 0x1ff) & ~0x1ff);
297 300
298 space = RX_PAGE_SIZE(efx) - offset; 301 space = efx_rx_buf_size(efx) - offset;
299 if (space >= bytes) { 302 if (space >= bytes) {
300 /* Refs dropped on kernel releasing each skb */ 303 /* Refs dropped on kernel releasing each skb */
301 get_page(rx_queue->buf_page); 304 get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
344 EFX_BUG_ON_PARANOID(rx_buf->skb); 347 EFX_BUG_ON_PARANOID(rx_buf->skb);
345 if (rx_buf->unmap_addr) { 348 if (rx_buf->unmap_addr) {
346 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 349 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
347 RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); 350 efx_rx_buf_size(efx),
351 PCI_DMA_FROMDEVICE);
348 rx_buf->unmap_addr = 0; 352 rx_buf->unmap_addr = 0;
349 } 353 }
350 } else if (likely(rx_buf->skb)) { 354 } else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
400 return 0; 404 return 0;
401 405
402 /* Record minimum fill level */ 406 /* Record minimum fill level */
403 if (unlikely(fill_level < rx_queue->min_fill)) 407 if (unlikely(fill_level < rx_queue->min_fill)) {
404 if (fill_level) 408 if (fill_level)
405 rx_queue->min_fill = fill_level; 409 rx_queue->min_fill = fill_level;
410 }
406 411
407 /* Acquire RX add lock. If this lock is contended, then a fast 412 /* Acquire RX add lock. If this lock is contended, then a fast
408 * fill must already be in progress (e.g. in the refill 413 * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
552 struct skb_frag_struct frags; 557 struct skb_frag_struct frags;
553 558
554 frags.page = rx_buf->page; 559 frags.page = rx_buf->page;
555 frags.page_offset = RX_BUF_OFFSET(rx_buf); 560 frags.page_offset = efx_rx_buf_offset(rx_buf);
556 frags.size = rx_buf->len; 561 frags.size = rx_buf->len;
557 562
558 lro_receive_frags(lro_mgr, &frags, rx_buf->len, 563 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
597 if (unlikely(rx_buf->len > hdr_len)) { 602 if (unlikely(rx_buf->len > hdr_len)) {
598 struct skb_frag_struct *frag = skb_shinfo(skb)->frags; 603 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
599 frag->page = rx_buf->page; 604 frag->page = rx_buf->page;
600 frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; 605 frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
601 frag->size = skb->len - hdr_len; 606 frag->size = skb->len - hdr_len;
602 skb_shinfo(skb)->nr_frags = 1; 607 skb_shinfo(skb)->nr_frags = 1;
603 skb->data_len = frag->size; 608 skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
851 /* For a page that is part-way through splitting into RX buffers */ 856 /* For a page that is part-way through splitting into RX buffers */
852 if (rx_queue->buf_page != NULL) { 857 if (rx_queue->buf_page != NULL) {
853 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, 858 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
854 RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); 859 efx_rx_buf_size(rx_queue->efx),
860 PCI_DMA_FROMDEVICE);
855 __free_pages(rx_queue->buf_page, 861 __free_pages(rx_queue->buf_page,
856 rx_queue->efx->rx_buffer_order); 862 rx_queue->efx->rx_buffer_order);
857 rx_queue->buf_page = NULL; 863 rx_queue->buf_page = NULL;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index cbda15946e8f..3b2de9fe7f27 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
290 290
291 payload = &state->payload; 291 payload = &state->payload;
292 292
293 received = (struct efx_loopback_payload *)(char *) buf_ptr; 293 received = (struct efx_loopback_payload *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr; 294 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check; 295 received->ip.check = payload->ip.check;
296 296
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
424 * interrupt handler. */ 424 * interrupt handler. */
425 smp_wmb(); 425 smp_wmb();
426 426
427 if (NET_DEV_REGISTERED(efx)) 427 if (efx_dev_registered(efx))
428 netif_tx_lock_bh(efx->net_dev); 428 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb); 429 rc = efx_xmit(efx, tx_queue, skb);
430 if (NET_DEV_REGISTERED(efx)) 430 if (efx_dev_registered(efx))
431 netif_tx_unlock_bh(efx->net_dev); 431 netif_tx_unlock_bh(efx->net_dev);
432 432
433 if (rc != NETDEV_TX_OK) { 433 if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
453 int tx_done = 0, rx_good, rx_bad; 453 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0; 454 int i, rc = 0;
455 455
456 if (NET_DEV_REGISTERED(efx)) 456 if (efx_dev_registered(efx))
457 netif_tx_lock_bh(efx->net_dev); 457 netif_tx_lock_bh(efx->net_dev);
458 458
459 /* Count the number of tx completions, and decrement the refcnt. Any 459 /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
465 dev_kfree_skb_any(skb); 465 dev_kfree_skb_any(skb);
466 } 466 }
467 467
468 if (NET_DEV_REGISTERED(efx)) 468 if (efx_dev_registered(efx))
469 netif_tx_unlock_bh(efx->net_dev); 469 netif_tx_unlock_bh(efx->net_dev);
470 470
471 /* Check TX completion and received packet counts */ 471 /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
517 state->packet_count = min(1 << (i << 2), state->packet_count); 517 state->packet_count = min(1 << (i << 2), state->packet_count);
518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 518 state->skbs = kzalloc(sizeof(state->skbs[0]) *
519 state->packet_count, GFP_KERNEL); 519 state->packet_count, GFP_KERNEL);
520 if (!state->skbs)
521 return -ENOMEM;
520 state->flush = 0; 522 state->flush = 0;
521 523
522 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
700 * "flushing" so all inflight packets are dropped */ 702 * "flushing" so all inflight packets are dropped */
701 BUG_ON(efx->loopback_selftest); 703 BUG_ON(efx->loopback_selftest);
702 state->flush = 1; 704 state->flush = 1;
703 efx->loopback_selftest = (void *)state; 705 efx->loopback_selftest = state;
704 706
705 rc = efx_test_loopbacks(efx, tests, loopback_modes); 707 rc = efx_test_loopbacks(efx, tests, loopback_modes);
706 708
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 725d1a539c49..66a0d1442aba 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx)
116 116
117 /* Turn off all power rails */ 117 /* Turn off all power rails */
118 out = 0xff; 118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 119 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120 120
121 /* Disable port 1 outputs on IO expander */ 121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff; 122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 123 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124 124
125 /* Disable port 0 outputs on IO expander */ 125 /* Disable port 0 outputs on IO expander */
126 cfg = 0xff; 126 cfg = 0xff;
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 127 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
128 128
129 /* Clear any over-temperature alert */ 129 /* Clear any over-temperature alert */
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 130 efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131} 131}
132 132
133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -253,14 +253,14 @@ done:
253fail3: 253fail3:
254 /* Turn off all power rails */ 254 /* Turn off all power rails */
255 out = 0xff; 255 out = 0xff;
256 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 256 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
257 /* Disable port 1 outputs on IO expander */ 257 /* Disable port 1 outputs on IO expander */
258 out = 0xff; 258 out = 0xff;
259 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 259 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
260fail2: 260fail2:
261 /* Disable port 0 outputs on IO expander */ 261 /* Disable port 0 outputs on IO expander */
262 out = 0xff; 262 out = 0xff;
263 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 263 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
264fail1: 264fail1:
265 return rc; 265 return rc;
266} 266}
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index b1cd6deec01f..c0146061c326 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
211 int rc = 0; 211 int rc = 0;
212 212
213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
214 if (!phy_data)
215 return -ENOMEM;
214 efx->phy_data = phy_data; 216 efx->phy_data = phy_data;
215 217
216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
376 * perform a special software reset */ 378 * perform a special software reset */
377 if ((phy_data->tx_disabled && !efx->tx_disabled) || 379 if ((phy_data->tx_disabled && !efx->tx_disabled) ||
378 loop_change) { 380 loop_change) {
379 (void) tenxpress_special_reset(efx); 381 tenxpress_special_reset(efx);
380 falcon_reset_xaui(efx); 382 falcon_reset_xaui(efx);
381 } 383 }
382 384
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 9b436f5b4888..5cdd082ab8f6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
387 if (unlikely(tx_queue->stopped)) { 387 if (unlikely(tx_queue->stopped)) {
388 fill_level = tx_queue->insert_count - tx_queue->read_count; 388 fill_level = tx_queue->insert_count - tx_queue->read_count;
389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
390 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); 390 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
391 391
392 /* Do this under netif_tx_lock(), to avoid racing 392 /* Do this under netif_tx_lock(), to avoid racing
393 * with efx_xmit(). */ 393 * with efx_xmit(). */
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
639 base_dma = tsoh->dma_addr & PAGE_MASK; 639 base_dma = tsoh->dma_addr & PAGE_MASK;
640 640
641 p = &tx_queue->tso_headers_free; 641 p = &tx_queue->tso_headers_free;
642 while (*p != NULL) 642 while (*p != NULL) {
643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 643 if (((unsigned long)*p & PAGE_MASK) == base_kva)
644 *p = (*p)->next; 644 *p = (*p)->next;
645 else 645 else
646 p = &(*p)->next; 646 p = &(*p)->next;
647 }
647 648
648 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 649 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
649} 650}
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
939 940
940 /* Allocate a DMA-mapped header buffer. */ 941 /* Allocate a DMA-mapped header buffer. */
941 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 942 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
942 if (tx_queue->tso_headers_free == NULL) 943 if (tx_queue->tso_headers_free == NULL) {
943 if (efx_tsoh_block_alloc(tx_queue)) 944 if (efx_tsoh_block_alloc(tx_queue))
944 return -1; 945 return -1;
946 }
945 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
946 tsoh = tx_queue->tso_headers_free; 948 tsoh = tx_queue->tso_headers_free;
947 tx_queue->tso_headers_free = tsoh->next; 949 tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1106{ 1108{
1107 unsigned i; 1109 unsigned i;
1108 1110
1109 if (tx_queue->buffer) 1111 if (tx_queue->buffer) {
1110 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1112 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
1111 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1113 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1114 }
1112 1115
1113 while (tx_queue->tso_headers_free != NULL) 1116 while (tx_queue->tso_headers_free != NULL)
1114 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1117 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index dca62f190198..35ab19c27f8d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,7 @@
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
20 20
21/* XAUI resets if link not detected */ 21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 3b9f9ddbc372..f3684ad28887 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
85 int rc; 85 int rc;
86 86
87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
88 efx->phy_data = (void *) phy_data; 88 if (!phy_data)
89 return -ENOMEM;
90 efx->phy_data = phy_data;
89 91
90 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 92 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
91 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4b0f03358777..535f6cc96c8d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1159} 1159}
1160 1160
1161#ifdef SKY2_VLAN_TAG_USED 1161#ifdef SKY2_VLAN_TAG_USED
1162static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1162static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
1163{ 1163{
1164 struct sky2_port *sky2 = netdev_priv(dev); 1164 if (onoff) {
1165 struct sky2_hw *hw = sky2->hw;
1166 u16 port = sky2->port;
1167
1168 netif_tx_lock_bh(dev);
1169 napi_disable(&hw->napi);
1170
1171 sky2->vlgrp = grp;
1172 if (grp) {
1173 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1165 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1174 RX_VLAN_STRIP_ON); 1166 RX_VLAN_STRIP_ON);
1175 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1167 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
@@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
1180 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1172 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1181 TX_VLAN_TAG_OFF); 1173 TX_VLAN_TAG_OFF);
1182 } 1174 }
1175}
1176
1177static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1178{
1179 struct sky2_port *sky2 = netdev_priv(dev);
1180 struct sky2_hw *hw = sky2->hw;
1181 u16 port = sky2->port;
1182
1183 netif_tx_lock_bh(dev);
1184 napi_disable(&hw->napi);
1185
1186 sky2->vlgrp = grp;
1187 sky2_set_vlan_mode(hw, port, grp != NULL);
1183 1188
1184 sky2_read32(hw, B0_Y2_SP_LISR); 1189 sky2_read32(hw, B0_Y2_SP_LISR);
1185 napi_enable(&hw->napi); 1190 napi_enable(&hw->napi);
@@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev)
1418 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1423 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1419 TX_RING_SIZE - 1); 1424 TX_RING_SIZE - 1);
1420 1425
1426#ifdef SKY2_VLAN_TAG_USED
1427 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1428#endif
1429
1421 err = sky2_rx_start(sky2); 1430 err = sky2_rx_start(sky2);
1422 if (err) 1431 if (err)
1423 goto err_out; 1432 goto err_out;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ae11c6724766..d9f248f23b97 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,8 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34#include <linux/mii.h> 34#include <linux/mii.h>
35#include <linux/phy.h>
36#include <linux/brcmphy.h>
35#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
36#include <linux/ip.h> 38#include <linux/ip.h>
37#include <linux/tcp.h> 39#include <linux/tcp.h>
@@ -64,8 +66,8 @@
64 66
65#define DRV_MODULE_NAME "tg3" 67#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 68#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.92" 69#define DRV_MODULE_VERSION "3.93"
68#define DRV_MODULE_RELDATE "May 2, 2008" 70#define DRV_MODULE_RELDATE "May 22, 2008"
69 71
70#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -203,6 +205,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 209 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 210 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -804,6 +807,569 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
804 return ret; 807 return ret;
805} 808}
806 809
810static int tg3_bmcr_reset(struct tg3 *tp)
811{
812 u32 phy_control;
813 int limit, err;
814
815 /* OK, reset it, and poll the BMCR_RESET bit until it
816 * clears or we time out.
817 */
818 phy_control = BMCR_RESET;
819 err = tg3_writephy(tp, MII_BMCR, phy_control);
820 if (err != 0)
821 return -EBUSY;
822
823 limit = 5000;
824 while (limit--) {
825 err = tg3_readphy(tp, MII_BMCR, &phy_control);
826 if (err != 0)
827 return -EBUSY;
828
829 if ((phy_control & BMCR_RESET) == 0) {
830 udelay(40);
831 break;
832 }
833 udelay(10);
834 }
835 if (limit <= 0)
836 return -EBUSY;
837
838 return 0;
839}
840
841static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
842{
843 struct tg3 *tp = (struct tg3 *)bp->priv;
844 u32 val;
845
846 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
847 return -EAGAIN;
848
849 if (tg3_readphy(tp, reg, &val))
850 return -EIO;
851
852 return val;
853}
854
855static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
856{
857 struct tg3 *tp = (struct tg3 *)bp->priv;
858
859 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860 return -EAGAIN;
861
862 if (tg3_writephy(tp, reg, val))
863 return -EIO;
864
865 return 0;
866}
867
868static int tg3_mdio_reset(struct mii_bus *bp)
869{
870 return 0;
871}
872
873static void tg3_mdio_config(struct tg3 *tp)
874{
875 u32 val;
876
877 if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
878 PHY_INTERFACE_MODE_RGMII)
879 return;
880
881 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
882 MAC_PHYCFG1_RGMII_SND_STAT_EN);
883 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
884 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
885 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
886 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
887 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
888 }
889 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
890
891 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
892 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
893 val |= MAC_PHYCFG2_INBAND_ENABLE;
894 tw32(MAC_PHYCFG2, val);
895
896 val = tr32(MAC_EXT_RGMII_MODE);
897 val &= ~(MAC_RGMII_MODE_RX_INT_B |
898 MAC_RGMII_MODE_RX_QUALITY |
899 MAC_RGMII_MODE_RX_ACTIVITY |
900 MAC_RGMII_MODE_RX_ENG_DET |
901 MAC_RGMII_MODE_TX_ENABLE |
902 MAC_RGMII_MODE_TX_LOWPWR |
903 MAC_RGMII_MODE_TX_RESET);
904 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
905 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
906 val |= MAC_RGMII_MODE_RX_INT_B |
907 MAC_RGMII_MODE_RX_QUALITY |
908 MAC_RGMII_MODE_RX_ACTIVITY |
909 MAC_RGMII_MODE_RX_ENG_DET;
910 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
911 val |= MAC_RGMII_MODE_TX_ENABLE |
912 MAC_RGMII_MODE_TX_LOWPWR |
913 MAC_RGMII_MODE_TX_RESET;
914 }
915 tw32(MAC_EXT_RGMII_MODE, val);
916}
917
918static void tg3_mdio_start(struct tg3 *tp)
919{
920 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
921 mutex_lock(&tp->mdio_bus.mdio_lock);
922 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
923 mutex_unlock(&tp->mdio_bus.mdio_lock);
924 }
925
926 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
927 tw32_f(MAC_MI_MODE, tp->mi_mode);
928 udelay(80);
929
930 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
931 tg3_mdio_config(tp);
932}
933
934static void tg3_mdio_stop(struct tg3 *tp)
935{
936 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
937 mutex_lock(&tp->mdio_bus.mdio_lock);
938 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
939 mutex_unlock(&tp->mdio_bus.mdio_lock);
940 }
941}
942
943static int tg3_mdio_init(struct tg3 *tp)
944{
945 int i;
946 u32 reg;
947 struct phy_device *phydev;
948 struct mii_bus *mdio_bus = &tp->mdio_bus;
949
950 tg3_mdio_start(tp);
951
952 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
953 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
954 return 0;
955
956 memset(mdio_bus, 0, sizeof(*mdio_bus));
957
958 mdio_bus->name = "tg3 mdio bus";
959 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
960 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
961 mdio_bus->priv = tp;
962 mdio_bus->dev = &tp->pdev->dev;
963 mdio_bus->read = &tg3_mdio_read;
964 mdio_bus->write = &tg3_mdio_write;
965 mdio_bus->reset = &tg3_mdio_reset;
966 mdio_bus->phy_mask = ~(1 << PHY_ADDR);
967 mdio_bus->irq = &tp->mdio_irq[0];
968
969 for (i = 0; i < PHY_MAX_ADDR; i++)
970 mdio_bus->irq[i] = PHY_POLL;
971
972 /* The bus registration will look for all the PHYs on the mdio bus.
973 * Unfortunately, it does not ensure the PHY is powered up before
974 * accessing the PHY ID registers. A chip reset is the
975 * quickest way to bring the device back to an operational state..
976 */
977 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
978 tg3_bmcr_reset(tp);
979
980 i = mdiobus_register(mdio_bus);
981 if (i) {
982 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
983 tp->dev->name, i);
984 return i;
985 }
986
987 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
988
989 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
990
991 switch (phydev->phy_id) {
992 case TG3_PHY_ID_BCM50610:
993 phydev->interface = PHY_INTERFACE_MODE_RGMII;
994 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
995 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
996 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
997 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
998 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
999 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1000 break;
1001 case TG3_PHY_ID_BCMAC131:
1002 phydev->interface = PHY_INTERFACE_MODE_MII;
1003 break;
1004 }
1005
1006 tg3_mdio_config(tp);
1007
1008 return 0;
1009}
1010
1011static void tg3_mdio_fini(struct tg3 *tp)
1012{
1013 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1014 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1015 mdiobus_unregister(&tp->mdio_bus);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 }
1018}
1019
1020/* tp->lock is held. */
1021static void tg3_wait_for_event_ack(struct tg3 *tp)
1022{
1023 int i;
1024
1025 /* Wait for up to 2.5 milliseconds */
1026 for (i = 0; i < 250000; i++) {
1027 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1028 break;
1029 udelay(10);
1030 }
1031}
1032
1033/* tp->lock is held. */
1034static void tg3_ump_link_report(struct tg3 *tp)
1035{
1036 u32 reg;
1037 u32 val;
1038
1039 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1040 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1041 return;
1042
1043 tg3_wait_for_event_ack(tp);
1044
1045 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1046
1047 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1048
1049 val = 0;
1050 if (!tg3_readphy(tp, MII_BMCR, &reg))
1051 val = reg << 16;
1052 if (!tg3_readphy(tp, MII_BMSR, &reg))
1053 val |= (reg & 0xffff);
1054 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1055
1056 val = 0;
1057 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1058 val = reg << 16;
1059 if (!tg3_readphy(tp, MII_LPA, &reg))
1060 val |= (reg & 0xffff);
1061 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1062
1063 val = 0;
1064 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1065 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1066 val = reg << 16;
1067 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1068 val |= (reg & 0xffff);
1069 }
1070 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1071
1072 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1073 val = reg << 16;
1074 else
1075 val = 0;
1076 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1077
1078 val = tr32(GRC_RX_CPU_EVENT);
1079 val |= GRC_RX_CPU_DRIVER_EVENT;
1080 tw32_f(GRC_RX_CPU_EVENT, val);
1081}
1082
1083static void tg3_link_report(struct tg3 *tp)
1084{
1085 if (!netif_carrier_ok(tp->dev)) {
1086 if (netif_msg_link(tp))
1087 printk(KERN_INFO PFX "%s: Link is down.\n",
1088 tp->dev->name);
1089 tg3_ump_link_report(tp);
1090 } else if (netif_msg_link(tp)) {
1091 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1092 tp->dev->name,
1093 (tp->link_config.active_speed == SPEED_1000 ?
1094 1000 :
1095 (tp->link_config.active_speed == SPEED_100 ?
1096 100 : 10)),
1097 (tp->link_config.active_duplex == DUPLEX_FULL ?
1098 "full" : "half"));
1099
1100 printk(KERN_INFO PFX
1101 "%s: Flow control is %s for TX and %s for RX.\n",
1102 tp->dev->name,
1103 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1104 "on" : "off",
1105 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1106 "on" : "off");
1107 tg3_ump_link_report(tp);
1108 }
1109}
1110
1111static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1112{
1113 u16 miireg;
1114
1115 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1116 miireg = ADVERTISE_PAUSE_CAP;
1117 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1118 miireg = ADVERTISE_PAUSE_ASYM;
1119 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1120 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1121 else
1122 miireg = 0;
1123
1124 return miireg;
1125}
1126
1127static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1128{
1129 u16 miireg;
1130
1131 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1132 miireg = ADVERTISE_1000XPAUSE;
1133 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1134 miireg = ADVERTISE_1000XPSE_ASYM;
1135 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1136 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1137 else
1138 miireg = 0;
1139
1140 return miireg;
1141}
1142
1143static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1144{
1145 u8 cap = 0;
1146
1147 if (lcladv & ADVERTISE_PAUSE_CAP) {
1148 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1149 if (rmtadv & LPA_PAUSE_CAP)
1150 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1151 else if (rmtadv & LPA_PAUSE_ASYM)
1152 cap = TG3_FLOW_CTRL_RX;
1153 } else {
1154 if (rmtadv & LPA_PAUSE_CAP)
1155 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1156 }
1157 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1158 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1159 cap = TG3_FLOW_CTRL_TX;
1160 }
1161
1162 return cap;
1163}
1164
1165static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1166{
1167 u8 cap = 0;
1168
1169 if (lcladv & ADVERTISE_1000XPAUSE) {
1170 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1171 if (rmtadv & LPA_1000XPAUSE)
1172 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1173 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1174 cap = TG3_FLOW_CTRL_RX;
1175 } else {
1176 if (rmtadv & LPA_1000XPAUSE)
1177 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1178 }
1179 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1180 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1181 cap = TG3_FLOW_CTRL_TX;
1182 }
1183
1184 return cap;
1185}
1186
1187static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1188{
1189 u8 autoneg;
1190 u8 flowctrl = 0;
1191 u32 old_rx_mode = tp->rx_mode;
1192 u32 old_tx_mode = tp->tx_mode;
1193
1194 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1195 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1196 else
1197 autoneg = tp->link_config.autoneg;
1198
1199 if (autoneg == AUTONEG_ENABLE &&
1200 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1201 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1202 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1203 else
1204 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1205 } else
1206 flowctrl = tp->link_config.flowctrl;
1207
1208 tp->link_config.active_flowctrl = flowctrl;
1209
1210 if (flowctrl & TG3_FLOW_CTRL_RX)
1211 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1212 else
1213 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1214
1215 if (old_rx_mode != tp->rx_mode)
1216 tw32_f(MAC_RX_MODE, tp->rx_mode);
1217
1218 if (flowctrl & TG3_FLOW_CTRL_TX)
1219 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1220 else
1221 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1222
1223 if (old_tx_mode != tp->tx_mode)
1224 tw32_f(MAC_TX_MODE, tp->tx_mode);
1225}
1226
/* phylib link-change callback: reprogram the MAC to match the PHY's
 * current speed/duplex/pause state, then report the link if anything
 * user-visible changed.  Registered via phy_connect() in tg3_phy_init().
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* 10/100 uses the MII port mode; gigabit uses GMII. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: rebuild our pause advertisement and
			 * the partner's from the phylib pause flags so flow
			 * control can be (re)resolved.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		/* No link: leave the MAC in GMII mode. */
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when it actually changes. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* Gigabit half duplex needs a larger slot time (0xff) than the
	 * standard 32 used for all other speed/duplex combinations.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* A report is due on any link up/down transition or any change in
	 * speed, duplex, or resolved flow control.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* Report outside the lock; tg3_link_report takes tp->lock paths
	 * of its own via the ump report.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
1298
1299static int tg3_phy_init(struct tg3 *tp)
1300{
1301 struct phy_device *phydev;
1302
1303 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1304 return 0;
1305
1306 /* Bring the PHY back to a known state. */
1307 tg3_bmcr_reset(tp);
1308
1309 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1310
1311 /* Attach the MAC to the PHY. */
1312 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1313 phydev->dev_flags, phydev->interface);
1314 if (IS_ERR(phydev)) {
1315 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1316 return PTR_ERR(phydev);
1317 }
1318
1319 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1320
1321 /* Mask with MAC supported features. */
1322 phydev->supported &= (PHY_GBIT_FEATURES |
1323 SUPPORTED_Pause |
1324 SUPPORTED_Asym_Pause);
1325
1326 phydev->advertising = phydev->supported;
1327
1328 printk(KERN_INFO
1329 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1330 tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1331
1332 return 0;
1333}
1334
1335static void tg3_phy_start(struct tg3 *tp)
1336{
1337 struct phy_device *phydev;
1338
1339 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1340 return;
1341
1342 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1343
1344 if (tp->link_config.phy_is_low_power) {
1345 tp->link_config.phy_is_low_power = 0;
1346 phydev->speed = tp->link_config.orig_speed;
1347 phydev->duplex = tp->link_config.orig_duplex;
1348 phydev->autoneg = tp->link_config.orig_autoneg;
1349 phydev->advertising = tp->link_config.orig_advertising;
1350 }
1351
1352 phy_start(phydev);
1353
1354 phy_start_aneg(phydev);
1355}
1356
1357static void tg3_phy_stop(struct tg3 *tp)
1358{
1359 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1360 return;
1361
1362 phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1363}
1364
1365static void tg3_phy_fini(struct tg3 *tp)
1366{
1367 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1368 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1369 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1370 }
1371}
1372
807static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) 1373static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
808{ 1374{
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); 1375 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
@@ -861,37 +1427,6 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
861 (val | (1 << 15) | (1 << 4))); 1427 (val | (1 << 15) | (1 << 4)));
862} 1428}
863 1429
864static int tg3_bmcr_reset(struct tg3 *tp)
865{
866 u32 phy_control;
867 int limit, err;
868
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
871 */
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 limit = 5000;
878 while (limit--) {
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880 if (err != 0)
881 return -EBUSY;
882
883 if ((phy_control & BMCR_RESET) == 0) {
884 udelay(40);
885 break;
886 }
887 udelay(10);
888 }
889 if (limit <= 0)
890 return -EBUSY;
891
892 return 0;
893}
894
895static void tg3_phy_apply_otp(struct tg3 *tp) 1430static void tg3_phy_apply_otp(struct tg3 *tp)
896{ 1431{
897 u32 otp, phy; 1432 u32 otp, phy;
@@ -1115,8 +1650,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1115 return err; 1650 return err;
1116} 1651}
1117 1652
1118static void tg3_link_report(struct tg3 *);
1119
1120/* This will reset the tigon3 PHY if there is no valid 1653/* This will reset the tigon3 PHY if there is no valid
1121 * link unless the FORCE argument is non-zero. 1654 * link unless the FORCE argument is non-zero.
1122 */ 1655 */
@@ -1406,7 +1939,7 @@ static void tg3_power_down_phy(struct tg3 *tp)
1406 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 1939 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1407 udelay(40); 1940 udelay(40);
1408 return; 1941 return;
1409 } else { 1942 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1410 tg3_writephy(tp, MII_TG3_EXT_CTRL, 1943 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1411 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 1944 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1412 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); 1945 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
@@ -1488,18 +2021,55 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1488 tw32(TG3PCI_MISC_HOST_CTRL, 2021 tw32(TG3PCI_MISC_HOST_CTRL,
1489 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2022 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1490 2023
1491 if (tp->link_config.phy_is_low_power == 0) { 2024 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1492 tp->link_config.phy_is_low_power = 1; 2025 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
1493 tp->link_config.orig_speed = tp->link_config.speed; 2026 !tp->link_config.phy_is_low_power) {
1494 tp->link_config.orig_duplex = tp->link_config.duplex; 2027 struct phy_device *phydev;
1495 tp->link_config.orig_autoneg = tp->link_config.autoneg; 2028 u32 advertising;
1496 } 2029
2030 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2031
2032 tp->link_config.phy_is_low_power = 1;
2033
2034 tp->link_config.orig_speed = phydev->speed;
2035 tp->link_config.orig_duplex = phydev->duplex;
2036 tp->link_config.orig_autoneg = phydev->autoneg;
2037 tp->link_config.orig_advertising = phydev->advertising;
2038
2039 advertising = ADVERTISED_TP |
2040 ADVERTISED_Pause |
2041 ADVERTISED_Autoneg |
2042 ADVERTISED_10baseT_Half;
2043
2044 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2045 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2046 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2047 advertising |=
2048 ADVERTISED_100baseT_Half |
2049 ADVERTISED_100baseT_Full |
2050 ADVERTISED_10baseT_Full;
2051 else
2052 advertising |= ADVERTISED_10baseT_Full;
2053 }
1497 2054
1498 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 2055 phydev->advertising = advertising;
1499 tp->link_config.speed = SPEED_10; 2056
1500 tp->link_config.duplex = DUPLEX_HALF; 2057 phy_start_aneg(phydev);
1501 tp->link_config.autoneg = AUTONEG_ENABLE; 2058 }
1502 tg3_setup_phy(tp, 0); 2059 } else {
2060 if (tp->link_config.phy_is_low_power == 0) {
2061 tp->link_config.phy_is_low_power = 1;
2062 tp->link_config.orig_speed = tp->link_config.speed;
2063 tp->link_config.orig_duplex = tp->link_config.duplex;
2064 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2065 }
2066
2067 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2068 tp->link_config.speed = SPEED_10;
2069 tp->link_config.duplex = DUPLEX_HALF;
2070 tp->link_config.autoneg = AUTONEG_ENABLE;
2071 tg3_setup_phy(tp, 0);
2072 }
1503 } 2073 }
1504 2074
1505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -1530,8 +2100,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1530 u32 mac_mode; 2100 u32 mac_mode;
1531 2101
1532 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 2102 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2103 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1534 udelay(40); 2104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2105 udelay(40);
2106 }
1535 2107
1536 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 2108 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537 mac_mode = MAC_MODE_PORT_MODE_GMII; 2109 mac_mode = MAC_MODE_PORT_MODE_GMII;
@@ -1656,212 +2228,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1656 return 0; 2228 return 0;
1657} 2229}
1658 2230
1659/* tp->lock is held. */
1660static void tg3_wait_for_event_ack(struct tg3 *tp)
1661{
1662 int i;
1663
1664 /* Wait for up to 2.5 milliseconds */
1665 for (i = 0; i < 250000; i++) {
1666 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1667 break;
1668 udelay(10);
1669 }
1670}
1671
1672/* tp->lock is held. */
1673static void tg3_ump_link_report(struct tg3 *tp)
1674{
1675 u32 reg;
1676 u32 val;
1677
1678 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1679 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1680 return;
1681
1682 tg3_wait_for_event_ack(tp);
1683
1684 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1685
1686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1687
1688 val = 0;
1689 if (!tg3_readphy(tp, MII_BMCR, &reg))
1690 val = reg << 16;
1691 if (!tg3_readphy(tp, MII_BMSR, &reg))
1692 val |= (reg & 0xffff);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1694
1695 val = 0;
1696 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1697 val = reg << 16;
1698 if (!tg3_readphy(tp, MII_LPA, &reg))
1699 val |= (reg & 0xffff);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1701
1702 val = 0;
1703 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1704 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1705 val = reg << 16;
1706 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1707 val |= (reg & 0xffff);
1708 }
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1710
1711 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1712 val = reg << 16;
1713 else
1714 val = 0;
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1716
1717 val = tr32(GRC_RX_CPU_EVENT);
1718 val |= GRC_RX_CPU_DRIVER_EVENT;
1719 tw32_f(GRC_RX_CPU_EVENT, val);
1720}
1721
1722static void tg3_link_report(struct tg3 *tp)
1723{
1724 if (!netif_carrier_ok(tp->dev)) {
1725 if (netif_msg_link(tp))
1726 printk(KERN_INFO PFX "%s: Link is down.\n",
1727 tp->dev->name);
1728 tg3_ump_link_report(tp);
1729 } else if (netif_msg_link(tp)) {
1730 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1731 tp->dev->name,
1732 (tp->link_config.active_speed == SPEED_1000 ?
1733 1000 :
1734 (tp->link_config.active_speed == SPEED_100 ?
1735 100 : 10)),
1736 (tp->link_config.active_duplex == DUPLEX_FULL ?
1737 "full" : "half"));
1738
1739 printk(KERN_INFO PFX
1740 "%s: Flow control is %s for TX and %s for RX.\n",
1741 tp->dev->name,
1742 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1743 "on" : "off",
1744 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1745 "on" : "off");
1746 tg3_ump_link_report(tp);
1747 }
1748}
1749
1750static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1751{
1752 u16 miireg;
1753
1754 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1755 miireg = ADVERTISE_PAUSE_CAP;
1756 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1757 miireg = ADVERTISE_PAUSE_ASYM;
1758 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1759 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1760 else
1761 miireg = 0;
1762
1763 return miireg;
1764}
1765
1766static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1767{
1768 u16 miireg;
1769
1770 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1771 miireg = ADVERTISE_1000XPAUSE;
1772 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1773 miireg = ADVERTISE_1000XPSE_ASYM;
1774 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1775 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1776 else
1777 miireg = 0;
1778
1779 return miireg;
1780}
1781
1782static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1783{
1784 u8 cap = 0;
1785
1786 if (lcladv & ADVERTISE_PAUSE_CAP) {
1787 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1788 if (rmtadv & LPA_PAUSE_CAP)
1789 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1790 else if (rmtadv & LPA_PAUSE_ASYM)
1791 cap = TG3_FLOW_CTRL_RX;
1792 } else {
1793 if (rmtadv & LPA_PAUSE_CAP)
1794 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1795 }
1796 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1797 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1798 cap = TG3_FLOW_CTRL_TX;
1799 }
1800
1801 return cap;
1802}
1803
1804static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1805{
1806 u8 cap = 0;
1807
1808 if (lcladv & ADVERTISE_1000XPAUSE) {
1809 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1810 if (rmtadv & LPA_1000XPAUSE)
1811 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1812 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1813 cap = TG3_FLOW_CTRL_RX;
1814 } else {
1815 if (rmtadv & LPA_1000XPAUSE)
1816 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1817 }
1818 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1819 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1820 cap = TG3_FLOW_CTRL_TX;
1821 }
1822
1823 return cap;
1824}
1825
1826static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1827{
1828 u8 new_tg3_flags = 0;
1829 u32 old_rx_mode = tp->rx_mode;
1830 u32 old_tx_mode = tp->tx_mode;
1831
1832 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1833 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1834 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1835 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1836 remote_adv);
1837 else
1838 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1839 remote_adv);
1840 } else {
1841 new_tg3_flags = tp->link_config.flowctrl;
1842 }
1843
1844 tp->link_config.active_flowctrl = new_tg3_flags;
1845
1846 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1847 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1848 else
1849 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1850
1851 if (old_rx_mode != tp->rx_mode) {
1852 tw32_f(MAC_RX_MODE, tp->rx_mode);
1853 }
1854
1855 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1856 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1857 else
1858 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1859
1860 if (old_tx_mode != tp->tx_mode) {
1861 tw32_f(MAC_TX_MODE, tp->tx_mode);
1862 }
1863}
1864
1865static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 2231static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1866{ 2232{
1867 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 2233 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
@@ -3828,7 +4194,15 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3828 sblk->status = SD_STATUS_UPDATED | 4194 sblk->status = SD_STATUS_UPDATED |
3829 (sblk->status & ~SD_STATUS_LINK_CHG); 4195 (sblk->status & ~SD_STATUS_LINK_CHG);
3830 spin_lock(&tp->lock); 4196 spin_lock(&tp->lock);
3831 tg3_setup_phy(tp, 0); 4197 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4198 tw32_f(MAC_STATUS,
4199 (MAC_STATUS_SYNC_CHANGED |
4200 MAC_STATUS_CFG_CHANGED |
4201 MAC_STATUS_MI_COMPLETION |
4202 MAC_STATUS_LNKSTATE_CHANGED));
4203 udelay(40);
4204 } else
4205 tg3_setup_phy(tp, 0);
3832 spin_unlock(&tp->lock); 4206 spin_unlock(&tp->lock);
3833 } 4207 }
3834 } 4208 }
@@ -4116,6 +4490,7 @@ static void tg3_poll_controller(struct net_device *dev)
4116static void tg3_reset_task(struct work_struct *work) 4490static void tg3_reset_task(struct work_struct *work)
4117{ 4491{
4118 struct tg3 *tp = container_of(work, struct tg3, reset_task); 4492 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4493 int err;
4119 unsigned int restart_timer; 4494 unsigned int restart_timer;
4120 4495
4121 tg3_full_lock(tp, 0); 4496 tg3_full_lock(tp, 0);
@@ -4127,6 +4502,8 @@ static void tg3_reset_task(struct work_struct *work)
4127 4502
4128 tg3_full_unlock(tp); 4503 tg3_full_unlock(tp);
4129 4504
4505 tg3_phy_stop(tp);
4506
4130 tg3_netif_stop(tp); 4507 tg3_netif_stop(tp);
4131 4508
4132 tg3_full_lock(tp, 1); 4509 tg3_full_lock(tp, 1);
@@ -4142,7 +4519,8 @@ static void tg3_reset_task(struct work_struct *work)
4142 } 4519 }
4143 4520
4144 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 4521 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4145 if (tg3_init_hw(tp, 1)) 4522 err = tg3_init_hw(tp, 1);
4523 if (err)
4146 goto out; 4524 goto out;
4147 4525
4148 tg3_netif_start(tp); 4526 tg3_netif_start(tp);
@@ -4152,6 +4530,9 @@ static void tg3_reset_task(struct work_struct *work)
4152 4530
4153out: 4531out:
4154 tg3_full_unlock(tp); 4532 tg3_full_unlock(tp);
4533
4534 if (!err)
4535 tg3_phy_start(tp);
4155} 4536}
4156 4537
4157static void tg3_dump_short_state(struct tg3 *tp) 4538static void tg3_dump_short_state(struct tg3 *tp)
@@ -4655,6 +5036,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4655 return 0; 5036 return 0;
4656 } 5037 }
4657 5038
5039 tg3_phy_stop(tp);
5040
4658 tg3_netif_stop(tp); 5041 tg3_netif_stop(tp);
4659 5042
4660 tg3_full_lock(tp, 1); 5043 tg3_full_lock(tp, 1);
@@ -4670,6 +5053,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4670 5053
4671 tg3_full_unlock(tp); 5054 tg3_full_unlock(tp);
4672 5055
5056 if (!err)
5057 tg3_phy_start(tp);
5058
4673 return err; 5059 return err;
4674} 5060}
4675 5061
@@ -5379,6 +5765,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5379 5765
5380 tg3_nvram_lock(tp); 5766 tg3_nvram_lock(tp);
5381 5767
5768 tg3_mdio_stop(tp);
5769
5382 /* No matching tg3_nvram_unlock() after this because 5770 /* No matching tg3_nvram_unlock() after this because
5383 * chip reset below will undo the nvram lock. 5771 * chip reset below will undo the nvram lock.
5384 */ 5772 */
@@ -5394,7 +5782,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 5782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 5783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 5784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 5785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5398 tw32(GRC_FASTBOOT_PC, 0); 5787 tw32(GRC_FASTBOOT_PC, 0);
5399 5788
5400 /* 5789 /*
@@ -5530,6 +5919,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5530 tw32_f(MAC_MODE, 0); 5919 tw32_f(MAC_MODE, 0);
5531 udelay(40); 5920 udelay(40);
5532 5921
5922 tg3_mdio_start(tp);
5923
5533 err = tg3_poll_fw(tp); 5924 err = tg3_poll_fw(tp);
5534 if (err) 5925 if (err)
5535 return err; 5926 return err;
@@ -6609,7 +7000,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6609 tg3_abort_hw(tp, 1); 7000 tg3_abort_hw(tp, 1);
6610 } 7001 }
6611 7002
6612 if (reset_phy) 7003 if (reset_phy &&
7004 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6613 tg3_phy_reset(tp); 7005 tg3_phy_reset(tp);
6614 7006
6615 err = tg3_chip_reset(tp); 7007 err = tg3_chip_reset(tp);
@@ -6685,7 +7077,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6685 return err; 7077 return err;
6686 7078
6687 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7079 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6688 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 7080 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7081 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6689 /* This value is determined during the probe time DMA 7082 /* This value is determined during the probe time DMA
6690 * engine test, tg3_test_dma. 7083 * engine test, tg3_test_dma.
6691 */ 7084 */
@@ -6924,7 +7317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6924 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7317 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6925 RDMAC_MODE_LNGREAD_ENAB); 7318 RDMAC_MODE_LNGREAD_ENAB);
6926 7319
6927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) 7320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
6928 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 7322 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6929 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 7323 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6930 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 7324 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
@@ -7092,8 +7486,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7092 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || 7486 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7093 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) || 7487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7094 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) || 7488 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7095 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)) 7489 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7096 val |= (1 << 29); 7490 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7491 val |= WDMAC_MODE_STATUS_TAG_FIX;
7097 7492
7098 tw32_f(WDMAC_MODE, val); 7493 tw32_f(WDMAC_MODE, val);
7099 udelay(40); 7494 udelay(40);
@@ -7154,23 +7549,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7154 7549
7155 tp->rx_mode = RX_MODE_ENABLE; 7550 tp->rx_mode = RX_MODE_ENABLE;
7156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 7551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 7552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7158 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 7555 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7159 7556
7160 tw32_f(MAC_RX_MODE, tp->rx_mode); 7557 tw32_f(MAC_RX_MODE, tp->rx_mode);
7161 udelay(10); 7558 udelay(10);
7162 7559
7163 if (tp->link_config.phy_is_low_power) {
7164 tp->link_config.phy_is_low_power = 0;
7165 tp->link_config.speed = tp->link_config.orig_speed;
7166 tp->link_config.duplex = tp->link_config.orig_duplex;
7167 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7168 }
7169
7170 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7171 tw32_f(MAC_MI_MODE, tp->mi_mode);
7172 udelay(80);
7173
7174 tw32(MAC_LED_CTRL, tp->led_ctrl); 7560 tw32(MAC_LED_CTRL, tp->led_ctrl);
7175 7561
7176 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 7562 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
@@ -7217,19 +7603,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7217 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 7603 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7218 } 7604 }
7219 7605
7220 err = tg3_setup_phy(tp, 0); 7606 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7221 if (err) 7607 if (tp->link_config.phy_is_low_power) {
7222 return err; 7608 tp->link_config.phy_is_low_power = 0;
7609 tp->link_config.speed = tp->link_config.orig_speed;
7610 tp->link_config.duplex = tp->link_config.orig_duplex;
7611 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7612 }
7223 7613
7224 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 7614 err = tg3_setup_phy(tp, 0);
7225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { 7615 if (err)
7226 u32 tmp; 7616 return err;
7227 7617
7228 /* Clear CRC stats. */ 7618 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7229 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 7619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7230 tg3_writephy(tp, MII_TG3_TEST1, 7620 u32 tmp;
7231 tmp | MII_TG3_TEST1_CRC_EN); 7621
7232 tg3_readphy(tp, 0x14, &tmp); 7622 /* Clear CRC stats. */
7623 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7624 tg3_writephy(tp, MII_TG3_TEST1,
7625 tmp | MII_TG3_TEST1_CRC_EN);
7626 tg3_readphy(tp, 0x14, &tmp);
7627 }
7233 } 7628 }
7234 } 7629 }
7235 7630
@@ -7744,6 +8139,8 @@ static int tg3_open(struct net_device *dev)
7744 } 8139 }
7745 } 8140 }
7746 8141
8142 tg3_phy_start(tp);
8143
7747 tg3_full_lock(tp, 0); 8144 tg3_full_lock(tp, 0);
7748 8145
7749 add_timer(&tp->timer); 8146 add_timer(&tp->timer);
@@ -8545,7 +8942,13 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
8545 8942
8546static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 8943static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8547{ 8944{
8548 struct tg3 *tp = netdev_priv(dev); 8945 struct tg3 *tp = netdev_priv(dev);
8946
8947 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8948 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8949 return -EAGAIN;
8950 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8951 }
8549 8952
8550 cmd->supported = (SUPPORTED_Autoneg); 8953 cmd->supported = (SUPPORTED_Autoneg);
8551 8954
@@ -8582,6 +8985,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8582{ 8985{
8583 struct tg3 *tp = netdev_priv(dev); 8986 struct tg3 *tp = netdev_priv(dev);
8584 8987
8988 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8989 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8990 return -EAGAIN;
8991 return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8992 }
8993
8585 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 8994 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8586 /* These are the only valid advertisement bits allowed. */ 8995 /* These are the only valid advertisement bits allowed. */
8587 if (cmd->autoneg == AUTONEG_ENABLE && 8996 if (cmd->autoneg == AUTONEG_ENABLE &&
@@ -8614,7 +9023,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8614 tp->link_config.advertising = 0; 9023 tp->link_config.advertising = 0;
8615 tp->link_config.speed = cmd->speed; 9024 tp->link_config.speed = cmd->speed;
8616 tp->link_config.duplex = cmd->duplex; 9025 tp->link_config.duplex = cmd->duplex;
8617 } 9026 }
8618 9027
8619 tp->link_config.orig_speed = tp->link_config.speed; 9028 tp->link_config.orig_speed = tp->link_config.speed;
8620 tp->link_config.orig_duplex = tp->link_config.duplex; 9029 tp->link_config.orig_duplex = tp->link_config.duplex;
@@ -8697,7 +9106,10 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8697 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { 9106 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8698 if (value) { 9107 if (value) {
8699 dev->features |= NETIF_F_TSO6; 9108 dev->features |= NETIF_F_TSO6;
8700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9110 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9111 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8701 dev->features |= NETIF_F_TSO_ECN; 9113 dev->features |= NETIF_F_TSO_ECN;
8702 } else 9114 } else
8703 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9115 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -8708,7 +9120,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8708static int tg3_nway_reset(struct net_device *dev) 9120static int tg3_nway_reset(struct net_device *dev)
8709{ 9121{
8710 struct tg3 *tp = netdev_priv(dev); 9122 struct tg3 *tp = netdev_priv(dev);
8711 u32 bmcr;
8712 int r; 9123 int r;
8713 9124
8714 if (!netif_running(dev)) 9125 if (!netif_running(dev))
@@ -8717,17 +9128,25 @@ static int tg3_nway_reset(struct net_device *dev)
8717 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 9128 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8718 return -EINVAL; 9129 return -EINVAL;
8719 9130
8720 spin_lock_bh(&tp->lock); 9131 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8721 r = -EINVAL; 9132 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8722 tg3_readphy(tp, MII_BMCR, &bmcr); 9133 return -EAGAIN;
8723 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 9134 r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
8724 ((bmcr & BMCR_ANENABLE) || 9135 } else {
8725 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { 9136 u32 bmcr;
8726 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 9137
8727 BMCR_ANENABLE); 9138 spin_lock_bh(&tp->lock);
8728 r = 0; 9139 r = -EINVAL;
9140 tg3_readphy(tp, MII_BMCR, &bmcr);
9141 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9142 ((bmcr & BMCR_ANENABLE) ||
9143 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9144 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9145 BMCR_ANENABLE);
9146 r = 0;
9147 }
9148 spin_unlock_bh(&tp->lock);
8729 } 9149 }
8730 spin_unlock_bh(&tp->lock);
8731 9150
8732 return r; 9151 return r;
8733} 9152}
@@ -8769,6 +9188,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8769 return -EINVAL; 9188 return -EINVAL;
8770 9189
8771 if (netif_running(dev)) { 9190 if (netif_running(dev)) {
9191 tg3_phy_stop(tp);
8772 tg3_netif_stop(tp); 9192 tg3_netif_stop(tp);
8773 irq_sync = 1; 9193 irq_sync = 1;
8774 } 9194 }
@@ -8792,6 +9212,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8792 9212
8793 tg3_full_unlock(tp); 9213 tg3_full_unlock(tp);
8794 9214
9215 if (irq_sync && !err)
9216 tg3_phy_start(tp);
9217
8795 return err; 9218 return err;
8796} 9219}
8797 9220
@@ -8815,36 +9238,92 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8815static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 9238static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8816{ 9239{
8817 struct tg3 *tp = netdev_priv(dev); 9240 struct tg3 *tp = netdev_priv(dev);
8818 int irq_sync = 0, err = 0; 9241 int err = 0;
8819 9242
8820 if (netif_running(dev)) { 9243 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8821 tg3_netif_stop(tp); 9244 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8822 irq_sync = 1; 9245 return -EAGAIN;
8823 }
8824 9246
8825 tg3_full_lock(tp, irq_sync); 9247 if (epause->autoneg) {
9248 u32 newadv;
9249 struct phy_device *phydev;
8826 9250
8827 if (epause->autoneg) 9251 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
8828 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8829 else
8830 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8831 if (epause->rx_pause)
8832 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8833 else
8834 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8835 if (epause->tx_pause)
8836 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8837 else
8838 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8839 9252
8840 if (netif_running(dev)) { 9253 if (epause->rx_pause) {
8841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9254 if (epause->tx_pause)
8842 err = tg3_restart_hw(tp, 1); 9255 newadv = ADVERTISED_Pause;
8843 if (!err) 9256 else
8844 tg3_netif_start(tp); 9257 newadv = ADVERTISED_Pause |
8845 } 9258 ADVERTISED_Asym_Pause;
9259 } else if (epause->tx_pause) {
9260 newadv = ADVERTISED_Asym_Pause;
9261 } else
9262 newadv = 0;
9263
9264 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9265 u32 oldadv = phydev->advertising &
9266 (ADVERTISED_Pause |
9267 ADVERTISED_Asym_Pause);
9268 if (oldadv != newadv) {
9269 phydev->advertising &=
9270 ~(ADVERTISED_Pause |
9271 ADVERTISED_Asym_Pause);
9272 phydev->advertising |= newadv;
9273 err = phy_start_aneg(phydev);
9274 }
9275 } else {
9276 tp->link_config.advertising &=
9277 ~(ADVERTISED_Pause |
9278 ADVERTISED_Asym_Pause);
9279 tp->link_config.advertising |= newadv;
9280 }
9281 } else {
9282 if (epause->rx_pause)
9283 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9284 else
9285 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8846 9286
8847 tg3_full_unlock(tp); 9287 if (epause->tx_pause)
9288 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9289 else
9290 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9291
9292 if (netif_running(dev))
9293 tg3_setup_flow_control(tp, 0, 0);
9294 }
9295 } else {
9296 int irq_sync = 0;
9297
9298 if (netif_running(dev)) {
9299 tg3_netif_stop(tp);
9300 irq_sync = 1;
9301 }
9302
9303 tg3_full_lock(tp, irq_sync);
9304
9305 if (epause->autoneg)
9306 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9307 else
9308 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9309 if (epause->rx_pause)
9310 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9311 else
9312 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9313 if (epause->tx_pause)
9314 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9315 else
9316 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9317
9318 if (netif_running(dev)) {
9319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9320 err = tg3_restart_hw(tp, 1);
9321 if (!err)
9322 tg3_netif_start(tp);
9323 }
9324
9325 tg3_full_unlock(tp);
9326 }
8848 9327
8849 return err; 9328 return err;
8850} 9329}
@@ -8888,7 +9367,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 9367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 9368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 9369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8892 ethtool_op_set_tx_ipv6_csum(dev, data); 9372 ethtool_op_set_tx_ipv6_csum(dev, data);
8893 else 9373 else
8894 ethtool_op_set_tx_csum(dev, data); 9374 ethtool_op_set_tx_csum(dev, data);
@@ -9409,7 +9889,8 @@ static int tg3_test_memory(struct tg3 *tp)
9409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 9889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 9890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 9891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9412 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9413 mem_tbl = mem_tbl_5755; 9894 mem_tbl = mem_tbl_5755;
9414 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 9895 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9415 mem_tbl = mem_tbl_5906; 9896 mem_tbl = mem_tbl_5906;
@@ -9616,7 +10097,8 @@ static int tg3_test_loopback(struct tg3 *tp)
9616 return TG3_LOOPBACK_FAILED; 10097 return TG3_LOOPBACK_FAILED;
9617 10098
9618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 10100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9620 int i; 10102 int i;
9621 u32 status; 10103 u32 status;
9622 10104
@@ -9644,14 +10126,16 @@ static int tg3_test_loopback(struct tg3 *tp)
9644 err |= TG3_MAC_LOOPBACK_FAILED; 10126 err |= TG3_MAC_LOOPBACK_FAILED;
9645 10127
9646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 10129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9648 tw32(TG3_CPMU_CTRL, cpmuctrl); 10131 tw32(TG3_CPMU_CTRL, cpmuctrl);
9649 10132
9650 /* Release the mutex */ 10133 /* Release the mutex */
9651 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); 10134 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9652 } 10135 }
9653 10136
9654 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 10137 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10138 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9655 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 10139 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9656 err |= TG3_PHY_LOOPBACK_FAILED; 10140 err |= TG3_PHY_LOOPBACK_FAILED;
9657 } 10141 }
@@ -9678,9 +10162,10 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9678 data[1] = 1; 10162 data[1] = 1;
9679 } 10163 }
9680 if (etest->flags & ETH_TEST_FL_OFFLINE) { 10164 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9681 int err, irq_sync = 0; 10165 int err, err2 = 0, irq_sync = 0;
9682 10166
9683 if (netif_running(dev)) { 10167 if (netif_running(dev)) {
10168 tg3_phy_stop(tp);
9684 tg3_netif_stop(tp); 10169 tg3_netif_stop(tp);
9685 irq_sync = 1; 10170 irq_sync = 1;
9686 } 10171 }
@@ -9721,11 +10206,15 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9721 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9722 if (netif_running(dev)) { 10207 if (netif_running(dev)) {
9723 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 10208 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9724 if (!tg3_restart_hw(tp, 1)) 10209 err2 = tg3_restart_hw(tp, 1);
10210 if (!err2)
9725 tg3_netif_start(tp); 10211 tg3_netif_start(tp);
9726 } 10212 }
9727 10213
9728 tg3_full_unlock(tp); 10214 tg3_full_unlock(tp);
10215
10216 if (irq_sync && !err2)
10217 tg3_phy_start(tp);
9729 } 10218 }
9730 if (tp->link_config.phy_is_low_power) 10219 if (tp->link_config.phy_is_low_power)
9731 tg3_set_power_state(tp, PCI_D3hot); 10220 tg3_set_power_state(tp, PCI_D3hot);
@@ -9738,6 +10227,12 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9738 struct tg3 *tp = netdev_priv(dev); 10227 struct tg3 *tp = netdev_priv(dev);
9739 int err; 10228 int err;
9740 10229
10230 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10231 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10232 return -EAGAIN;
10233 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10234 }
10235
9741 switch(cmd) { 10236 switch(cmd) {
9742 case SIOCGMIIPHY: 10237 case SIOCGMIIPHY:
9743 data->phy_id = PHY_ADDR; 10238 data->phy_id = PHY_ADDR;
@@ -10280,7 +10775,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
10280 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 10775 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10281 tg3_get_5755_nvram_info(tp); 10776 tg3_get_5755_nvram_info(tp);
10282 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 10777 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) 10778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10284 tg3_get_5787_nvram_info(tp); 10780 tg3_get_5787_nvram_info(tp);
10285 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 10781 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10286 tg3_get_5761_nvram_info(tp); 10782 tg3_get_5761_nvram_info(tp);
@@ -10611,6 +11107,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10611 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && 11107 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10612 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) && 11108 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10613 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) && 11109 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11110 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
10614 (tp->nvram_jedecnum == JEDEC_ST) && 11111 (tp->nvram_jedecnum == JEDEC_ST) &&
10615 (nvram_cmd & NVRAM_CMD_FIRST)) { 11112 (nvram_cmd & NVRAM_CMD_FIRST)) {
10616 11113
@@ -10793,7 +11290,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10793 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 11290 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10794 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 11291 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10795 u32 nic_cfg, led_cfg; 11292 u32 nic_cfg, led_cfg;
10796 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id; 11293 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
10797 int eeprom_phy_serdes = 0; 11294 int eeprom_phy_serdes = 0;
10798 11295
10799 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 11296 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -10807,6 +11304,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10807 (ver > 0) && (ver < 0x100)) 11304 (ver > 0) && (ver < 0x100))
10808 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 11305 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10809 11306
11307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11308 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11309
10810 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 11310 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10811 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 11311 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10812 eeprom_phy_serdes = 1; 11312 eeprom_phy_serdes = 1;
@@ -10931,6 +11431,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10931 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) 11431 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10932 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 11432 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10933 } 11433 }
11434
11435 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11436 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11437 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11438 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11439 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11440 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
10934 } 11441 }
10935} 11442}
10936 11443
@@ -10989,6 +11496,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
10989 u32 hw_phy_id, hw_phy_id_masked; 11496 u32 hw_phy_id, hw_phy_id_masked;
10990 int err; 11497 int err;
10991 11498
11499 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11500 return tg3_phy_init(tp);
11501
10992 /* Reading the PHY ID register can conflict with ASF 11502 /* Reading the PHY ID register can conflict with ASF
10993 * firwmare access to the PHY hardware. 11503 * firwmare access to the PHY hardware.
10994 */ 11504 */
@@ -11511,6 +12021,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || 12025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11515 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12026 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11516 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 12027 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -11532,6 +12043,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11536 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 12048 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11537 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12049 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
@@ -11544,14 +12056,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11544 } 12056 }
11545 } 12057 }
11546 12058
11547 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 12059 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && 12060 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11550 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11551 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11552 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11553 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11554 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11555 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 12061 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11556 12062
11557 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); 12063 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
@@ -11740,7 +12246,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11740 } 12246 }
11741 12247
11742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 12249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
11744 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 12251 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11745 12252
11746 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || 12253 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
@@ -11824,7 +12331,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11824 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 12331 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11825 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 12332 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11826 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 12333 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11827 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) 12334 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
11828 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 12336 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11829 } 12337 }
11830 12338
@@ -11835,8 +12343,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11835 tp->phy_otp = TG3_OTP_DEFAULT; 12343 tp->phy_otp = TG3_OTP_DEFAULT;
11836 } 12344 }
11837 12345
11838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12346 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11840 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 12347 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11841 else 12348 else
11842 tp->mi_mode = MAC_MI_MODE_BASE; 12349 tp->mi_mode = MAC_MI_MODE_BASE;
@@ -11846,9 +12353,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11846 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 12353 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11847 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 12354 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11848 12355
11849 /* Initialize MAC MI mode, polling disabled. */ 12356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11850 tw32_f(MAC_MI_MODE, tp->mi_mode); 12357 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
11851 udelay(80); 12358
12359 err = tg3_mdio_init(tp);
12360 if (err)
12361 return err;
11852 12362
11853 /* Initialize data/descriptor byte/word swapping. */ 12363 /* Initialize data/descriptor byte/word swapping. */
11854 val = tr32(GRC_MODE); 12364 val = tr32(GRC_MODE);
@@ -11929,6 +12439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11929 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", 12439 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11930 pci_name(tp->pdev), err); 12440 pci_name(tp->pdev), err);
11931 /* ... but do not return immediately ... */ 12441 /* ... but do not return immediately ... */
12442 tg3_mdio_fini(tp);
11932 } 12443 }
11933 12444
11934 tg3_read_partno(tp); 12445 tg3_read_partno(tp);
@@ -11976,6 +12487,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11980 tp->dev->hard_start_xmit = tg3_start_xmit; 12492 tp->dev->hard_start_xmit = tg3_start_xmit;
11981 else 12493 else
@@ -12900,7 +13412,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12900 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && 13412 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12901 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) 13413 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12902 dev->features |= NETIF_F_TSO6; 13414 dev->features |= NETIF_F_TSO6;
12903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13416 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13417 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12904 dev->features |= NETIF_F_TSO_ECN; 13419 dev->features |= NETIF_F_TSO_ECN;
12905 } 13420 }
12906 13421
@@ -12966,7 +13481,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12970 dev->features |= NETIF_F_IPV6_CSUM; 13486 dev->features |= NETIF_F_IPV6_CSUM;
12971 13487
12972 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 13488 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
@@ -13048,6 +13564,12 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
13048 struct tg3 *tp = netdev_priv(dev); 13564 struct tg3 *tp = netdev_priv(dev);
13049 13565
13050 flush_scheduled_work(); 13566 flush_scheduled_work();
13567
13568 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13569 tg3_phy_fini(tp);
13570 tg3_mdio_fini(tp);
13571 }
13572
13051 unregister_netdev(dev); 13573 unregister_netdev(dev);
13052 if (tp->aperegs) { 13574 if (tp->aperegs) {
13053 iounmap(tp->aperegs); 13575 iounmap(tp->aperegs);
@@ -13080,6 +13602,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13080 return 0; 13602 return 0;
13081 13603
13082 flush_scheduled_work(); 13604 flush_scheduled_work();
13605 tg3_phy_stop(tp);
13083 tg3_netif_stop(tp); 13606 tg3_netif_stop(tp);
13084 13607
13085 del_timer_sync(&tp->timer); 13608 del_timer_sync(&tp->timer);
@@ -13097,10 +13620,13 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13097 13620
13098 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 13621 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13099 if (err) { 13622 if (err) {
13623 int err2;
13624
13100 tg3_full_lock(tp, 0); 13625 tg3_full_lock(tp, 0);
13101 13626
13102 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 13627 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13103 if (tg3_restart_hw(tp, 1)) 13628 err2 = tg3_restart_hw(tp, 1);
13629 if (err2)
13104 goto out; 13630 goto out;
13105 13631
13106 tp->timer.expires = jiffies + tp->timer_offset; 13632 tp->timer.expires = jiffies + tp->timer_offset;
@@ -13111,6 +13637,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13111 13637
13112out: 13638out:
13113 tg3_full_unlock(tp); 13639 tg3_full_unlock(tp);
13640
13641 if (!err2)
13642 tg3_phy_start(tp);
13114 } 13643 }
13115 13644
13116 return err; 13645 return err;
@@ -13148,6 +13677,9 @@ static int tg3_resume(struct pci_dev *pdev)
13148out: 13677out:
13149 tg3_full_unlock(tp); 13678 tg3_full_unlock(tp);
13150 13679
13680 if (!err)
13681 tg3_phy_start(tp);
13682
13151 return err; 13683 return err;
13152} 13684}
13153 13685
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0404f93baa29..df07842172b7 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -128,6 +128,7 @@
128#define ASIC_REV_USE_PROD_ID_REG 0x0f 128#define ASIC_REV_USE_PROD_ID_REG 0x0f
129#define ASIC_REV_5784 0x5784 129#define ASIC_REV_5784 0x5784
130#define ASIC_REV_5761 0x5761 130#define ASIC_REV_5761 0x5761
131#define ASIC_REV_5785 0x5785
131#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 132#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
132#define CHIPREV_5700_AX 0x70 133#define CHIPREV_5700_AX 0x70
133#define CHIPREV_5700_BX 0x71 134#define CHIPREV_5700_BX 0x71
@@ -528,7 +529,23 @@
528#define MAC_SERDES_CFG 0x00000590 529#define MAC_SERDES_CFG 0x00000590
529#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 530#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
530#define MAC_SERDES_STAT 0x00000594 531#define MAC_SERDES_STAT 0x00000594
531/* 0x598 --> 0x5b0 unused */ 532/* 0x598 --> 0x5a0 unused */
533#define MAC_PHYCFG1 0x000005a0
534#define MAC_PHYCFG1_RGMII_INT 0x00000001
535#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000
536#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000
537#define MAC_PHYCFG1_TXC_DRV 0x20000000
538#define MAC_PHYCFG2 0x000005a4
539#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
540#define MAC_EXT_RGMII_MODE 0x000005a8
541#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
542#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
543#define MAC_RGMII_MODE_TX_RESET 0x00000004
544#define MAC_RGMII_MODE_RX_INT_B 0x00000100
545#define MAC_RGMII_MODE_RX_QUALITY 0x00000200
546#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400
547#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800
548/* 0x5ac --> 0x5b0 unused */
532#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ 549#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
533#define SERDES_RX_SIG_DETECT 0x00000400 550#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 551#define SG_DIG_CTRL 0x000005b0
@@ -1109,6 +1126,7 @@
1109#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1126#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1110#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1127#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1111#define WDMAC_MODE_RX_ACCEL 0x00000400 1128#define WDMAC_MODE_RX_ACCEL 0x00000400
1129#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1112#define WDMAC_STATUS 0x00004c04 1130#define WDMAC_STATUS 0x00004c04
1113#define WDMAC_STATUS_TGTABORT 0x00000004 1131#define WDMAC_STATUS_TGTABORT 0x00000004
1114#define WDMAC_STATUS_MSTABORT 0x00000008 1132#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1713,6 +1731,12 @@
1713#define NIC_SRAM_DATA_CFG_3 0x00000d3c 1731#define NIC_SRAM_DATA_CFG_3 0x00000d3c
1714#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 1732#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
1715 1733
1734#define NIC_SRAM_DATA_CFG_4 0x00000d60
1735#define NIC_SRAM_GMII_MODE 0x00000002
1736#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004
1737#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
1738#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
1739
1716#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 1740#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
1717 1741
1718#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 1742#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2204,6 +2228,7 @@ struct tg3_link_config {
2204 u16 orig_speed; 2228 u16 orig_speed;
2205 u8 orig_duplex; 2229 u8 orig_duplex;
2206 u8 orig_autoneg; 2230 u8 orig_autoneg;
2231 u32 orig_advertising;
2207}; 2232};
2208 2233
2209struct tg3_bufmgr_config { 2234struct tg3_bufmgr_config {
@@ -2479,6 +2504,13 @@ struct tg3 {
2479#define TG3_FLG3_ENABLE_APE 0x00000002 2504#define TG3_FLG3_ENABLE_APE 0x00000002
2480#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004 2505#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
2481#define TG3_FLG3_5701_DMA_BUG 0x00000008 2506#define TG3_FLG3_5701_DMA_BUG 0x00000008
2507#define TG3_FLG3_USE_PHYLIB 0x00000010
2508#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2509#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2510#define TG3_FLG3_PHY_CONNECTED 0x00000080
2511#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2512#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2513#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2482 2514
2483 struct timer_list timer; 2515 struct timer_list timer;
2484 u16 timer_counter; 2516 u16 timer_counter;
@@ -2519,6 +2551,9 @@ struct tg3 {
2519 int msi_cap; 2551 int msi_cap;
2520 int pcix_cap; 2552 int pcix_cap;
2521 2553
2554 struct mii_bus mdio_bus;
2555 int mdio_irq[PHY_MAX_ADDR];
2556
2522 /* PHY info */ 2557 /* PHY info */
2523 u32 phy_id; 2558 u32 phy_id;
2524#define PHY_ID_MASK 0xfffffff0 2559#define PHY_ID_MASK 0xfffffff0
@@ -2546,6 +2581,9 @@ struct tg3 {
2546#define PHY_REV_BCM5401_B2 0x3 2581#define PHY_REV_BCM5401_B2 0x3
2547#define PHY_REV_BCM5401_C0 0x6 2582#define PHY_REV_BCM5401_C0 0x6
2548#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2583#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2584#define TG3_PHY_ID_BCM50610 0x143bd60
2585#define TG3_PHY_ID_BCMAC131 0x143bc70
2586
2549 2587
2550 u32 led_ctrl; 2588 u32 led_ctrl;
2551 u32 phy_otp; 2589 u32 phy_otp;
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 0f8b4ec8695a..66b1ff603234 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -264,7 +264,7 @@ struct xl_private {
264 u16 asb; 264 u16 asb;
265 265
266 u8 __iomem *xl_mmio; 266 u8 __iomem *xl_mmio;
267 char *xl_card_name; 267 const char *xl_card_name;
268 struct pci_dev *pdev ; 268 struct pci_dev *pdev ;
269 269
270 spinlock_t xl_lock ; 270 spinlock_t xl_lock ;
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
index c91956310fb2..10fbba08978f 100644
--- a/drivers/net/tokenring/olympic.h
+++ b/drivers/net/tokenring/olympic.h
@@ -254,7 +254,7 @@ struct olympic_private {
254 u8 __iomem *olympic_mmio; 254 u8 __iomem *olympic_mmio;
255 u8 __iomem *olympic_lap; 255 u8 __iomem *olympic_lap;
256 struct pci_dev *pdev ; 256 struct pci_dev *pdev ;
257 char *olympic_card_name ; 257 const char *olympic_card_name;
258 258
259 spinlock_t olympic_lock ; 259 spinlock_t olympic_lock ;
260 260
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 2511ca7a12aa..e9e628621639 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *);
225static const struct ethtool_ops netdev_ethtool_ops; 225static const struct ethtool_ops netdev_ethtool_ops;
226static u16 read_srom_word(long, int); 226static u16 read_srom_word(long, int);
227static irqreturn_t uli526x_interrupt(int, void *); 227static irqreturn_t uli526x_interrupt(int, void *);
228#ifdef CONFIG_NET_POLL_CONTROLLER
229static void uli526x_poll(struct net_device *dev);
230#endif
228static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); 231static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
229static void allocate_rx_buffer(struct uli526x_board_info *); 232static void allocate_rx_buffer(struct uli526x_board_info *);
230static void update_cr6(u32, unsigned long); 233static void update_cr6(u32, unsigned long);
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
339 dev->get_stats = &uli526x_get_stats; 342 dev->get_stats = &uli526x_get_stats;
340 dev->set_multicast_list = &uli526x_set_filter_mode; 343 dev->set_multicast_list = &uli526x_set_filter_mode;
341 dev->ethtool_ops = &netdev_ethtool_ops; 344 dev->ethtool_ops = &netdev_ethtool_ops;
345#ifdef CONFIG_NET_POLL_CONTROLLER
346 dev->poll_controller = &uli526x_poll;
347#endif
342 spin_lock_init(&db->lock); 348 spin_lock_init(&db->lock);
343 349
344 350
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
681 db->cr5_data = inl(ioaddr + DCR5); 687 db->cr5_data = inl(ioaddr + DCR5);
682 outl(db->cr5_data, ioaddr + DCR5); 688 outl(db->cr5_data, ioaddr + DCR5);
683 if ( !(db->cr5_data & 0x180c1) ) { 689 if ( !(db->cr5_data & 0x180c1) ) {
684 spin_unlock_irqrestore(&db->lock, flags); 690 /* Restore CR7 to enable interrupt mask */
685 outl(db->cr7_data, ioaddr + DCR7); 691 outl(db->cr7_data, ioaddr + DCR7);
692 spin_unlock_irqrestore(&db->lock, flags);
686 return IRQ_HANDLED; 693 return IRQ_HANDLED;
687 } 694 }
688 695
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
715 return IRQ_HANDLED; 722 return IRQ_HANDLED;
716} 723}
717 724
725#ifdef CONFIG_NET_POLL_CONTROLLER
726static void uli526x_poll(struct net_device *dev)
727{
728 /* ISR grabs the irqsave lock, so this should be safe */
729 uli526x_interrupt(dev->irq, dev);
730}
731#endif
718 732
719/* 733/*
720 * Free TX resource after TX complete 734 * Free TX resource after TX complete
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index ca0bdac07a78..fb0b918e5ccb 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
237 skb->dev = ugeth->dev; 237 skb->dev = ugeth->dev;
238 238
239 out_be32(&((struct qe_bd __iomem *)bd)->buf, 239 out_be32(&((struct qe_bd __iomem *)bd)->buf,
240 dma_map_single(NULL, 240 dma_map_single(&ugeth->dev->dev,
241 skb->data, 241 skb->data,
242 ugeth->ug_info->uf_info.max_rx_buf_length + 242 ugeth->ug_info->uf_info.max_rx_buf_length +
243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2158 continue; 2158 continue;
2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2160 if (ugeth->tx_skbuff[i][j]) { 2160 if (ugeth->tx_skbuff[i][j]) {
2161 dma_unmap_single(NULL, 2161 dma_unmap_single(&ugeth->dev->dev,
2162 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2163 (in_be32((u32 __iomem *)bd) & 2163 (in_be32((u32 __iomem *)bd) &
2164 BD_LENGTH_MASK), 2164 BD_LENGTH_MASK),
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2186 bd = ugeth->p_rx_bd_ring[i]; 2186 bd = ugeth->p_rx_bd_ring[i];
2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2188 if (ugeth->rx_skbuff[i][j]) { 2188 if (ugeth->rx_skbuff[i][j]) {
2189 dma_unmap_single(NULL, 2189 dma_unmap_single(&ugeth->dev->dev,
2190 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2191 ugeth->ug_info-> 2191 ugeth->ug_info->
2192 uf_info.max_rx_buf_length + 2192 uf_info.max_rx_buf_length +
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3406 3406
3407 /* set up the buffer descriptor */ 3407 /* set up the buffer descriptor */
3408 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3408 out_be32(&((struct qe_bd __iomem *)bd)->buf,
3409 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3409 dma_map_single(&ugeth->dev->dev, skb->data,
3410 skb->len, DMA_TO_DEVICE));
3410 3411
3411 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3412 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3412 3413
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index dc6f097062df..37ecf845edfe 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = {
1440 // Belkin F5D5055 1440 // Belkin F5D5055
1441 USB_DEVICE(0x050d, 0x5055), 1441 USB_DEVICE(0x050d, 0x5055),
1442 .driver_info = (unsigned long) &ax88178_info, 1442 .driver_info = (unsigned long) &ax88178_info,
1443}, {
1444 // Apple USB Ethernet Adapter
1445 USB_DEVICE(0x05ac, 0x1402),
1446 .driver_info = (unsigned long) &ax88772_info,
1443}, 1447},
1444 { }, // END 1448 { }, // END
1445}; 1449};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 3969b7a2b8e6..ae467f182c40 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
194 dev_dbg(&info->control->dev, 194 dev_dbg(&info->control->dev,
195 "rndis response error, code %d\n", retval); 195 "rndis response error, code %d\n", retval);
196 } 196 }
197 msleep(2); 197 msleep(20);
198 } 198 }
199 dev_dbg(&info->control->dev, "rndis response timeout\n"); 199 dev_dbg(&info->control->dev, "rndis response timeout\n");
200 return -ETIMEDOUT; 200 return -ETIMEDOUT;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f926b5ab3d09..fe7cdf2a2a23 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev)
470 kfree_skb(skb); 470 kfree_skb(skb);
471 vi->num--; 471 vi->num--;
472 } 472 }
473 while ((skb = __skb_dequeue(&vi->send)) != NULL) 473 __skb_queue_purge(&vi->send);
474 kfree_skb(skb);
475 474
476 BUG_ON(vi->num != 0); 475 BUG_ON(vi->num != 0);
477 476
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9a83c9d5b8cf..7f984895b0d5 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22";
43 43
44#undef DEBUG_LINK 44#undef DEBUG_LINK
45 45
46static struct hdlc_proto *first_proto = NULL; 46static struct hdlc_proto *first_proto;
47
48 47
49static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 48static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
50{ 49{
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev)
314 313
315void register_hdlc_protocol(struct hdlc_proto *proto) 314void register_hdlc_protocol(struct hdlc_proto *proto)
316{ 315{
316 rtnl_lock();
317 proto->next = first_proto; 317 proto->next = first_proto;
318 first_proto = proto; 318 first_proto = proto;
319 rtnl_unlock();
319} 320}
320 321
321 322
322void unregister_hdlc_protocol(struct hdlc_proto *proto) 323void unregister_hdlc_protocol(struct hdlc_proto *proto)
323{ 324{
324 struct hdlc_proto **p = &first_proto; 325 struct hdlc_proto **p;
325 while (*p) { 326
326 if (*p == proto) { 327 rtnl_lock();
327 *p = proto->next; 328 p = &first_proto;
328 return; 329 while (*p != proto) {
329 } 330 BUG_ON(!*p);
330 p = &((*p)->next); 331 p = &((*p)->next);
331 } 332 }
333 *p = proto->next;
334 rtnl_unlock();
332} 335}
333 336
334 337
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 7133c688cf20..762d21c1c703 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -56,6 +56,7 @@ struct cisco_state {
56 cisco_proto settings; 56 cisco_proto settings;
57 57
58 struct timer_list timer; 58 struct timer_list timer;
59 spinlock_t lock;
59 unsigned long last_poll; 60 unsigned long last_poll;
60 int up; 61 int up;
61 int request_sent; 62 int request_sent;
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb)
158{ 159{
159 struct net_device *dev = skb->dev; 160 struct net_device *dev = skb->dev;
160 hdlc_device *hdlc = dev_to_hdlc(dev); 161 hdlc_device *hdlc = dev_to_hdlc(dev);
162 struct cisco_state *st = state(hdlc);
161 struct hdlc_header *data = (struct hdlc_header*)skb->data; 163 struct hdlc_header *data = (struct hdlc_header*)skb->data;
162 struct cisco_packet *cisco_data; 164 struct cisco_packet *cisco_data;
163 struct in_device *in_dev; 165 struct in_device *in_dev;
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb)
220 goto rx_error; 222 goto rx_error;
221 223
222 case CISCO_KEEPALIVE_REQ: 224 case CISCO_KEEPALIVE_REQ:
223 state(hdlc)->rxseq = ntohl(cisco_data->par1); 225 spin_lock(&st->lock);
224 if (state(hdlc)->request_sent && 226 st->rxseq = ntohl(cisco_data->par1);
225 ntohl(cisco_data->par2) == state(hdlc)->txseq) { 227 if (st->request_sent &&
226 state(hdlc)->last_poll = jiffies; 228 ntohl(cisco_data->par2) == st->txseq) {
227 if (!state(hdlc)->up) { 229 st->last_poll = jiffies;
230 if (!st->up) {
228 u32 sec, min, hrs, days; 231 u32 sec, min, hrs, days;
229 sec = ntohl(cisco_data->time) / 1000; 232 sec = ntohl(cisco_data->time) / 1000;
230 min = sec / 60; sec -= min * 60; 233 min = sec / 60; sec -= min * 60;
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb)
232 days = hrs / 24; hrs -= days * 24; 235 days = hrs / 24; hrs -= days * 24;
233 printk(KERN_INFO "%s: Link up (peer " 236 printk(KERN_INFO "%s: Link up (peer "
234 "uptime %ud%uh%um%us)\n", 237 "uptime %ud%uh%um%us)\n",
235 dev->name, days, hrs, 238 dev->name, days, hrs, min, sec);
236 min, sec);
237 netif_dormant_off(dev); 239 netif_dormant_off(dev);
238 state(hdlc)->up = 1; 240 st->up = 1;
239 } 241 }
240 } 242 }
243 spin_unlock(&st->lock);
241 244
242 dev_kfree_skb_any(skb); 245 dev_kfree_skb_any(skb);
243 return NET_RX_SUCCESS; 246 return NET_RX_SUCCESS;
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg)
261{ 264{
262 struct net_device *dev = (struct net_device *)arg; 265 struct net_device *dev = (struct net_device *)arg;
263 hdlc_device *hdlc = dev_to_hdlc(dev); 266 hdlc_device *hdlc = dev_to_hdlc(dev);
267 struct cisco_state *st = state(hdlc);
264 268
265 if (state(hdlc)->up && 269 spin_lock(&st->lock);
266 time_after(jiffies, state(hdlc)->last_poll + 270 if (st->up &&
267 state(hdlc)->settings.timeout * HZ)) { 271 time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
268 state(hdlc)->up = 0; 272 st->up = 0;
269 printk(KERN_INFO "%s: Link down\n", dev->name); 273 printk(KERN_INFO "%s: Link down\n", dev->name);
270 netif_dormant_on(dev); 274 netif_dormant_on(dev);
271 } 275 }
272 276
273 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, 277 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
274 htonl(++state(hdlc)->txseq), 278 htonl(st->rxseq));
275 htonl(state(hdlc)->rxseq)); 279 st->request_sent = 1;
276 state(hdlc)->request_sent = 1; 280 spin_unlock(&st->lock);
277 state(hdlc)->timer.expires = jiffies + 281
278 state(hdlc)->settings.interval * HZ; 282 st->timer.expires = jiffies + st->settings.interval * HZ;
279 state(hdlc)->timer.function = cisco_timer; 283 st->timer.function = cisco_timer;
280 state(hdlc)->timer.data = arg; 284 st->timer.data = arg;
281 add_timer(&state(hdlc)->timer); 285 add_timer(&st->timer);
282} 286}
283 287
284 288
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg)
286static void cisco_start(struct net_device *dev) 290static void cisco_start(struct net_device *dev)
287{ 291{
288 hdlc_device *hdlc = dev_to_hdlc(dev); 292 hdlc_device *hdlc = dev_to_hdlc(dev);
289 state(hdlc)->up = 0; 293 struct cisco_state *st = state(hdlc);
290 state(hdlc)->request_sent = 0; 294 unsigned long flags;
291 state(hdlc)->txseq = state(hdlc)->rxseq = 0; 295
292 296 spin_lock_irqsave(&st->lock, flags);
293 init_timer(&state(hdlc)->timer); 297 st->up = 0;
294 state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ 298 st->request_sent = 0;
295 state(hdlc)->timer.function = cisco_timer; 299 st->txseq = st->rxseq = 0;
296 state(hdlc)->timer.data = (unsigned long)dev; 300 spin_unlock_irqrestore(&st->lock, flags);
297 add_timer(&state(hdlc)->timer); 301
302 init_timer(&st->timer);
303 st->timer.expires = jiffies + HZ; /* First poll after 1 s */
304 st->timer.function = cisco_timer;
305 st->timer.data = (unsigned long)dev;
306 add_timer(&st->timer);
298} 307}
299 308
300 309
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev)
302static void cisco_stop(struct net_device *dev) 311static void cisco_stop(struct net_device *dev)
303{ 312{
304 hdlc_device *hdlc = dev_to_hdlc(dev); 313 hdlc_device *hdlc = dev_to_hdlc(dev);
305 del_timer_sync(&state(hdlc)->timer); 314 struct cisco_state *st = state(hdlc);
315 unsigned long flags;
316
317 del_timer_sync(&st->timer);
318
319 spin_lock_irqsave(&st->lock, flags);
306 netif_dormant_on(dev); 320 netif_dormant_on(dev);
307 state(hdlc)->up = 0; 321 st->up = 0;
308 state(hdlc)->request_sent = 0; 322 st->request_sent = 0;
323 spin_unlock_irqrestore(&st->lock, flags);
309} 324}
310 325
311 326
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
367 return result; 382 return result;
368 383
369 memcpy(&state(hdlc)->settings, &new_settings, size); 384 memcpy(&state(hdlc)->settings, &new_settings, size);
385 spin_lock_init(&state(hdlc)->lock);
370 dev->hard_start_xmit = hdlc->xmit; 386 dev->hard_start_xmit = hdlc->xmit;
371 dev->header_ops = &cisco_header_ops; 387 dev->header_ops = &cisco_header_ops;
372 dev->type = ARPHRD_CISCO; 388 dev->type = ARPHRD_CISCO;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 8a78283e8607..17ced37e55ed 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2669,6 +2669,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
2669 dev->irq = ethdev->irq; 2669 dev->irq = ethdev->irq;
2670 dev->base_addr = ethdev->base_addr; 2670 dev->base_addr = ethdev->base_addr;
2671 dev->wireless_data = ethdev->wireless_data; 2671 dev->wireless_data = ethdev->wireless_data;
2672 SET_NETDEV_DEV(dev, ethdev->dev.parent);
2672 memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); 2673 memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
2673 err = register_netdev(dev); 2674 err = register_netdev(dev);
2674 if (err<0) { 2675 if (err<0) {
@@ -2905,7 +2906,7 @@ EXPORT_SYMBOL(init_airo_card);
2905 2906
2906static int waitbusy (struct airo_info *ai) { 2907static int waitbusy (struct airo_info *ai) {
2907 int delay = 0; 2908 int delay = 0;
2908 while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) { 2909 while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
2909 udelay (10); 2910 udelay (10);
2910 if ((++delay % 20) == 0) 2911 if ((++delay % 20) == 0)
2911 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); 2912 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 437a9bcc9bd3..ed4317a17cbb 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -833,6 +833,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
833 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), 833 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
834 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), 834 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
835/* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ 835/* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */
836 PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
836 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), 837 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
837 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), 838 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
838 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), 839 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 7be68db6f300..cdf90c40f11b 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3276,11 +3276,6 @@ while (0)
3276 } 3276 }
3277 printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name); 3277 printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
3278 3278
3279#ifndef PRISM2_NO_PROCFS_DEBUG
3280 create_proc_read_entry("registers", 0, local->proc,
3281 prism2_registers_proc_read, local);
3282#endif /* PRISM2_NO_PROCFS_DEBUG */
3283
3284 hostap_init_data(local); 3279 hostap_init_data(local);
3285 return dev; 3280 return dev;
3286 3281
@@ -3307,6 +3302,10 @@ static int hostap_hw_ready(struct net_device *dev)
3307 netif_carrier_off(local->ddev); 3302 netif_carrier_off(local->ddev);
3308 } 3303 }
3309 hostap_init_proc(local); 3304 hostap_init_proc(local);
3305#ifndef PRISM2_NO_PROCFS_DEBUG
3306 create_proc_read_entry("registers", 0, local->proc,
3307 prism2_registers_proc_read, local);
3308#endif /* PRISM2_NO_PROCFS_DEBUG */
3310 hostap_init_ap_proc(local); 3309 hostap_init_ap_proc(local);
3311 return 0; 3310 return 0;
3312 } 3311 }
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index fa87c5c2ae0b..d74c061994ae 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -11584,6 +11584,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11584 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; 11584 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11585 11585
11586 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; 11586 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11587 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11587 11588
11588 rc = register_netdev(priv->prom_net_dev); 11589 rc = register_netdev(priv->prom_net_dev);
11589 if (rc) { 11590 if (rc) {
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index dcfdb404678b..688d60de55cb 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -73,8 +73,8 @@ out:
73 return ret; 73 return ret;
74} 74}
75 75
76static void lbs_ethtool_get_stats(struct net_device * dev, 76static void lbs_ethtool_get_stats(struct net_device *dev,
77 struct ethtool_stats * stats, u64 * data) 77 struct ethtool_stats *stats, uint64_t *data)
78{ 78{
79 struct lbs_private *priv = dev->priv; 79 struct lbs_private *priv = dev->priv;
80 struct cmd_ds_mesh_access mesh_access; 80 struct cmd_ds_mesh_access mesh_access;
@@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
83 lbs_deb_enter(LBS_DEB_ETHTOOL); 83 lbs_deb_enter(LBS_DEB_ETHTOOL);
84 84
85 /* Get Mesh Statistics */ 85 /* Get Mesh Statistics */
86 ret = lbs_prepare_and_send_command(priv, 86 ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
87 CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS,
88 CMD_OPTION_WAITFORRSP, 0, &mesh_access);
89 87
90 if (ret) 88 if (ret) {
89 memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t)));
91 return; 90 return;
91 }
92 92
93 priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); 93 priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
94 priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); 94 priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
@@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev,
111 lbs_deb_enter(LBS_DEB_ETHTOOL); 111 lbs_deb_enter(LBS_DEB_ETHTOOL);
112} 112}
113 113
114static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset) 114static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
115{ 115{
116 switch (sset) { 116 struct lbs_private *priv = dev->priv;
117 case ETH_SS_STATS: 117
118 if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
118 return MESH_STATS_NUM; 119 return MESH_STATS_NUM;
119 default: 120
120 return -EOPNOTSUPP; 121 return -EOPNOTSUPP;
121 }
122} 122}
123 123
124static void lbs_ethtool_get_strings(struct net_device *dev, 124static void lbs_ethtool_get_strings(struct net_device *dev,
125 u32 stringset, 125 uint32_t stringset, uint8_t *s)
126 u8 * s)
127{ 126{
128 int i; 127 int i;
129 128
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index e333f14dce23..02e4fb639428 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -756,6 +756,7 @@ static int lbs_thread(void *data)
756 priv->nr_retries = 0; 756 priv->nr_retries = 0;
757 } else { 757 } else {
758 priv->cur_cmd = NULL; 758 priv->cur_cmd = NULL;
759 priv->dnld_sent = DNLD_RES_RECEIVED;
759 lbs_pr_info("requeueing command %x due to timeout (#%d)\n", 760 lbs_pr_info("requeueing command %x due to timeout (#%d)\n",
760 le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); 761 le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries);
761 762
@@ -1556,6 +1557,7 @@ static int lbs_add_rtap(struct lbs_private *priv)
1556 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; 1557 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
1557 rtap_dev->set_multicast_list = lbs_set_multicast_list; 1558 rtap_dev->set_multicast_list = lbs_set_multicast_list;
1558 rtap_dev->priv = priv; 1559 rtap_dev->priv = priv;
1560 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
1559 1561
1560 ret = register_netdev(rtap_dev); 1562 ret = register_netdev(rtap_dev);
1561 if (ret) { 1563 if (ret) {
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 8b7f5768a103..1c216e015f64 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
461 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ 461 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
462 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ 462 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
463 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ 463 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
464 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
464 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ 465 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
465 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ 466 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
466 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ 467 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index e14c84248686..23514456d373 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
92 u8 data[4]; 92 u8 data[4];
93 struct usb_ctrlrequest dr; 93 struct usb_ctrlrequest dr;
94 } *buf; 94 } *buf;
95 int rc;
95 96
96 buf = kmalloc(sizeof(*buf), GFP_ATOMIC); 97 buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
97 if (!buf) 98 if (!buf)
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
116 usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), 117 usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0),
117 (unsigned char *)dr, buf, len, 118 (unsigned char *)dr, buf, len,
118 rtl8187_iowrite_async_cb, buf); 119 rtl8187_iowrite_async_cb, buf);
119 usb_submit_urb(urb, GFP_ATOMIC); 120 rc = usb_submit_urb(urb, GFP_ATOMIC);
121 if (rc < 0) {
122 kfree(buf);
123 usb_free_urb(urb);
124 }
120} 125}
121 126
122static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, 127static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
@@ -169,6 +174,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
169 struct urb *urb; 174 struct urb *urb;
170 __le16 rts_dur = 0; 175 __le16 rts_dur = 0;
171 u32 flags; 176 u32 flags;
177 int rc;
172 178
173 urb = usb_alloc_urb(0, GFP_ATOMIC); 179 urb = usb_alloc_urb(0, GFP_ATOMIC);
174 if (!urb) { 180 if (!urb) {
@@ -208,7 +214,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
208 info->dev = dev; 214 info->dev = dev;
209 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), 215 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
210 hdr, skb->len, rtl8187_tx_cb, skb); 216 hdr, skb->len, rtl8187_tx_cb, skb);
211 usb_submit_urb(urb, GFP_ATOMIC); 217 rc = usb_submit_urb(urb, GFP_ATOMIC);
218 if (rc < 0) {
219 usb_free_urb(urb);
220 kfree_skb(skb);
221 }
212 222
213 return 0; 223 return 0;
214} 224}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8bddff150c70..d26f69b0184f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -946,8 +946,7 @@ err:
946 work_done++; 946 work_done++;
947 } 947 }
948 948
949 while ((skb = __skb_dequeue(&errq))) 949 __skb_queue_purge(&errq);
950 kfree_skb(skb);
951 950
952 work_done -= handle_incoming_queue(dev, &rxq); 951 work_done -= handle_incoming_queue(dev, &rxq);
953 952
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
1079 } 1078 }
1080 } 1079 }
1081 1080
1082 while ((skb = __skb_dequeue(&free_list)) != NULL) 1081 __skb_queue_purge(&free_list);
1083 dev_kfree_skb(skb);
1084 1082
1085 spin_unlock_bh(&np->rx_lock); 1083 spin_unlock_bh(&np->rx_lock);
1086} 1084}
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b7d81b2a9041..54a4cfb50ed0 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -92,7 +92,6 @@ header-y += if_slip.h
92header-y += if_strip.h 92header-y += if_strip.h
93header-y += if_tun.h 93header-y += if_tun.h
94header-y += if_tunnel.h 94header-y += if_tunnel.h
95header-y += in6.h
96header-y += in_route.h 95header-y += in_route.h
97header-y += ioctl.h 96header-y += ioctl.h
98header-y += ip6_tunnel.h 97header-y += ip6_tunnel.h
@@ -236,6 +235,7 @@ unifdef-y += if_vlan.h
236unifdef-y += igmp.h 235unifdef-y += igmp.h
237unifdef-y += inet_diag.h 236unifdef-y += inet_diag.h
238unifdef-y += in.h 237unifdef-y += in.h
238unifdef-y += in6.h
239unifdef-y += inotify.h 239unifdef-y += inotify.h
240unifdef-y += input.h 240unifdef-y += input.h
241unifdef-y += ip.h 241unifdef-y += ip.h
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
new file mode 100644
index 000000000000..9b64b6d67873
--- /dev/null
+++ b/include/linux/brcmphy.h
@@ -0,0 +1,6 @@
1#define PHY_BRCM_WIRESPEED_ENABLE 0x00000001
2#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000002
3#define PHY_BRCM_APD_CLK125_ENABLE 0x00000004
4#define PHY_BRCM_STD_IBND_DISABLE 0x00000008
5#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00000010
6#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00000020
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b11e6e19e96c..f27fd2009334 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -745,6 +745,9 @@ struct net_device
745 /* rtnetlink link ops */ 745 /* rtnetlink link ops */
746 const struct rtnl_link_ops *rtnl_link_ops; 746 const struct rtnl_link_ops *rtnl_link_ops;
747 747
748 /* VLAN feature mask */
749 unsigned long vlan_features;
750
748 /* for setting kernel sock attribute on TCP connection setup */ 751 /* for setting kernel sock attribute on TCP connection setup */
749#define GSO_MAX_SIZE 65536 752#define GSO_MAX_SIZE 65536
750 unsigned int gso_max_size; 753 unsigned int gso_max_size;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index e4c66593b5c6..0c5eb7ed8b3f 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -3,7 +3,6 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/types.h>
7#include <linux/skbuff.h> 6#include <linux/skbuff.h>
8#include <linux/net.h> 7#include <linux/net.h>
9#include <linux/netdevice.h> 8#include <linux/netdevice.h>
@@ -14,6 +13,7 @@
14#include <linux/list.h> 13#include <linux/list.h>
15#include <net/net_namespace.h> 14#include <net/net_namespace.h>
16#endif 15#endif
16#include <linux/types.h>
17#include <linux/compiler.h> 17#include <linux/compiler.h>
18 18
19/* Responses from hook functions. */ 19/* Responses from hook functions. */
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index dd9c97f2d436..590ac3d6d5d6 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -11,11 +11,11 @@
11 11
12#ifdef __KERNEL__ 12#ifdef __KERNEL__
13#include <linux/if.h> 13#include <linux/if.h>
14#include <linux/types.h>
15#include <linux/in.h> 14#include <linux/in.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/skbuff.h> 16#include <linux/skbuff.h>
18#endif 17#endif
18#include <linux/types.h>
19#include <linux/compiler.h> 19#include <linux/compiler.h>
20#include <linux/netfilter_arp.h> 20#include <linux/netfilter_arp.h>
21 21
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index bfc889f90276..092bd50581a9 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -17,11 +17,11 @@
17 17
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19#include <linux/if.h> 19#include <linux/if.h>
20#include <linux/types.h>
21#include <linux/in.h> 20#include <linux/in.h>
22#include <linux/ip.h> 21#include <linux/ip.h>
23#include <linux/skbuff.h> 22#include <linux/skbuff.h>
24#endif 23#endif
24#include <linux/types.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/netfilter_ipv4.h> 26#include <linux/netfilter_ipv4.h>
27 27
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index f2507dcc5750..1089e33cf633 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -17,11 +17,11 @@
17 17
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19#include <linux/if.h> 19#include <linux/if.h>
20#include <linux/types.h>
21#include <linux/in6.h> 20#include <linux/in6.h>
22#include <linux/ipv6.h> 21#include <linux/ipv6.h>
23#include <linux/skbuff.h> 22#include <linux/skbuff.h>
24#endif 23#endif
24#include <linux/types.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/netfilter_ipv6.h> 26#include <linux/netfilter_ipv6.h>
27 27
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cf6dbd759395..72c038560e7d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1979,6 +1979,7 @@
1979#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 1979#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
1980#define PCI_DEVICE_ID_TIGON3_5782 0x1696 1980#define PCI_DEVICE_ID_TIGON3_5782 0x1696
1981#define PCI_DEVICE_ID_TIGON3_5784 0x1698 1981#define PCI_DEVICE_ID_TIGON3_5784 0x1698
1982#define PCI_DEVICE_ID_TIGON3_5785 0x1699
1982#define PCI_DEVICE_ID_TIGON3_5786 0x169a 1983#define PCI_DEVICE_ID_TIGON3_5786 0x169a
1983#define PCI_DEVICE_ID_TIGON3_5787 0x169b 1984#define PCI_DEVICE_ID_TIGON3_5787 0x169b
1984#define PCI_DEVICE_ID_TIGON3_5788 0x169c 1985#define PCI_DEVICE_ID_TIGON3_5788 0x169c
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d96d9b122304..9881295f3857 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -296,10 +296,9 @@ struct tcp_sock {
296 u32 rcv_ssthresh; /* Current window clamp */ 296 u32 rcv_ssthresh; /* Current window clamp */
297 297
298 u32 frto_highmark; /* snd_nxt when RTO occurred */ 298 u32 frto_highmark; /* snd_nxt when RTO occurred */
299 u8 reordering; /* Packet reordering metric. */ 299 u16 advmss; /* Advertised MSS */
300 u8 frto_counter; /* Number of new acks after RTO */ 300 u8 frto_counter; /* Number of new acks after RTO */
301 u8 nonagle; /* Disable Nagle algorithm? */ 301 u8 nonagle; /* Disable Nagle algorithm? */
302 u8 keepalive_probes; /* num of allowed keep alive probes */
303 302
304/* RTT measurement */ 303/* RTT measurement */
305 u32 srtt; /* smoothed round trip time << 3 */ 304 u32 srtt; /* smoothed round trip time << 3 */
@@ -310,6 +309,10 @@ struct tcp_sock {
310 309
311 u32 packets_out; /* Packets which are "in flight" */ 310 u32 packets_out; /* Packets which are "in flight" */
312 u32 retrans_out; /* Retransmitted packets out */ 311 u32 retrans_out; /* Retransmitted packets out */
312
313 u16 urg_data; /* Saved octet of OOB data and control flags */
314 u8 urg_mode; /* In urgent mode */
315 u8 ecn_flags; /* ECN status bits. */
313/* 316/*
314 * Options received (usually on last packet, some only on SYN packets). 317 * Options received (usually on last packet, some only on SYN packets).
315 */ 318 */
@@ -325,13 +328,24 @@ struct tcp_sock {
325 u32 snd_cwnd_used; 328 u32 snd_cwnd_used;
326 u32 snd_cwnd_stamp; 329 u32 snd_cwnd_stamp;
327 330
328 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
329
330 u32 rcv_wnd; /* Current receiver window */ 331 u32 rcv_wnd; /* Current receiver window */
331 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ 332 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
332 u32 pushed_seq; /* Last pushed seq, required to talk to windows */ 333 u32 pushed_seq; /* Last pushed seq, required to talk to windows */
334 u32 lost_out; /* Lost packets */
335 u32 sacked_out; /* SACK'd packets */
336 u32 fackets_out; /* FACK'd packets */
337 u32 tso_deferred;
338 u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
333 339
334/* SACKs data */ 340 /* from STCP, retrans queue hinting */
341 struct sk_buff* lost_skb_hint;
342 struct sk_buff *scoreboard_skb_hint;
343 struct sk_buff *retransmit_skb_hint;
344 struct sk_buff *forward_skb_hint;
345
346 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
347
348 /* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */
335 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 349 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
336 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ 350 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
337 351
@@ -342,23 +356,14 @@ struct tcp_sock {
342 * sacked_out > 0) 356 * sacked_out > 0)
343 */ 357 */
344 358
345 /* from STCP, retrans queue hinting */
346 struct sk_buff* lost_skb_hint;
347
348 struct sk_buff *scoreboard_skb_hint;
349 struct sk_buff *retransmit_skb_hint;
350 struct sk_buff *forward_skb_hint;
351
352 int lost_cnt_hint; 359 int lost_cnt_hint;
353 int retransmit_cnt_hint; 360 int retransmit_cnt_hint;
354 361
355 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ 362 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
356 363
357 u16 advmss; /* Advertised MSS */ 364 u8 reordering; /* Packet reordering metric. */
358 u16 prior_ssthresh; /* ssthresh saved at recovery start */ 365 u8 keepalive_probes; /* num of allowed keep alive probes */
359 u32 lost_out; /* Lost packets */ 366 u32 prior_ssthresh; /* ssthresh saved at recovery start */
360 u32 sacked_out; /* SACK'd packets */
361 u32 fackets_out; /* FACK'd packets */
362 u32 high_seq; /* snd_nxt at onset of congestion */ 367 u32 high_seq; /* snd_nxt at onset of congestion */
363 368
364 u32 retrans_stamp; /* Timestamp of the last retransmit, 369 u32 retrans_stamp; /* Timestamp of the last retransmit,
@@ -366,25 +371,18 @@ struct tcp_sock {
366 * the first SYN. */ 371 * the first SYN. */
367 u32 undo_marker; /* tracking retrans started here. */ 372 u32 undo_marker; /* tracking retrans started here. */
368 int undo_retrans; /* number of undoable retransmissions. */ 373 int undo_retrans; /* number of undoable retransmissions. */
374 u32 total_retrans; /* Total retransmits for entire connection */
375
369 u32 urg_seq; /* Seq of received urgent pointer */ 376 u32 urg_seq; /* Seq of received urgent pointer */
370 u16 urg_data; /* Saved octet of OOB data and control flags */
371 u8 urg_mode; /* In urgent mode */
372 u8 ecn_flags; /* ECN status bits. */
373 u32 snd_up; /* Urgent pointer */ 377 u32 snd_up; /* Urgent pointer */
374 378
375 u32 total_retrans; /* Total retransmits for entire connection */
376 u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
377
378 unsigned int keepalive_time; /* time before keep alive takes place */ 379 unsigned int keepalive_time; /* time before keep alive takes place */
379 unsigned int keepalive_intvl; /* time interval between keep alive probes */ 380 unsigned int keepalive_intvl; /* time interval between keep alive probes */
380 int linger2;
381 381
382 struct tcp_deferred_accept_info defer_tcp_accept; 382 struct tcp_deferred_accept_info defer_tcp_accept;
383 383
384 unsigned long last_synq_overflow; 384 unsigned long last_synq_overflow;
385 385
386 u32 tso_deferred;
387
388/* Receiver side RTT estimation */ 386/* Receiver side RTT estimation */
389 struct { 387 struct {
390 u32 rtt; 388 u32 rtt;
@@ -412,6 +410,8 @@ struct tcp_sock {
412/* TCP MD5 Signagure Option information */ 410/* TCP MD5 Signagure Option information */
413 struct tcp_md5sig_info *md5sig_info; 411 struct tcp_md5sig_info *md5sig_info;
414#endif 412#endif
413
414 int linger2;
415}; 415};
416 416
417static inline struct tcp_sock *tcp_sk(const struct sock *sk) 417static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index 3add87465b1f..e0aa39612eba 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -522,7 +522,7 @@ extern int wanrouter_proc_init(void);
522extern void wanrouter_proc_cleanup(void); 522extern void wanrouter_proc_cleanup(void);
523extern int wanrouter_proc_add(struct wan_device *wandev); 523extern int wanrouter_proc_add(struct wan_device *wandev);
524extern int wanrouter_proc_delete(struct wan_device *wandev); 524extern int wanrouter_proc_delete(struct wan_device *wandev);
525extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); 525extern long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
526 526
527/* Public Data */ 527/* Public Data */
528/* list of registered devices */ 528/* list of registered devices */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 909956c97c44..2f9853997992 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1602,13 +1602,16 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
1602void ieee80211_scan_completed(struct ieee80211_hw *hw); 1602void ieee80211_scan_completed(struct ieee80211_hw *hw);
1603 1603
1604/** 1604/**
1605 * ieee80211_iterate_active_interfaces - iterate active interfaces 1605 * ieee80211_iterate_active_interfaces- iterate active interfaces
1606 * 1606 *
1607 * This function iterates over the interfaces associated with a given 1607 * This function iterates over the interfaces associated with a given
1608 * hardware that are currently active and calls the callback for them. 1608 * hardware that are currently active and calls the callback for them.
1609 * This function allows the iterator function to sleep, when the iterator
1610 * function is atomic @ieee80211_iterate_active_interfaces_atomic can
1611 * be used.
1609 * 1612 *
1610 * @hw: the hardware struct of which the interfaces should be iterated over 1613 * @hw: the hardware struct of which the interfaces should be iterated over
1611 * @iterator: the iterator function to call, cannot sleep 1614 * @iterator: the iterator function to call
1612 * @data: first argument of the iterator function 1615 * @data: first argument of the iterator function
1613 */ 1616 */
1614void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, 1617void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
@@ -1617,6 +1620,24 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
1617 void *data); 1620 void *data);
1618 1621
1619/** 1622/**
1623 * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
1624 *
1625 * This function iterates over the interfaces associated with a given
1626 * hardware that are currently active and calls the callback for them.
1627 * This function requires the iterator callback function to be atomic,
1628 * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
1629 *
1630 * @hw: the hardware struct of which the interfaces should be iterated over
1631 * @iterator: the iterator function to call, cannot sleep
1632 * @data: first argument of the iterator function
1633 */
1634void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
1635 void (*iterator)(void *data,
1636 u8 *mac,
1637 struct ieee80211_vif *vif),
1638 void *data);
1639
1640/**
1620 * ieee80211_start_tx_ba_session - Start a tx Block Ack session. 1641 * ieee80211_start_tx_ba_session - Start a tx Block Ack session.
1621 * @hw: pointer as obtained from ieee80211_alloc_hw(). 1642 * @hw: pointer as obtained from ieee80211_alloc_hw().
1622 * @ra: receiver address of the BA session recipient 1643 * @ra: receiver address of the BA session recipient
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9c451ff2f4f4..a01b7c4dc763 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -129,6 +129,10 @@ extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
129 void __user *buffer, 129 void __user *buffer,
130 size_t *lenp, 130 size_t *lenp,
131 loff_t *ppos); 131 loff_t *ppos);
132int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
133 int nlen, void __user *oldval,
134 size_t __user *oldlenp,
135 void __user *newval, size_t newlen);
132#endif 136#endif
133 137
134extern void inet6_ifinfo_notify(int event, 138extern void inet6_ifinfo_notify(int event,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index a5506c42f03c..112dcdf7e34e 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -772,12 +772,13 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
772 const struct nla_policy *policy, 772 const struct nla_policy *policy,
773 int len) 773 int len)
774{ 774{
775 if (nla_len(nla) < len) 775 int nested_len = nla_len(nla) - NLA_ALIGN(len);
776
777 if (nested_len < 0)
776 return -1; 778 return -1;
777 if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr)) 779 if (nested_len >= nla_attr_size(0))
778 return nla_parse_nested(tb, maxtype, 780 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
779 nla_data(nla) + NLA_ALIGN(len), 781 nested_len, policy);
780 policy);
781 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 782 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
782 return 0; 783 return 0;
783} 784}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2a739adaa92b..ab2225da0ee2 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -382,6 +382,18 @@ static void vlan_sync_address(struct net_device *dev,
382 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); 382 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
383} 383}
384 384
385static void vlan_transfer_features(struct net_device *dev,
386 struct net_device *vlandev)
387{
388 unsigned long old_features = vlandev->features;
389
390 vlandev->features &= ~dev->vlan_features;
391 vlandev->features |= dev->features & dev->vlan_features;
392
393 if (old_features != vlandev->features)
394 netdev_features_change(vlandev);
395}
396
385static void __vlan_device_event(struct net_device *dev, unsigned long event) 397static void __vlan_device_event(struct net_device *dev, unsigned long event)
386{ 398{
387 switch (event) { 399 switch (event) {
@@ -410,10 +422,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
410 int i, flgs; 422 int i, flgs;
411 struct net_device *vlandev; 423 struct net_device *vlandev;
412 424
413 if (is_vlan_dev(dev)) { 425 if (is_vlan_dev(dev))
414 __vlan_device_event(dev, event); 426 __vlan_device_event(dev, event);
415 goto out;
416 }
417 427
418 grp = __vlan_find_group(dev); 428 grp = __vlan_find_group(dev);
419 if (!grp) 429 if (!grp)
@@ -450,6 +460,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
450 } 460 }
451 break; 461 break;
452 462
463 case NETDEV_FEAT_CHANGE:
464 /* Propagate device features to underlying device */
465 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
466 vlandev = vlan_group_get_device(grp, i);
467 if (!vlandev)
468 continue;
469
470 vlan_transfer_features(dev, vlandev);
471 }
472
473 break;
474
453 case NETDEV_DOWN: 475 case NETDEV_DOWN:
454 /* Put all VLANs for this dev in the down state too. */ 476 /* Put all VLANs for this dev in the down state too. */
455 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 477 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c961f0826005..5d055c242ed8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -663,6 +663,8 @@ static int vlan_dev_init(struct net_device *dev)
663 (1<<__LINK_STATE_DORMANT))) | 663 (1<<__LINK_STATE_DORMANT))) |
664 (1<<__LINK_STATE_PRESENT); 664 (1<<__LINK_STATE_PRESENT);
665 665
666 dev->features |= real_dev->features & real_dev->vlan_features;
667
666 /* ipv6 shared card related stuff */ 668 /* ipv6 shared card related stuff */
667 dev->dev_id = real_dev->dev_id; 669 dev->dev_id = real_dev->dev_id;
668 670
diff --git a/net/core/dev.c b/net/core/dev.c
index ce88c0d3e354..582963077877 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3141,7 +3141,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
3141 * Load in the correct multicast list now the flags have changed. 3141 * Load in the correct multicast list now the flags have changed.
3142 */ 3142 */
3143 3143
3144 if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST) 3144 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3145 dev->change_rx_flags(dev, IFF_MULTICAST); 3145 dev->change_rx_flags(dev, IFF_MULTICAST);
3146 3146
3147 dev_set_rx_mode(dev); 3147 dev_set_rx_mode(dev);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8dca21110493..fdf537707e51 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -390,6 +390,7 @@ struct pktgen_thread {
390 int cpu; 390 int cpu;
391 391
392 wait_queue_head_t queue; 392 wait_queue_head_t queue;
393 struct completion start_done;
393}; 394};
394 395
395#define REMOVE 1 396#define REMOVE 1
@@ -3414,6 +3415,7 @@ static int pktgen_thread_worker(void *arg)
3414 BUG_ON(smp_processor_id() != cpu); 3415 BUG_ON(smp_processor_id() != cpu);
3415 3416
3416 init_waitqueue_head(&t->queue); 3417 init_waitqueue_head(&t->queue);
3418 complete(&t->start_done);
3417 3419
3418 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3420 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
3419 3421
@@ -3615,6 +3617,7 @@ static int __init pktgen_create_thread(int cpu)
3615 INIT_LIST_HEAD(&t->if_list); 3617 INIT_LIST_HEAD(&t->if_list);
3616 3618
3617 list_add_tail(&t->th_list, &pktgen_threads); 3619 list_add_tail(&t->th_list, &pktgen_threads);
3620 init_completion(&t->start_done);
3618 3621
3619 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); 3622 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
3620 if (IS_ERR(p)) { 3623 if (IS_ERR(p)) {
@@ -3639,6 +3642,7 @@ static int __init pktgen_create_thread(int cpu)
3639 } 3642 }
3640 3643
3641 wake_up_process(p); 3644 wake_up_process(p);
3645 wait_for_completion(&t->start_done);
3642 3646
3643 return 0; 3647 return 0;
3644} 3648}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 418862f1bf22..9b539fa9fe18 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1288,7 +1288,6 @@ static void arp_format_neigh_entry(struct seq_file *seq,
1288 struct neighbour *n) 1288 struct neighbour *n)
1289{ 1289{
1290 char hbuffer[HBUFFERLEN]; 1290 char hbuffer[HBUFFERLEN];
1291 const char hexbuf[] = "0123456789ABCDEF";
1292 int k, j; 1291 int k, j;
1293 char tbuf[16]; 1292 char tbuf[16];
1294 struct net_device *dev = n->dev; 1293 struct net_device *dev = n->dev;
@@ -1302,8 +1301,8 @@ static void arp_format_neigh_entry(struct seq_file *seq,
1302 else { 1301 else {
1303#endif 1302#endif
1304 for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) { 1303 for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) {
1305 hbuffer[k++] = hexbuf[(n->ha[j] >> 4) & 15]; 1304 hbuffer[k++] = hex_asc_hi(n->ha[j]);
1306 hbuffer[k++] = hexbuf[n->ha[j] & 15]; 1305 hbuffer[k++] = hex_asc_lo(n->ha[j]);
1307 hbuffer[k++] = ':'; 1306 hbuffer[k++] = ':';
1308 } 1307 }
1309 hbuffer[--k] = 0; 1308 hbuffer[--k] = 0;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eede36e55702..2a61158ea722 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -313,9 +313,8 @@ static void ipgre_tunnel_uninit(struct net_device *dev)
313 313
314static void ipgre_err(struct sk_buff *skb, u32 info) 314static void ipgre_err(struct sk_buff *skb, u32 info)
315{ 315{
316#ifndef I_WISH_WORLD_WERE_PERFECT
317 316
318/* It is not :-( All the routers (except for Linux) return only 317/* All the routers (except for Linux) return only
319 8 bytes of packet payload. It means, that precise relaying of 318 8 bytes of packet payload. It means, that precise relaying of
320 ICMP in the real Internet is absolutely infeasible. 319 ICMP in the real Internet is absolutely infeasible.
321 320
@@ -398,149 +397,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
398out: 397out:
399 read_unlock(&ipgre_lock); 398 read_unlock(&ipgre_lock);
400 return; 399 return;
401#else
402 struct iphdr *iph = (struct iphdr*)dp;
403 struct iphdr *eiph;
404 __be16 *p = (__be16*)(dp+(iph->ihl<<2));
405 const int type = icmp_hdr(skb)->type;
406 const int code = icmp_hdr(skb)->code;
407 int rel_type = 0;
408 int rel_code = 0;
409 __be32 rel_info = 0;
410 __u32 n = 0;
411 __be16 flags;
412 int grehlen = (iph->ihl<<2) + 4;
413 struct sk_buff *skb2;
414 struct flowi fl;
415 struct rtable *rt;
416
417 if (p[1] != htons(ETH_P_IP))
418 return;
419
420 flags = p[0];
421 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
422 if (flags&(GRE_VERSION|GRE_ROUTING))
423 return;
424 if (flags&GRE_CSUM)
425 grehlen += 4;
426 if (flags&GRE_KEY)
427 grehlen += 4;
428 if (flags&GRE_SEQ)
429 grehlen += 4;
430 }
431 if (len < grehlen + sizeof(struct iphdr))
432 return;
433 eiph = (struct iphdr*)(dp + grehlen);
434
435 switch (type) {
436 default:
437 return;
438 case ICMP_PARAMETERPROB:
439 n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
440 if (n < (iph->ihl<<2))
441 return;
442
443 /* So... This guy found something strange INSIDE encapsulated
444 packet. Well, he is fool, but what can we do ?
445 */
446 rel_type = ICMP_PARAMETERPROB;
447 n -= grehlen;
448 rel_info = htonl(n << 24);
449 break;
450
451 case ICMP_DEST_UNREACH:
452 switch (code) {
453 case ICMP_SR_FAILED:
454 case ICMP_PORT_UNREACH:
455 /* Impossible event. */
456 return;
457 case ICMP_FRAG_NEEDED:
458 /* And it is the only really necessary thing :-) */
459 n = ntohs(icmp_hdr(skb)->un.frag.mtu);
460 if (n < grehlen+68)
461 return;
462 n -= grehlen;
463 /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
464 if (n > ntohs(eiph->tot_len))
465 return;
466 rel_info = htonl(n);
467 break;
468 default:
469 /* All others are translated to HOST_UNREACH.
470 rfc2003 contains "deep thoughts" about NET_UNREACH,
471 I believe, it is just ether pollution. --ANK
472 */
473 rel_type = ICMP_DEST_UNREACH;
474 rel_code = ICMP_HOST_UNREACH;
475 break;
476 }
477 break;
478 case ICMP_TIME_EXCEEDED:
479 if (code != ICMP_EXC_TTL)
480 return;
481 break;
482 }
483
484 /* Prepare fake skb to feed it to icmp_send */
485 skb2 = skb_clone(skb, GFP_ATOMIC);
486 if (skb2 == NULL)
487 return;
488 dst_release(skb2->dst);
489 skb2->dst = NULL;
490 skb_pull(skb2, skb->data - (u8*)eiph);
491 skb_reset_network_header(skb2);
492
493 /* Try to guess incoming interface */
494 memset(&fl, 0, sizeof(fl));
495 fl.fl4_dst = eiph->saddr;
496 fl.fl4_tos = RT_TOS(eiph->tos);
497 fl.proto = IPPROTO_GRE;
498 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) {
499 kfree_skb(skb2);
500 return;
501 }
502 skb2->dev = rt->u.dst.dev;
503
504 /* route "incoming" packet */
505 if (rt->rt_flags&RTCF_LOCAL) {
506 ip_rt_put(rt);
507 rt = NULL;
508 fl.fl4_dst = eiph->daddr;
509 fl.fl4_src = eiph->saddr;
510 fl.fl4_tos = eiph->tos;
511 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
512 rt->u.dst.dev->type != ARPHRD_IPGRE) {
513 ip_rt_put(rt);
514 kfree_skb(skb2);
515 return;
516 }
517 } else {
518 ip_rt_put(rt);
519 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
520 skb2->dst->dev->type != ARPHRD_IPGRE) {
521 kfree_skb(skb2);
522 return;
523 }
524 }
525
526 /* change mtu on this route */
527 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
528 if (n > dst_mtu(skb2->dst)) {
529 kfree_skb(skb2);
530 return;
531 }
532 skb2->dst->ops->update_pmtu(skb2->dst, n);
533 } else if (type == ICMP_TIME_EXCEEDED) {
534 struct ip_tunnel *t = netdev_priv(skb2->dev);
535 if (t->parms.iph.ttl) {
536 rel_type = ICMP_DEST_UNREACH;
537 rel_code = ICMP_HOST_UNREACH;
538 }
539 }
540
541 icmp_send(skb2, rel_type, rel_code, rel_info);
542 kfree_skb(skb2);
543#endif
544} 400}
545 401
546static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 402static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 26c85c23ca4f..86d8836551b9 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -278,9 +278,8 @@ static void ipip_tunnel_uninit(struct net_device *dev)
278 278
279static int ipip_err(struct sk_buff *skb, u32 info) 279static int ipip_err(struct sk_buff *skb, u32 info)
280{ 280{
281#ifndef I_WISH_WORLD_WERE_PERFECT
282 281
283/* It is not :-( All the routers (except for Linux) return only 282/* All the routers (except for Linux) return only
284 8 bytes of packet payload. It means, that precise relaying of 283 8 bytes of packet payload. It means, that precise relaying of
285 ICMP in the real Internet is absolutely infeasible. 284 ICMP in the real Internet is absolutely infeasible.
286 */ 285 */
@@ -337,133 +336,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
337out: 336out:
338 read_unlock(&ipip_lock); 337 read_unlock(&ipip_lock);
339 return err; 338 return err;
340#else
341 struct iphdr *iph = (struct iphdr*)dp;
342 int hlen = iph->ihl<<2;
343 struct iphdr *eiph;
344 const int type = icmp_hdr(skb)->type;
345 const int code = icmp_hdr(skb)->code;
346 int rel_type = 0;
347 int rel_code = 0;
348 __be32 rel_info = 0;
349 __u32 n = 0;
350 struct sk_buff *skb2;
351 struct flowi fl;
352 struct rtable *rt;
353
354 if (len < hlen + sizeof(struct iphdr))
355 return 0;
356 eiph = (struct iphdr*)(dp + hlen);
357
358 switch (type) {
359 default:
360 return 0;
361 case ICMP_PARAMETERPROB:
362 n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
363 if (n < hlen)
364 return 0;
365
366 /* So... This guy found something strange INSIDE encapsulated
367 packet. Well, he is fool, but what can we do ?
368 */
369 rel_type = ICMP_PARAMETERPROB;
370 rel_info = htonl((n - hlen) << 24);
371 break;
372
373 case ICMP_DEST_UNREACH:
374 switch (code) {
375 case ICMP_SR_FAILED:
376 case ICMP_PORT_UNREACH:
377 /* Impossible event. */
378 return 0;
379 case ICMP_FRAG_NEEDED:
380 /* And it is the only really necessary thing :-) */
381 n = ntohs(icmp_hdr(skb)->un.frag.mtu);
382 if (n < hlen+68)
383 return 0;
384 n -= hlen;
385 /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
386 if (n > ntohs(eiph->tot_len))
387 return 0;
388 rel_info = htonl(n);
389 break;
390 default:
391 /* All others are translated to HOST_UNREACH.
392 rfc2003 contains "deep thoughts" about NET_UNREACH,
393 I believe, it is just ether pollution. --ANK
394 */
395 rel_type = ICMP_DEST_UNREACH;
396 rel_code = ICMP_HOST_UNREACH;
397 break;
398 }
399 break;
400 case ICMP_TIME_EXCEEDED:
401 if (code != ICMP_EXC_TTL)
402 return 0;
403 break;
404 }
405
406 /* Prepare fake skb to feed it to icmp_send */
407 skb2 = skb_clone(skb, GFP_ATOMIC);
408 if (skb2 == NULL)
409 return 0;
410 dst_release(skb2->dst);
411 skb2->dst = NULL;
412 skb_pull(skb2, skb->data - (u8*)eiph);
413 skb_reset_network_header(skb2);
414
415 /* Try to guess incoming interface */
416 memset(&fl, 0, sizeof(fl));
417 fl.fl4_daddr = eiph->saddr;
418 fl.fl4_tos = RT_TOS(eiph->tos);
419 fl.proto = IPPROTO_IPIP;
420 if (ip_route_output_key(dev_net(skb->dev), &rt, &key)) {
421 kfree_skb(skb2);
422 return 0;
423 }
424 skb2->dev = rt->u.dst.dev;
425
426 /* route "incoming" packet */
427 if (rt->rt_flags&RTCF_LOCAL) {
428 ip_rt_put(rt);
429 rt = NULL;
430 fl.fl4_daddr = eiph->daddr;
431 fl.fl4_src = eiph->saddr;
432 fl.fl4_tos = eiph->tos;
433 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
434 rt->u.dst.dev->type != ARPHRD_TUNNEL) {
435 ip_rt_put(rt);
436 kfree_skb(skb2);
437 return 0;
438 }
439 } else {
440 ip_rt_put(rt);
441 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
442 skb2->dst->dev->type != ARPHRD_TUNNEL) {
443 kfree_skb(skb2);
444 return 0;
445 }
446 }
447
448 /* change mtu on this route */
449 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
450 if (n > dst_mtu(skb2->dst)) {
451 kfree_skb(skb2);
452 return 0;
453 }
454 skb2->dst->ops->update_pmtu(skb2->dst, n);
455 } else if (type == ICMP_TIME_EXCEEDED) {
456 struct ip_tunnel *t = netdev_priv(skb2->dev);
457 if (t->parms.iph.ttl) {
458 rel_type = ICMP_DEST_UNREACH;
459 rel_code = ICMP_HOST_UNREACH;
460 }
461 }
462
463 icmp_send(skb2, rel_type, rel_code, rel_info);
464 kfree_skb(skb2);
465 return 0;
466#endif
467} 339}
468 340
469static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph, 341static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 92f90ae46f4a..df41026b60db 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -160,7 +160,7 @@ static struct dst_ops ipv4_dst_ops = {
160 .negative_advice = ipv4_negative_advice, 160 .negative_advice = ipv4_negative_advice,
161 .link_failure = ipv4_link_failure, 161 .link_failure = ipv4_link_failure,
162 .update_pmtu = ip_rt_update_pmtu, 162 .update_pmtu = ip_rt_update_pmtu,
163 .local_out = ip_local_out, 163 .local_out = __ip_local_out,
164 .entry_size = sizeof(struct rtable), 164 .entry_size = sizeof(struct rtable),
165 .entries = ATOMIC_INIT(0), 165 .entries = ATOMIC_INIT(0),
166}; 166};
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index debf23581606..e399bde7813a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1836,7 +1836,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1836{ 1836{
1837 struct tcp_sock *tp = tcp_sk(sk); 1837 struct tcp_sock *tp = tcp_sk(sk);
1838 struct inet_connection_sock *icsk = inet_csk(sk); 1838 struct inet_connection_sock *icsk = inet_csk(sk);
1839 unsigned int cur_mss = tcp_current_mss(sk, 0); 1839 unsigned int cur_mss;
1840 int err; 1840 int err;
1841 1841
1842 /* Inconslusive MTU probe */ 1842 /* Inconslusive MTU probe */
@@ -1858,6 +1858,11 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1858 return -ENOMEM; 1858 return -ENOMEM;
1859 } 1859 }
1860 1860
1861 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1862 return -EHOSTUNREACH; /* Routing failure or similar. */
1863
1864 cur_mss = tcp_current_mss(sk, 0);
1865
1861 /* If receiver has shrunk his window, and skb is out of 1866 /* If receiver has shrunk his window, and skb is out of
1862 * new window, do not retransmit it. The exception is the 1867 * new window, do not retransmit it. The exception is the
1863 * case, when window is shrunk to zero. In this case 1868 * case, when window is shrunk to zero. In this case
@@ -1884,9 +1889,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1884 (sysctl_tcp_retrans_collapse != 0)) 1889 (sysctl_tcp_retrans_collapse != 0))
1885 tcp_retrans_try_collapse(sk, skb, cur_mss); 1890 tcp_retrans_try_collapse(sk, skb, cur_mss);
1886 1891
1887 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1888 return -EHOSTUNREACH; /* Routing failure or similar. */
1889
1890 /* Some Solaris stacks overoptimize and ignore the FIN on a 1892 /* Some Solaris stacks overoptimize and ignore the FIN on a
1891 * retransmit when old data is attached. So strip it off 1893 * retransmit when old data is attached. So strip it off
1892 * since it is cheap to do so and saves bytes on the network. 1894 * since it is cheap to do so and saves bytes on the network.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e591e09e5e4e..3a835578fd1c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1764,14 +1764,16 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1764 * 2) Configure prefixes with the auto flag set 1764 * 2) Configure prefixes with the auto flag set
1765 */ 1765 */
1766 1766
1767 /* Avoid arithmetic overflow. Really, we could 1767 if (valid_lft == INFINITY_LIFE_TIME)
1768 save rt_expires in seconds, likely valid_lft, 1768 rt_expires = ~0UL;
1769 but it would require division in fib gc, that it 1769 else if (valid_lft >= 0x7FFFFFFF/HZ) {
1770 not good. 1770 /* Avoid arithmetic overflow. Really, we could
1771 */ 1771 * save rt_expires in seconds, likely valid_lft,
1772 if (valid_lft >= 0x7FFFFFFF/HZ) 1772 * but it would require division in fib gc, that it
1773 * not good.
1774 */
1773 rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ); 1775 rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ);
1774 else 1776 } else
1775 rt_expires = valid_lft * HZ; 1777 rt_expires = valid_lft * HZ;
1776 1778
1777 /* 1779 /*
@@ -1779,7 +1781,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1779 * Avoid arithmetic overflow there as well. 1781 * Avoid arithmetic overflow there as well.
1780 * Overflow can happen only if HZ < USER_HZ. 1782 * Overflow can happen only if HZ < USER_HZ.
1781 */ 1783 */
1782 if (HZ < USER_HZ && rt_expires > 0x7FFFFFFF / USER_HZ) 1784 if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ)
1783 rt_expires = 0x7FFFFFFF / USER_HZ; 1785 rt_expires = 0x7FFFFFFF / USER_HZ;
1784 1786
1785 if (pinfo->onlink) { 1787 if (pinfo->onlink) {
@@ -1788,17 +1790,28 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1788 dev->ifindex, 1); 1790 dev->ifindex, 1);
1789 1791
1790 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1792 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
1791 if (rt->rt6i_flags&RTF_EXPIRES) { 1793 /* Autoconf prefix route */
1792 if (valid_lft == 0) { 1794 if (valid_lft == 0) {
1793 ip6_del_rt(rt); 1795 ip6_del_rt(rt);
1794 rt = NULL; 1796 rt = NULL;
1795 } else { 1797 } else if (~rt_expires) {
1796 rt->rt6i_expires = jiffies + rt_expires; 1798 /* not infinity */
1797 } 1799 rt->rt6i_expires = jiffies + rt_expires;
1800 rt->rt6i_flags |= RTF_EXPIRES;
1801 } else {
1802 rt->rt6i_flags &= ~RTF_EXPIRES;
1803 rt->rt6i_expires = 0;
1798 } 1804 }
1799 } else if (valid_lft) { 1805 } else if (valid_lft) {
1806 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
1807 clock_t expires = 0;
1808 if (~rt_expires) {
1809 /* not infinity */
1810 flags |= RTF_EXPIRES;
1811 expires = jiffies_to_clock_t(rt_expires);
1812 }
1800 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, 1813 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
1801 dev, jiffies_to_clock_t(rt_expires), RTF_ADDRCONF|RTF_EXPIRES|RTF_PREFIX_RT); 1814 dev, expires, flags);
1802 } 1815 }
1803 if (rt) 1816 if (rt)
1804 dst_release(&rt->u.dst); 1817 dst_release(&rt->u.dst);
@@ -2021,7 +2034,8 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2021 struct inet6_dev *idev; 2034 struct inet6_dev *idev;
2022 struct net_device *dev; 2035 struct net_device *dev;
2023 int scope; 2036 int scope;
2024 u32 flags = RTF_EXPIRES; 2037 u32 flags;
2038 clock_t expires;
2025 2039
2026 ASSERT_RTNL(); 2040 ASSERT_RTNL();
2027 2041
@@ -2041,8 +2055,13 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2041 if (valid_lft == INFINITY_LIFE_TIME) { 2055 if (valid_lft == INFINITY_LIFE_TIME) {
2042 ifa_flags |= IFA_F_PERMANENT; 2056 ifa_flags |= IFA_F_PERMANENT;
2043 flags = 0; 2057 flags = 0;
2044 } else if (valid_lft >= 0x7FFFFFFF/HZ) 2058 expires = 0;
2045 valid_lft = 0x7FFFFFFF/HZ; 2059 } else {
2060 if (valid_lft >= 0x7FFFFFFF/HZ)
2061 valid_lft = 0x7FFFFFFF/HZ;
2062 flags = RTF_EXPIRES;
2063 expires = jiffies_to_clock_t(valid_lft * HZ);
2064 }
2046 2065
2047 if (prefered_lft == 0) 2066 if (prefered_lft == 0)
2048 ifa_flags |= IFA_F_DEPRECATED; 2067 ifa_flags |= IFA_F_DEPRECATED;
@@ -2060,7 +2079,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2060 spin_unlock_bh(&ifp->lock); 2079 spin_unlock_bh(&ifp->lock);
2061 2080
2062 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev, 2081 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2063 jiffies_to_clock_t(valid_lft * HZ), flags); 2082 expires, flags);
2064 /* 2083 /*
2065 * Note that section 3.1 of RFC 4429 indicates 2084 * Note that section 3.1 of RFC 4429 indicates
2066 * that the Optimistic flag should not be set for 2085 * that the Optimistic flag should not be set for
@@ -3148,7 +3167,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3148static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, 3167static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3149 u32 prefered_lft, u32 valid_lft) 3168 u32 prefered_lft, u32 valid_lft)
3150{ 3169{
3151 u32 flags = RTF_EXPIRES; 3170 u32 flags;
3171 clock_t expires;
3152 3172
3153 if (!valid_lft || (prefered_lft > valid_lft)) 3173 if (!valid_lft || (prefered_lft > valid_lft))
3154 return -EINVAL; 3174 return -EINVAL;
@@ -3156,8 +3176,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3156 if (valid_lft == INFINITY_LIFE_TIME) { 3176 if (valid_lft == INFINITY_LIFE_TIME) {
3157 ifa_flags |= IFA_F_PERMANENT; 3177 ifa_flags |= IFA_F_PERMANENT;
3158 flags = 0; 3178 flags = 0;
3159 } else if (valid_lft >= 0x7FFFFFFF/HZ) 3179 expires = 0;
3160 valid_lft = 0x7FFFFFFF/HZ; 3180 } else {
3181 if (valid_lft >= 0x7FFFFFFF/HZ)
3182 valid_lft = 0x7FFFFFFF/HZ;
3183 flags = RTF_EXPIRES;
3184 expires = jiffies_to_clock_t(valid_lft * HZ);
3185 }
3161 3186
3162 if (prefered_lft == 0) 3187 if (prefered_lft == 0)
3163 ifa_flags |= IFA_F_DEPRECATED; 3188 ifa_flags |= IFA_F_DEPRECATED;
@@ -3176,7 +3201,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3176 ipv6_ifa_notify(0, ifp); 3201 ipv6_ifa_notify(0, ifp);
3177 3202
3178 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, 3203 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
3179 jiffies_to_clock_t(valid_lft * HZ), flags); 3204 expires, flags);
3180 addrconf_verify(0); 3205 addrconf_verify(0);
3181 3206
3182 return 0; 3207 return 0;
@@ -4242,7 +4267,7 @@ static void addrconf_sysctl_register(struct inet6_dev *idev)
4242 neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6, 4267 neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6,
4243 NET_IPV6_NEIGH, "ipv6", 4268 NET_IPV6_NEIGH, "ipv6",
4244 &ndisc_ifinfo_sysctl_change, 4269 &ndisc_ifinfo_sysctl_change,
4245 NULL); 4270 ndisc_ifinfo_sysctl_strategy);
4246 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 4271 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4247 idev->dev->ifindex, idev, &idev->cnf); 4272 idev->dev->ifindex, idev, &idev->cnf);
4248} 4273}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index a55fc05b8125..282fdb31f8ed 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1727,10 +1727,10 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
1727 return ret; 1727 return ret;
1728} 1728}
1729 1729
1730static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name, 1730int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
1731 int nlen, void __user *oldval, 1731 int nlen, void __user *oldval,
1732 size_t __user *oldlenp, 1732 size_t __user *oldlenp,
1733 void __user *newval, size_t newlen) 1733 void __user *newval, size_t newlen)
1734{ 1734{
1735 struct net_device *dev = ctl->extra1; 1735 struct net_device *dev = ctl->extra1;
1736 struct inet6_dev *idev; 1736 struct inet6_dev *idev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 12bba0880345..48534c6c0735 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -109,7 +109,7 @@ static struct dst_ops ip6_dst_ops_template = {
109 .negative_advice = ip6_negative_advice, 109 .negative_advice = ip6_negative_advice,
110 .link_failure = ip6_link_failure, 110 .link_failure = ip6_link_failure,
111 .update_pmtu = ip6_rt_update_pmtu, 111 .update_pmtu = ip6_rt_update_pmtu,
112 .local_out = ip6_local_out, 112 .local_out = __ip6_local_out,
113 .entry_size = sizeof(struct rt6_info), 113 .entry_size = sizeof(struct rt6_info),
114 .entries = ATOMIC_INIT(0), 114 .entries = ATOMIC_INIT(0),
115}; 115};
@@ -475,7 +475,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
475 lifetime = ntohl(rinfo->lifetime); 475 lifetime = ntohl(rinfo->lifetime);
476 if (lifetime == 0xffffffff) { 476 if (lifetime == 0xffffffff) {
477 /* infinity */ 477 /* infinity */
478 } else if (lifetime > 0x7fffffff/HZ) { 478 } else if (lifetime > 0x7fffffff/HZ - 1) {
479 /* Avoid arithmetic overflow */ 479 /* Avoid arithmetic overflow */
480 lifetime = 0x7fffffff/HZ - 1; 480 lifetime = 0x7fffffff/HZ - 1;
481 } 481 }
@@ -1106,7 +1106,9 @@ int ip6_route_add(struct fib6_config *cfg)
1106 } 1106 }
1107 1107
1108 rt->u.dst.obsolete = -1; 1108 rt->u.dst.obsolete = -1;
1109 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires); 1109 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1110 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1111 0;
1110 1112
1111 if (cfg->fc_protocol == RTPROT_UNSPEC) 1113 if (cfg->fc_protocol == RTPROT_UNSPEC)
1112 cfg->fc_protocol = RTPROT_BOOT; 1114 cfg->fc_protocol = RTPROT_BOOT;
@@ -2200,7 +2202,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2200 2202
2201 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); 2203 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2202 2204
2203 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0; 2205 expires = (rt->rt6i_flags & RTF_EXPIRES) ?
2206 rt->rt6i_expires - jiffies : 0;
2207
2204 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, 2208 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2205 expires, rt->u.dst.error) < 0) 2209 expires, rt->u.dst.error) < 0)
2206 goto nla_put_failure; 2210 goto nla_put_failure;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b0ee96187633..6b8f0583b637 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -403,9 +403,8 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
403 403
404static int ipip6_err(struct sk_buff *skb, u32 info) 404static int ipip6_err(struct sk_buff *skb, u32 info)
405{ 405{
406#ifndef I_WISH_WORLD_WERE_PERFECT
407 406
408/* It is not :-( All the routers (except for Linux) return only 407/* All the routers (except for Linux) return only
409 8 bytes of packet payload. It means, that precise relaying of 408 8 bytes of packet payload. It means, that precise relaying of
410 ICMP in the real Internet is absolutely infeasible. 409 ICMP in the real Internet is absolutely infeasible.
411 */ 410 */
@@ -462,92 +461,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
462out: 461out:
463 read_unlock(&ipip6_lock); 462 read_unlock(&ipip6_lock);
464 return err; 463 return err;
465#else
466 struct iphdr *iph = (struct iphdr*)dp;
467 int hlen = iph->ihl<<2;
468 struct ipv6hdr *iph6;
469 const int type = icmp_hdr(skb)->type;
470 const int code = icmp_hdr(skb)->code;
471 int rel_type = 0;
472 int rel_code = 0;
473 int rel_info = 0;
474 struct sk_buff *skb2;
475 struct rt6_info *rt6i;
476
477 if (len < hlen + sizeof(struct ipv6hdr))
478 return;
479 iph6 = (struct ipv6hdr*)(dp + hlen);
480
481 switch (type) {
482 default:
483 return;
484 case ICMP_PARAMETERPROB:
485 if (icmp_hdr(skb)->un.gateway < hlen)
486 return;
487
488 /* So... This guy found something strange INSIDE encapsulated
489 packet. Well, he is fool, but what can we do ?
490 */
491 rel_type = ICMPV6_PARAMPROB;
492 rel_info = icmp_hdr(skb)->un.gateway - hlen;
493 break;
494
495 case ICMP_DEST_UNREACH:
496 switch (code) {
497 case ICMP_SR_FAILED:
498 case ICMP_PORT_UNREACH:
499 /* Impossible event. */
500 return;
501 case ICMP_FRAG_NEEDED:
502 /* Too complicated case ... */
503 return;
504 default:
505 /* All others are translated to HOST_UNREACH.
506 rfc2003 contains "deep thoughts" about NET_UNREACH,
507 I believe, it is just ether pollution. --ANK
508 */
509 rel_type = ICMPV6_DEST_UNREACH;
510 rel_code = ICMPV6_ADDR_UNREACH;
511 break;
512 }
513 break;
514 case ICMP_TIME_EXCEEDED:
515 if (code != ICMP_EXC_TTL)
516 return;
517 rel_type = ICMPV6_TIME_EXCEED;
518 rel_code = ICMPV6_EXC_HOPLIMIT;
519 break;
520 }
521
522 /* Prepare fake skb to feed it to icmpv6_send */
523 skb2 = skb_clone(skb, GFP_ATOMIC);
524 if (skb2 == NULL)
525 return 0;
526 dst_release(skb2->dst);
527 skb2->dst = NULL;
528 skb_pull(skb2, skb->data - (u8*)iph6);
529 skb_reset_network_header(skb2);
530
531 /* Try to guess incoming interface */
532 rt6i = rt6_lookup(dev_net(skb->dev), &iph6->saddr, NULL, NULL, 0);
533 if (rt6i && rt6i->rt6i_dev) {
534 skb2->dev = rt6i->rt6i_dev;
535
536 rt6i = rt6_lookup(dev_net(skb->dev),
537 &iph6->daddr, &iph6->saddr, NULL, 0);
538
539 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
540 struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
541 if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
542 rel_type = ICMPV6_DEST_UNREACH;
543 rel_code = ICMPV6_ADDR_UNREACH;
544 }
545 icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
546 }
547 }
548 kfree_skb(skb2);
549 return 0;
550#endif
551} 464}
552 465
553static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 466static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e0eab5927c4f..f6e54fa97f47 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -628,8 +628,8 @@ dev_irnet_poll(struct file * file,
628 * This is the way pppd configure us and control us while the PPP 628 * This is the way pppd configure us and control us while the PPP
629 * instance is active. 629 * instance is active.
630 */ 630 */
631static int 631static long
632dev_irnet_ioctl(struct inode * inode, 632dev_irnet_ioctl(
633 struct file * file, 633 struct file * file,
634 unsigned int cmd, 634 unsigned int cmd,
635 unsigned long arg) 635 unsigned long arg)
@@ -660,6 +660,7 @@ dev_irnet_ioctl(struct inode * inode,
660 { 660 {
661 DEBUG(FS_INFO, "Entering PPP discipline.\n"); 661 DEBUG(FS_INFO, "Entering PPP discipline.\n");
662 /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ 662 /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/
663 lock_kernel();
663 err = ppp_register_channel(&ap->chan); 664 err = ppp_register_channel(&ap->chan);
664 if(err == 0) 665 if(err == 0)
665 { 666 {
@@ -672,12 +673,14 @@ dev_irnet_ioctl(struct inode * inode,
672 } 673 }
673 else 674 else
674 DERROR(FS_ERROR, "Can't setup PPP channel...\n"); 675 DERROR(FS_ERROR, "Can't setup PPP channel...\n");
676 unlock_kernel();
675 } 677 }
676 else 678 else
677 { 679 {
678 /* In theory, should be N_TTY */ 680 /* In theory, should be N_TTY */
679 DEBUG(FS_INFO, "Exiting PPP discipline.\n"); 681 DEBUG(FS_INFO, "Exiting PPP discipline.\n");
680 /* Disconnect from the generic PPP layer */ 682 /* Disconnect from the generic PPP layer */
683 lock_kernel();
681 if(ap->ppp_open) 684 if(ap->ppp_open)
682 { 685 {
683 ap->ppp_open = 0; 686 ap->ppp_open = 0;
@@ -686,24 +689,20 @@ dev_irnet_ioctl(struct inode * inode,
686 else 689 else
687 DERROR(FS_ERROR, "Channel not registered !\n"); 690 DERROR(FS_ERROR, "Channel not registered !\n");
688 err = 0; 691 err = 0;
692 unlock_kernel();
689 } 693 }
690 break; 694 break;
691 695
692 /* Query PPP channel and unit number */ 696 /* Query PPP channel and unit number */
693 case PPPIOCGCHAN: 697 case PPPIOCGCHAN:
694 if(!ap->ppp_open) 698 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
695 break; 699 (int __user *)argp))
696 if(put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) 700 err = 0;
697 break;
698 DEBUG(FS_INFO, "Query channel.\n");
699 err = 0;
700 break; 701 break;
701 case PPPIOCGUNIT: 702 case PPPIOCGUNIT:
702 if(!ap->ppp_open) 703 lock_kernel();
703 break; 704 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
704 if(put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) 705 (int __user *)argp))
705 break;
706 DEBUG(FS_INFO, "Query unit number.\n");
707 err = 0; 706 err = 0;
708 break; 707 break;
709 708
@@ -723,34 +722,39 @@ dev_irnet_ioctl(struct inode * inode,
723 DEBUG(FS_INFO, "Standard PPP ioctl.\n"); 722 DEBUG(FS_INFO, "Standard PPP ioctl.\n");
724 if(!capable(CAP_NET_ADMIN)) 723 if(!capable(CAP_NET_ADMIN))
725 err = -EPERM; 724 err = -EPERM;
726 else 725 else {
726 lock_kernel();
727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg); 727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg);
728 unlock_kernel();
729 }
728 break; 730 break;
729 731
730 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ 732 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */
731 /* Get termios */ 733 /* Get termios */
732 case TCGETS: 734 case TCGETS:
733 DEBUG(FS_INFO, "Get termios.\n"); 735 DEBUG(FS_INFO, "Get termios.\n");
736 lock_kernel();
734#ifndef TCGETS2 737#ifndef TCGETS2
735 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) 738 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
736 break; 739 err = 0;
737#else 740#else
738 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) 741 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
739 break; 742 err = 0;
740#endif 743#endif
741 err = 0; 744 unlock_kernel();
742 break; 745 break;
743 /* Set termios */ 746 /* Set termios */
744 case TCSETSF: 747 case TCSETSF:
745 DEBUG(FS_INFO, "Set termios.\n"); 748 DEBUG(FS_INFO, "Set termios.\n");
749 lock_kernel();
746#ifndef TCGETS2 750#ifndef TCGETS2
747 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) 751 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
748 break; 752 err = 0;
749#else 753#else
750 if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) 754 if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
751 break; 755 err = 0;
752#endif 756#endif
753 err = 0; 757 unlock_kernel();
754 break; 758 break;
755 759
756 /* Set DTR/RTS */ 760 /* Set DTR/RTS */
@@ -773,7 +777,9 @@ dev_irnet_ioctl(struct inode * inode,
773 * We should also worry that we don't accept junk here and that 777 * We should also worry that we don't accept junk here and that
774 * we get rid of our own buffers */ 778 * we get rid of our own buffers */
775#ifdef FLUSH_TO_PPP 779#ifdef FLUSH_TO_PPP
780 lock_kernel();
776 ppp_output_wakeup(&ap->chan); 781 ppp_output_wakeup(&ap->chan);
782 unlock_kernel();
777#endif /* FLUSH_TO_PPP */ 783#endif /* FLUSH_TO_PPP */
778 err = 0; 784 err = 0;
779 break; 785 break;
@@ -788,7 +794,7 @@ dev_irnet_ioctl(struct inode * inode,
788 794
789 default: 795 default:
790 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); 796 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd);
791 err = -ENOIOCTLCMD; 797 err = -ENOTTY;
792 } 798 }
793 799
794 DEXIT(FS_TRACE, " - err = 0x%X\n", err); 800 DEXIT(FS_TRACE, " - err = 0x%X\n", err);
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d2beb7df8f7f..d9f8bd4ebd05 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -76,9 +76,8 @@ static ssize_t
76static unsigned int 76static unsigned int
77 dev_irnet_poll(struct file *, 77 dev_irnet_poll(struct file *,
78 poll_table *); 78 poll_table *);
79static int 79static long
80 dev_irnet_ioctl(struct inode *, 80 dev_irnet_ioctl(struct file *,
81 struct file *,
82 unsigned int, 81 unsigned int,
83 unsigned long); 82 unsigned long);
84/* ------------------------ PPP INTERFACE ------------------------ */ 83/* ------------------------ PPP INTERFACE ------------------------ */
@@ -102,7 +101,7 @@ static struct file_operations irnet_device_fops =
102 .read = dev_irnet_read, 101 .read = dev_irnet_read,
103 .write = dev_irnet_write, 102 .write = dev_irnet_write,
104 .poll = dev_irnet_poll, 103 .poll = dev_irnet_poll,
105 .ioctl = dev_irnet_ioctl, 104 .unlocked_ioctl = dev_irnet_ioctl,
106 .open = dev_irnet_open, 105 .open = dev_irnet_open,
107 .release = dev_irnet_close 106 .release = dev_irnet_close
108 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ 107 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9e7236ff6bcc..9bba7ac5fee0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1251,7 +1251,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
1251 x->sel.prefixlen_s = addr->sadb_address_prefixlen; 1251 x->sel.prefixlen_s = addr->sadb_address_prefixlen;
1252 } 1252 }
1253 1253
1254 if (x->props.mode == XFRM_MODE_TRANSPORT) 1254 if (!x->sel.family)
1255 x->sel.family = x->props.family; 1255 x->sel.family = x->props.family;
1256 1256
1257 if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { 1257 if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7877d3b3f4cb..3f7f92a2f227 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -721,7 +721,17 @@ static void ieee80211_send_assoc(struct net_device *dev,
721 capab |= WLAN_CAPABILITY_PRIVACY; 721 capab |= WLAN_CAPABILITY_PRIVACY;
722 if (bss->wmm_ie) 722 if (bss->wmm_ie)
723 wmm = 1; 723 wmm = 1;
724
725 /* get all rates supported by the device and the AP as
726 * some APs don't like getting a superset of their rates
727 * in the association request (e.g. D-Link DAP 1353 in
728 * b-only mode) */
729 rates_len = ieee80211_compatible_rates(bss, sband, &rates);
730
724 ieee80211_rx_bss_put(dev, bss); 731 ieee80211_rx_bss_put(dev, bss);
732 } else {
733 rates = ~0;
734 rates_len = sband->n_bitrates;
725 } 735 }
726 736
727 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 737 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -752,10 +762,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
752 *pos++ = ifsta->ssid_len; 762 *pos++ = ifsta->ssid_len;
753 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 763 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
754 764
755 /* all supported rates should be added here but some APs 765 /* add all rates which were marked to be used above */
756 * (e.g. D-Link DAP 1353 in b-only mode) don't like that
757 * Therefore only add rates the AP supports */
758 rates_len = ieee80211_compatible_rates(bss, sband, &rates);
759 supp_rates_len = rates_len; 766 supp_rates_len = rates_len;
760 if (supp_rates_len > 8) 767 if (supp_rates_len > 8)
761 supp_rates_len = 8; 768 supp_rates_len = 8;
@@ -3434,21 +3441,17 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
3434 struct ieee80211_sta_bss *bss, *selected = NULL; 3441 struct ieee80211_sta_bss *bss, *selected = NULL;
3435 int top_rssi = 0, freq; 3442 int top_rssi = 0, freq;
3436 3443
3437 if (!(ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
3438 IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL))) {
3439 ifsta->state = IEEE80211_AUTHENTICATE;
3440 ieee80211_sta_reset_auth(dev, ifsta);
3441 return 0;
3442 }
3443
3444 spin_lock_bh(&local->sta_bss_lock); 3444 spin_lock_bh(&local->sta_bss_lock);
3445 freq = local->oper_channel->center_freq; 3445 freq = local->oper_channel->center_freq;
3446 list_for_each_entry(bss, &local->sta_bss_list, list) { 3446 list_for_each_entry(bss, &local->sta_bss_list, list) {
3447 if (!(bss->capability & WLAN_CAPABILITY_ESS)) 3447 if (!(bss->capability & WLAN_CAPABILITY_ESS))
3448 continue; 3448 continue;
3449 3449
3450 if (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ 3450 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
3451 !!sdata->default_key) 3451 IEEE80211_STA_AUTO_BSSID_SEL |
3452 IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
3453 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
3454 !!sdata->default_key))
3452 continue; 3455 continue;
3453 3456
3454 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && 3457 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 24a465c4df09..131e9e6c8a32 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -389,6 +389,41 @@ void ieee80211_iterate_active_interfaces(
389 struct ieee80211_local *local = hw_to_local(hw); 389 struct ieee80211_local *local = hw_to_local(hw);
390 struct ieee80211_sub_if_data *sdata; 390 struct ieee80211_sub_if_data *sdata;
391 391
392 rtnl_lock();
393
394 list_for_each_entry(sdata, &local->interfaces, list) {
395 switch (sdata->vif.type) {
396 case IEEE80211_IF_TYPE_INVALID:
397 case IEEE80211_IF_TYPE_MNTR:
398 case IEEE80211_IF_TYPE_VLAN:
399 continue;
400 case IEEE80211_IF_TYPE_AP:
401 case IEEE80211_IF_TYPE_STA:
402 case IEEE80211_IF_TYPE_IBSS:
403 case IEEE80211_IF_TYPE_WDS:
404 case IEEE80211_IF_TYPE_MESH_POINT:
405 break;
406 }
407 if (sdata->dev == local->mdev)
408 continue;
409 if (netif_running(sdata->dev))
410 iterator(data, sdata->dev->dev_addr,
411 &sdata->vif);
412 }
413
414 rtnl_unlock();
415}
416EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
417
418void ieee80211_iterate_active_interfaces_atomic(
419 struct ieee80211_hw *hw,
420 void (*iterator)(void *data, u8 *mac,
421 struct ieee80211_vif *vif),
422 void *data)
423{
424 struct ieee80211_local *local = hw_to_local(hw);
425 struct ieee80211_sub_if_data *sdata;
426
392 rcu_read_lock(); 427 rcu_read_lock();
393 428
394 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 429 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -413,4 +448,4 @@ void ieee80211_iterate_active_interfaces(
413 448
414 rcu_read_unlock(); 449 rcu_read_unlock();
415} 450}
416EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); 451EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 6a342a9a40cd..c2e2378af082 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -221,7 +221,6 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
221 range->num_frequency = c; 221 range->num_frequency = c;
222 222
223 IW_EVENT_CAPA_SET_KERNEL(range->event_capa); 223 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
224 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
225 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); 224 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
226 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); 225 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
227 226
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1086df7478bc..9360fc81e8c7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -220,7 +220,7 @@ replay:
220 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 220 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
221 if (tp == NULL) 221 if (tp == NULL)
222 goto errout; 222 goto errout;
223 err = -EINVAL; 223 err = -ENOENT;
224 tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]); 224 tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
225 if (tp_ops == NULL) { 225 if (tp_ops == NULL) {
226#ifdef CONFIG_KMOD 226#ifdef CONFIG_KMOD
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 9ab31a3ce3ad..b210a88d0960 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -350,9 +350,9 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
350 * o execute requested action or pass command to the device driver 350 * o execute requested action or pass command to the device driver
351 */ 351 */
352 352
353int wanrouter_ioctl(struct inode *inode, struct file *file, 353long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
354 unsigned int cmd, unsigned long arg)
355{ 354{
355 struct inode *inode = file->f_path.dentry->d_inode;
356 int err = 0; 356 int err = 0;
357 struct proc_dir_entry *dent; 357 struct proc_dir_entry *dent;
358 struct wan_device *wandev; 358 struct wan_device *wandev;
@@ -372,6 +372,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
372 if (wandev->magic != ROUTER_MAGIC) 372 if (wandev->magic != ROUTER_MAGIC)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 lock_kernel();
375 switch (cmd) { 376 switch (cmd) {
376 case ROUTER_SETUP: 377 case ROUTER_SETUP:
377 err = wanrouter_device_setup(wandev, data); 378 err = wanrouter_device_setup(wandev, data);
@@ -403,6 +404,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
403 err = wandev->ioctl(wandev, cmd, arg); 404 err = wandev->ioctl(wandev, cmd, arg);
404 else err = -EINVAL; 405 else err = -EINVAL;
405 } 406 }
407 unlock_kernel();
406 return err; 408 return err;
407} 409}
408 410
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 5bebe40bf4e6..267f7ff49827 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -278,7 +278,7 @@ static const struct file_operations wandev_fops = {
278 .read = seq_read, 278 .read = seq_read,
279 .llseek = seq_lseek, 279 .llseek = seq_lseek,
280 .release = single_release, 280 .release = single_release,
281 .ioctl = wanrouter_ioctl, 281 .unlocked_ioctl = wanrouter_ioctl,
282}; 282};
283 283
284/* 284/*
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a1b0fbe3ea35..b976d9ed10e4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -50,19 +50,8 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
50 50
51 switch (type) { 51 switch (type) {
52 case XFRMA_ALG_AUTH: 52 case XFRMA_ALG_AUTH:
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "digest_null") != 0)
55 return -EINVAL;
56 break;
57
58 case XFRMA_ALG_CRYPT: 53 case XFRMA_ALG_CRYPT:
59 if (!algp->alg_key_len &&
60 strcmp(algp->alg_name, "cipher_null") != 0)
61 return -EINVAL;
62 break;
63
64 case XFRMA_ALG_COMP: 54 case XFRMA_ALG_COMP:
65 /* Zero length keys are legal. */
66 break; 55 break;
67 56
68 default: 57 default: