author    David S. Miller <davem@davemloft.net>  2008-09-24 19:09:13 -0400
committer David S. Miller <davem@davemloft.net>  2008-09-24 19:09:13 -0400
commit    152cbcf94baec68b45832db5024184906ab798b1 (patch)
tree      e469a602535fec4355e5b63671eaf52cdd94caaf
parent    96ca4a2cc1454cf633a1e0796b7ef39d937b87ec (diff)
parent    fa53ebac42d3de04619c813f5f6628ca2a7ce97f (diff)

Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
-rw-r--r--  MAINTAINERS                          |    4
-rw-r--r--  drivers/net/3c505.c                  |    4
-rw-r--r--  drivers/net/8139too.c                |    3
-rw-r--r--  drivers/net/Kconfig                  |    3
-rw-r--r--  drivers/net/arcnet/arcnet.c          |   18
-rw-r--r--  drivers/net/arcnet/com20020.c        |   16
-rw-r--r--  drivers/net/atl1e/atl1e_main.c       |    2
-rw-r--r--  drivers/net/au1000_eth.c             |    3
-rw-r--r--  drivers/net/ax88796.c                |   14
-rw-r--r--  drivers/net/bfin_mac.c               |    8
-rw-r--r--  drivers/net/bonding/bond_alb.c       |   24
-rw-r--r--  drivers/net/bonding/bond_main.c      |    6
-rw-r--r--  drivers/net/bonding/bonding.h        |    2
-rw-r--r--  drivers/net/cs89x0.c                 |    2
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c    |    8
-rw-r--r--  drivers/net/cxgb3/sge.c              |   35
-rw-r--r--  drivers/net/e100.c                   |    2
-rw-r--r--  drivers/net/ehea/ehea.h              |    4
-rw-r--r--  drivers/net/ehea/ehea_main.c         |   26
-rw-r--r--  drivers/net/ehea/ehea_phyp.c         |    2
-rw-r--r--  drivers/net/ehea/ehea_qmr.c          |    3
-rw-r--r--  drivers/net/enc28j60.c               |   56
-rw-r--r--  drivers/net/ibm_newemac/phy.c        |    2
-rw-r--r--  drivers/net/ixgb/ixgb.h              |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |   60
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c      |  628
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c     |  972
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h     |   62
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    |  298
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 1299
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c        |  244
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h        |   63
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h       |  528
-rw-r--r--  drivers/net/meth.c                   |    2
-rw-r--r--  drivers/net/mipsnet.c                |    2
-rw-r--r--  drivers/net/mlx4/alloc.c             |    1
-rw-r--r--  drivers/net/myri10ge/myri10ge.c      |   33
-rw-r--r--  drivers/net/ne.c                     |    9
-rw-r--r--  drivers/net/netx-eth.c               |    2
-rw-r--r--  drivers/net/netxen/netxen_nic.h      |    2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c |   20
-rw-r--r--  drivers/net/pci-skeleton.c           |    4
-rw-r--r--  drivers/net/r6040.c                  |    4
-rw-r--r--  drivers/net/r8169.c                  |   27
-rw-r--r--  drivers/net/s2io.c                   |   58
-rw-r--r--  drivers/net/s2io.h                   |    1
-rw-r--r--  drivers/net/sfc/efx.c                |   18
-rw-r--r--  drivers/net/sfc/falcon.c             |  260
-rw-r--r--  drivers/net/sfc/falcon.h             |    1
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h      |    1
-rw-r--r--  drivers/net/sfc/falcon_io.h          |    1
-rw-r--r--  drivers/net/sfc/falcon_xmac.c        |   88
-rw-r--r--  drivers/net/sfc/net_driver.h         |    8
-rw-r--r--  drivers/net/sfc/sfe4001.c            |   12
-rw-r--r--  drivers/net/sfc/tenxpress.c          |   69
-rw-r--r--  drivers/net/sfc/tx.c                 |    2
-rw-r--r--  drivers/net/sfc/workarounds.h        |    2
-rw-r--r--  drivers/net/sfc/xfp_phy.c            |    1
-rw-r--r--  drivers/net/skfp/pmf.c               |   29
-rw-r--r--  drivers/net/smc911x.c                |   68
-rw-r--r--  drivers/net/smc91x.c                 |   43
-rw-r--r--  drivers/net/smc91x.h                 |    2
-rw-r--r--  drivers/net/sundance.c               |   95
-rw-r--r--  drivers/net/tehuti.h                 |    8
-rw-r--r--  drivers/net/tsi108_eth.c             |    6
-rw-r--r--  drivers/net/tulip/de2104x.c          |    1
-rw-r--r--  drivers/net/ucc_geth.c               |  116
-rw-r--r--  drivers/net/usb/hso.c                |  335
-rw-r--r--  drivers/net/usb/mcs7830.c            |    2
-rw-r--r--  drivers/net/usb/pegasus.c            |   20
-rw-r--r--  drivers/net/via-velocity.h           |    2
-rw-r--r--  drivers/net/wan/cycx_drv.c           |    6
-rw-r--r--  drivers/net/wan/cycx_x25.c           |   12
-rw-r--r--  drivers/net/wan/dscc4.c              |    2
-rw-r--r--  drivers/net/wan/hdlc_x25.c           |    8
-rw-r--r--  drivers/net/wan/pc300_tty.c          |    2
-rw-r--r--  include/linux/pci_ids.h              |   10
77 files changed, 3422 insertions, 2376 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 106684e45e15..c29b420fc1ca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -750,11 +750,13 @@ P: Ville Syrjala
 M:	syrjala@sci.fi
 S:	Maintained
 
-ATL1 ETHERNET DRIVER
+ATLX ETHERNET DRIVERS
 P:	Jay Cliburn
 M:	jcliburn@gmail.com
 P:	Chris Snook
 M:	csnook@redhat.com
+P:	Jie Yang
+M:	jie.yang@atheros.com
 L:	atl1-devel@lists.sourceforge.net
 W:	http://sourceforge.net/projects/atl1
 W:	http://atl1.sourceforge.net
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index fdfb2b2cb734..a424869707a5 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -130,12 +130,12 @@ static const char filename[] = __FILE__;
 
 static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
 #define TIMEOUT_MSG(lineno) \
-	printk(timeout_msg, filename,__FUNCTION__,(lineno))
+	printk(timeout_msg, filename,__func__,(lineno))
 
 static const char invalid_pcb_msg[] =
 "*** invalid pcb length %d at %s:%s (line %d) ***\n";
 #define INVALID_PCB_MSG(len) \
-	printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+	printk(invalid_pcb_msg, (len),filename,__func__,__LINE__)
 
 static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
 
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f6ca99774cc2..32e66f0d4344 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -309,7 +309,7 @@ enum RTL8139_registers {
 	Cfg9346 = 0x50,
 	Config0 = 0x51,
 	Config1 = 0x52,
-	FlashReg = 0x54,
+	TimerInt = 0x54,
 	MediaStatus = 0x58,
 	Config3 = 0x59,
 	Config4 = 0x5A,	/* absent on RTL-8139A */
@@ -325,6 +325,7 @@ enum RTL8139_registers {
 	FIFOTMS = 0x70,	/* FIFO Control and test. */
 	CSCR = 0x74,	/* Chip Status and Configuration Register. */
 	PARA78 = 0x78,
+	FlashReg = 0xD4,	/* Communication with Flash ROM, four bytes. */
 	PARA7c = 0x7c,	/* Magic transceiver parameter register. */
 	Config5 = 0xD8,	/* absent on RTL-8139A */
 };
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 69c81da48ebc..031b95b1f229 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2057,6 +2057,7 @@ config R8169
 	tristate "Realtek 8169 gigabit ethernet support"
 	depends on PCI
 	select CRC32
+	select MII
 	---help---
 	  Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
 
@@ -2411,6 +2412,7 @@ config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI && INET
 	select INET_LRO
+	select INTEL_IOATDMA
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
 	  adapters. For more information on how to identify your adapter, go
@@ -2462,6 +2464,7 @@ config MYRI10GE
 	select FW_LOADER
 	select CRC32
 	select INET_LRO
+	select INTEL_IOATDMA
 	---help---
 	  This driver supports Myricom Myri-10G Dual Protocol interface in
 	  Ethernet mode. If the eeprom on your board is not recent enough,
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index bdc4c0bb56d9..a5b07691e466 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -442,24 +442,24 @@ static int arcnet_open(struct net_device *dev)
 	BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse "
 	       "DOS networking programs!\n");
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	if (ASTATUS() & RESETflag) {
-		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 		ACOMMAND(CFLAGScmd | RESETclear);
 	}
 
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	/* make sure we're ready to receive IRQ's. */
 	AINTMASK(0);
 	udelay(1);	/* give it time to set the mask before
 			 * we reset it again. (may not even be
 			 * necessary)
 			 */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->intmask = NORXflag | RECONflag;
 	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	netif_start_queue(dev);
 
@@ -670,14 +670,14 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
 		freeskb = 0;
 	}
 
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
 	/* make sure we didn't ignore a TX IRQ while we were in here */
 	AINTMASK(0);
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->intmask |= TXFREEflag|EXCNAKflag;
 	AINTMASK(lp->intmask);
-	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+	BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 	if (freeskb) {
@@ -798,7 +798,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 	diagstatus = (status >> 8) & 0xFF;
 
 	BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
-	       __FILE__,__LINE__,__FUNCTION__,status);
+	       __FILE__,__LINE__,__func__,status);
 	didsomething = 0;
 
 	/*
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 8b51313b1300..70124a944e7d 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -238,15 +238,15 @@ static int com20020_reset(struct net_device *dev, int really_reset)
 	u_char inbyte;
 
 	BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
-	       __FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name);
+	       __FILE__,__LINE__,__func__,dev,lp,dev->name);
 	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
 	       dev->name, ASTATUS());
 
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
 	/* power-up defaults */
 	SETCONF;
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	if (really_reset) {
 		/* reset the card */
@@ -254,22 +254,22 @@ static int com20020_reset(struct net_device *dev, int really_reset)
 		mdelay(RESETtime * 2);	/* COM20020 seems to be slower sometimes */
 	}
 	/* clear flags & end reset */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
 
 	/* verify that the ARCnet signature byte is present */
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 	if (inbyte != TESTvalue) {
-		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+		BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 		BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
 		return 1;
 	}
 	/* enable extended (512-byte) packets */
 	ACOMMAND(CONFIGcmd | EXTconf);
-	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+	BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
 
 	/* done! return success. */
 	return 0;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 7685b995ff9b..9b603528143d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2390,9 +2390,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
 	}
 
 	/* Init GPHY as early as possible due to power saving issue  */
-	spin_lock(&adapter->mdio_lock);
 	atl1e_phy_init(&adapter->hw);
-	spin_unlock(&adapter->mdio_lock);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1e_reset_hw(&adapter->hw);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 5ee1b0557a02..92c16c37ff23 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -653,6 +653,8 @@ static struct net_device * au1000_probe(int port_num)
 
 	aup = dev->priv;
 
+	spin_lock_init(&aup->lock);
+
 	/* Allocate the data buffers */
 	/* Snooping works fine with eth on all au1xxx */
 	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
@@ -753,7 +755,6 @@ static struct net_device * au1000_probe(int port_num)
 		aup->tx_db_inuse[i] = pDB;
 	}
 
-	spin_lock_init(&aup->lock);
 	dev->base_addr = base;
 	dev->irq = irq;
 	dev->open = au1000_open;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index a886a4b9f7e5..4207d6efddc0 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -153,7 +153,7 @@ static void ax_reset_8390(struct net_device *dev)
 	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
 		if (jiffies - reset_start_time > 2*HZ/100) {
 			dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
-				 __FUNCTION__, dev->name);
+				 __func__, dev->name);
 			break;
 		}
 	}
@@ -173,7 +173,7 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
 	if (ei_status.dmaing) {
 		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 			ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -215,7 +215,7 @@ static void ax_block_input(struct net_device *dev, int count,
 		dev_err(&ax->dev->dev,
 			"%s: DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 			ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -260,7 +260,7 @@ static void ax_block_output(struct net_device *dev, int count,
 	if (ei_status.dmaing) {
 		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
 			"[DMAstat:%d][irqlock:%d]\n",
-			dev->name, __FUNCTION__,
+			dev->name, __func__,
 			ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
@@ -396,7 +396,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
 {
 	if (phy_debug)
 		pr_debug("%s: dev %p, %04x, %04x, %d\n",
-			 __FUNCTION__, dev, phy_addr, reg, opc);
+			 __func__, dev, phy_addr, reg, opc);
 
 	ax_mii_ei_outbits(dev, 0x3f, 6);	/* pre-amble */
 	ax_mii_ei_outbits(dev, 1, 2);	/* frame-start */
@@ -422,7 +422,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 
 	if (phy_debug)
-		pr_debug("%s: %04x.%04x => read %04x\n", __FUNCTION__,
+		pr_debug("%s: %04x.%04x => read %04x\n", __func__,
 			 phy_addr, reg, result);
 
 	return result;
@@ -436,7 +436,7 @@ ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
 	unsigned long flags;
 
 	dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-		__FUNCTION__, dev, phy_addr, reg, value);
+		__func__, dev, phy_addr, reg, value);
 
 	spin_lock_irqsave(&ei->page_lock, flags);
 
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 3db7db1828e7..df896e23e2c5 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -811,7 +811,7 @@ static void bfin_mac_enable(void)
 {
 	u32 opmode;
 
-	pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__);
+	pr_debug("%s: %s\n", DRV_NAME, __func__);
 
 	/* Set RX DMA */
 	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -847,7 +847,7 @@ static void bfin_mac_enable(void)
 /* Our watchdog timed out. Called by the networking layer */
 static void bfin_mac_timeout(struct net_device *dev)
 {
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	bfin_mac_disable();
 
@@ -949,7 +949,7 @@ static int bfin_mac_open(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	int retval;
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	/*
 	 * Check that the address is valid. If its not, refuse
@@ -989,7 +989,7 @@ static int bfin_mac_open(struct net_device *dev)
 static int bfin_mac_close(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
-	pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+	pr_debug("%s: %s\n", dev->name, __func__);
 
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 3d39278a63e3..ade5f3f6693b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <net/ipx.h>
 #include <net/arp.h>
+#include <net/ipv6.h>
 #include <asm/byteorder.h>
 #include "bonding.h"
 #include "bond_alb.h"
@@ -81,6 +82,7 @@
 #define RLB_PROMISC_TIMEOUT	10*ALB_TIMER_TICKS_PER_SEC
 
 static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
+static const u8 mac_v6_allmcast[ETH_ALEN] = {0x33,0x33,0x00,0x00,0x00,0x01};
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
 
 #pragma pack(1)
@@ -1290,6 +1292,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
 	int res = 1;
+	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
@@ -1319,11 +1322,32 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		}
 		break;
 	case ETH_P_IPV6:
+		/* IPv6 doesn't really use broadcast mac address, but leave
+		 * that here just in case.
+		 */
 		if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
 			do_tx_balance = 0;
 			break;
 		}
 
+		/* IPv6 uses all-nodes multicast as an equivalent to
+		 * broadcasts in IPv4.
+		 */
+		if (memcmp(eth_data->h_dest, mac_v6_allmcast, ETH_ALEN) == 0) {
+			do_tx_balance = 0;
+			break;
+		}
+
+		/* Additianally, DAD probes should not be tx-balanced as that
+		 * will lead to false positives for duplicate addresses and
+		 * prevent address configuration from working.
+		 */
+		ip6hdr = ipv6_hdr(skb);
+		if (ipv6_addr_any(&ip6hdr->saddr)) {
+			do_tx_balance = 0;
+			break;
+		}
+
 		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
 		hash_size = sizeof(ipv6_hdr(skb)->daddr);
 		break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index babe4610c39a..8e2be24f3fe4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4493,6 +4493,12 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
 
 static const struct ethtool_ops bond_ethtool_ops = {
 	.get_drvinfo		= bond_ethtool_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.get_ufo		= ethtool_op_get_ufo,
+	.get_flags		= ethtool_op_get_flags,
 };
 
 /*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index fb730ec0396f..f7b40edabfd8 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -32,7 +32,7 @@
 #ifdef BONDING_DEBUG
 #define dprintk(fmt, args...) \
 	printk(KERN_DEBUG \
-	       DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args )
+	       DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args )
 #else
 #define dprintk(fmt, args...)
 #endif /* BONDING_DEBUG */
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ea6144a9565e..b0b66766ed27 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1397,9 +1397,7 @@ net_open(struct net_device *dev)
 release_dma:
 #if ALLOW_DMA
 	free_dma(dev->dma);
-#endif
 release_irq:
-#if ALLOW_DMA
 	release_dma_buff(lp);
 #endif
 	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c5b3de1bb456..0f6fd63b2847 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1018,7 +1018,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
 
 	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
+		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
 		return;
 	}
 	skb->priority = CPL_PRIORITY_CONTROL;
@@ -1049,14 +1049,14 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 		return;
 	if (!is_offloading(newdev)) {
 		printk(KERN_WARNING "%s: Redirect to non-offload "
-		       "device ignored.\n", __FUNCTION__);
+		       "device ignored.\n", __func__);
 		return;
 	}
 	tdev = dev2t3cdev(olddev);
 	BUG_ON(!tdev);
 	if (tdev != dev2t3cdev(newdev)) {
 		printk(KERN_WARNING "%s: Redirect to different "
-		       "offload device ignored.\n", __FUNCTION__);
+		       "offload device ignored.\n", __func__);
 		return;
 	}
 
@@ -1064,7 +1064,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 	e = t3_l2t_get(tdev, new->neighbour, newdev);
 	if (!e) {
 		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		return;
 	}
 
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 6990c0ddc854..89efd04be4e0 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1937,38 +1937,6 @@ static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
 	       eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
 }
 
-#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
-		       TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
-		       TCP_FLAG_SYN | TCP_FLAG_FIN)
-#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
-		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
-
-/**
- *	lro_segment_ok - check if a TCP segment is eligible for LRO
- *	@tcph: the TCP header of the packet
- *
- *	Returns true if a TCP packet is eligible for LRO. This requires that
- *	the packet have only the ACK flag set and no TCP options besides
- *	time stamps.
- */
-static inline int lro_segment_ok(const struct tcphdr *tcph)
-{
-	int optlen;
-
-	if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
-		return 0;
-
-	optlen = (tcph->doff << 2) - sizeof(*tcph);
-	if (optlen) {
-		const u32 *opt = (const u32 *)(tcph + 1);
-
-		if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
-		    *opt != htonl(TSTAMP_WORD) || !opt[2])
-			return 0;
-	}
-	return 1;
-}
-
 static int t3_get_lro_header(void **eh, void **iph, void **tcph,
 			     u64 *hdr_flags, void *priv)
 {
@@ -1981,9 +1949,6 @@ static int t3_get_lro_header(void **eh, void **iph, void **tcph,
 	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
 	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
 
-	if (!lro_segment_ok(*tcph))
-		return -1;
-
 	*hdr_flags = LRO_IPV4 | LRO_TCP;
 	return 0;
 }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 453115acaad2..7d7dfa512bfa 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -191,7 +191,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
-		__FUNCTION__ , ## args))
+		__func__ , ## args))
 
 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index e01926b7b5b7..5524271eedca 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0092"
+#define DRV_VERSION	"EHEA_0093"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD      2
 #define DLPAR_MEM_REM      4
-#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)
+#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..c765ec609462 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -219,9 +219,11 @@ static void ehea_update_firmware_handles(void)
 	}
 
 out_update:
+	mutex_lock(&ehea_fw_handles.lock);
 	kfree(ehea_fw_handles.arr);
 	ehea_fw_handles.arr = arr;
 	ehea_fw_handles.num_entries = i;
+	mutex_unlock(&ehea_fw_handles.lock);
 }
 
 static void ehea_update_bcmc_registrations(void)
@@ -293,9 +295,11 @@ static void ehea_update_bcmc_registrations(void)
 	}
 
 out_update:
+	spin_lock(&ehea_bcmc_regs.lock);
 	kfree(ehea_bcmc_regs.arr);
 	ehea_bcmc_regs.arr = arr;
 	ehea_bcmc_regs.num_entries = i;
+	spin_unlock(&ehea_bcmc_regs.lock);
 }
 
 static struct net_device_stats *ehea_get_stats(struct net_device *dev)
@@ -1770,8 +1774,6 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
-	spin_lock(&ehea_bcmc_regs.lock);
-
 	/* Deregister old MAC in pHYP */
 	if (port->state == EHEA_PORT_UP) {
 		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@@ -1792,7 +1794,6 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 out_upregs:
 	ehea_update_bcmc_registrations();
-	spin_unlock(&ehea_bcmc_regs.lock);
 out_free:
 	kfree(cb0);
 out:
@@ -1954,8 +1955,6 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 	ehea_promiscuous(dev, 0);
 
-	spin_lock(&ehea_bcmc_regs.lock);
-
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
 		goto out;
@@ -1985,7 +1984,6 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 out:
 	ehea_update_bcmc_registrations();
-	spin_unlock(&ehea_bcmc_regs.lock);
 	return;
 }
 
@@ -2466,8 +2464,6 @@ static int ehea_up(struct net_device *dev)
 	if (port->state == EHEA_PORT_UP)
 		return 0;
 
-	mutex_lock(&ehea_fw_handles.lock);
-
 	ret = ehea_port_res_setup(port, port->num_def_qps,
 				  port->num_add_tx_qps);
 	if (ret) {
@@ -2504,8 +2500,6 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	spin_lock(&ehea_bcmc_regs.lock);
-
 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
 	if (ret) {
 		ret = -EIO;
@@ -2527,10 +2521,8 @@ out:
 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
 	ehea_update_bcmc_registrations();
-	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ehea_update_firmware_handles();
-	mutex_unlock(&ehea_fw_handles.lock);
 
 	return ret;
 }
@@ -2580,9 +2572,6 @@ static int ehea_down(struct net_device *dev)
 	if (port->state == EHEA_PORT_DOWN)
 		return 0;
 
-	mutex_lock(&ehea_fw_handles.lock);
-
-	spin_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
@@ -2591,7 +2580,6 @@ static int ehea_down(struct net_device *dev)
 	port->state = EHEA_PORT_DOWN;
 
 	ehea_update_bcmc_registrations();
-	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
@@ -2599,7 +2587,6 @@ static int ehea_down(struct net_device *dev)
 			  dev->name, ret);
 
 	ehea_update_firmware_handles();
-	mutex_unlock(&ehea_fw_handles.lock);
 
 	return ret;
 }
@@ -3378,7 +3365,6 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
 		ehea_error("Invalid ibmebus device probed");
 		return -EINVAL;
 	}
-	mutex_lock(&ehea_fw_handles.lock);
 
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
@@ -3462,7 +3448,6 @@ out_free_ad:
 
 out:
 	ehea_update_firmware_handles();
-	mutex_unlock(&ehea_fw_handles.lock);
 	return ret;
 }
 
@@ -3481,8 +3466,6 @@ static int __devexit ehea_remove(struct of_device *dev)
 
 	flush_scheduled_work();
 
-	mutex_lock(&ehea_fw_handles.lock);
-
 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
 	tasklet_kill(&adapter->neq_tasklet);
 
@@ -3492,7 +3475,6 @@ static int __devexit ehea_remove(struct of_device *dev)
 	kfree(adapter);
 
 	ehea_update_firmware_handles();
-	mutex_unlock(&ehea_fw_handles.lock);
 
 	return 0;
 }
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 156eb6320b4e..2a33a613d9e6 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -535,7 +535,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
 			       cb_logaddr,	/* R5 */
 			       0, 0, 0, 0, 0);	/* R6-R10 */
 #ifdef DEBUG
-	ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
+	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
 #endif
 	return hret;
 }
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 140f05baafd8..db8a9257e680 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -595,7 +595,8 @@ static int ehea_create_busmap_callback(unsigned long pfn,
 	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
 	mr_len = *(unsigned long *)arg;
 
-	ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+	if (!ehea_bmap)
+		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
 	if (!ehea_bmap)
 		return -ENOMEM;
 
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index aa0bf6e1c694..e1b441effbbe 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -110,7 +110,7 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
 	}
 	if (ret && netif_msg_drv(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 
 	return ret;
 }
@@ -131,7 +131,7 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
 		ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
 		if (ret && netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-				__FUNCTION__, ret);
+				__func__, ret);
 	}
 	return ret;
 }
@@ -156,7 +156,7 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
 	ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
 	if (ret)
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	else
 		val = rx_buf[slen - 1];
 
@@ -176,14 +176,14 @@ static int spi_write_op(struct enc28j60_net *priv, u8 op,
 	ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
 	if (ret && netif_msg_drv(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	return ret;
 }
 
 static void enc28j60_soft_reset(struct enc28j60_net *priv)
 {
 	if (netif_msg_hw(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
 	/* Errata workaround #1, CLKRDY check is unreliable,
@@ -357,7 +357,7 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
 		reg = nolock_regw_read(priv, ERDPTL);
 		if (reg != addr)
 			printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
-				"(0x%04x - 0x%04x)\n", __FUNCTION__, reg, addr);
+				"(0x%04x - 0x%04x)\n", __func__, reg, addr);
 	}
 #endif
 	spi_read_buf(priv, len, data);
@@ -380,7 +380,7 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
 		if (reg != TXSTART_INIT)
 			printk(KERN_DEBUG DRV_NAME
 				": %s() ERWPT:0x%04x != 0x%04x\n",
-				__FUNCTION__, reg, TXSTART_INIT);
+				__func__, reg, TXSTART_INIT);
 	}
 #endif
 	/* Set the TXND pointer to correspond to the packet size given */
@@ -390,13 +390,13 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME
 			": %s() after control byte ERWPT:0x%04x\n",
-			__FUNCTION__, nolock_regw_read(priv, EWRPTL));
+			__func__, nolock_regw_read(priv, EWRPTL));
 	/* copy the packet into the transmit buffer */
 	spi_write_buf(priv, len, data);
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME
 			": %s() after write packet ERWPT:0x%04x, len=%d\n",
-			__FUNCTION__, nolock_regw_read(priv, EWRPTL), len);
+			__func__, nolock_regw_read(priv, EWRPTL), len);
 	mutex_unlock(&priv->lock);
 }
 
@@ -495,7 +495,7 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
 		if (netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME
 				": %s() Hardware must be disabled to set "
-				"Mac address\n", __FUNCTION__);
+				"Mac address\n", __func__);
 		ret = -EBUSY;
 	}
 	mutex_unlock(&priv->lock);
@@ -575,7 +575,7 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
 	if (start > 0x1FFF || end > 0x1FFF || start > end) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
-				"bad parameters!\n", __FUNCTION__, start, end);
+				"bad parameters!\n", __func__, start, end);
 		return;
 	}
 	/* set receive buffer start + end */
@@ -591,7 +591,7 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
 	if (start > 0x1FFF || end > 0x1FFF || start > end) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
-				"bad parameters!\n", __FUNCTION__, start, end);
+				"bad parameters!\n", __func__, start, end);
 		return;
 	}
 	/* set transmit buffer start + end */
@@ -630,7 +630,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
 	u8 reg;
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __FUNCTION__,
+		printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
 			priv->full_duplex ? "FullDuplex" : "HalfDuplex");
 
 	mutex_lock(&priv->lock);
@@ -661,7 +661,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
 	if (reg == 0x00 || reg == 0xff) {
 		if (netif_msg_drv(priv))
 			printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
-				__FUNCTION__, reg);
+				__func__, reg);
 		return 0;
 	}
 
@@ -724,7 +724,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
 	/* enable interrupts */
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
-			__FUNCTION__);
+			__func__);
 
 	enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
 
@@ -888,7 +888,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		if (netif_msg_rx_err(priv))
 			dev_err(&ndev->dev,
 				"%s() Invalid packet address!! 0x%04x\n",
-				__FUNCTION__, priv->next_pk_ptr);
+				__func__, priv->next_pk_ptr);
 		/* packet address corrupted: reset RX logic */
 		mutex_lock(&priv->lock);
 		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -917,7 +917,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 	rxstat |= rsv[4];
 
 	if (netif_msg_rx_status(priv))
-		enc28j60_dump_rsv(priv, __FUNCTION__, next_packet, len, rxstat);
+		enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
 
 	if (!RSV_GETBIT(rxstat, RSV_RXOK)) {
 		if (netif_msg_rx_err(priv))
@@ -941,7 +941,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
 				len, skb_put(skb, len));
 		if (netif_msg_pktdata(priv))
-			dump_packet(__FUNCTION__, skb->len, skb->data);
+			dump_packet(__func__, skb->len, skb->data);
 		skb->protocol = eth_type_trans(skb, ndev);
 		/* update statistics */
 		ndev->stats.rx_packets++;
@@ -958,7 +958,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 	erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
-			__FUNCTION__, erxrdpt);
+			__func__, erxrdpt);
 
 	mutex_lock(&priv->lock);
 	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -968,7 +968,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		reg = nolock_regw_read(priv, ERXRDPTL);
 		if (reg != erxrdpt)
 			printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
-				"error (0x%04x - 0x%04x)\n", __FUNCTION__,
+				"error (0x%04x - 0x%04x)\n", __func__,
 				reg, erxrdpt);
 	}
 #endif
@@ -1006,7 +1006,7 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
 	mutex_unlock(&priv->lock);
 	if (netif_msg_rx_status(priv))
 		printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
-			__FUNCTION__, free_space);
+			__func__, free_space);
 	return free_space;
 }
 
1022 reg = enc28j60_phy_read(priv, PHSTAT2); 1022 reg = enc28j60_phy_read(priv, PHSTAT2);
1023 if (netif_msg_hw(priv)) 1023 if (netif_msg_hw(priv))
1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " 1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
1025 "PHSTAT2: %04x\n", __FUNCTION__, 1025 "PHSTAT2: %04x\n", __func__,
1026 enc28j60_phy_read(priv, PHSTAT1), reg); 1026 enc28j60_phy_read(priv, PHSTAT1), reg);
1027 duplex = reg & PHSTAT2_DPXSTAT; 1027 duplex = reg & PHSTAT2_DPXSTAT;
1028 1028
@@ -1095,7 +1095,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
 	int intflags, loop;
 
 	if (netif_msg_intr(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 	/* disable further interrupts */
 	locked_reg_bfclr(priv, EIE, EIE_INTIE);
 
@@ -1198,7 +1198,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
 	/* re-enable interrupts */
 	locked_reg_bfset(priv, EIE, EIE_INTIE);
 	if (netif_msg_intr(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
 }
 
 /*
@@ -1213,7 +1213,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
1213 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1213 ": Tx Packet Len:%d\n", priv->tx_skb->len);
1214 1214
1215 if (netif_msg_pktdata(priv)) 1215 if (netif_msg_pktdata(priv))
1216 dump_packet(__FUNCTION__, 1216 dump_packet(__func__,
1217 priv->tx_skb->len, priv->tx_skb->data); 1217 priv->tx_skb->len, priv->tx_skb->data);
1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); 1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
1219 1219
@@ -1254,7 +1254,7 @@ static int enc28j60_send_packet(struct sk_buff *skb, struct net_device *dev)
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_tx_queued(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	/* If some error occurs while trying to transmit this
 	 * packet, you should return '1' from this function.
@@ -1325,7 +1325,7 @@ static int enc28j60_net_open(struct net_device *dev)
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	if (!is_valid_ether_addr(dev->dev_addr)) {
 		if (netif_msg_ifup(priv)) {
@@ -1363,7 +1363,7 @@ static int enc28j60_net_close(struct net_device *dev)
 	struct enc28j60_net *priv = netdev_priv(dev);
 
 	if (netif_msg_drv(priv))
-		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
 
 	enc28j60_hw_disable(priv);
 	enc28j60_lowpower(priv, true);
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index 37bfeea8788a..9164abb72d9b 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -321,7 +321,7 @@ static struct mii_phy_def bcm5248_phy_def = {
 
 static int m88e1111_init(struct mii_phy *phy)
 {
-	pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__);
+	pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
 	phy_write(phy, 0x14, 0x0ce3);
 	phy_write(phy, 0x18, 0x4101);
 	phy_write(phy, 0x09, 0x0e00);
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 804698fc6a8f..d85717e3022a 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -85,7 +85,7 @@ struct ixgb_adapter;
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__FUNCTION__ , ## args))
+		__func__ , ## args))
 
 
 /* TX/RX descriptor defines */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 90b53830196c..2198b77c53ed 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -41,13 +40,11 @@
 #include <linux/dca.h>
 #endif
 
-#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
-
 #define PFX "ixgbe: "
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__FUNCTION__ , ## args)))
+		__func__ , ## args)))
 
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD	1024
@@ -58,15 +55,6 @@
 #define IXGBE_MAX_RXD	4096
 #define IXGBE_MIN_RXD	64
 
-#define IXGBE_DEFAULT_RXQ	1
-#define IXGBE_MAX_RXQ	1
-#define IXGBE_MIN_RXQ	1
-
-#define IXGBE_DEFAULT_ITR_RX_USECS	125	/* 8k irqs/sec */
-#define IXGBE_DEFAULT_ITR_TX_USECS	250	/* 4k irqs/sec */
-#define IXGBE_MIN_ITR_USECS	100	/* 500k irqs/sec */
-#define IXGBE_MAX_ITR_USECS	10000	/* 100 irqs/sec */
-
 /* flow control */
 #define IXGBE_DEFAULT_FCRTL	0x10000
 #define IXGBE_MIN_FCRTL	0x40
@@ -88,9 +76,6 @@
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 
-/* How many Tx Descriptors do we need to call netif_wake_queue? */
-#define IXGBE_TX_QUEUE_WAKE 16
-
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 
@@ -119,6 +104,7 @@ struct ixgbe_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
 	dma_addr_t page_dma;
+	unsigned int page_offset;
 };
 
 struct ixgbe_queue_stats {
@@ -157,14 +143,11 @@ struct ixgbe_ring {
 	struct net_lro_mgr lro_mgr;
 	bool lro_used;
 	struct ixgbe_queue_stats stats;
-	u8 v_idx; /* maps directly to the index for this ring in the hardware
+	u16 v_idx; /* maps directly to the index for this ring in the hardware
 	           * vector array, can also be used for finding the bit in EICR
 	           * and friends that represents the vector for this ring */
 
-	u32 eims_value;
-	u16 itr_register;
 
-	char name[IFNAMSIZ + 5];
 	u16 work_limit;	/* max work per interrupt */
 	u16 rx_buf_len;
 };
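Widening v_idx from u8 to u16 matters because, per the comment, the same value locates the ring's bit in EICR and related interrupt registers. A standalone sketch of that bit arithmetic (illustrative only; the driver's real EICR handling is more involved):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a ring's vector index to its interrupt-cause bit mask. */
    static uint32_t eicr_mask(uint16_t v_idx)
    {
        return (uint32_t)1 << (v_idx & 31);  /* clamp to one 32-bit register */
    }

    int main(void)
    {
        printf("ring 3 -> EICR mask 0x%08x\n", eicr_mask(3));
        return 0;
    }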
@@ -191,8 +174,8 @@ struct ixgbe_q_vector {
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
 	u8 rxr_count;     /* Rx ring count assigned to this vector */
 	u8 txr_count;     /* Tx ring count assigned to this vector */
-	u8 tx_eitr;
-	u8 rx_eitr;
+	u8 tx_itr;
+	u8 rx_itr;
 	u32 eitr;
 };
 
@@ -240,7 +223,9 @@ struct ixgbe_adapter {
 
 	/* TX */
 	struct ixgbe_ring *tx_ring;	/* One per active queue */
+	int num_tx_queues;
 	u64 restart_queue;
+	u64 hw_csum_tx_good;
 	u64 lsc_int;
 	u64 hw_tso_ctxt;
 	u64 hw_tso6_ctxt;
@@ -249,12 +234,10 @@ struct ixgbe_adapter {
 
 	/* RX */
 	struct ixgbe_ring *rx_ring;	/* One per active queue */
-	u64 hw_csum_tx_good;
+	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_csum_rx_good;
 	u64 non_eop_descs;
-	int num_tx_queues;
-	int num_rx_queues;
 	int num_msix_vectors;
 	struct ixgbe_ring_feature ring_feature[3];
 	struct msix_entry *msix_entries;
@@ -301,14 +284,21 @@ struct ixgbe_adapter {
 	struct ixgbe_hw_stats stats;
 
 	/* Interrupt Throttle Rate */
-	u32 rx_eitr;
-	u32 tx_eitr;
+	u32 eitr_param;
 
 	unsigned long state;
 	u64 tx_busy;
 	u64 lro_aggregated;
 	u64 lro_flushed;
 	u64 lro_no_desc;
+	unsigned int tx_ring_count;
+	unsigned int rx_ring_count;
+
+	u32 link_speed;
+	bool link_up;
+	unsigned long link_check_timeout;
+
+	struct work_struct watchdog_task;
 };
 
 enum ixbge_state_t {
@@ -330,11 +320,11 @@ extern int ixgbe_up(struct ixgbe_adapter *adapter);
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *rxdr);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *txdr);
+extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index ba09063260d9..7cddcfba809e 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -39,68 +38,59 @@
 #define IXGBE_82598_MC_TBL_SIZE 128
 #define IXGBE_82598_VFT_TBL_SIZE 128
 
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
-					 bool *autoneg);
-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
-						u32 *speed, bool *autoneg);
-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
-				      bool *link_up);
-static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					    bool autoneg,
-					    bool autoneg_wait_to_complete);
+static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+						    ixgbe_link_speed *speed,
+						    bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					       bool autoneg,
-					       bool autoneg_wait_to_complete);
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+					       ixgbe_link_speed speed,
+					       bool autoneg,
+					       bool autoneg_wait_to_complete);
 
+/**
+ */
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
-	hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
-	hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-	hw->mac.mcft_size = IXGBE_82598_MC_TBL_SIZE;
-	hw->mac.vft_size = IXGBE_82598_VFT_TBL_SIZE;
-	hw->mac.num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	/* Call PHY identify routine to get the phy type */
+	ixgbe_identify_phy_generic(hw);
 
-	/* PHY ops are filled in by default properly for Fiber only */
-	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
-		hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598;
-		hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598;
-		hw->mac.ops.get_link_settings =
-				&ixgbe_get_copper_link_settings_82598;
-
-		/* Call PHY identify routine to get the phy type */
-		ixgbe_identify_phy(hw);
-
-		switch (hw->phy.type) {
-		case ixgbe_phy_tn:
-			hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
-			hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
-			hw->phy.ops.setup_link_speed =
-				&ixgbe_setup_tnx_phy_link_speed;
-			break;
-		default:
-			break;
-		}
+	/* PHY Init */
+	switch (phy->type) {
+	default:
+		break;
 	}
 
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+		mac->ops.setup_link_speed =
+			&ixgbe_setup_copper_link_speed_82598;
+		mac->ops.get_link_capabilities =
+			&ixgbe_get_copper_link_capabilities_82598;
+	}
+
+	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+
 	return 0;
 }
 
 /**
- * ixgbe_get_link_settings_82598 - Determines default link settings
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @autoneg: boolean auto-negotiation value
  *
- * Determines the default link settings by reading the AUTOC register.
+ * Determines the link capabilities by reading the AUTOC register.
  **/
-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
-					 bool *autoneg)
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
 {
 	s32 status = 0;
 	s32 autoc_reg;
@@ -149,15 +139,16 @@ static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
 }
 
 /**
- * ixgbe_get_copper_link_settings_82598 - Determines default link settings
+ * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @autoneg: boolean auto-negotiation value
  *
- * Determines the default link settings by reading the AUTOC register.
+ * Determines the link capabilities by reading the AUTOC register.
  **/
-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
-						u32 *speed, bool *autoneg)
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
 {
 	s32 status = IXGBE_ERR_LINK_SETUP;
 	u16 speed_ability;
@@ -165,9 +156,9 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
 	*speed = 0;
 	*autoneg = true;
 
-	status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
 				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
 				      &speed_ability);
 
 	if (status == 0) {
 		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -195,11 +186,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
 	case IXGBE_DEV_ID_82598EB_CX4:
 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+	case IXGBE_DEV_ID_82598EB_XF_LR:
 		media_type = ixgbe_media_type_fiber;
 		break;
-	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
-		media_type = ixgbe_media_type_copper;
-		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -209,6 +198,122 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 }
 
 /**
+ * ixgbe_setup_fc_82598 - Configure flow control settings
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Configures the flow control settings based on SW configuration. This
+ * function is used for 802.3x flow control configuration only.
+ **/
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+	u32 frctl_reg;
+	u32 rmcs_reg;
+
+	if (packetbuf_num < 0 || packetbuf_num > 7) {
+		hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
+		       " 0-7\n", packetbuf_num);
+	}
+
+	frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+	/*
+	 * 10 gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.type == ixgbe_fc_default)
+		hw->fc.type = ixgbe_fc_full;
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	/*
+	 * The possible values of the "flow_control" parameter are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames but not
+	 *    send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but we do not
+	 *    support receiving pause frames)
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.type) {
+	case ixgbe_fc_none:
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled,
+		 * and Tx Flow control is disabled.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	case ixgbe_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	default:
+		/* We should never get here. The value should be 0-3. */
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		break;
+	}
+
+	/* Enable 802.3x based flow control settings. */
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+	/*
+	 * Check for invalid software configuration, zeros are completely
+	 * invalid for all parameters used past this point, and if we enable
+	 * flow control with zero water marks, we blast flow control packets.
+	 */
+	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+		hw_dbg(hw, "Flow control structure initialized incorrectly\n");
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
+	}
+
+	/*
+	 * We need to set up the Receive Threshold high and low water
+	 * marks as well as (optionally) enabling the transmission of
+	 * XON frames.
+	 */
+	if (hw->fc.type & ixgbe_fc_tx_pause) {
+		if (hw->fc.send_xon) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+					(hw->fc.low_water | IXGBE_FCRTL_XONE));
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+					hw->fc.low_water);
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
+				(hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+	return 0;
+}
+
+/**
  * ixgbe_setup_mac_link_82598 - Configures MAC link settings
  * @hw: pointer to hardware structure
  *
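The new ixgbe_setup_fc_82598() above reduces the four 802.3x modes to two register bits: RFCE in FCTRL enables receive pause, and TFCE_802_3X in RMCS enables transmit pause. The core switch in a user-space sketch, with the register bits replaced by hypothetical constants:

    #include <stdint.h>
    #include <stdio.h>

    enum fc_type { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    #define FCTRL_RFCE       0x1  /* illustrative bit values */
    #define RMCS_TFCE_802_3X 0x2

    static void setup_fc(enum fc_type type, uint32_t *fctrl, uint32_t *rmcs)
    {
        switch (type) {
        case FC_RX_PAUSE:  /* receive pause frames only */
            *fctrl |= FCTRL_RFCE;
            break;
        case FC_TX_PAUSE:  /* send pause frames only */
            *rmcs |= RMCS_TFCE_802_3X;
            break;
        case FC_FULL:      /* symmetric flow control */
            *fctrl |= FCTRL_RFCE;
            *rmcs |= RMCS_TFCE_802_3X;
            break;
        case FC_NONE:
            break;
        }
    }

    int main(void)
    {
        uint32_t fctrl = 0, rmcs = 0;

        setup_fc(FC_FULL, &fctrl, &rmcs);
        printf("FCTRL=0x%x RMCS=0x%x\n", fctrl, rmcs);
        return 0;
    }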
@@ -252,8 +357,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
 		}
 		if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 			status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-			hw_dbg(hw,
-			       "Autonegotiation did not complete.\n");
+			hw_dbg(hw, "Autonegotiation did not complete.\n");
 		}
 	}
 }
@@ -263,8 +367,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
 	 * case we get disconnected and then reconnected into a different hub
 	 * or switch with different Flow Control capabilities.
 	 */
-	hw->fc.type = hw->fc.original_type;
-	ixgbe_setup_fc(hw, 0);
+	hw->fc.original_type = hw->fc.type;
+	ixgbe_setup_fc_82598(hw, 0);
 
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
@@ -277,20 +381,35 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @link_up: true is link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
  *
  * Reads the links register to determine if link is up and the current speed
  **/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
-				      bool *link_up)
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+				      ixgbe_link_speed *speed, bool *link_up,
+				      bool link_up_wait_to_complete)
 {
 	u32 links_reg;
+	u32 i;
 
 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
-	if (links_reg & IXGBE_LINKS_UP)
-		*link_up = true;
-	else
-		*link_up = false;
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msleep(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
 
 	if (links_reg & IXGBE_LINKS_SPEED)
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
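The reworked check_link above adds an optional wait: it rereads the LINKS register up to IXGBE_LINK_UP_TIME times, sleeping 100 ms between polls. The shape of that loop in a standalone sketch (register access is stubbed, and the poll count here is a guess, not the driver's constant):

    #include <stdbool.h>
    #include <stdio.h>

    #define LINKS_UP     0x40000000u  /* illustrative bit position */
    #define LINK_UP_TIME 90           /* poll count, a guess here */

    static unsigned int read_links(void) { return LINKS_UP; }  /* stub */

    static bool check_link(bool wait_to_complete)
    {
        unsigned int links = read_links();
        int i;

        if (!wait_to_complete)
            return links & LINKS_UP;

        for (i = 0; i < LINK_UP_TIME; i++) {
            if (links & LINKS_UP)
                return true;
            /* the kernel sleeps 100 ms (msleep) here */
            links = read_links();
        }
        return false;
    }

    int main(void)
    {
        printf("link %s\n", check_link(true) ? "up" : "down");
        return 0;
    }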
@@ -300,6 +419,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
 	return 0;
 }
 
+
 /**
  * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
  * @hw: pointer to hardware structure
@@ -310,18 +430,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
  * Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
-					    u32 speed, bool autoneg,
+					    ixgbe_link_speed speed, bool autoneg,
 					    bool autoneg_wait_to_complete)
 {
 	s32 status = 0;
 
 	/* If speed is 10G, then check for CX4 or XAUI. */
 	if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
-	    (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4)))
+	    (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
 		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-	else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg))
+	} else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
 		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
-	else if (autoneg) {
+	} else if (autoneg) {
 		/* BX mode - Autonegotiate 1G */
 		if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
 			hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
@@ -340,7 +460,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
 		 * ixgbe_hw This will write the AUTOC register based on the new
 		 * stored values
 		 */
-		hw->mac.ops.setup_link(hw);
+		ixgbe_setup_mac_link_82598(hw);
 	}
 
 	return status;
@@ -358,18 +478,17 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 
 	/* Restart autonegotiation on PHY */
-	if (hw->phy.ops.setup_link)
-		status = hw->phy.ops.setup_link(hw);
+	status = hw->phy.ops.setup_link(hw);
 
-	/* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */
+	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
 	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
 
 	/* Set up MAC */
-	hw->mac.ops.setup_link(hw);
+	ixgbe_setup_mac_link_82598(hw);
 
 	return status;
 }
@@ -383,23 +502,23 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
  *
  * Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
-					       bool autoneg,
-					       bool autoneg_wait_to_complete)
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+					       ixgbe_link_speed speed,
+					       bool autoneg,
+					       bool autoneg_wait_to_complete)
 {
-	s32 status = 0;
+	s32 status;
 
 	/* Setup the PHY according to input speed */
-	if (hw->phy.ops.setup_link_speed)
-		status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-						      autoneg_wait_to_complete);
+	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+					      autoneg_wait_to_complete);
 
 	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
 	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
 
 	/* Set up MAC */
-	hw->mac.ops.setup_link(hw);
+	ixgbe_setup_mac_link_82598(hw);
 
 	return status;
 }
@@ -408,7 +527,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
  * ixgbe_reset_hw_82598 - Performs hardware reset
  * @hw: pointer to hardware structure
  *
- * Resets the hardware by reseting the transmit and receive units, masks and
+ * Resets the hardware by resetting the transmit and receive units, masks and
  * clears all interrupts, performing a PHY reset, and performing a link (MAC)
  * reset.
  **/
@@ -422,35 +541,44 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	u8  analog_val;
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
-	ixgbe_stop_adapter(hw);
+	hw->mac.ops.stop_adapter(hw);
 
 	/*
-	 * Power up the Atlas TX lanes if they are currently powered down.
-	 * Atlas TX lanes are powered down for MAC loopback tests, but
+	 * Power up the Atlas Tx lanes if they are currently powered down.
+	 * Atlas Tx lanes are powered down for MAC loopback tests, but
 	 * they are not automatically restored on reset.
 	 */
-	ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
-		/* Enable TX Atlas so packets can be transmitted again */
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+		/* Enable Tx Atlas so packets can be transmitted again */
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					      analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					      analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					      analog_val);
 
-		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					     &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
-		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					      analog_val);
 	}
 
 	/* Reset PHY */
-	ixgbe_reset_phy(hw);
+	if (hw->phy.reset_disable == false)
+		hw->phy.ops.reset(hw);
 
 	/*
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
@@ -503,29 +631,311 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
 	} else {
 		hw->mac.link_attach_type =
			(autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
 		hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
 		hw->mac.link_settings_loaded = true;
 	}
 
 	/* Store the permanent mac address */
-	ixgbe_get_mac_addr(hw, hw->mac.perm_addr);
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
 
 	return status;
 }
 
+/**
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	rar_high &= ~IXGBE_RAH_VIND_MASK;
+	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	return 0;
+}
+
+/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	if (rar < rar_entries) {
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+		if (rar_high & IXGBE_RAH_VIND_MASK) {
+			rar_high &= ~IXGBE_RAH_VIND_MASK;
+			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+		}
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			 bool vlan_on)
+{
+	u32 regindex;
+	u32 bitindex;
+	u32 bits;
+	u32 vftabyte;
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/* Determine 32-bit word position in array */
+	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
+	/* Set the nibble for VMD queue index */
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+	bits &= (~(0x0F << bitindex));
+	bits |= (vind << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+	/* Determine the location of the bit for this VLAN id */
+	bitindex = vlan & 0x1F;   /* lower five bits */
+
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		bits |= (1 << bitindex);
+	else
+		/* Turn off this VLAN id */
+		bits &= ~(1 << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+	return 0;
+}
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < hw->mac.vft_size; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+					0);
+
+	return 0;
+}
+
+/**
+ * ixgbe_blink_led_start_82598 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ **/
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
+{
+	ixgbe_link_speed speed = 0;
+	bool link_up = 0;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/*
+	 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
+	 * force it if link is down.
+	 */
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+	if (!link_up) {
+		autoc_reg |= IXGBE_AUTOC_FLU;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+		msleep(10);
+	}
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ **/
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
+{
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	autoc_reg &= ~IXGBE_AUTOC_FLU;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg &= ~IXGBE_LED_BLINK(index);
+	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32 atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return 0;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32 atlas_ctl;
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+
+	return 0;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+	s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		break;
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		break;
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
+
+	default:
+		physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+		break;
+	}
+
+	return physical_layer;
+}
+
 static struct ixgbe_mac_operations mac_ops_82598 = {
-	.reset = &ixgbe_reset_hw_82598,
+	.init_hw = &ixgbe_init_hw_generic,
+	.reset_hw = &ixgbe_reset_hw_82598,
+	.start_hw = &ixgbe_start_hw_generic,
+	.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type = &ixgbe_get_media_type_82598,
+	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
+	.get_mac_addr = &ixgbe_get_mac_addr_generic,
+	.stop_adapter = &ixgbe_stop_adapter_generic,
+	.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
+	.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
 	.setup_link = &ixgbe_setup_mac_link_82598,
-	.check_link = &ixgbe_check_mac_link_82598,
 	.setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
-	.get_link_settings = &ixgbe_get_link_settings_82598,
+	.check_link = &ixgbe_check_mac_link_82598,
+	.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
+	.led_on = &ixgbe_led_on_generic,
+	.led_off = &ixgbe_led_off_generic,
+	.blink_led_start = &ixgbe_blink_led_start_82598,
+	.blink_led_stop = &ixgbe_blink_led_stop_82598,
+	.set_rar = &ixgbe_set_rar_generic,
+	.clear_rar = &ixgbe_clear_rar_generic,
+	.set_vmdq = &ixgbe_set_vmdq_82598,
+	.clear_vmdq = &ixgbe_clear_vmdq_82598,
+	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+	.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
+	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+	.enable_mc = &ixgbe_enable_mc_generic,
+	.disable_mc = &ixgbe_disable_mc_generic,
+	.clear_vfta = &ixgbe_clear_vfta_82598,
+	.set_vfta = &ixgbe_set_vfta_82598,
+	.setup_fc = &ixgbe_setup_fc_82598,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+	.init_params = &ixgbe_init_eeprom_params_generic,
+	.read = &ixgbe_read_eeprom_generic,
+	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_82598 = {
+	.identify = &ixgbe_identify_phy_generic,
+	/* .identify_sfp = &ixgbe_identify_sfp_module_generic, */
+	.reset = &ixgbe_reset_phy_generic,
+	.read_reg = &ixgbe_read_phy_reg_generic,
+	.write_reg = &ixgbe_write_phy_reg_generic,
+	.setup_link = &ixgbe_setup_phy_link_generic,
+	.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
 };
 
 struct ixgbe_info ixgbe_82598_info = {
 	.mac = ixgbe_mac_82598EB,
 	.get_invariants = &ixgbe_get_invariants_82598,
 	.mac_ops = &mac_ops_82598,
+	.eeprom_ops = &eeprom_ops_82598,
+	.phy_ops = &phy_ops_82598,
 };
 
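Of the helpers added above, ixgbe_set_vfta_82598() has the densest bit layout: 4096 VLAN ids fold into 128 32-bit VFTA words (bits 11:5 select the word, bits 4:0 the bit), and the per-VLAN VMDq index lives in a parallel nibble array. The index arithmetic in a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vlan = 1234;                   /* any id in 0..4095 */

        uint32_t regindex = (vlan >> 5) & 0x7F; /* which VFTA word */
        uint32_t bitindex = vlan & 0x1F;        /* which bit in it */

        /* VMDq nibble: byte array from bits 4:3, nibble from bits 2:0 */
        uint32_t vftabyte = (vlan >> 3) & 0x03;
        uint32_t nibshift = (vlan & 0x7) << 2;

        printf("vlan %u -> VFTA[%u] bit %u, VIND byte %u shift %u\n",
               vlan, regindex, bitindex, vftabyte, nibshift);
        return 0;
    }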
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 9c0d0a1964eb..f67c68404bb3 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".
 
   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -33,20 +32,28 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
-
 static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+					u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 
-static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
-static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 
 /**
- * ixgbe_start_hw - Prepare hardware for TX/RX
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
  * @hw: pointer to hardware structure
  *
  * Starts the hardware by filling the bus info structure and media type, clears
@@ -54,7 +61,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
  * table, VLAN filter table, calls routine to set up link and flow control
  * settings, and leaves transmit and receive units disabled and uninitialized
  **/
-s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
 	u32 ctrl_ext;
 
@@ -62,22 +69,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
 
 	/* Identify the PHY */
-	ixgbe_identify_phy(hw);
+	hw->phy.ops.identify(hw);
 
 	/*
 	 * Store MAC address from RAR0, clear receive address registers, and
 	 * clear the multicast table
 	 */
-	ixgbe_init_rx_addrs(hw);
+	hw->mac.ops.init_rx_addrs(hw);
 
 	/* Clear the VLAN filter table */
-	ixgbe_clear_vfta(hw);
+	hw->mac.ops.clear_vfta(hw);
 
 	/* Set up link */
 	hw->mac.ops.setup_link(hw);
 
 	/* Clear statistics registers */
-	ixgbe_clear_hw_cntrs(hw);
+	hw->mac.ops.clear_hw_cntrs(hw);
 
 	/* Set No Snoop Disable */
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
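The running theme in this hunk is indirection: direct ixgbe_*() calls become calls through hw->mac.ops and hw->phy.ops, so each MAC generation can mix generic and chip-specific handlers. A minimal sketch of that ops-table pattern (types and names are illustrative, not the driver's):

    #include <stdio.h>

    struct hw;  /* forward declaration so the ops can take it */

    struct mac_ops {
        int (*init_rx_addrs)(struct hw *hw);
        int (*clear_vfta)(struct hw *hw);
    };

    struct hw { struct mac_ops ops; };

    static int init_rx_addrs_generic(struct hw *hw) { (void)hw; return puts("init rx addrs"); }
    static int clear_vfta_82598(struct hw *hw) { (void)hw; return puts("clear vfta (82598)"); }

    int main(void)
    {
        /* each MAC type fills the table with its own mix of handlers */
        struct hw hw = { .ops = { init_rx_addrs_generic, clear_vfta_82598 } };

        hw.ops.init_rx_addrs(&hw);  /* call sites stay device-neutral */
        hw.ops.clear_vfta(&hw);
        return 0;
    }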
@@ -92,34 +99,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_init_hw - Generic hardware initialization
+ * ixgbe_init_hw_generic - Generic hardware initialization
  * @hw: pointer to hardware structure
  *
- * Initialize the hardware by reseting the hardware, filling the bus info
+ * Initialize the hardware by resetting the hardware, filling the bus info
  * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
-s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
 {
 	/* Reset the hardware */
-	hw->mac.ops.reset(hw);
+	hw->mac.ops.reset_hw(hw);
 
 	/* Start the HW */
-	ixgbe_start_hw(hw);
+	hw->mac.ops.start_hw(hw);
 
 	return 0;
 }
 
 /**
- * ixgbe_clear_hw_cntrs - Generic clear hardware counters
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
  * @hw: pointer to hardware structure
  *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.
 **/
-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
 {
 	u16 i = 0;
 
@@ -191,7 +198,36 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_get_mac_addr - Generic get MAC address
+ * ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*pba_num = (u32)(data << 16);
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*pba_num |= data;
+
+	return 0;
+}
+
+/**
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
  * @hw: pointer to hardware structure
  * @mac_addr: Adapter MAC address
  *
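ixgbe_read_pba_num_generic() above rebuilds the 32-bit part number from two consecutive 16-bit EEPROM words, high half first. The combining arithmetic on its own (the word values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t word0 = 0xE1AF;  /* hypothetical contents of PBANUM0 */
        uint16_t word1 = 0x0037;  /* hypothetical contents of PBANUM1 */

        uint32_t pba = (uint32_t)word0 << 16;  /* high half */
        pba |= word1;                          /* low half */

        printf("pba = 0x%08X\n", pba);         /* prints 0xE1AF0037 */
        return 0;
    }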
@@ -199,7 +235,7 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
  * A reset of the adapter must be performed prior to calling this function
  * in order for the MAC address to have been loaded from the EEPROM into RAR0
  **/
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
 {
 	u32 rar_high;
 	u32 rar_low;
@@ -217,30 +253,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
 	return 0;
 }
 
-s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
-{
-	s32 ret_val;
-	u16 data;
-
-	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
-	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
-		return ret_val;
-	}
-	*part_num = (u32)(data << 16);
-
-	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
-	if (ret_val) {
-		hw_dbg(hw, "NVM Read Error\n");
-		return ret_val;
-	}
-	*part_num |= data;
-
-	return 0;
-}
-
 /**
- * ixgbe_stop_adapter - Generic stop TX/RX units
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
  * @hw: pointer to hardware structure
  *
  * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
@@ -248,7 +262,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
  * the shared code and drivers to determine if the adapter is in a stopped
  * state and should not touch the hardware.
  **/
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 {
 	u32 number_of_queues;
 	u32 reg_val;
@@ -264,6 +278,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_val &= ~(IXGBE_RXCTRL_RXEN);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
+	IXGBE_WRITE_FLUSH(hw);
 	msleep(2);
 
 	/* Clear interrupt mask to stop from interrupts being generated */
@@ -273,7 +288,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
 	IXGBE_READ_REG(hw, IXGBE_EICR);
 
 	/* Disable the transmit unit. Each queue must be disabled. */
-	number_of_queues = hw->mac.num_tx_queues;
+	number_of_queues = hw->mac.max_tx_queues;
 	for (i = 0; i < number_of_queues; i++) {
 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
@@ -282,15 +297,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
 		}
 	}
 
+	/*
+	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+	 * access and verify no pending requests
+	 */
+	if (ixgbe_disable_pcie_master(hw) != 0)
+		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+
 	return 0;
 }
 
 /**
- * ixgbe_led_on - Turns on the software controllable LEDs.
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
  * @hw: pointer to hardware structure
  * @index: led number to turn on
  **/
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
 {
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
@@ -304,11 +326,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
 }
 
 /**
- * ixgbe_led_off - Turns off the software controllable LEDs.
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
  * @hw: pointer to hardware structure
  * @index: led number to turn off
  **/
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
 {
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
@@ -321,15 +343,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
 	return 0;
 }
 
-
 /**
- * ixgbe_init_eeprom - Initialize EEPROM params
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
  * @hw: pointer to hardware structure
  *
  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
  * ixgbe_hw struct in order to set up EEPROM access.
  **/
-s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 	u32 eec;
@@ -337,6 +358,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
 
 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
 		eeprom->type = ixgbe_eeprom_none;
+		/* Set default semaphore delay to 10ms which is a well
+		 * tested value */
+		eeprom->semaphore_delay = 10;
 
 		/*
 		 * Check for EEPROM present first.
@@ -369,18 +393,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
369} 393}
370 394
371/** 395/**
372 * ixgbe_read_eeprom - Read EEPROM word using EERD 396 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
397 * @hw: pointer to hardware structure
398 * @offset: offset within the EEPROM to be read
399 * @data: read 16 bit value from EEPROM
400 *
401 * Reads 16 bit value from EEPROM through bit-bang method
402 **/
403s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
404 u16 *data)
405{
406 s32 status;
407 u16 word_in;
408 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
409
410 hw->eeprom.ops.init_params(hw);
411
412 if (offset >= hw->eeprom.word_size) {
413 status = IXGBE_ERR_EEPROM;
414 goto out;
415 }
416
417 /* Prepare the EEPROM for reading */
418 status = ixgbe_acquire_eeprom(hw);
419
420 if (status == 0) {
421 if (ixgbe_ready_eeprom(hw) != 0) {
422 ixgbe_release_eeprom(hw);
423 status = IXGBE_ERR_EEPROM;
424 }
425 }
426
427 if (status == 0) {
428 ixgbe_standby_eeprom(hw);
429
430 /*
431 * Some SPI eeproms use the 8th address bit embedded in the
432 * opcode
433 */
434 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
435 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
436
437 /* Send the READ command (opcode + addr) */
438 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
439 IXGBE_EEPROM_OPCODE_BITS);
440 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
441 hw->eeprom.address_bits);
442
443 /* Read the data. */
444 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
445 *data = (word_in >> 8) | (word_in << 8);
446
447 /* End this read operation */
448 ixgbe_release_eeprom(hw);
449 }
450
451out:
452 return status;
453}
454
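For reference, a minimal standalone sketch of the byte-order fix-up performed above: the SPI EEPROM shifts the high byte of each word out first, so the MSB-first value has to be swapped into host order. swap16 is a hypothetical helper, not part of the driver.

    #include <stdint.h>

    /* Mirror of "*data = (word_in >> 8) | (word_in << 8)" above:
     * swap the two bytes of a word read MSB-first from the EEPROM. */
    static uint16_t swap16(uint16_t word_in)
    {
            return (uint16_t)((word_in >> 8) | (word_in << 8));
    }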
455/**
456 * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
373 * @hw: pointer to hardware structure 457 * @hw: pointer to hardware structure
374 * @offset: offset of word in the EEPROM to read 458 * @offset: offset of word in the EEPROM to read
375 * @data: word read from the EEPROM 459 * @data: word read from the EEPROM
376 * 460 *
377 * Reads a 16 bit word from the EEPROM using the EERD register. 461 * Reads a 16 bit word from the EEPROM using the EERD register.
378 **/ 462 **/
379s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) 463s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
380{ 464{
381 u32 eerd; 465 u32 eerd;
382 s32 status; 466 s32 status;
383 467
468 hw->eeprom.ops.init_params(hw);
469
470 if (offset >= hw->eeprom.word_size) {
471 status = IXGBE_ERR_EEPROM;
472 goto out;
473 }
474
384 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + 475 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
385 IXGBE_EEPROM_READ_REG_START; 476 IXGBE_EEPROM_READ_REG_START;
386 477
@@ -389,10 +480,11 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
389 480
390 if (status == 0) 481 if (status == 0)
391 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 482 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
392 IXGBE_EEPROM_READ_REG_DATA); 483 IXGBE_EEPROM_READ_REG_DATA);
393 else 484 else
394 hw_dbg(hw, "Eeprom read timed out\n"); 485 hw_dbg(hw, "Eeprom read timed out\n");
395 486
487out:
396 return status; 488 return status;
397} 489}
398 490
@@ -420,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
420} 512}
421 513
422/** 514/**
515 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
516 * @hw: pointer to hardware structure
517 *
518 * Prepares EEPROM for access using bit-bang method. This function should
519 * be called before issuing a command to the EEPROM.
520 **/
521static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
522{
523 s32 status = 0;
524 u32 eec;
525 u32 i;
526
527 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
528 status = IXGBE_ERR_SWFW_SYNC;
529
530 if (status == 0) {
531 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
532
533 /* Request EEPROM Access */
534 eec |= IXGBE_EEC_REQ;
535 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
536
537 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
538 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
539 if (eec & IXGBE_EEC_GNT)
540 break;
541 udelay(5);
542 }
543
544 /* Release if grant not acquired */
545 if (!(eec & IXGBE_EEC_GNT)) {
546 eec &= ~IXGBE_EEC_REQ;
547 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
548 hw_dbg(hw, "Could not acquire EEPROM grant\n");
549
550 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
551 status = IXGBE_ERR_EEPROM;
552 }
553 }
554
555 /* Setup EEPROM for Read/Write */
556 if (status == 0) {
557 /* Clear CS and SK */
558 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
559 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
560 IXGBE_WRITE_FLUSH(hw);
561 udelay(1);
562 }
563 return status;
564}
565
566/**
423 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 567 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
424 * @hw: pointer to hardware structure 568 * @hw: pointer to hardware structure
425 * 569 *
@@ -475,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
475 */ 619 */
476 if (i >= timeout) { 620 if (i >= timeout) {
477 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 621 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
478 "not granted.\n"); 622 "not granted.\n");
479 ixgbe_release_eeprom_semaphore(hw); 623 ixgbe_release_eeprom_semaphore(hw);
480 status = IXGBE_ERR_EEPROM; 624 status = IXGBE_ERR_EEPROM;
481 } 625 }
@@ -503,6 +647,217 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
503} 647}
504 648
505/** 649/**
650 * ixgbe_ready_eeprom - Polls for EEPROM ready
651 * @hw: pointer to hardware structure
652 **/
653static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
654{
655 s32 status = 0;
656 u16 i;
657 u8 spi_stat_reg;
658
659 /*
660 * Read "Status Register" repeatedly until the LSB is cleared. The
661 * EEPROM will signal that the command has been completed by clearing
662 * bit 0 of the internal status register. If it's not cleared within
663 * 5 milliseconds, then error out.
664 */
665 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
666 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
667 IXGBE_EEPROM_OPCODE_BITS);
668 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
669 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
670 break;
671
672 udelay(5);
673 ixgbe_standby_eeprom(hw);
 674 }
675
676 /*
 677 * On some parts, SPI write time could vary from 0-20 ms on 3.3V
 678 * devices (and only 0-5 ms on 5V devices)
679 */
680 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
681 hw_dbg(hw, "SPI EEPROM Status error\n");
682 status = IXGBE_ERR_EEPROM;
683 }
684
685 return status;
686}
687
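The ready-poll above is a standard bounded-polling pattern: issue the read-status opcode, sample the status byte, and retry with a short delay until bit 0 (write-in-progress) clears or the retry budget runs out. A minimal sketch under those assumptions; read_status() and delay_us() are hypothetical stand-ins for the shift-in helper and udelay().

    #include <stdint.h>

    #define SPI_STATUS_BUSY 0x01    /* bit 0: write in progress */

    /* Returns 0 once the part reports ready, -1 on timeout. max_us and
     * step_us bound the poll the way IXGBE_EEPROM_MAX_RETRY_SPI and the
     * udelay(5) do above. */
    static int spi_wait_ready(uint8_t (*read_status)(void),
                              void (*delay_us)(unsigned),
                              unsigned max_us, unsigned step_us)
    {
            unsigned waited;

            for (waited = 0; waited < max_us; waited += step_us) {
                    if (!(read_status() & SPI_STATUS_BUSY))
                            return 0;
                    delay_us(step_us);
            }
            return -1;
    }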
688/**
689 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
690 * @hw: pointer to hardware structure
691 **/
692static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
693{
694 u32 eec;
695
696 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
697
698 /* Toggle CS to flush commands */
699 eec |= IXGBE_EEC_CS;
700 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
701 IXGBE_WRITE_FLUSH(hw);
702 udelay(1);
703 eec &= ~IXGBE_EEC_CS;
704 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
705 IXGBE_WRITE_FLUSH(hw);
706 udelay(1);
707}
708
709/**
710 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
711 * @hw: pointer to hardware structure
712 * @data: data to send to the EEPROM
713 * @count: number of bits to shift out
714 **/
715static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
716 u16 count)
717{
718 u32 eec;
719 u32 mask;
720 u32 i;
721
722 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
723
724 /*
725 * Mask is used to shift "count" bits of "data" out to the EEPROM
726 * one bit at a time. Determine the starting bit based on count
727 */
728 mask = 0x01 << (count - 1);
729
730 for (i = 0; i < count; i++) {
731 /*
732 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
733 * "1", and then raising and then lowering the clock (the SK
734 * bit controls the clock input to the EEPROM). A "0" is
735 * shifted out to the EEPROM by setting "DI" to "0" and then
736 * raising and then lowering the clock.
737 */
738 if (data & mask)
739 eec |= IXGBE_EEC_DI;
740 else
741 eec &= ~IXGBE_EEC_DI;
742
743 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
744 IXGBE_WRITE_FLUSH(hw);
745
746 udelay(1);
747
748 ixgbe_raise_eeprom_clk(hw, &eec);
749 ixgbe_lower_eeprom_clk(hw, &eec);
750
751 /*
 752 * Shift mask to signify the next bit of data to shift out to the
753 * EEPROM
754 */
755 mask = mask >> 1;
 756 }
757
758 /* We leave the "DI" bit set to "0" when we leave this routine. */
759 eec &= ~IXGBE_EEC_DI;
760 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
761 IXGBE_WRITE_FLUSH(hw);
762}
763
764/**
765 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
766 * @hw: pointer to hardware structure
767 **/
768static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
769{
770 u32 eec;
771 u32 i;
772 u16 data = 0;
773
774 /*
775 * In order to read a register from the EEPROM, we need to shift
776 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
777 * the clock input to the EEPROM (setting the SK bit), and then reading
778 * the value of the "DO" bit. During this "shifting in" process the
779 * "DI" bit should always be clear.
780 */
781 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
782
783 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
784
785 for (i = 0; i < count; i++) {
786 data = data << 1;
787 ixgbe_raise_eeprom_clk(hw, &eec);
788
789 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
790
791 eec &= ~(IXGBE_EEC_DI);
792 if (eec & IXGBE_EEC_DO)
793 data |= 1;
794
795 ixgbe_lower_eeprom_clk(hw, &eec);
796 }
797
798 return data;
799}
800
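Both shift helpers implement MSB-first serialization: on output the mask starts at bit count-1 and walks down; on input each new bit enters at the low end. A hardware-free sketch of the same arithmetic, illustrative only:

    #include <stdint.h>

    /* Emit the low "count" bits of "data", most significant first. */
    static void shift_out_msb_first(uint16_t data, unsigned count,
                                    void (*emit_bit)(int))
    {
            uint16_t mask = (uint16_t)(1u << (count - 1));
            unsigned i;

            for (i = 0; i < count; i++) {
                    emit_bit((data & mask) != 0);
                    mask >>= 1;
            }
    }

    /* Collect "count" bits, most significant first. */
    static uint16_t shift_in_msb_first(unsigned count, int (*read_bit)(void))
    {
            uint16_t data = 0;
            unsigned i;

            for (i = 0; i < count; i++)
                    data = (uint16_t)((data << 1) | (read_bit() & 1));
            return data;
    }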
801/**
802 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
803 * @hw: pointer to hardware structure
804 * @eec: EEC register's current value
805 **/
806static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
807{
808 /*
809 * Raise the clock input to the EEPROM
810 * (setting the SK bit), then delay
811 */
812 *eec = *eec | IXGBE_EEC_SK;
813 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
814 IXGBE_WRITE_FLUSH(hw);
815 udelay(1);
816}
817
818/**
819 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
820 * @hw: pointer to hardware structure
 821 * @eec: EEC register's current value
822 **/
823static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
824{
825 /*
826 * Lower the clock input to the EEPROM (clearing the SK bit), then
827 * delay
828 */
829 *eec = *eec & ~IXGBE_EEC_SK;
830 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
831 IXGBE_WRITE_FLUSH(hw);
832 udelay(1);
833}
834
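Taken together, the raise/lower pair produces one clock pulse on SK with a settle delay after each edge. A hypothetical helper combining them; set_sk() and delay_us() are stand-ins for the register writes and udelay(1) above.

    /* One full SK clock pulse: rise, settle, fall, settle. */
    static void eeprom_clock_pulse(void (*set_sk)(int),
                                   void (*delay_us)(unsigned))
    {
            set_sk(1);
            delay_us(1);
            set_sk(0);
            delay_us(1);
    }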
835/**
836 * ixgbe_release_eeprom - Release EEPROM, release semaphores
837 * @hw: pointer to hardware structure
838 **/
839static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
840{
841 u32 eec;
842
843 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
844
845 eec |= IXGBE_EEC_CS; /* Pull CS high */
846 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
847
848 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
849 IXGBE_WRITE_FLUSH(hw);
850
851 udelay(1);
852
853 /* Stop requesting EEPROM access */
854 eec &= ~IXGBE_EEC_REQ;
855 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
856
857 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
858}
859
860/**
506 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 861 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
507 * @hw: pointer to hardware structure 862 * @hw: pointer to hardware structure
508 **/ 863 **/
@@ -517,7 +872,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
517 872
518 /* Include 0x0-0x3F in the checksum */ 873 /* Include 0x0-0x3F in the checksum */
519 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 874 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
520 if (ixgbe_read_eeprom(hw, i, &word) != 0) { 875 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
521 hw_dbg(hw, "EEPROM read failed\n"); 876 hw_dbg(hw, "EEPROM read failed\n");
522 break; 877 break;
523 } 878 }
@@ -526,15 +881,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
526 881
527 /* Include all data from pointers except for the fw pointer */ 882 /* Include all data from pointers except for the fw pointer */
528 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 883 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
529 ixgbe_read_eeprom(hw, i, &pointer); 884 hw->eeprom.ops.read(hw, i, &pointer);
530 885
531 /* Make sure the pointer seems valid */ 886 /* Make sure the pointer seems valid */
532 if (pointer != 0xFFFF && pointer != 0) { 887 if (pointer != 0xFFFF && pointer != 0) {
533 ixgbe_read_eeprom(hw, pointer, &length); 888 hw->eeprom.ops.read(hw, pointer, &length);
534 889
535 if (length != 0xFFFF && length != 0) { 890 if (length != 0xFFFF && length != 0) {
536 for (j = pointer+1; j <= pointer+length; j++) { 891 for (j = pointer+1; j <= pointer+length; j++) {
537 ixgbe_read_eeprom(hw, j, &word); 892 hw->eeprom.ops.read(hw, j, &word);
538 checksum += word; 893 checksum += word;
539 } 894 }
540 } 895 }
@@ -547,14 +902,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
547} 902}
548 903
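The checksum scheme sums the covered EEPROM words and stores a complement so the grand total lands on a fixed base value; validation recomputes the sum and compares against the stored word at IXGBE_EEPROM_CHECKSUM. A hedged sketch of the arithmetic; BASE_SUM stands in for the driver's target constant, which is not visible in this hunk.

    #include <stdint.h>
    #include <stddef.h>

    #define BASE_SUM 0xBABA   /* assumed target value, illustrative only */

    /* Checksum word such that (sum of covered words + checksum)
     * mod 2^16 == BASE_SUM. */
    static uint16_t eeprom_checksum(const uint16_t *words, size_t n)
    {
            uint16_t sum = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    sum = (uint16_t)(sum + words[i]);
            return (uint16_t)(BASE_SUM - sum);
    }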
549/** 904/**
550 * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum 905 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
551 * @hw: pointer to hardware structure 906 * @hw: pointer to hardware structure
552 * @checksum_val: calculated checksum 907 * @checksum_val: calculated checksum
553 * 908 *
554 * Performs checksum calculation and validates the EEPROM checksum. If the 909 * Performs checksum calculation and validates the EEPROM checksum. If the
555 * caller does not need checksum_val, the value can be NULL. 910 * caller does not need checksum_val, the value can be NULL.
556 **/ 911 **/
557s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) 912s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
913 u16 *checksum_val)
558{ 914{
559 s32 status; 915 s32 status;
560 u16 checksum; 916 u16 checksum;
@@ -565,12 +921,12 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
565 * not continue or we could be in for a very long wait while every 921 * not continue or we could be in for a very long wait while every
566 * EEPROM read fails 922 * EEPROM read fails
567 */ 923 */
568 status = ixgbe_read_eeprom(hw, 0, &checksum); 924 status = hw->eeprom.ops.read(hw, 0, &checksum);
569 925
570 if (status == 0) { 926 if (status == 0) {
571 checksum = ixgbe_calc_eeprom_checksum(hw); 927 checksum = ixgbe_calc_eeprom_checksum(hw);
572 928
573 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 929 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
574 930
575 /* 931 /*
576 * Verify read checksum from EEPROM is the same as 932 * Verify read checksum from EEPROM is the same as
@@ -590,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
590} 946}
591 947
592/** 948/**
949 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
950 * @hw: pointer to hardware structure
951 **/
952s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
953{
954 s32 status;
955 u16 checksum;
956
957 /*
958 * Read the first word from the EEPROM. If this times out or fails, do
959 * not continue or we could be in for a very long wait while every
960 * EEPROM read fails
961 */
962 status = hw->eeprom.ops.read(hw, 0, &checksum);
963
964 if (status == 0) {
965 checksum = ixgbe_calc_eeprom_checksum(hw);
966 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
967 checksum);
968 } else {
969 hw_dbg(hw, "EEPROM read failed\n");
970 }
971
972 return status;
973}
974
975/**
593 * ixgbe_validate_mac_addr - Validate MAC address 976 * ixgbe_validate_mac_addr - Validate MAC address
594 * @mac_addr: pointer to MAC address. 977 * @mac_addr: pointer to MAC address.
595 * 978 *
@@ -607,58 +990,137 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
607 status = IXGBE_ERR_INVALID_MAC_ADDR; 990 status = IXGBE_ERR_INVALID_MAC_ADDR;
608 /* Reject the zero address */ 991 /* Reject the zero address */
609 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 992 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
610 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) 993 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
611 status = IXGBE_ERR_INVALID_MAC_ADDR; 994 status = IXGBE_ERR_INVALID_MAC_ADDR;
612 995
613 return status; 996 return status;
614} 997}
615 998
616/** 999/**
617 * ixgbe_set_rar - Set RX address register 1000 * ixgbe_set_rar_generic - Set Rx address register
618 * @hw: pointer to hardware structure 1001 * @hw: pointer to hardware structure
619 * @addr: Address to put into receive address register
620 * @index: Receive address register to write 1002 * @index: Receive address register to write
621 * @vind: Vind to set RAR to 1003 * @addr: Address to put into receive address register
1004 * @vmdq: VMDq "set" or "pool" index
622 * @enable_addr: set flag that address is active 1005 * @enable_addr: set flag that address is active
623 * 1006 *
624 * Puts an ethernet address into a receive address register. 1007 * Puts an ethernet address into a receive address register.
625 **/ 1008 **/
626s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 1009s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
627 u32 enable_addr) 1010 u32 enable_addr)
628{ 1011{
629 u32 rar_low, rar_high; 1012 u32 rar_low, rar_high;
1013 u32 rar_entries = hw->mac.num_rar_entries;
630 1014
631 /* 1015 /* setup VMDq pool selection before this RAR gets enabled */
632 * HW expects these in little endian so we reverse the byte order from 1016 hw->mac.ops.set_vmdq(hw, index, vmdq);
633 * network order (big endian) to little endian
634 */
635 rar_low = ((u32)addr[0] |
636 ((u32)addr[1] << 8) |
637 ((u32)addr[2] << 16) |
638 ((u32)addr[3] << 24));
639 1017
640 rar_high = ((u32)addr[4] | 1018 /* Make sure we are using a valid rar index range */
641 ((u32)addr[5] << 8) | 1019 if (index < rar_entries) {
642 ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); 1020 /*
1021 * HW expects these in little endian so we reverse the byte
1022 * order from network order (big endian) to little endian
1023 */
1024 rar_low = ((u32)addr[0] |
1025 ((u32)addr[1] << 8) |
1026 ((u32)addr[2] << 16) |
1027 ((u32)addr[3] << 24));
1028 /*
1029 * Some parts put the VMDq setting in the extra RAH bits,
1030 * so save everything except the lower 16 bits that hold part
1031 * of the address and the address valid bit.
1032 */
1033 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1034 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1035 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
643 1036
644 if (enable_addr != 0) 1037 if (enable_addr != 0)
645 rar_high |= IXGBE_RAH_AV; 1038 rar_high |= IXGBE_RAH_AV;
646 1039
647 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1040 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
648 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1041 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1042 } else {
1043 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1044 }
649 1045
650 return 0; 1046 return 0;
651} 1047}
652 1048
653/** 1049/**
654 * ixgbe_init_rx_addrs - Initializes receive address filters. 1050 * ixgbe_clear_rar_generic - Remove Rx address register
1051 * @hw: pointer to hardware structure
1052 * @index: Receive address register to write
1053 *
1054 * Clears an ethernet address from a receive address register.
1055 **/
1056s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1057{
1058 u32 rar_high;
1059 u32 rar_entries = hw->mac.num_rar_entries;
1060
1061 /* Make sure we are using a valid rar index range */
1062 if (index < rar_entries) {
1063 /*
1064 * Some parts put the VMDq setting in the extra RAH bits,
1065 * so save everything except the lower 16 bits that hold part
1066 * of the address and the address valid bit.
1067 */
1068 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1069 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1070
1071 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1072 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1073 } else {
1074 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1075 }
1076
1077 /* clear VMDq pool/queue selection for this RAR */
1078 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1079
1080 return 0;
1081}
1082
1083/**
1084 * ixgbe_enable_rar - Enable Rx address register
1085 * @hw: pointer to hardware structure
1086 * @index: index into the RAR table
1087 *
1088 * Enables the select receive address register.
1089 **/
1090static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1091{
1092 u32 rar_high;
1093
1094 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1095 rar_high |= IXGBE_RAH_AV;
1096 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1097}
1098
1099/**
1100 * ixgbe_disable_rar - Disable Rx address register
1101 * @hw: pointer to hardware structure
1102 * @index: index into the RAR table
1103 *
1104 * Disables the select receive address register.
1105 **/
1106static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1107{
1108 u32 rar_high;
1109
1110 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1111 rar_high &= (~IXGBE_RAH_AV);
1112 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1113}
1114
1115/**
1116 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
655 * @hw: pointer to hardware structure 1117 * @hw: pointer to hardware structure
656 * 1118 *
657 * Places the MAC address in receive address register 0 and clears the rest 1119 * Places the MAC address in receive address register 0 and clears the rest
658 * of the receive addresss registers. Clears the multicast table. Assumes 1120 * of the receive address registers. Clears the multicast table. Assumes
659 * the receiver is in reset when the routine is called. 1121 * the receiver is in reset when the routine is called.
660 **/ 1122 **/
661static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) 1123s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
662{ 1124{
663 u32 i; 1125 u32 i;
664 u32 rar_entries = hw->mac.num_rar_entries; 1126 u32 rar_entries = hw->mac.num_rar_entries;
@@ -671,29 +1133,30 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
671 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1133 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
672 IXGBE_ERR_INVALID_MAC_ADDR) { 1134 IXGBE_ERR_INVALID_MAC_ADDR) {
673 /* Get the MAC address from the RAR0 for later reference */ 1135 /* Get the MAC address from the RAR0 for later reference */
674 ixgbe_get_mac_addr(hw, hw->mac.addr); 1136 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
675 1137
676 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1138 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
677 hw->mac.addr[0], hw->mac.addr[1], 1139 hw->mac.addr[0], hw->mac.addr[1],
678 hw->mac.addr[2]); 1140 hw->mac.addr[2]);
679 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1141 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
680 hw->mac.addr[4], hw->mac.addr[5]); 1142 hw->mac.addr[4], hw->mac.addr[5]);
681 } else { 1143 } else {
682 /* Setup the receive address. */ 1144 /* Setup the receive address. */
683 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1145 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
684 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1146 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
685 hw->mac.addr[0], hw->mac.addr[1], 1147 hw->mac.addr[0], hw->mac.addr[1],
686 hw->mac.addr[2]); 1148 hw->mac.addr[2]);
687 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1149 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
688 hw->mac.addr[4], hw->mac.addr[5]); 1150 hw->mac.addr[4], hw->mac.addr[5]);
689 1151
690 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1152 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
691 } 1153 }
1154 hw->addr_ctrl.overflow_promisc = 0;
692 1155
693 hw->addr_ctrl.rar_used_count = 1; 1156 hw->addr_ctrl.rar_used_count = 1;
694 1157
695 /* Zero out the other receive addresses. */ 1158 /* Zero out the other receive addresses. */
696 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1159 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
697 for (i = 1; i < rar_entries; i++) { 1160 for (i = 1; i < rar_entries; i++) {
698 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1161 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
699 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1162 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -708,6 +1171,9 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
708 for (i = 0; i < hw->mac.mcft_size; i++) 1171 for (i = 0; i < hw->mac.mcft_size; i++)
709 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1172 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
710 1173
1174 if (hw->mac.ops.init_uta_tables)
1175 hw->mac.ops.init_uta_tables(hw);
1176
711 return 0; 1177 return 0;
712} 1178}
713 1179
@@ -718,7 +1184,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
718 * 1184 *
719 * Adds it to unused receive address register or goes into promiscuous mode. 1185 * Adds it to unused receive address register or goes into promiscuous mode.
720 **/ 1186 **/
721void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) 1187static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
722{ 1188{
723 u32 rar_entries = hw->mac.num_rar_entries; 1189 u32 rar_entries = hw->mac.num_rar_entries;
724 u32 rar; 1190 u32 rar;
@@ -733,7 +1199,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr)
733 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1199 if (hw->addr_ctrl.rar_used_count < rar_entries) {
734 rar = hw->addr_ctrl.rar_used_count - 1200 rar = hw->addr_ctrl.rar_used_count -
735 hw->addr_ctrl.mc_addr_in_rar_count; 1201 hw->addr_ctrl.mc_addr_in_rar_count;
736 ixgbe_set_rar(hw, rar, addr, 0, IXGBE_RAH_AV); 1202 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
737 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); 1203 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
738 hw->addr_ctrl.rar_used_count++; 1204 hw->addr_ctrl.rar_used_count++;
739 } else { 1205 } else {
@@ -744,7 +1210,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr)
744} 1210}
745 1211
746/** 1212/**
747 * ixgbe_update_uc_addr_list - Updates MAC list of secondary addresses 1213 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
748 * @hw: pointer to hardware structure 1214 * @hw: pointer to hardware structure
749 * @addr_list: the list of new addresses 1215 * @addr_list: the list of new addresses
750 * @addr_count: number of addresses 1216 * @addr_count: number of addresses
@@ -757,7 +1223,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr)
757 * Drivers using secondary unicast addresses must set user_set_promisc when 1223 * Drivers using secondary unicast addresses must set user_set_promisc when
758 * manually putting the device into promiscuous mode. 1224 * manually putting the device into promiscuous mode.
759 **/ 1225 **/
760s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, 1226s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
761 u32 addr_count, ixgbe_mc_addr_itr next) 1227 u32 addr_count, ixgbe_mc_addr_itr next)
762{ 1228{
763 u8 *addr; 1229 u8 *addr;
@@ -787,7 +1253,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
787 for (i = 0; i < addr_count; i++) { 1253 for (i = 0; i < addr_count; i++) {
788 hw_dbg(hw, " Adding the secondary addresses:\n"); 1254 hw_dbg(hw, " Adding the secondary addresses:\n");
789 addr = next(hw, &addr_list, &vmdq); 1255 addr = next(hw, &addr_list, &vmdq);
790 ixgbe_add_uc_addr(hw, addr); 1256 ixgbe_add_uc_addr(hw, addr, vmdq);
791 } 1257 }
792 1258
793 if (hw->addr_ctrl.overflow_promisc) { 1259 if (hw->addr_ctrl.overflow_promisc) {
@@ -808,7 +1274,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
808 } 1274 }
809 } 1275 }
810 1276
811 hw_dbg(hw, "ixgbe_update_uc_addr_list Complete\n"); 1277 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
812 return 0; 1278 return 0;
813} 1279}
814 1280
@@ -821,7 +1287,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
821 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1287 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
822 * incoming rx multicast addresses, to determine the bit-vector to check in 1288 * incoming rx multicast addresses, to determine the bit-vector to check in
 823 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set 1289 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
824 * by the MO field of the MCSTCTRL. The MO field is set during initalization 1290 * by the MO field of the MCSTCTRL. The MO field is set during initialization
825 * to mc_filter_type. 1291 * to mc_filter_type.
826 **/ 1292 **/
827static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1293static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
@@ -829,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
829 u32 vector = 0; 1295 u32 vector = 0;
830 1296
831 switch (hw->mac.mc_filter_type) { 1297 switch (hw->mac.mc_filter_type) {
832 case 0: /* use bits [47:36] of the address */ 1298 case 0: /* use bits [47:36] of the address */
833 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1299 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
834 break; 1300 break;
835 case 1: /* use bits [46:35] of the address */ 1301 case 1: /* use bits [46:35] of the address */
836 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1302 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
837 break; 1303 break;
838 case 2: /* use bits [45:34] of the address */ 1304 case 2: /* use bits [45:34] of the address */
839 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1305 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
840 break; 1306 break;
841 case 3: /* use bits [43:32] of the address */ 1307 case 3: /* use bits [43:32] of the address */
842 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1308 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
843 break; 1309 break;
844 default: /* Invalid mc_filter_type */ 1310 default: /* Invalid mc_filter_type */
845 hw_dbg(hw, "MC filter type param set incorrectly\n"); 1311 hw_dbg(hw, "MC filter type param set incorrectly\n");
846 break; 1312 break;
847 } 1313 }
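To make the filter-type table concrete: for mc_filter_type 0 the bit-vector is address bits [47:36], i.e. the high nibble of byte 4 together with all of byte 5. A small illustrative sketch, mirroring case 0 above (the driver masks the result to 12 bits later in the function, outside this hunk):

    #include <stdint.h>

    /* mc_filter_type 0: vector = address bits [47:36]. */
    static uint16_t mta_vector_type0(const uint8_t mc_addr[6])
    {
            return (uint16_t)(((mc_addr[4] >> 4) |
                               ((uint16_t)mc_addr[5] << 4)) & 0xFFF);
    }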
@@ -896,20 +1362,21 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
896static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) 1362static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
897{ 1363{
898 u32 rar_entries = hw->mac.num_rar_entries; 1364 u32 rar_entries = hw->mac.num_rar_entries;
1365 u32 rar;
899 1366
900 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", 1367 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
901 mc_addr[0], mc_addr[1], mc_addr[2], 1368 mc_addr[0], mc_addr[1], mc_addr[2],
902 mc_addr[3], mc_addr[4], mc_addr[5]); 1369 mc_addr[3], mc_addr[4], mc_addr[5]);
903 1370
904 /* 1371 /*
905 * Place this multicast address in the RAR if there is room, 1372 * Place this multicast address in the RAR if there is room,
906 * else put it in the MTA 1373 * else put it in the MTA
907 */ 1374 */
908 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1375 if (hw->addr_ctrl.rar_used_count < rar_entries) {
909 ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, 1376 /* use RAR from the end up for multicast */
910 mc_addr, 0, IXGBE_RAH_AV); 1377 rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
911 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", 1378 hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
912 hw->addr_ctrl.rar_used_count); 1379 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
913 hw->addr_ctrl.rar_used_count++; 1380 hw->addr_ctrl.rar_used_count++;
914 hw->addr_ctrl.mc_addr_in_rar_count++; 1381 hw->addr_ctrl.mc_addr_in_rar_count++;
915 } else { 1382 } else {
@@ -920,19 +1387,19 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
920} 1387}
921 1388
922/** 1389/**
923 * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses 1390 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
924 * @hw: pointer to hardware structure 1391 * @hw: pointer to hardware structure
925 * @mc_addr_list: the list of new multicast addresses 1392 * @mc_addr_list: the list of new multicast addresses
926 * @mc_addr_count: number of addresses 1393 * @mc_addr_count: number of addresses
927 * @next: iterator function to walk the multicast address list 1394 * @next: iterator function to walk the multicast address list
928 * 1395 *
929 * The given list replaces any existing list. Clears the MC addrs from receive 1396 * The given list replaces any existing list. Clears the MC addrs from receive
930 * address registers and the multicast table. Uses unsed receive address 1397 * address registers and the multicast table. Uses unused receive address
931 * registers for the first multicast addresses, and hashes the rest into the 1398 * registers for the first multicast addresses, and hashes the rest into the
932 * multicast table. 1399 * multicast table.
933 **/ 1400 **/
934s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 1401s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
935 u32 mc_addr_count, ixgbe_mc_addr_itr next) 1402 u32 mc_addr_count, ixgbe_mc_addr_itr next)
936{ 1403{
937 u32 i; 1404 u32 i;
938 u32 rar_entries = hw->mac.num_rar_entries; 1405 u32 rar_entries = hw->mac.num_rar_entries;
@@ -948,7 +1415,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
948 hw->addr_ctrl.mta_in_use = 0; 1415 hw->addr_ctrl.mta_in_use = 0;
949 1416
950 /* Zero out the other receive addresses. */ 1417 /* Zero out the other receive addresses. */
951 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1418 hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
1419 rar_entries - 1);
952 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { 1420 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
953 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1421 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
954 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1422 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -968,190 +1436,55 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
968 /* Enable mta */ 1436 /* Enable mta */
969 if (hw->addr_ctrl.mta_in_use > 0) 1437 if (hw->addr_ctrl.mta_in_use > 0)
970 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1438 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
971 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1439 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
972 1440
973 hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); 1441 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
974 return 0; 1442 return 0;
975} 1443}
976 1444
977/** 1445/**
978 * ixgbe_clear_vfta - Clear VLAN filter table 1446 * ixgbe_enable_mc_generic - Enable multicast address in RAR
979 * @hw: pointer to hardware structure 1447 * @hw: pointer to hardware structure
980 * 1448 *
981 * Clears the VLAN filer table, and the VMDq index associated with the filter 1449 * Enables multicast address in RAR and the use of the multicast hash table.
982 **/ 1450 **/
983static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) 1451s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
984{ 1452{
985 u32 offset; 1453 u32 i;
986 u32 vlanbyte; 1454 u32 rar_entries = hw->mac.num_rar_entries;
1455 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
987 1456
988 for (offset = 0; offset < hw->mac.vft_size; offset++) 1457 if (a->mc_addr_in_rar_count > 0)
989 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 1458 for (i = (rar_entries - a->mc_addr_in_rar_count);
1459 i < rar_entries; i++)
1460 ixgbe_enable_rar(hw, i);
990 1461
991 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) 1462 if (a->mta_in_use > 0)
992 for (offset = 0; offset < hw->mac.vft_size; offset++) 1463 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
993 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 1464 hw->mac.mc_filter_type);
994 0);
995 1465
996 return 0; 1466 return 0;
997} 1467}
998 1468
999/** 1469/**
1000 * ixgbe_set_vfta - Set VLAN filter table 1470 * ixgbe_disable_mc_generic - Disable multicast address in RAR
1001 * @hw: pointer to hardware structure 1471 * @hw: pointer to hardware structure
1002 * @vlan: VLAN id to write to VLAN filter
1003 * @vind: VMDq output index that maps queue to VLAN id in VFTA
1004 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1005 * 1472 *
1006 * Turn on/off specified VLAN in the VLAN filter table. 1473 * Disables multicast address in RAR and the use of the multicast hash table.
1007 **/ 1474 **/
1008s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, 1475s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1009 bool vlan_on)
1010{ 1476{
1011 u32 VftaIndex; 1477 u32 i;
1012 u32 BitOffset; 1478 u32 rar_entries = hw->mac.num_rar_entries;
1013 u32 VftaReg; 1479 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1014 u32 VftaByte;
1015
1016 /* Determine 32-bit word position in array */
1017 VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
1018
1019 /* Determine the location of the (VMD) queue index */
1020 VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1021 BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1022
1023 /* Set the nibble for VMD queue index */
1024 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
1025 VftaReg &= (~(0x0F << BitOffset));
1026 VftaReg |= (vind << BitOffset);
1027 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
1028
1029 /* Determine the location of the bit for this VLAN id */
1030 BitOffset = vlan & 0x1F; /* lower five bits */
1031
1032 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
1033 if (vlan_on)
1034 /* Turn on this VLAN id */
1035 VftaReg |= (1 << BitOffset);
1036 else
1037 /* Turn off this VLAN id */
1038 VftaReg &= ~(1 << BitOffset);
1039 IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
1040
1041 return 0;
1042}
1043
1044/**
1045 * ixgbe_setup_fc - Configure flow control settings
1046 * @hw: pointer to hardware structure
1047 * @packetbuf_num: packet buffer number (0-7)
1048 *
1049 * Configures the flow control settings based on SW configuration.
1050 * This function is used for 802.3x flow control configuration only.
1051 **/
1052s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1053{
1054 u32 frctl_reg;
1055 u32 rmcs_reg;
1056
1057 if (packetbuf_num < 0 || packetbuf_num > 7)
1058 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
1059 "is 0-7\n", packetbuf_num);
1060
1061 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1062 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
1063
1064 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
1065 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
1066
1067 /*
1068 * 10 gig parts do not have a word in the EEPROM to determine the
1069 * default flow control setting, so we explicitly set it to full.
1070 */
1071 if (hw->fc.type == ixgbe_fc_default)
1072 hw->fc.type = ixgbe_fc_full;
1073
1074 /*
1075 * We want to save off the original Flow Control configuration just in
1076 * case we get disconnected and then reconnected into a different hub
1077 * or switch with different Flow Control capabilities.
1078 */
1079 hw->fc.type = hw->fc.original_type;
1080
1081 /*
1082 * The possible values of the "flow_control" parameter are:
1083 * 0: Flow control is completely disabled
1084 * 1: Rx flow control is enabled (we can receive pause frames but not
1085 * send pause frames).
1086 * 2: Tx flow control is enabled (we can send pause frames but we do not
1087 * support receiving pause frames)
1088 * 3: Both Rx and TX flow control (symmetric) are enabled.
1089 * other: Invalid.
1090 */
1091 switch (hw->fc.type) {
1092 case ixgbe_fc_none:
1093 break;
1094 case ixgbe_fc_rx_pause:
1095 /*
1096 * RX Flow control is enabled,
1097 * and TX Flow control is disabled.
1098 */
1099 frctl_reg |= IXGBE_FCTRL_RFCE;
1100 break;
1101 case ixgbe_fc_tx_pause:
1102 /*
1103 * TX Flow control is enabled, and RX Flow control is disabled,
1104 * by a software over-ride.
1105 */
1106 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
1107 break;
1108 case ixgbe_fc_full:
1109 /*
1110 * Flow control (both RX and TX) is enabled by a software
1111 * over-ride.
1112 */
1113 frctl_reg |= IXGBE_FCTRL_RFCE;
1114 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
1115 break;
1116 default:
1117 /* We should never get here. The value should be 0-3. */
1118 hw_dbg(hw, "Flow control param set incorrectly\n");
1119 break;
1120 }
1121 1480
1122 /* Enable 802.3x based flow control settings. */ 1481 if (a->mc_addr_in_rar_count > 0)
1123 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); 1482 for (i = (rar_entries - a->mc_addr_in_rar_count);
1124 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 1483 i < rar_entries; i++)
1484 ixgbe_disable_rar(hw, i);
1125 1485
1126 /* 1486 if (a->mta_in_use > 0)
1127 * Check for invalid software configuration, zeros are completely 1487 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1128 * invalid for all parameters used past this point, and if we enable
1129 * flow control with zero water marks, we blast flow control packets.
1130 */
1131 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
1132 hw_dbg(hw, "Flow control structure initialized incorrectly\n");
1133 return IXGBE_ERR_INVALID_LINK_SETTINGS;
1134 }
1135
1136 /*
1137 * We need to set up the Receive Threshold high and low water
1138 * marks as well as (optionally) enabling the transmission of
1139 * XON frames.
1140 */
1141 if (hw->fc.type & ixgbe_fc_tx_pause) {
1142 if (hw->fc.send_xon) {
1143 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1144 (hw->fc.low_water | IXGBE_FCRTL_XONE));
1145 } else {
1146 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1147 hw->fc.low_water);
1148 }
1149 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
1150 (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
1151 }
1152
1153 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
1154 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
1155 1488
1156 return 0; 1489 return 0;
1157} 1490}
@@ -1167,13 +1500,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1167 **/ 1500 **/
1168s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 1501s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1169{ 1502{
1170 u32 ctrl; 1503 u32 i;
1171 s32 i; 1504 u32 reg_val;
1505 u32 number_of_queues;
1172 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 1506 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
1173 1507
1174 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1508 /* Disable the receive unit by stopping each queue */
1175 ctrl |= IXGBE_CTRL_GIO_DIS; 1509 number_of_queues = hw->mac.max_rx_queues;
1176 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 1510 for (i = 0; i < number_of_queues; i++) {
1511 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1512 if (reg_val & IXGBE_RXDCTL_ENABLE) {
1513 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1514 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1515 }
1516 }
1517
1518 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
1519 reg_val |= IXGBE_CTRL_GIO_DIS;
1520 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
1177 1521
1178 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 1522 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
1179 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 1523 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
@@ -1188,11 +1532,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1188 1532
1189 1533
1190/** 1534/**
1191 * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore 1535 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
1192 * @hw: pointer to hardware structure 1536 * @hw: pointer to hardware structure
1193 * @mask: Mask to specify wich semaphore to acquire 1537 * @mask: Mask to specify which semaphore to acquire
1194 * 1538 *
 1195 * Aquires the SWFW semaphore throught the GSSR register for the specified 1539 * Acquires the SWFW semaphore through the GSSR register for the specified
1196 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1540 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1197 **/ 1541 **/
1198s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1542s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1234,9 +1578,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1234/** 1578/**
1235 * ixgbe_release_swfw_sync - Release SWFW semaphore 1579 * ixgbe_release_swfw_sync - Release SWFW semaphore
1236 * @hw: pointer to hardware structure 1580 * @hw: pointer to hardware structure
1237 * @mask: Mask to specify wich semaphore to release 1581 * @mask: Mask to specify which semaphore to release
1238 * 1582 *
 1239 * Releases the SWFW semaphore throught the GSSR register for the specified 1583 * Releases the SWFW semaphore through the GSSR register for the specified
1240 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1584 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1241 **/ 1585 **/
1242void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1586void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1253,45 +1597,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1253 ixgbe_release_eeprom_semaphore(hw); 1597 ixgbe_release_eeprom_semaphore(hw);
1254} 1598}
1255 1599
1256/**
1257 * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
1258 * @hw: pointer to hardware structure
1259 * @reg: analog register to read
1260 * @val: read value
1261 *
1262 * Performs write operation to analog register specified.
1263 **/
1264s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
1265{
1266 u32 atlas_ctl;
1267
1268 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1269 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1270 IXGBE_WRITE_FLUSH(hw);
1271 udelay(10);
1272 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1273 *val = (u8)atlas_ctl;
1274
1275 return 0;
1276}
1277
1278/**
1279 * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
1280 * @hw: pointer to hardware structure
1281 * @reg: atlas register to write
1282 * @val: value to write
1283 *
1284 * Performs write operation to Atlas analog register specified.
1285 **/
1286s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
1287{
1288 u32 atlas_ctl;
1289
1290 atlas_ctl = (reg << 8) | val;
1291 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1292 IXGBE_WRITE_FLUSH(hw);
1293 udelay(10);
1294
1295 return 0;
1296}
1297
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index c75ecba9ccda..192f8d012911 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -31,36 +30,45 @@
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
33 32
34s32 ixgbe_init_hw(struct ixgbe_hw *hw); 33s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_start_hw(struct ixgbe_hw *hw); 34s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); 35s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); 36s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); 37s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
39 38s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
40s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); 39s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
41s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); 40s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
42 41
43s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); 42s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
44s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); 43s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
45s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); 44
46 45s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
47s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 46s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
48 u32 enable_addr); 47s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
49s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 48 u16 *data);
50 u32 mc_addr_count, ixgbe_mc_addr_itr next); 49s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
51s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list, 50 u16 *checksum_val);
52 u32 mc_addr_count, ixgbe_mc_addr_itr next); 51s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
53s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); 52
54s32 ixgbe_validate_mac_addr(u8 *mac_addr); 53s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
55 54 u32 enable_addr);
56s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); 55s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
56s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
57s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
58 u32 mc_addr_count,
59 ixgbe_mc_addr_itr func);
60s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
61 u32 addr_count, ixgbe_mc_addr_itr func);
62s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
63s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
57 64
65s32 ixgbe_validate_mac_addr(u8 *mac_addr);
58s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 66s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
59void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 67void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
60s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 68s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
61 69
62s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); 70s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
63s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); 71s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
64 72
65#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
66 74
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 61c000e23094..81a9c4b86726 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -48,7 +47,7 @@ struct ixgbe_stats {
48}; 47};
49 48
50#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
51 offsetof(struct ixgbe_adapter, m) 50 offsetof(struct ixgbe_adapter, m)
52static struct ixgbe_stats ixgbe_gstrings_stats[] = { 51static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 52 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
54 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
@@ -95,14 +94,15 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
95}; 94};
96 95
97#define IXGBE_QUEUE_STATS_LEN \ 96#define IXGBE_QUEUE_STATS_LEN \
98 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 97 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
99 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 98 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
100 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 99 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 100#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
103 103
104static int ixgbe_get_settings(struct net_device *netdev, 104static int ixgbe_get_settings(struct net_device *netdev,
105 struct ethtool_cmd *ecmd) 105 struct ethtool_cmd *ecmd)
106{ 106{
107 struct ixgbe_adapter *adapter = netdev_priv(netdev); 107 struct ixgbe_adapter *adapter = netdev_priv(netdev);
108 struct ixgbe_hw *hw = &adapter->hw; 108 struct ixgbe_hw *hw = &adapter->hw;
@@ -114,7 +114,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
114 ecmd->transceiver = XCVR_EXTERNAL; 114 ecmd->transceiver = XCVR_EXTERNAL;
115 if (hw->phy.media_type == ixgbe_media_type_copper) { 115 if (hw->phy.media_type == ixgbe_media_type_copper) {
116 ecmd->supported |= (SUPPORTED_1000baseT_Full | 116 ecmd->supported |= (SUPPORTED_1000baseT_Full |
117 SUPPORTED_TP | SUPPORTED_Autoneg); 117 SUPPORTED_TP | SUPPORTED_Autoneg);
118 118
119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); 119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -126,14 +126,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
126 } else { 126 } else {
127 ecmd->supported |= SUPPORTED_FIBRE; 127 ecmd->supported |= SUPPORTED_FIBRE;
128 ecmd->advertising = (ADVERTISED_10000baseT_Full | 128 ecmd->advertising = (ADVERTISED_10000baseT_Full |
129 ADVERTISED_FIBRE); 129 ADVERTISED_FIBRE);
130 ecmd->port = PORT_FIBRE; 130 ecmd->port = PORT_FIBRE;
131 ecmd->autoneg = AUTONEG_DISABLE;
131 } 132 }
132 133
133 adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up); 134 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
134 if (link_up) { 135 if (link_up) {
135 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 136 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
136 SPEED_10000 : SPEED_1000; 137 SPEED_10000 : SPEED_1000;
137 ecmd->duplex = DUPLEX_FULL; 138 ecmd->duplex = DUPLEX_FULL;
138 } else { 139 } else {
139 ecmd->speed = -1; 140 ecmd->speed = -1;
@@ -144,7 +145,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
144} 145}
145 146
146static int ixgbe_set_settings(struct net_device *netdev, 147static int ixgbe_set_settings(struct net_device *netdev,
147 struct ethtool_cmd *ecmd) 148 struct ethtool_cmd *ecmd)
148{ 149{
149 struct ixgbe_adapter *adapter = netdev_priv(netdev); 150 struct ixgbe_adapter *adapter = netdev_priv(netdev);
150 struct ixgbe_hw *hw = &adapter->hw; 151 struct ixgbe_hw *hw = &adapter->hw;
@@ -164,7 +165,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
164} 165}
165 166
166static void ixgbe_get_pauseparam(struct net_device *netdev, 167static void ixgbe_get_pauseparam(struct net_device *netdev,
167 struct ethtool_pauseparam *pause) 168 struct ethtool_pauseparam *pause)
168{ 169{
169 struct ixgbe_adapter *adapter = netdev_priv(netdev); 170 struct ixgbe_adapter *adapter = netdev_priv(netdev);
170 struct ixgbe_hw *hw = &adapter->hw; 171 struct ixgbe_hw *hw = &adapter->hw;
@@ -182,7 +183,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
182} 183}
183 184
184static int ixgbe_set_pauseparam(struct net_device *netdev, 185static int ixgbe_set_pauseparam(struct net_device *netdev,
185 struct ethtool_pauseparam *pause) 186 struct ethtool_pauseparam *pause)
186{ 187{
187 struct ixgbe_adapter *adapter = netdev_priv(netdev); 188 struct ixgbe_adapter *adapter = netdev_priv(netdev);
188 struct ixgbe_hw *hw = &adapter->hw; 189 struct ixgbe_hw *hw = &adapter->hw;
@@ -241,7 +242,7 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
241 if (data) 242 if (data)
242 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 243 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
243 else 244 else
244 netdev->features &= ~NETIF_F_IP_CSUM; 245 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
245 246
246 return 0; 247 return 0;
247} 248}
@@ -281,7 +282,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
281#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 282#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
282 283
283static void ixgbe_get_regs(struct net_device *netdev, 284static void ixgbe_get_regs(struct net_device *netdev,
284 struct ethtool_regs *regs, void *p) 285 struct ethtool_regs *regs, void *p)
285{ 286{
286 struct ixgbe_adapter *adapter = netdev_priv(netdev); 287 struct ixgbe_adapter *adapter = netdev_priv(netdev);
287 struct ixgbe_hw *hw = &adapter->hw; 288 struct ixgbe_hw *hw = &adapter->hw;
@@ -315,7 +316,9 @@ static void ixgbe_get_regs(struct net_device *netdev,
315 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 316 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
316 317
317 /* Interrupt */ 318 /* Interrupt */
318 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); 319 /* don't read EICR because it can clear interrupt causes, instead
320 * read EICS which is a shadow but doesn't clear EICR */
321 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
319 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 322 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
320 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 323 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
321 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 324 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
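Reading EICR on this hardware is destructive: the register clears on read, so an ethtool -d register dump could silently consume pending interrupt causes. The fix samples EICS instead, which the new comment describes as a shadow of EICR without the clear-on-read side effect. A sketch of the non-destructive pattern, using the driver's own macros (the helper name is hypothetical):

	/* peek at pending interrupt causes without acknowledging them;
	 * EICS mirrors EICR but reading it does not clear EICR */
	static u32 ixgbe_peek_int_causes(struct ixgbe_hw *hw)
	{
		return IXGBE_READ_REG(hw, IXGBE_EICS);
	}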
@@ -325,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
325 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 328 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
326 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 329 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
327 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 330 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
328 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); 331 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
329 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 332 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
330 333
331 /* Flow Control */ 334 /* Flow Control */
@@ -371,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
371 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 374 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
372 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
373 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 376 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
374 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); 377 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
375 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
376 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 379 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
377 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 380 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
@@ -419,7 +422,6 @@ static void ixgbe_get_regs(struct net_device *netdev,
419 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 422 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
420 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); 423 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
421 424
422 /* DCE */
423 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 425 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
424 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 426 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
425 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 427 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -539,21 +541,17 @@ static void ixgbe_get_regs(struct net_device *netdev,
539 /* Diagnostic */ 541 /* Diagnostic */
540 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 542 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
541 for (i = 0; i < 8; i++) 543 for (i = 0; i < 8; i++)
542 regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 544 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
543 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 545 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
544 regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); 546 for (i = 0; i < 4; i++)
545 regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); 547 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
546 regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
547 regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
550 for (i = 0; i < 8; i++) 550 for (i = 0; i < 8; i++)
551 regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 551 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
553 regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); 553 for (i = 0; i < 4; i++)
554 regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); 554 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
555 regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
556 regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
557 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 555 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
558 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 556 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
559 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 557 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
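The old diagnostic loops had a plain indexing bug: every iteration stored to regs_buff[1072] (and regs_buff[1087]), so seven of the eight per-queue reads were overwritten and slots 1073-1079 were never filled. Adding the loop counter to the base offset gives each read its own slot, and the same indexed form lets the four RIC_DW/TIC_DW reads collapse into loops:

	/* before: one slot overwritten eight times, 1073..1079 left unset */
	for (i = 0; i < 8; i++)
		regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));

	/* after: base + i lands each queue's status in its own slot */
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));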
@@ -566,7 +564,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
566 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 564 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
567 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 565 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
568 for (i = 0; i < 8; i++) 566 for (i = 0; i < 8; i++)
569 regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 567 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
570 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 568 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
571 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 569 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
572 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 570 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
@@ -585,7 +583,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
585} 583}
586 584
587static int ixgbe_get_eeprom(struct net_device *netdev, 585static int ixgbe_get_eeprom(struct net_device *netdev,
588 struct ethtool_eeprom *eeprom, u8 *bytes) 586 struct ethtool_eeprom *eeprom, u8 *bytes)
589{ 587{
590 struct ixgbe_adapter *adapter = netdev_priv(netdev); 588 struct ixgbe_adapter *adapter = netdev_priv(netdev);
591 struct ixgbe_hw *hw = &adapter->hw; 589 struct ixgbe_hw *hw = &adapter->hw;
@@ -608,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
608 return -ENOMEM; 606 return -ENOMEM;
609 607
610 for (i = 0; i < eeprom_len; i++) { 608 for (i = 0; i < eeprom_len; i++) {
611 if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, 609 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
612 &eeprom_buff[i]))) 610 &eeprom_buff[i])))
613 break; 611 break;
614 } 612 }
615 613
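Like check_link earlier, EEPROM access now dispatches through a per-part function-pointer table instead of the flat ixgbe_read_eeprom() helper, so each MAC can supply its own read routine. A hedged sketch of the call shape (offset, word and status are illustrative locals; the ops appear to return s32 status codes):

	u16 word;
	s32 status;

	/* hw->eeprom.ops is filled in per MAC at init time */
	status = hw->eeprom.ops.read(hw, offset, &word);
	if (status)
		return status;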
@@ -624,7 +622,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
624} 622}
625 623
626static void ixgbe_get_drvinfo(struct net_device *netdev, 624static void ixgbe_get_drvinfo(struct net_device *netdev,
627 struct ethtool_drvinfo *drvinfo) 625 struct ethtool_drvinfo *drvinfo)
628{ 626{
629 struct ixgbe_adapter *adapter = netdev_priv(netdev); 627 struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 628
@@ -637,7 +635,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
637} 635}
638 636
639static void ixgbe_get_ringparam(struct net_device *netdev, 637static void ixgbe_get_ringparam(struct net_device *netdev,
640 struct ethtool_ringparam *ring) 638 struct ethtool_ringparam *ring)
641{ 639{
642 struct ixgbe_adapter *adapter = netdev_priv(netdev); 640 struct ixgbe_adapter *adapter = netdev_priv(netdev);
643 struct ixgbe_ring *tx_ring = adapter->tx_ring; 641 struct ixgbe_ring *tx_ring = adapter->tx_ring;
@@ -654,15 +652,12 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
654} 652}
655 653
656static int ixgbe_set_ringparam(struct net_device *netdev, 654static int ixgbe_set_ringparam(struct net_device *netdev,
657 struct ethtool_ringparam *ring) 655 struct ethtool_ringparam *ring)
658{ 656{
659 struct ixgbe_adapter *adapter = netdev_priv(netdev); 657 struct ixgbe_adapter *adapter = netdev_priv(netdev);
660 struct ixgbe_tx_buffer *old_buf; 658 struct ixgbe_ring *temp_ring;
661 struct ixgbe_rx_buffer *old_rx_buf;
662 void *old_desc;
663 int i, err; 659 int i, err;
664 u32 new_rx_count, new_tx_count, old_size; 660 u32 new_rx_count, new_tx_count;
665 dma_addr_t old_dma;
666 661
667 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 662 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
668 return -EINVAL; 663 return -EINVAL;
@@ -681,6 +676,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
681 return 0; 676 return 0;
682 } 677 }
683 678
679 if (adapter->num_tx_queues > adapter->num_rx_queues)
680 temp_ring = vmalloc(adapter->num_tx_queues *
681 sizeof(struct ixgbe_ring));
682 else
683 temp_ring = vmalloc(adapter->num_rx_queues *
684 sizeof(struct ixgbe_ring));
685 if (!temp_ring)
686 return -ENOMEM;
687
684 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
685 msleep(1); 689 msleep(1);
686 690
@@ -693,66 +697,61 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
693 * to the tx and rx ring structs. 697 * to the tx and rx ring structs.
694 */ 698 */
695 if (new_tx_count != adapter->tx_ring->count) { 699 if (new_tx_count != adapter->tx_ring->count) {
700 memcpy(temp_ring, adapter->tx_ring,
701 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
702
696 for (i = 0; i < adapter->num_tx_queues; i++) { 703 for (i = 0; i < adapter->num_tx_queues; i++) {
697 /* Save existing descriptor ring */ 704 temp_ring[i].count = new_tx_count;
698 old_buf = adapter->tx_ring[i].tx_buffer_info; 705 err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
699 old_desc = adapter->tx_ring[i].desc;
700 old_size = adapter->tx_ring[i].size;
701 old_dma = adapter->tx_ring[i].dma;
702 /* Try to allocate a new one */
703 adapter->tx_ring[i].tx_buffer_info = NULL;
704 adapter->tx_ring[i].desc = NULL;
705 adapter->tx_ring[i].count = new_tx_count;
706 err = ixgbe_setup_tx_resources(adapter,
707 &adapter->tx_ring[i]);
708 if (err) { 706 if (err) {
709 /* Restore the old one so at least 707 while (i) {
710 the adapter still works, even if 708 i--;
711 we failed the request */ 709 ixgbe_free_tx_resources(adapter,
712 adapter->tx_ring[i].tx_buffer_info = old_buf; 710 &temp_ring[i]);
713 adapter->tx_ring[i].desc = old_desc; 711 }
714 adapter->tx_ring[i].size = old_size;
715 adapter->tx_ring[i].dma = old_dma;
716 goto err_setup; 712 goto err_setup;
717 } 713 }
718 /* Free the old buffer manually */
719 vfree(old_buf);
720 pci_free_consistent(adapter->pdev, old_size,
721 old_desc, old_dma);
722 } 714 }
715
716 for (i = 0; i < adapter->num_tx_queues; i++)
717 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
718
719 memcpy(adapter->tx_ring, temp_ring,
720 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
721
722 adapter->tx_ring_count = new_tx_count;
723 } 723 }
724 724
725 if (new_rx_count != adapter->rx_ring->count) { 725 if (new_rx_count != adapter->rx_ring->count) {
726 for (i = 0; i < adapter->num_rx_queues; i++) { 726 memcpy(temp_ring, adapter->rx_ring,
727 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
727 728
728 old_rx_buf = adapter->rx_ring[i].rx_buffer_info; 729 for (i = 0; i < adapter->num_rx_queues; i++) {
729 old_desc = adapter->rx_ring[i].desc; 730 temp_ring[i].count = new_rx_count;
730 old_size = adapter->rx_ring[i].size; 731 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
731 old_dma = adapter->rx_ring[i].dma;
732
733 adapter->rx_ring[i].rx_buffer_info = NULL;
734 adapter->rx_ring[i].desc = NULL;
735 adapter->rx_ring[i].dma = 0;
736 adapter->rx_ring[i].count = new_rx_count;
737 err = ixgbe_setup_rx_resources(adapter,
738 &adapter->rx_ring[i]);
739 if (err) { 732 if (err) {
740 adapter->rx_ring[i].rx_buffer_info = old_rx_buf; 733 while (i) {
741 adapter->rx_ring[i].desc = old_desc; 734 i--;
742 adapter->rx_ring[i].size = old_size; 735 ixgbe_free_rx_resources(adapter,
743 adapter->rx_ring[i].dma = old_dma; 736 &temp_ring[i]);
737 }
744 goto err_setup; 738 goto err_setup;
745 } 739 }
746
747 vfree(old_rx_buf);
748 pci_free_consistent(adapter->pdev, old_size, old_desc,
749 old_dma);
750 } 740 }
741
742 for (i = 0; i < adapter->num_rx_queues; i++)
743 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
744
745 memcpy(adapter->rx_ring, temp_ring,
746 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
747
748 adapter->rx_ring_count = new_rx_count;
751 } 749 }
752 750
751 /* success! */
753 err = 0; 752 err = 0;
754err_setup: 753err_setup:
755 if (netif_running(adapter->netdev)) 754 if (netif_running(netdev))
756 ixgbe_up(adapter); 755 ixgbe_up(adapter);
757 756
758 clear_bit(__IXGBE_RESETTING, &adapter->state); 757 clear_bit(__IXGBE_RESETTING, &adapter->state);
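The rewrite above replaces field-by-field save/restore of each live ring with a staging array: temp_ring is sized for the larger of the tx and rx queue counts so one vmalloc() serves both passes, every resized ring is built in the staging copy first, and only after all of them succeed are the old resources freed and the staged array memcpy'd over the live one. A failure part-way unwinds just the staged rings, leaving the adapter on its original, still-working descriptor rings. The two-phase shape, condensed (tx side shown; rx is symmetric):

	/* stage: build every new ring before touching the live ones */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		temp_ring[i].count = new_tx_count;
		err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
		if (err) {
			while (i--)	/* unwind only what was staged */
				ixgbe_free_tx_resources(adapter, &temp_ring[i]);
			goto err_setup;
		}
	}

	/* commit: free the old rings only after every setup succeeded */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
	memcpy(adapter->tx_ring, temp_ring,
	       adapter->num_tx_queues * sizeof(struct ixgbe_ring));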
@@ -770,7 +769,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
770} 769}
771 770
772static void ixgbe_get_ethtool_stats(struct net_device *netdev, 771static void ixgbe_get_ethtool_stats(struct net_device *netdev,
773 struct ethtool_stats *stats, u64 *data) 772 struct ethtool_stats *stats, u64 *data)
774{ 773{
775 struct ixgbe_adapter *adapter = netdev_priv(netdev); 774 struct ixgbe_adapter *adapter = netdev_priv(netdev);
776 u64 *queue_stat; 775 u64 *queue_stat;
@@ -778,12 +777,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
778 int j, k; 777 int j, k;
779 int i; 778 int i;
780 u64 aggregated = 0, flushed = 0, no_desc = 0; 779 u64 aggregated = 0, flushed = 0, no_desc = 0;
780 for (i = 0; i < adapter->num_rx_queues; i++) {
781 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
782 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
783 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
784 }
785 adapter->lro_aggregated = aggregated;
786 adapter->lro_flushed = flushed;
787 adapter->lro_no_desc = no_desc;
781 788
782 ixgbe_update_stats(adapter); 789 ixgbe_update_stats(adapter);
783 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 790 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
784 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 791 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
785 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 792 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
786 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 793 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
787 } 794 }
788 for (j = 0; j < adapter->num_tx_queues; j++) { 795 for (j = 0; j < adapter->num_tx_queues; j++) {
789 queue_stat = (u64 *)&adapter->tx_ring[j].stats; 796 queue_stat = (u64 *)&adapter->tx_ring[j].stats;
@@ -792,24 +799,18 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
792 i += k; 799 i += k;
793 } 800 }
794 for (j = 0; j < adapter->num_rx_queues; j++) { 801 for (j = 0; j < adapter->num_rx_queues; j++) {
795 aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
796 flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
797 no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
798 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 802 queue_stat = (u64 *)&adapter->rx_ring[j].stats;
799 for (k = 0; k < stat_count; k++) 803 for (k = 0; k < stat_count; k++)
800 data[i + k] = queue_stat[k]; 804 data[i + k] = queue_stat[k];
801 i += k; 805 i += k;
802 } 806 }
803 adapter->lro_aggregated = aggregated;
804 adapter->lro_flushed = flushed;
805 adapter->lro_no_desc = no_desc;
806} 807}
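The LRO totals are now folded into adapter->lro_aggregated and friends before ixgbe_update_stats() and the global copy loop run. The ordering matters because the global table is walked by byte offset into the adapter structure, so totals computed after the copy, as the old code did, were exported one call stale. A hedged sketch of the entry shape that offset walk implies (field names are assumptions inferred from the loop above):

	/* presumed shape of an ixgbe_gstrings_stats[] entry */
	struct ixgbe_stats {
		char stat_string[ETH_GSTRING_LEN];
		int sizeof_stat;	/* sizeof(u64) or sizeof(u32) */
		int stat_offset;	/* offset into struct ixgbe_adapter */
	};

	/* e.g. { "lro_aggregated", sizeof(u64),
	 *	  offsetof(struct ixgbe_adapter, lro_aggregated) } */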
807 808
808static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 809static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
809 u8 *data) 810 u8 *data)
810{ 811{
811 struct ixgbe_adapter *adapter = netdev_priv(netdev); 812 struct ixgbe_adapter *adapter = netdev_priv(netdev);
812 u8 *p = data; 813 char *p = (char *)data;
813 int i; 814 int i;
814 815
815 switch (stringset) { 816 switch (stringset) {
@@ -831,14 +832,14 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
831 sprintf(p, "rx_queue_%u_bytes", i); 832 sprintf(p, "rx_queue_%u_bytes", i);
832 p += ETH_GSTRING_LEN; 833 p += ETH_GSTRING_LEN;
833 } 834 }
834/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 835 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
835 break; 836 break;
836 } 837 }
837} 838}
838 839
839 840
840static void ixgbe_get_wol(struct net_device *netdev, 841static void ixgbe_get_wol(struct net_device *netdev,
841 struct ethtool_wolinfo *wol) 842 struct ethtool_wolinfo *wol)
842{ 843{
843 wol->supported = 0; 844 wol->supported = 0;
844 wol->wolopts = 0; 845 wol->wolopts = 0;
@@ -859,16 +860,17 @@ static int ixgbe_nway_reset(struct net_device *netdev)
859static int ixgbe_phys_id(struct net_device *netdev, u32 data) 860static int ixgbe_phys_id(struct net_device *netdev, u32 data)
860{ 861{
861 struct ixgbe_adapter *adapter = netdev_priv(netdev); 862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
862 u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); 863 struct ixgbe_hw *hw = &adapter->hw;
864 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
863 u32 i; 865 u32 i;
864 866
865 if (!data || data > 300) 867 if (!data || data > 300)
866 data = 300; 868 data = 300;
867 869
868 for (i = 0; i < (data * 1000); i += 400) { 870 for (i = 0; i < (data * 1000); i += 400) {
869 ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); 871 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
870 msleep_interruptible(200); 872 msleep_interruptible(200);
871 ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); 873 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
872 msleep_interruptible(200); 874 msleep_interruptible(200);
873 } 875 }
874 876
@@ -879,67 +881,75 @@ static int ixgbe_phys_id(struct net_device *netdev, u32 data)
879} 881}
880 882
881static int ixgbe_get_coalesce(struct net_device *netdev, 883static int ixgbe_get_coalesce(struct net_device *netdev,
882 struct ethtool_coalesce *ec) 884 struct ethtool_coalesce *ec)
883{ 885{
884 struct ixgbe_adapter *adapter = netdev_priv(netdev); 886 struct ixgbe_adapter *adapter = netdev_priv(netdev);
885 887
886 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
887 ec->rx_coalesce_usecs = adapter->rx_eitr;
888 else
889 ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
890
891 if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
892 ec->tx_coalesce_usecs = adapter->tx_eitr;
893 else
894 ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
895
896 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 888 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
889
890 /* only valid if in constant ITR mode */
891 switch (adapter->itr_setting) {
892 case 0:
893 /* throttling disabled */
894 ec->rx_coalesce_usecs = 0;
895 break;
896 case 1:
897 /* dynamic ITR mode */
898 ec->rx_coalesce_usecs = 1;
899 break;
900 default:
901 /* fixed interrupt rate mode */
902 ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
903 break;
904 }
897 return 0; 905 return 0;
898} 906}
899 907
900static int ixgbe_set_coalesce(struct net_device *netdev, 908static int ixgbe_set_coalesce(struct net_device *netdev,
901 struct ethtool_coalesce *ec) 909 struct ethtool_coalesce *ec)
902{ 910{
903 struct ixgbe_adapter *adapter = netdev_priv(netdev); 911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
904 912 struct ixgbe_hw *hw = &adapter->hw;
905 if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 913 int i;
906 ((ec->rx_coalesce_usecs != 0) &&
907 (ec->rx_coalesce_usecs != 1) &&
908 (ec->rx_coalesce_usecs != 3) &&
909 (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
910 return -EINVAL;
911 if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
912 ((ec->tx_coalesce_usecs != 0) &&
913 (ec->tx_coalesce_usecs != 1) &&
914 (ec->tx_coalesce_usecs != 3) &&
915 (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
916 return -EINVAL;
917
918 /* convert to rate of irq's per second */
919 if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
920 adapter->rx_eitr = ec->rx_coalesce_usecs;
921 else
922 adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
923
924 if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
925 adapter->tx_eitr = ec->rx_coalesce_usecs;
926 else
927 adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
928 914
929 if (ec->tx_max_coalesced_frames_irq) 915 if (ec->tx_max_coalesced_frames_irq)
930 adapter->tx_ring[0].work_limit = 916 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
931 ec->tx_max_coalesced_frames_irq; 917
918 if (ec->rx_coalesce_usecs > 1) {
919 /* store the value in ints/second */
920 adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
921
922 /* static value of interrupt rate */
923 adapter->itr_setting = adapter->eitr_param;
924 /* clear the lower bit */
925 adapter->itr_setting &= ~1;
926 } else if (ec->rx_coalesce_usecs == 1) {
927 /* 1 means dynamic mode */
928 adapter->eitr_param = 20000;
929 adapter->itr_setting = 1;
930 } else {
931 /* any other value means disable eitr, which is best
932 * served by setting the interrupt rate very high */
933 adapter->eitr_param = 3000000;
934 adapter->itr_setting = 0;
935 }
932 936
933 if (netif_running(netdev)) { 937 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
934 ixgbe_down(adapter); 938 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
935 ixgbe_up(adapter); 939 if (q_vector->txr_count && !q_vector->rxr_count)
940 q_vector->eitr = (adapter->eitr_param >> 1);
941 else
942 /* rx only or mixed */
943 q_vector->eitr = adapter->eitr_param;
944 IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
945 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
936 } 946 }
937 947
938 return 0; 948 return 0;
939} 949}
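The coalescing rework retires the separate rx_eitr/tx_eitr pair in favor of one eitr_param, stored as interrupts per second, plus itr_setting, whose low bits act as a mode flag: 0 disables throttling (modeled as a very high 3,000,000 ints/sec ceiling), 1 selects dynamic ITR, and anything larger requests a fixed rate with the low bit cleared so fixed mode stays distinguishable from dynamic (the NAPI paths test itr_setting & 3 before rewriting EITR). Tx-only vectors then run at half the programmed rate. The encoding, condensed:

	/* rx_coalesce_usecs == 0: throttling off (rate capped very high)
	 * rx_coalesce_usecs == 1: dynamic ITR
	 * rx_coalesce_usecs >  1: fixed rate of 1000000/usecs ints/sec */
	if (ec->rx_coalesce_usecs > 1) {
		adapter->eitr_param = 1000000 / ec->rx_coalesce_usecs;
		adapter->itr_setting = adapter->eitr_param & ~1;
	} else if (ec->rx_coalesce_usecs == 1) {
		adapter->eitr_param = 20000;
		adapter->itr_setting = 1;
	} else {
		adapter->eitr_param = 3000000;
		adapter->itr_setting = 0;
	}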
940 950
941 951
942static struct ethtool_ops ixgbe_ethtool_ops = { 952static const struct ethtool_ops ixgbe_ethtool_ops = {
943 .get_settings = ixgbe_get_settings, 953 .get_settings = ixgbe_get_settings,
944 .set_settings = ixgbe_set_settings, 954 .set_settings = ixgbe_set_settings,
945 .get_drvinfo = ixgbe_get_drvinfo, 955 .get_drvinfo = ixgbe_get_drvinfo,
@@ -966,7 +976,7 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
966 .set_tso = ixgbe_set_tso, 976 .set_tso = ixgbe_set_tso,
967 .get_strings = ixgbe_get_strings, 977 .get_strings = ixgbe_get_strings,
968 .phys_id = ixgbe_phys_id, 978 .phys_id = ixgbe_phys_id,
969 .get_sset_count = ixgbe_get_sset_count, 979 .get_sset_count = ixgbe_get_sset_count,
970 .get_ethtool_stats = ixgbe_get_ethtool_stats, 980 .get_ethtool_stats = ixgbe_get_ethtool_stats,
971 .get_coalesce = ixgbe_get_coalesce, 981 .get_coalesce = ixgbe_get_coalesce,
972 .set_coalesce = ixgbe_set_coalesce, 982 .set_coalesce = ixgbe_set_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 362541aa946e..ca17af4349d0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -46,15 +45,14 @@
46 45
47char ixgbe_driver_name[] = "ixgbe"; 46char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 49
51#define DRV_VERSION "1.3.18-k4" 50#define DRV_VERSION "1.3.30-k2"
52const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
53static const char ixgbe_copyright[] = 52static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
54 "Copyright (c) 1999-2007 Intel Corporation.";
55 53
56static const struct ixgbe_info *ixgbe_info_tbl[] = { 54static const struct ixgbe_info *ixgbe_info_tbl[] = {
57 [board_82598] = &ixgbe_82598_info, 55 [board_82598] = &ixgbe_82598_info,
58}; 56};
59 57
60/* ixgbe_pci_tbl - PCI Device ID Table 58/* ixgbe_pci_tbl - PCI Device ID Table
@@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
74 board_82598 }, 72 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
76 board_82598 }, 74 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
76 board_82598 },
77 77
78 /* required last entry */ 78 /* required last entry */
79 {0, } 79 {0, }
80}; 80};
81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
82 82
83#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 83#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
85 void *p); 85 void *p);
86static struct notifier_block dca_notifier = { 86static struct notifier_block dca_notifier = {
87 .notifier_call = ixgbe_notify_dca, 87 .notifier_call = ixgbe_notify_dca,
88 .next = NULL, 88 .next = NULL,
@@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
104 /* Let firmware take over control of h/w */ 104 /* Let firmware take over control of h/w */
105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
108} 108}
109 109
110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
114 /* Let firmware know the driver has taken over */ 114 /* Let firmware know the driver has taken over */
115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
118} 118}
119 119
120#ifdef DEBUG
121/**
122 * ixgbe_get_hw_dev_name - return device name string
123 * used by hardware layer to print debugging information
124 **/
125char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
126{
127 struct ixgbe_adapter *adapter = hw->back;
128 struct net_device *netdev = adapter->netdev;
129 return netdev->name;
130}
131#endif
132
133static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 120static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
134 u8 msix_vector) 121 u8 msix_vector)
135{ 122{
136 u32 ivar, index; 123 u32 ivar, index;
137 124
@@ -144,12 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
144} 131}
145 132
146static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 133static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
147 struct ixgbe_tx_buffer 134 struct ixgbe_tx_buffer
148 *tx_buffer_info) 135 *tx_buffer_info)
149{ 136{
150 if (tx_buffer_info->dma) { 137 if (tx_buffer_info->dma) {
151 pci_unmap_page(adapter->pdev, tx_buffer_info->dma, 138 pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
152 tx_buffer_info->length, PCI_DMA_TODEVICE); 139 tx_buffer_info->length, PCI_DMA_TODEVICE);
153 tx_buffer_info->dma = 0; 140 tx_buffer_info->dma = 0;
154 } 141 }
155 if (tx_buffer_info->skb) { 142 if (tx_buffer_info->skb) {
@@ -160,8 +147,8 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
160} 147}
161 148
162static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 149static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
163 struct ixgbe_ring *tx_ring, 150 struct ixgbe_ring *tx_ring,
164 unsigned int eop) 151 unsigned int eop)
165{ 152{
166 struct ixgbe_hw *hw = &adapter->hw; 153 struct ixgbe_hw *hw = &adapter->hw;
167 u32 head, tail; 154 u32 head, tail;
@@ -196,14 +183,14 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
196 return false; 183 return false;
197} 184}
198 185
199#define IXGBE_MAX_TXD_PWR 14 186#define IXGBE_MAX_TXD_PWR 14
200#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 187#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
201 188
202/* Tx Descriptors needed, worst case */ 189/* Tx Descriptors needed, worst case */
203#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 190#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
204 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 191 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
205#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 192#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
206 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 193 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
207 194
208#define GET_TX_HEAD_FROM_RING(ring) (\ 195#define GET_TX_HEAD_FROM_RING(ring) (\
209 *(volatile u32 *) \ 196 *(volatile u32 *) \
@@ -309,9 +296,9 @@ done_cleaning:
309 return (total_packets ? true : false); 296 return (total_packets ? true : false);
310} 297}
311 298
312#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 299#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
313static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 300static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
314 struct ixgbe_ring *rx_ring) 301 struct ixgbe_ring *rx_ring)
315{ 302{
316 u32 rxctrl; 303 u32 rxctrl;
317 int cpu = get_cpu(); 304 int cpu = get_cpu();
@@ -330,7 +317,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
330} 317}
331 318
332static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 319static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
333 struct ixgbe_ring *tx_ring) 320 struct ixgbe_ring *tx_ring)
334{ 321{
335 u32 txctrl; 322 u32 txctrl;
336 int cpu = get_cpu(); 323 int cpu = get_cpu();
@@ -406,8 +393,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
406 * @rx_desc: rx descriptor 393 * @rx_desc: rx descriptor
407 **/ 394 **/
408static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 395static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
409 struct sk_buff *skb, u8 status, 396 struct sk_buff *skb, u8 status,
410 struct ixgbe_ring *ring, 397 struct ixgbe_ring *ring,
411 union ixgbe_adv_rx_desc *rx_desc) 398 union ixgbe_adv_rx_desc *rx_desc)
412{ 399{
413 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 400 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -480,7 +467,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
480 struct ixgbe_ring *rx_ring, 467 struct ixgbe_ring *rx_ring,
481 int cleaned_count) 468 int cleaned_count)
482{ 469{
483 struct net_device *netdev = adapter->netdev;
484 struct pci_dev *pdev = adapter->pdev; 470 struct pci_dev *pdev = adapter->pdev;
485 union ixgbe_adv_rx_desc *rx_desc; 471 union ixgbe_adv_rx_desc *rx_desc;
486 struct ixgbe_rx_buffer *bi; 472 struct ixgbe_rx_buffer *bi;
@@ -493,20 +479,29 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
493 while (cleaned_count--) { 479 while (cleaned_count--) {
494 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 480 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
495 481
496 if (!bi->page && 482 if (!bi->page_dma &&
497 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 483 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
498 bi->page = alloc_page(GFP_ATOMIC);
499 if (!bi->page) { 484 if (!bi->page) {
500 adapter->alloc_rx_page_failed++; 485 bi->page = alloc_page(GFP_ATOMIC);
501 goto no_buffers; 486 if (!bi->page) {
487 adapter->alloc_rx_page_failed++;
488 goto no_buffers;
489 }
490 bi->page_offset = 0;
491 } else {
492 /* use a half page if we're re-using */
493 bi->page_offset ^= (PAGE_SIZE / 2);
502 } 494 }
503 bi->page_dma = pci_map_page(pdev, bi->page, 0, 495
504 PAGE_SIZE, 496 bi->page_dma = pci_map_page(pdev, bi->page,
505 PCI_DMA_FROMDEVICE); 497 bi->page_offset,
498 (PAGE_SIZE / 2),
499 PCI_DMA_FROMDEVICE);
506 } 500 }
507 501
508 if (!bi->skb) { 502 if (!bi->skb) {
509 struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz); 503 struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
504 bufsz);
510 505
511 if (!skb) { 506 if (!skb) {
512 adapter->alloc_rx_buff_failed++; 507 adapter->alloc_rx_buff_failed++;
@@ -567,10 +562,9 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
567} 562}
568 563
569static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 564static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
570 struct ixgbe_ring *rx_ring, 565 struct ixgbe_ring *rx_ring,
571 int *work_done, int work_to_do) 566 int *work_done, int work_to_do)
572{ 567{
573 struct net_device *netdev = adapter->netdev;
574 struct pci_dev *pdev = adapter->pdev; 568 struct pci_dev *pdev = adapter->pdev;
575 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 569 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
576 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 570 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -596,7 +590,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
596 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 590 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
597 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 591 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
598 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 592 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
599 IXGBE_RXDADV_HDRBUFLEN_SHIFT; 593 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
600 if (hdr_info & IXGBE_RXDADV_SPH) 594 if (hdr_info & IXGBE_RXDADV_SPH)
601 adapter->rx_hdr_split++; 595 adapter->rx_hdr_split++;
602 if (len > IXGBE_RX_HDR_SIZE) 596 if (len > IXGBE_RX_HDR_SIZE)
@@ -613,18 +607,25 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
613 607
614 if (len && !skb_shinfo(skb)->nr_frags) { 608 if (len && !skb_shinfo(skb)->nr_frags) {
615 pci_unmap_single(pdev, rx_buffer_info->dma, 609 pci_unmap_single(pdev, rx_buffer_info->dma,
616 rx_ring->rx_buf_len + NET_IP_ALIGN, 610 rx_ring->rx_buf_len + NET_IP_ALIGN,
617 PCI_DMA_FROMDEVICE); 611 PCI_DMA_FROMDEVICE);
618 skb_put(skb, len); 612 skb_put(skb, len);
619 } 613 }
620 614
621 if (upper_len) { 615 if (upper_len) {
622 pci_unmap_page(pdev, rx_buffer_info->page_dma, 616 pci_unmap_page(pdev, rx_buffer_info->page_dma,
623 PAGE_SIZE, PCI_DMA_FROMDEVICE); 617 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
624 rx_buffer_info->page_dma = 0; 618 rx_buffer_info->page_dma = 0;
625 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 619 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
626 rx_buffer_info->page, 0, upper_len); 620 rx_buffer_info->page,
627 rx_buffer_info->page = NULL; 621 rx_buffer_info->page_offset,
622 upper_len);
623
624 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
625 (page_count(rx_buffer_info->page) != 1))
626 rx_buffer_info->page = NULL;
627 else
628 get_page(rx_buffer_info->page);
628 629
629 skb->len += upper_len; 630 skb->len += upper_len;
630 skb->data_len += upper_len; 631 skb->data_len += upper_len;
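Together with the allocator change earlier in this hunk series, this completes the half-page receive scheme: pages are mapped PAGE_SIZE/2 at a time at page_offset, which the refill path toggles with page_offset ^= (PAGE_SIZE / 2) each time a page is reused. On completion the page may be recycled for its other half, but only when the configured buffer actually fits in half a page and the driver holds the sole reference; get_page() then takes the extra reference that keeps it alive across the handoff to the stack. The recycle decision, annotated:

	if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
	    (page_count(rx_buffer_info->page) != 1))
		/* buffer too big, or the stack still holds the page:
		 * drop it and let the refill path allocate a fresh one */
		rx_buffer_info->page = NULL;
	else
		/* sole owner: keep a reference and reuse the other half */
		get_page(rx_buffer_info->page);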
@@ -647,6 +648,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
647 rx_buffer_info->skb = next_buffer->skb; 648 rx_buffer_info->skb = next_buffer->skb;
648 rx_buffer_info->dma = next_buffer->dma; 649 rx_buffer_info->dma = next_buffer->dma;
649 next_buffer->skb = skb; 650 next_buffer->skb = skb;
651 next_buffer->dma = 0;
650 adapter->non_eop_descs++; 652 adapter->non_eop_descs++;
651 goto next_desc; 653 goto next_desc;
652 } 654 }
@@ -662,9 +664,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
662 total_rx_bytes += skb->len; 664 total_rx_bytes += skb->len;
663 total_rx_packets++; 665 total_rx_packets++;
664 666
665 skb->protocol = eth_type_trans(skb, netdev); 667 skb->protocol = eth_type_trans(skb, adapter->netdev);
666 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 668 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
667 netdev->last_rx = jiffies; 669 adapter->netdev->last_rx = jiffies;
668 670
669next_desc: 671next_desc:
670 rx_desc->wb.upper.status_error = 0; 672 rx_desc->wb.upper.status_error = 0;
@@ -724,43 +726,43 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
724 q_vector = &adapter->q_vector[v_idx]; 726 q_vector = &adapter->q_vector[v_idx];
725 /* XXX for_each_bit(...) */ 727 /* XXX for_each_bit(...) */
726 r_idx = find_first_bit(q_vector->rxr_idx, 728 r_idx = find_first_bit(q_vector->rxr_idx,
727 adapter->num_rx_queues); 729 adapter->num_rx_queues);
728 730
729 for (i = 0; i < q_vector->rxr_count; i++) { 731 for (i = 0; i < q_vector->rxr_count; i++) {
730 j = adapter->rx_ring[r_idx].reg_idx; 732 j = adapter->rx_ring[r_idx].reg_idx;
731 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 733 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
732 r_idx = find_next_bit(q_vector->rxr_idx, 734 r_idx = find_next_bit(q_vector->rxr_idx,
733 adapter->num_rx_queues, 735 adapter->num_rx_queues,
734 r_idx + 1); 736 r_idx + 1);
735 } 737 }
736 r_idx = find_first_bit(q_vector->txr_idx, 738 r_idx = find_first_bit(q_vector->txr_idx,
737 adapter->num_tx_queues); 739 adapter->num_tx_queues);
738 740
739 for (i = 0; i < q_vector->txr_count; i++) { 741 for (i = 0; i < q_vector->txr_count; i++) {
740 j = adapter->tx_ring[r_idx].reg_idx; 742 j = adapter->tx_ring[r_idx].reg_idx;
741 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 743 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
742 r_idx = find_next_bit(q_vector->txr_idx, 744 r_idx = find_next_bit(q_vector->txr_idx,
743 adapter->num_tx_queues, 745 adapter->num_tx_queues,
744 r_idx + 1); 746 r_idx + 1);
745 } 747 }
746 748
747 /* if this is a tx only vector use half the irq (tx) rate */ 749 /* if this is a tx only vector halve the interrupt rate */
748 if (q_vector->txr_count && !q_vector->rxr_count) 750 if (q_vector->txr_count && !q_vector->rxr_count)
749 q_vector->eitr = adapter->tx_eitr; 751 q_vector->eitr = (adapter->eitr_param >> 1);
750 else 752 else
751 /* rx only or mixed */ 753 /* rx only */
752 q_vector->eitr = adapter->rx_eitr; 754 q_vector->eitr = adapter->eitr_param;
753 755
754 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
755 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 757 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
756 } 758 }
757 759
758 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 760 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
759 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 761 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
760 762
761 /* set up to autoclear timer, lsc, and the vectors */ 763 /* set up to autoclear timer, and the vectors */
762 mask = IXGBE_EIMS_ENABLE_MASK; 764 mask = IXGBE_EIMS_ENABLE_MASK;
763 mask &= ~IXGBE_EIMS_OTHER; 765 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
764 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 766 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
765} 767}
766 768
@@ -790,8 +792,8 @@ enum latency_range {
790 * parameter (see ixgbe_param.c) 792 * parameter (see ixgbe_param.c)
791 **/ 793 **/
792static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 794static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
793 u32 eitr, u8 itr_setting, 795 u32 eitr, u8 itr_setting,
794 int packets, int bytes) 796 int packets, int bytes)
795{ 797{
796 unsigned int retval = itr_setting; 798 unsigned int retval = itr_setting;
797 u32 timepassed_us; 799 u32 timepassed_us;
@@ -838,40 +840,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
838 u32 new_itr; 840 u32 new_itr;
839 u8 current_itr, ret_itr; 841 u8 current_itr, ret_itr;
840 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / 842 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
841 sizeof(struct ixgbe_q_vector); 843 sizeof(struct ixgbe_q_vector);
842 struct ixgbe_ring *rx_ring, *tx_ring; 844 struct ixgbe_ring *rx_ring, *tx_ring;
843 845
844 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 846 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
845 for (i = 0; i < q_vector->txr_count; i++) { 847 for (i = 0; i < q_vector->txr_count; i++) {
846 tx_ring = &(adapter->tx_ring[r_idx]); 848 tx_ring = &(adapter->tx_ring[r_idx]);
847 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 849 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
848 q_vector->tx_eitr, 850 q_vector->tx_itr,
849 tx_ring->total_packets, 851 tx_ring->total_packets,
850 tx_ring->total_bytes); 852 tx_ring->total_bytes);
851 /* if the result for this queue would decrease interrupt 853 /* if the result for this queue would decrease interrupt
852 * rate for this vector then use that result */ 854 * rate for this vector then use that result */
853 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? 855 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
854 q_vector->tx_eitr - 1 : ret_itr); 856 q_vector->tx_itr - 1 : ret_itr);
855 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 857 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
856 r_idx + 1); 858 r_idx + 1);
857 } 859 }
858 860
859 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 861 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
860 for (i = 0; i < q_vector->rxr_count; i++) { 862 for (i = 0; i < q_vector->rxr_count; i++) {
861 rx_ring = &(adapter->rx_ring[r_idx]); 863 rx_ring = &(adapter->rx_ring[r_idx]);
862 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 864 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
863 q_vector->rx_eitr, 865 q_vector->rx_itr,
864 rx_ring->total_packets, 866 rx_ring->total_packets,
865 rx_ring->total_bytes); 867 rx_ring->total_bytes);
866 /* if the result for this queue would decrease interrupt 868 /* if the result for this queue would decrease interrupt
867 * rate for this vector then use that result */ 869 * rate for this vector then use that result */
868 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? 870 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
869 q_vector->rx_eitr - 1 : ret_itr); 871 q_vector->rx_itr - 1 : ret_itr);
870 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 872 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
871 r_idx + 1); 873 r_idx + 1);
872 } 874 }
873 875
874 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 876 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
875 877
876 switch (current_itr) { 878 switch (current_itr) {
877 /* counts and packets in update_itr are dependent on these numbers */ 879 /* counts and packets in update_itr are dependent on these numbers */
@@ -895,13 +897,27 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
895 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 897 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
896 /* must write high and low 16 bits to reset counter */ 898 /* must write high and low 16 bits to reset counter */
897 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 899 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
898 itr_reg); 900 itr_reg);
899 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); 901 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
900 } 902 }
901 903
902 return; 904 return;
903} 905}
904 906
907
908static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
909{
910 struct ixgbe_hw *hw = &adapter->hw;
911
912 adapter->lsc_int++;
913 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
914 adapter->link_check_timeout = jiffies;
915 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
916 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
917 schedule_work(&adapter->watchdog_task);
918 }
919}
920
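ixgbe_check_lsc() centralizes the link-state-change handling that the legacy and MSI-X interrupt paths previously duplicated, and changes strategy: instead of re-arming the watchdog timer from hard-IRQ context, it masks further LSC interrupts, records that a link update is pending, and defers the real work to the watchdog task. A hedged sketch of the re-arm the task side presumably performs once the link has been re-read (that counterpart lies outside the visible hunks):

	/* presumed counterpart in the watchdog task: clear the flag and
	 * unmask LSC so the next link transition can interrupt again */
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);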
905static irqreturn_t ixgbe_msix_lsc(int irq, void *data) 921static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
906{ 922{
907 struct net_device *netdev = data; 923 struct net_device *netdev = data;
@@ -909,11 +925,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
909 struct ixgbe_hw *hw = &adapter->hw; 925 struct ixgbe_hw *hw = &adapter->hw;
910 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 926 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
911 927
912 if (eicr & IXGBE_EICR_LSC) { 928 if (eicr & IXGBE_EICR_LSC)
913 adapter->lsc_int++; 929 ixgbe_check_lsc(adapter);
914 if (!test_bit(__IXGBE_DOWN, &adapter->state))
915 mod_timer(&adapter->watchdog_timer, jiffies);
916 }
917 930
918 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 931 if (!test_bit(__IXGBE_DOWN, &adapter->state))
919 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 932 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -934,7 +947,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
934 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 947 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
935 for (i = 0; i < q_vector->txr_count; i++) { 948 for (i = 0; i < q_vector->txr_count; i++) {
936 tx_ring = &(adapter->tx_ring[r_idx]); 949 tx_ring = &(adapter->tx_ring[r_idx]);
937#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 950#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
938 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 951 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
939 ixgbe_update_tx_dca(adapter, tx_ring); 952 ixgbe_update_tx_dca(adapter, tx_ring);
940#endif 953#endif
@@ -942,7 +955,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
942 tx_ring->total_packets = 0; 955 tx_ring->total_packets = 0;
943 ixgbe_clean_tx_irq(adapter, tx_ring); 956 ixgbe_clean_tx_irq(adapter, tx_ring);
944 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 957 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
945 r_idx + 1); 958 r_idx + 1);
946 } 959 }
947 960
948 return IRQ_HANDLED; 961 return IRQ_HANDLED;
@@ -959,16 +972,24 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
959 struct ixgbe_adapter *adapter = q_vector->adapter; 972 struct ixgbe_adapter *adapter = q_vector->adapter;
960 struct ixgbe_ring *rx_ring; 973 struct ixgbe_ring *rx_ring;
961 int r_idx; 974 int r_idx;
975 int i;
962 976
963 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 977 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
978 for (i = 0; i < q_vector->rxr_count; i++) {
979 rx_ring = &(adapter->rx_ring[r_idx]);
980 rx_ring->total_bytes = 0;
981 rx_ring->total_packets = 0;
982 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
983 r_idx + 1);
984 }
985
964 if (!q_vector->rxr_count) 986 if (!q_vector->rxr_count)
965 return IRQ_HANDLED; 987 return IRQ_HANDLED;
966 988
989 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
967 rx_ring = &(adapter->rx_ring[r_idx]); 990 rx_ring = &(adapter->rx_ring[r_idx]);
968 /* disable interrupts on this vector only */ 991 /* disable interrupts on this vector only */
969 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
970 rx_ring->total_bytes = 0;
971 rx_ring->total_packets = 0;
972 netif_rx_schedule(adapter->netdev, &q_vector->napi); 993 netif_rx_schedule(adapter->netdev, &q_vector->napi);
973 994
974 return IRQ_HANDLED; 995 return IRQ_HANDLED;
@@ -987,19 +1008,21 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
987 * @napi: napi struct with our device's info in it 1008 * @napi: napi struct with our device's info in it
988 * @budget: amount of work driver is allowed to do this pass, in packets 1009 * @budget: amount of work driver is allowed to do this pass, in packets
989 * 1010 *
1011 * This function is optimized for cleaning one queue only on a single
1012 * q_vector!!!
990 **/ 1013 **/
991static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 1014static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
992{ 1015{
993 struct ixgbe_q_vector *q_vector = 1016 struct ixgbe_q_vector *q_vector =
994 container_of(napi, struct ixgbe_q_vector, napi); 1017 container_of(napi, struct ixgbe_q_vector, napi);
995 struct ixgbe_adapter *adapter = q_vector->adapter; 1018 struct ixgbe_adapter *adapter = q_vector->adapter;
996 struct ixgbe_ring *rx_ring; 1019 struct ixgbe_ring *rx_ring = NULL;
997 int work_done = 0; 1020 int work_done = 0;
998 long r_idx; 1021 long r_idx;
999 1022
1000 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1023 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1001 rx_ring = &(adapter->rx_ring[r_idx]); 1024 rx_ring = &(adapter->rx_ring[r_idx]);
1002#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 1025#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1003 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1026 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1004 ixgbe_update_rx_dca(adapter, rx_ring); 1027 ixgbe_update_rx_dca(adapter, rx_ring);
1005#endif 1028#endif
@@ -1009,7 +1032,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1009 /* If all Rx work done, exit the polling mode */ 1032 /* If all Rx work done, exit the polling mode */
1010 if (work_done < budget) { 1033 if (work_done < budget) {
1011 netif_rx_complete(adapter->netdev, napi); 1034 netif_rx_complete(adapter->netdev, napi);
1012 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 1035 if (adapter->itr_setting & 3)
1013 ixgbe_set_itr_msix(q_vector); 1036 ixgbe_set_itr_msix(q_vector);
1014 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1037 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1015 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); 1038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
@@ -1018,8 +1041,57 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1018 return work_done; 1041 return work_done;
1019} 1042}
1020 1043
1044/**
1045 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
1046 * @napi: napi struct with our device's info in it
1047 * @budget: amount of work driver is allowed to do this pass, in packets
1048 *
1049 * This function will clean more than one rx queue associated with a
1050 * q_vector.
1051 **/
1052static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1053{
1054 struct ixgbe_q_vector *q_vector =
1055 container_of(napi, struct ixgbe_q_vector, napi);
1056 struct ixgbe_adapter *adapter = q_vector->adapter;
1057 struct ixgbe_ring *rx_ring = NULL;
1058 int work_done = 0, i;
1059 long r_idx;
1060 u16 enable_mask = 0;
1061
1062 /* attempt to distribute budget to each queue fairly, but don't allow
1063 * the budget to go below 1 because we'll exit polling */
1064 budget /= (q_vector->rxr_count ?: 1);
1065 budget = max(budget, 1);
1066 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1067 for (i = 0; i < q_vector->rxr_count; i++) {
1068 rx_ring = &(adapter->rx_ring[r_idx]);
1069#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1070 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1071 ixgbe_update_rx_dca(adapter, rx_ring);
1072#endif
1073 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
1074 enable_mask |= rx_ring->v_idx;
1075 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1076 r_idx + 1);
1077 }
1078
1079 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1080 rx_ring = &(adapter->rx_ring[r_idx]);
1081 /* If all Rx work done, exit the polling mode */
1082 if (work_done < budget) {
1083 netif_rx_complete(adapter->netdev, napi);
1084 if (adapter->itr_setting & 3)
1085 ixgbe_set_itr_msix(q_vector);
1086 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1087 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
1088 return 0;
1089 }
1090
1091 return work_done;
1092}
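ixgbe_clean_rxonly_many() above splits the NAPI budget evenly across the rx rings its vector owns: the ?: guards the degenerate zero-ring case and max() keeps each share at least 1 so every ring can make progress. Note the completion test: work_done accumulates across all rings yet is compared against the per-ring share, so the vector leaves polling only when the rings together produced less than one share of packets. enable_mask collects v_idx bits so that just the serviced rings are unmasked in EIMS. The budget arithmetic in isolation:

	/* e.g. a budget of 64 across 3 rings gives 21 per ring; the
	 * clamp keeps the share at 1 even when budget < ring count */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);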
1021static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1093
1022 int r_idx) 1094 int r_idx)
1022 int r_idx) 1094 int r_idx)
1023{ 1095{
1024 a->q_vector[v_idx].adapter = a; 1096 a->q_vector[v_idx].adapter = a;
1025 set_bit(r_idx, a->q_vector[v_idx].rxr_idx); 1097 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
@@ -1028,7 +1100,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1028} 1100}
1029 1101
1030static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1102static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1031 int r_idx) 1103 int r_idx)
1032{ 1104{
1033 a->q_vector[v_idx].adapter = a; 1105 a->q_vector[v_idx].adapter = a;
1034 set_bit(r_idx, a->q_vector[v_idx].txr_idx); 1106 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1048,7 +1120,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1048 * mapping configurations in here. 1120 * mapping configurations in here.
1049 **/ 1121 **/
1050static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 1122static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1051 int vectors) 1123 int vectors)
1052{ 1124{
1053 int v_start = 0; 1125 int v_start = 0;
1054 int rxr_idx = 0, txr_idx = 0; 1126 int rxr_idx = 0, txr_idx = 0;
@@ -1125,28 +1197,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1125 goto out; 1197 goto out;
1126 1198
1127#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 1199#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1128 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1200 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1129 &ixgbe_msix_clean_many) 1201 &ixgbe_msix_clean_many)
1130 for (vector = 0; vector < q_vectors; vector++) { 1202 for (vector = 0; vector < q_vectors; vector++) {
1131 handler = SET_HANDLER(&adapter->q_vector[vector]); 1203 handler = SET_HANDLER(&adapter->q_vector[vector]);
1132 sprintf(adapter->name[vector], "%s:v%d-%s", 1204 sprintf(adapter->name[vector], "%s:v%d-%s",
1133 netdev->name, vector, 1205 netdev->name, vector,
1134 (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1206 (handler == &ixgbe_msix_clean_rx) ? "Rx" :
1135 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1207 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
1136 err = request_irq(adapter->msix_entries[vector].vector, 1208 err = request_irq(adapter->msix_entries[vector].vector,
1137 handler, 0, adapter->name[vector], 1209 handler, 0, adapter->name[vector],
1138 &(adapter->q_vector[vector])); 1210 &(adapter->q_vector[vector]));
1139 if (err) { 1211 if (err) {
1140 DPRINTK(PROBE, ERR, 1212 DPRINTK(PROBE, ERR,
1141 "request_irq failed for MSIX interrupt " 1213 "request_irq failed for MSIX interrupt "
1142 "Error: %d\n", err); 1214 "Error: %d\n", err);
1143 goto free_queue_irqs; 1215 goto free_queue_irqs;
1144 } 1216 }
1145 } 1217 }
1146 1218
1147 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1219 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1148 err = request_irq(adapter->msix_entries[vector].vector, 1220 err = request_irq(adapter->msix_entries[vector].vector,
1149 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1221 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1150 if (err) { 1222 if (err) {
1151 DPRINTK(PROBE, ERR, 1223 DPRINTK(PROBE, ERR,
1152 "request_irq for msix_lsc failed: %d\n", err); 1224 "request_irq for msix_lsc failed: %d\n", err);
@@ -1158,7 +1230,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
         for (i = vector - 1; i >= 0; i--)
                 free_irq(adapter->msix_entries[--vector].vector,
                          &(adapter->q_vector[i]));
         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
         pci_disable_msix(adapter->pdev);
         kfree(adapter->msix_entries);
@@ -1176,16 +1248,16 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
         struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
         struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
 
-        q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
-                                             q_vector->tx_eitr,
+        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
+                                            q_vector->tx_itr,
                                             tx_ring->total_packets,
                                             tx_ring->total_bytes);
-        q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
-                                             q_vector->rx_eitr,
+        q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
+                                            q_vector->rx_itr,
                                             rx_ring->total_packets,
                                             rx_ring->total_bytes);
 
-        current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 
         switch (current_itr) {
         /* counts and packets in update_itr are dependent on these numbers */
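
The renamed tx_itr/rx_itr fields feed a per-vector throttle decision: each side is classified independently and the vector as a whole runs at the rate of whichever side is more bulk-like. A toy illustration; the interrupts-per-second values are assumed for illustration, not taken from the driver:

#include <stdio.h>

enum itr_class { lowest_latency, low_latency, bulk_latency };

static const unsigned int ints_per_sec[] = {
        [lowest_latency] = 100000,      /* assumed values, illustrative only */
        [low_latency]    = 20000,
        [bulk_latency]   = 8000,
};

int main(void)
{
        enum itr_class rx_itr = low_latency, tx_itr = bulk_latency;

        /* current_itr = max(rx_itr, tx_itr): the busier side wins */
        enum itr_class current_itr = rx_itr > tx_itr ? rx_itr : tx_itr;

        printf("throttle to %u interrupts/sec\n", ints_per_sec[current_itr]);
        return 0;
}
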
@@ -1230,19 +1302,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
         struct ixgbe_hw *hw = &adapter->hw;
         u32 eicr;
 
-
         /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
          * therefore no explict interrupt disable is necessary */
         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-        if (!eicr)
+        if (!eicr) {
+                /* shared interrupt alert!
+                 * make sure interrupts are enabled because the read will
+                 * have disabled interrupts due to EIAM */
+                ixgbe_irq_enable(adapter);
                 return IRQ_NONE;        /* Not our interrupt */
-
-        if (eicr & IXGBE_EICR_LSC) {
-                adapter->lsc_int++;
-                if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                        mod_timer(&adapter->watchdog_timer, jiffies);
         }
 
+        if (eicr & IXGBE_EICR_LSC)
+                ixgbe_check_lsc(adapter);
 
         if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                 adapter->tx_ring[0].total_packets = 0;
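
The fix above matters because reading EICR auto-masks interrupts via EIAM: on a shared line, returning IRQ_NONE without unmasking would leave the device's interrupts off forever. The pattern, modeled in a few lines of plain C with the register behaviour mocked:

#include <stdio.h>

static unsigned int eicr;       /* mock cause register, 0 = not our interrupt */
static int masked;

static unsigned int read_eicr(void)
{
        masked = 1;             /* EIAM-style auto-mask on read */
        return eicr;
}

static void irq_enable(void) { masked = 0; }

static int toy_intr(void)
{
        unsigned int cause = read_eicr();

        if (!cause) {
                irq_enable();   /* shared line: undo the auto-mask first */
                return 0;       /* IRQ_NONE */
        }
        /* ... service cause bits ... */
        return 1;               /* IRQ_HANDLED */
}

int main(void)
{
        toy_intr();
        printf("still masked after stray interrupt: %d\n", masked); /* 0 */
        return 0;
}
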
@@ -1285,10 +1357,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
                 err = ixgbe_request_msix_irqs(adapter);
         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                   netdev->name, netdev);
         } else {
                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                   netdev->name, netdev);
         }
 
         if (err)
@@ -1312,7 +1384,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                 i--;
                 for (; i >= 0; i--) {
                         free_irq(adapter->msix_entries[i].vector,
                                  &(adapter->q_vector[i]));
                 }
 
                 ixgbe_reset_q_vectors(adapter);
@@ -1359,7 +1431,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
         struct ixgbe_hw *hw = &adapter->hw;
 
         IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-                        EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
+                        EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
 
         ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
         ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
@@ -1445,8 +1517,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
                 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                 srrctl |= ((IXGBE_RX_HDR_SIZE <<
                             IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                            IXGBE_SRRCTL_BSIZEHDR_MASK);
         } else {
                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
@@ -1463,7 +1535,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 /**
  * ixgbe_get_skb_hdr - helper function for LRO header processing
  * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to tcp header structure
+ * @iphdr: pointer to ip header structure
  * @tcph: pointer to tcp header structure
  * @hdr_flags: pointer to header flags
  * @priv: private data
@@ -1488,7 +1560,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
 }
 
 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
                            (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
@@ -1514,10 +1586,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
         int rx_buf_len;
 
         /* Decide whether to use packet split mode or not */
-        if (netdev->mtu > ETH_DATA_LEN)
-                adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-        else
-                adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
         /* Set the RX buffer length according to the mode */
         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -1638,7 +1707,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                    struct vlan_group *grp)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         u32 ctrl;
@@ -1662,14 +1731,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
+        struct ixgbe_hw *hw = &adapter->hw;
 
         /* add VID to filter table */
-        ixgbe_set_vfta(&adapter->hw, vid, 0, true);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
+        struct ixgbe_hw *hw = &adapter->hw;
 
         if (!test_bit(__IXGBE_DOWN, &adapter->state))
                 ixgbe_irq_disable(adapter);
@@ -1680,7 +1751,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                 ixgbe_irq_enable(adapter);
 
         /* remove VID from filter table */
-        ixgbe_set_vfta(&adapter->hw, vid, 0, false);
+        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
 }
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -1756,15 +1827,15 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
         addr_count = netdev->uc_count;
         if (addr_count)
                 addr_list = netdev->uc_list->dmi_addr;
-        ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
+        hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
                                   ixgbe_addr_list_itr);
 
         /* reprogram multicast list */
         addr_count = netdev->mc_count;
         if (addr_count)
                 addr_list = netdev->mc_list->dmi_addr;
-        ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
+        hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                   ixgbe_addr_list_itr);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -1778,10 +1849,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
                 q_vectors = 1;
 
         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+                struct napi_struct *napi;
                 q_vector = &adapter->q_vector[q_idx];
                 if (!q_vector->rxr_count)
                         continue;
-                napi_enable(&q_vector->napi);
+                napi = &q_vector->napi;
+                if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
+                    (q_vector->rxr_count > 1))
+                        napi->poll = &ixgbe_clean_rxonly_many;
+
+                napi_enable(napi);
         }
 }
 
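
The hunk above swaps in a different poll routine for MSI-X vectors that own more than one Rx ring, so a single napi context can service several rings. Reduced to its essentials, with toy types and hypothetical handler names:

#include <stdio.h>

struct toy_napi { int (*poll)(int budget); };

static int clean_rxonly(int budget)      { return budget; }
static int clean_rxonly_many(int budget) { return budget; }

int main(void)
{
        struct toy_napi napi = { .poll = clean_rxonly };
        int msix_enabled = 1, rxr_count = 3;

        /* vectors owning several Rx rings get the round-robin poller */
        if (msix_enabled && rxr_count > 1)
                napi.poll = clean_rxonly_many;

        printf("%s poller installed\n",
               napi.poll == clean_rxonly_many ? "multi-ring" : "single-ring");
        return 0;
}
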
@@ -1816,7 +1893,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
         ixgbe_configure_rx(adapter);
         for (i = 0; i < adapter->num_rx_queues; i++)
                 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
                                        (adapter->rx_ring[i].count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -1834,7 +1911,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
             (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                         gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
                                 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
                 } else {
                         /* MSI only */
                         gpie = 0;
@@ -1897,6 +1974,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
         /* bring the link up in the watchdog, this could race with our first
          * link up interrupt but shouldn't be a problem */
+        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+        adapter->link_check_timeout = jiffies;
         mod_timer(&adapter->watchdog_timer, jiffies);
         return 0;
 }
@@ -1921,58 +2000,22 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
-        if (ixgbe_init_hw(&adapter->hw))
-                DPRINTK(PROBE, ERR, "Hardware Error\n");
+        struct ixgbe_hw *hw = &adapter->hw;
+        if (hw->mac.ops.init_hw(hw))
+                dev_err(&adapter->pdev->dev, "Hardware Error\n");
 
         /* reprogram the RAR[0] in case user changed it. */
-        ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
 }
 
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
-{
-        struct net_device *netdev = pci_get_drvdata(pdev);
-        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-        u32 err;
-
-        pci_set_power_state(pdev, PCI_D0);
-        pci_restore_state(pdev);
-        err = pci_enable_device(pdev);
-        if (err) {
-                printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
-                       "suspend\n");
-                return err;
-        }
-        pci_set_master(pdev);
-
-        pci_enable_wake(pdev, PCI_D3hot, 0);
-        pci_enable_wake(pdev, PCI_D3cold, 0);
-
-        if (netif_running(netdev)) {
-                err = ixgbe_request_irq(adapter);
-                if (err)
-                        return err;
-        }
-
-        ixgbe_reset(adapter);
-
-        if (netif_running(netdev))
-                ixgbe_up(adapter);
-
-        netif_device_attach(netdev);
-
-        return 0;
-}
-#endif
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                 struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         unsigned long size;
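
ixgbe_reset now reaches the hardware through the per-MAC ops table (hw->mac.ops.init_hw, set_rar) rather than calling ixgbe_* helpers directly, so other MAC generations can plug in their own implementations. The shape of that indirection, as a self-contained toy:

#include <stdio.h>

struct toy_hw;

struct toy_mac_ops {
        int (*init_hw)(struct toy_hw *hw);      /* per-MAC implementation */
};

struct toy_hw {
        struct toy_mac_ops ops;
};

static int init_82598(struct toy_hw *hw) { (void)hw; return 0; }

int main(void)
{
        struct toy_hw hw = { .ops = { .init_hw = init_82598 } };

        /* mirrors: if (hw->mac.ops.init_hw(hw)) dev_err(...) */
        if (hw.ops.init_hw(&hw))
                puts("Hardware Error");
        else
                puts("init ok");
        return 0;
}
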
@@ -1986,8 +2029,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
                 if (rx_buffer_info->dma) {
                         pci_unmap_single(pdev, rx_buffer_info->dma,
                                          rx_ring->rx_buf_len,
                                          PCI_DMA_FROMDEVICE);
                         rx_buffer_info->dma = 0;
                 }
                 if (rx_buffer_info->skb) {
@@ -1996,12 +2039,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                 }
                 if (!rx_buffer_info->page)
                         continue;
-                pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
+                pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
                                PCI_DMA_FROMDEVICE);
                 rx_buffer_info->page_dma = 0;
-
                 put_page(rx_buffer_info->page);
                 rx_buffer_info->page = NULL;
+                rx_buffer_info->page_offset = 0;
         }
 
         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -2023,7 +2066,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
  * @tx_ring: ring to be cleaned
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
                                 struct ixgbe_ring *tx_ring)
 {
         struct ixgbe_tx_buffer *tx_buffer_info;
         unsigned long size;
@@ -2076,33 +2119,43 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
+        struct ixgbe_hw *hw = &adapter->hw;
         u32 rxctrl;
+        u32 txdctl;
+        int i, j;
 
         /* signal that we are down to the interrupt handler */
         set_bit(__IXGBE_DOWN, &adapter->state);
 
         /* disable receives */
-        rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
-        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
-                        rxctrl & ~IXGBE_RXCTRL_RXEN);
+        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
         netif_tx_disable(netdev);
 
-        /* disable transmits in the hardware */
-
-        /* flush both disables */
-        IXGBE_WRITE_FLUSH(&adapter->hw);
+        IXGBE_WRITE_FLUSH(hw);
         msleep(10);
 
+        netif_tx_stop_all_queues(netdev);
+
         ixgbe_irq_disable(adapter);
 
         ixgbe_napi_disable_all(adapter);
+
         del_timer_sync(&adapter->watchdog_timer);
+        cancel_work_sync(&adapter->watchdog_task);
+
+        /* disable transmits in the hardware now that interrupts are off */
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                j = adapter->tx_ring[i].reg_idx;
+                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
+        }
 
         netif_carrier_off(netdev);
-        netif_tx_stop_all_queues(netdev);
 
-#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE)
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                 dca_remove_requester(&adapter->pdev->dev);
@@ -2114,56 +2167,18 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         ixgbe_clean_all_tx_rings(adapter);
         ixgbe_clean_all_rx_rings(adapter);
 
-#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE)
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
         /* since we reset the hardware DCA settings were cleared */
         if (dca_add_requester(&adapter->pdev->dev) == 0) {
                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                 /* always use CB2 mode, difference is masked
                  * in the CB driver */
-                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+                IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
                 ixgbe_setup_dca(adapter);
         }
 #endif
 }
 
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-        struct net_device *netdev = pci_get_drvdata(pdev);
-        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-        int retval = 0;
-#endif
-
-        netif_device_detach(netdev);
-
-        if (netif_running(netdev)) {
-                ixgbe_down(adapter);
-                ixgbe_free_irq(adapter);
-        }
-
-#ifdef CONFIG_PM
-        retval = pci_save_state(pdev);
-        if (retval)
-                return retval;
-#endif
-
-        pci_enable_wake(pdev, PCI_D3hot, 0);
-        pci_enable_wake(pdev, PCI_D3cold, 0);
-
-        ixgbe_release_hw_control(adapter);
-
-        pci_disable_device(pdev);
-
-        pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-        return 0;
-}
-
-static void ixgbe_shutdown(struct pci_dev *pdev)
-{
-        ixgbe_suspend(pdev, PMSG_SUSPEND);
-}
-
 /**
  * ixgbe_poll - NAPI Rx polling callback
  * @napi: structure for representing this polling device
@@ -2174,11 +2189,11 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
 static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
         struct ixgbe_q_vector *q_vector = container_of(napi,
                                                        struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        int tx_cleaned = 0, work_done = 0;
+        int tx_cleaned, work_done = 0;
 
-#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE)
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
                 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
@@ -2194,12 +2209,11 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
         /* If budget not fully consumed, exit the polling mode */
         if (work_done < budget) {
                 netif_rx_complete(adapter->netdev, napi);
-                if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+                if (adapter->itr_setting & 3)
                         ixgbe_set_itr(adapter);
                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
                         ixgbe_irq_enable(adapter);
         }
-
         return work_done;
 }
 
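
The tail of ixgbe_poll is the canonical NAPI exit condition: if the ring was drained before the budget ran out, leave polled mode and re-arm interrupts; otherwise stay in polling. In miniature:

#include <stdio.h>

/* returns 1 if the sketch would leave polled mode and re-enable irqs */
static int poll_done(int work_done, int budget)
{
        /* "If budget not fully consumed, exit the polling mode" */
        return work_done < budget;
}

int main(void)
{
        printf("budget left -> irqs on: %d\n", poll_done(10, 64));       /* 1 */
        printf("budget spent -> keep polling: %d\n", !poll_done(64, 64)); /* 1 */
        return 0;
}
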
@@ -2225,8 +2239,48 @@ static void ixgbe_reset_task(struct work_struct *work)
         ixgbe_reinit_locked(adapter);
 }
 
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+        int nrq = 1, ntq = 1;
+        int feature_mask = 0, rss_i, rss_m;
+
+        /* Number of supported queues */
+        switch (adapter->hw.mac.type) {
+        case ixgbe_mac_82598EB:
+                rss_i = adapter->ring_feature[RING_F_RSS].indices;
+                rss_m = 0;
+                feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+
+                switch (adapter->flags & feature_mask) {
+                case (IXGBE_FLAG_RSS_ENABLED):
+                        rss_m = 0xF;
+                        nrq = rss_i;
+                        ntq = rss_i;
+                        break;
+                case 0:
+                default:
+                        rss_i = 0;
+                        rss_m = 0;
+                        nrq = 1;
+                        ntq = 1;
+                        break;
+                }
+
+                adapter->ring_feature[RING_F_RSS].indices = rss_i;
+                adapter->ring_feature[RING_F_RSS].mask = rss_m;
+                break;
+        default:
+                nrq = 1;
+                ntq = 1;
+                break;
+        }
+
+        adapter->num_rx_queues = nrq;
+        adapter->num_tx_queues = ntq;
+}
+
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                        int vectors)
 {
         int err, vector_threshold;
 
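
ixgbe_set_num_queues, now callable at runtime rather than only at probe (the __devinit annotation is dropped below), boils down to: with RSS enabled the Rx/Tx queue counts follow the RSS indices, otherwise both fall back to one. Condensed into a standalone sketch:

#include <stdio.h>

#define FLAG_RSS_ENABLED 0x1    /* stand-in for IXGBE_FLAG_RSS_ENABLED */

static void set_num_queues(unsigned int flags, int rss_i, int *nrq, int *ntq)
{
        if (flags & FLAG_RSS_ENABLED) {
                *nrq = rss_i;   /* one Rx and one Tx queue per RSS index */
                *ntq = rss_i;
        } else {
                *nrq = 1;
                *ntq = 1;
        }
}

int main(void)
{
        int nrq, ntq;

        set_num_queues(FLAG_RSS_ENABLED, 4, &nrq, &ntq);
        printf("rx=%d tx=%d\n", nrq, ntq);      /* rx=4 tx=4 */
        return 0;
}
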
@@ -2245,7 +2299,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
          */
         while (vectors >= vector_threshold) {
                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                       vectors);
                 if (!err) /* Success in acquiring all requested vectors. */
                         break;
                 else if (err < 0)
@@ -2264,54 +2318,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                 kfree(adapter->msix_entries);
                 adapter->msix_entries = NULL;
                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-                adapter->num_tx_queues = 1;
-                adapter->num_rx_queues = 1;
+                ixgbe_set_num_queues(adapter);
         } else {
                 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
                 adapter->num_msix_vectors = vectors;
         }
 }
 
-static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
-        int nrq, ntq;
-        int feature_mask = 0, rss_i, rss_m;
-
-        /* Number of supported queues */
-        switch (adapter->hw.mac.type) {
-        case ixgbe_mac_82598EB:
-                rss_i = adapter->ring_feature[RING_F_RSS].indices;
-                rss_m = 0;
-                feature_mask |= IXGBE_FLAG_RSS_ENABLED;
-
-                switch (adapter->flags & feature_mask) {
-                case (IXGBE_FLAG_RSS_ENABLED):
-                        rss_m = 0xF;
-                        nrq = rss_i;
-                        ntq = rss_i;
-                        break;
-                case 0:
-                default:
-                        rss_i = 0;
-                        rss_m = 0;
-                        nrq = 1;
-                        ntq = 1;
-                        break;
-                }
-
-                adapter->ring_feature[RING_F_RSS].indices = rss_i;
-                adapter->ring_feature[RING_F_RSS].mask = rss_m;
-                break;
-        default:
-                nrq = 1;
-                ntq = 1;
-                break;
-        }
-
-        adapter->num_rx_queues = nrq;
-        adapter->num_tx_queues = ntq;
-}
-
 /**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
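
The acquisition loop above leans on pci_enable_msix's contract: a positive return value is the number of vectors the platform could actually provide, so the driver retries with that count until it succeeds or drops below its minimum. The same negotiation with the PCI call mocked out:

#include <stdio.h>

/* mock of pci_enable_msix: 0 on success, or the number of vectors
 * that would have fit when asked for too many */
static int fake_enable_msix(int available, int requested)
{
        return requested <= available ? 0 : available;
}

int main(void)
{
        int vectors = 16, vector_threshold = 2, available = 5, err;

        while (vectors >= vector_threshold) {
                err = fake_enable_msix(available, vectors);
                if (!err)
                        break;          /* got all requested vectors */
                else if (err < 0)
                        vectors = 0;    /* hard failure: give up on MSI-X */
                else
                        vectors = err;  /* retry with what the host offers */
        }
        printf("settled on %d vectors\n", vectors);     /* 5 */
        return 0;
}
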
@@ -2321,9 +2334,6 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
  **/
 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
-        /* TODO: Remove all uses of the indices in the cases where multiple
-         * features are OR'd together, if the feature set makes sense.
-         */
         int feature_mask = 0, rss_i;
         int i, txr_idx, rxr_idx;
 
@@ -2364,21 +2374,22 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
         int i;
 
         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
         if (!adapter->tx_ring)
                 goto err_tx_ring_allocation;
 
         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
         if (!adapter->rx_ring)
                 goto err_rx_ring_allocation;
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+                adapter->tx_ring[i].count = adapter->tx_ring_count;
                 adapter->tx_ring[i].queue_index = i;
         }
+
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+                adapter->rx_ring[i].count = adapter->rx_ring_count;
                 adapter->rx_ring[i].queue_index = i;
         }
 
@@ -2400,25 +2411,19 @@ err_tx_ring_allocation:
  * capabilities of the hardware and the kernel.
  **/
 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
                                                     *adapter)
 {
         int err = 0;
         int vector, v_budget;
 
         /*
-         * Set the default interrupt throttle rate.
-         */
-        adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
-        adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
-
-        /*
          * It's easy to be greedy for MSI-X vectors, but it really
          * doesn't do us much good if we have a lot more vectors
          * than CPU's. So let's be conservative and only ask for
          * (roughly) twice the number of vectors as there are CPU's.
          */
         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
 
         /*
          * At the same time, hardware can only support a maximum of
@@ -2432,7 +2437,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
         /* A failure in MSI-X entry allocation isn't fatal, but it does
          * mean we disable MSI-X capabilities of the adapter. */
         adapter->msix_entries = kcalloc(v_budget,
                                         sizeof(struct msix_entry), GFP_KERNEL);
         if (!adapter->msix_entries) {
                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
                 ixgbe_set_num_queues(adapter);
@@ -2441,7 +2446,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
         err = ixgbe_alloc_queues(adapter);
         if (err) {
                 DPRINTK(PROBE, ERR, "Unable to allocate memory "
                         "for queues\n");
                 goto out;
         }
 
@@ -2462,7 +2467,7 @@ try_msi:
                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
         } else {
                 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
                         "falling back to legacy. Error: %d\n", err);
                 /* reset err */
                 err = 0;
         }
@@ -2518,9 +2523,9 @@ static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
         }
 
         DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
                 "Tx Queue count = %u\n",
                 (adapter->num_rx_queues > 1) ? "Enabled" :
                 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
 
         set_bit(__IXGBE_DOWN, &adapter->state);
 
@@ -2547,15 +2552,19 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
         struct pci_dev *pdev = adapter->pdev;
         unsigned int rss;
 
+        /* PCI config space info */
+
+        hw->vendor_id = pdev->vendor;
+        hw->device_id = pdev->device;
+        hw->revision_id = pdev->revision;
+        hw->subsystem_vendor_id = pdev->subsystem_vendor;
+        hw->subsystem_device_id = pdev->subsystem_device;
+
         /* Set capability flags */
         rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
         adapter->ring_feature[RING_F_RSS].indices = rss;
         adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 
-        /* Enable Dynamic interrupt throttling by default */
-        adapter->rx_eitr = 1;
-        adapter->tx_eitr = 1;
-
         /* default flow control settings */
         hw->fc.original_type = ixgbe_fc_none;
         hw->fc.type = ixgbe_fc_none;
@@ -2566,18 +2575,21 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
         /* select 10G link by default */
         hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-        if (hw->mac.ops.reset(hw)) {
-                dev_err(&pdev->dev, "HW Init failed\n");
-                return -EIO;
-        }
-        if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
-                                         false)) {
-                dev_err(&pdev->dev, "Link Speed setup failed\n");
-                return -EIO;
-        }
+
+        /* enable itr by default in dynamic mode */
+        adapter->itr_setting = 1;
+        adapter->eitr_param = 20000;
+
+        /* set defaults for eitr in MegaBytes */
+        adapter->eitr_low = 10;
+        adapter->eitr_high = 20;
+
+        /* set default ring sizes */
+        adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
         /* initialize eeprom parameters */
-        if (ixgbe_init_eeprom(hw)) {
+        if (ixgbe_init_eeprom_params_generic(hw)) {
                 dev_err(&pdev->dev, "EEPROM initialization failed\n");
                 return -EIO;
         }
@@ -2633,6 +2645,31 @@ err:
 }
 
 /**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+        int i, err = 0;
+
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+                if (!err)
+                        continue;
+                DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+                break;
+        }
+
+        return err;
+}
+
+/**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
@@ -2640,7 +2677,7 @@ err:
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                              struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         int size;
@@ -2655,7 +2692,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
         rx_ring->rx_buffer_info = vmalloc(size);
         if (!rx_ring->rx_buffer_info) {
                 DPRINTK(PROBE, ERR,
                         "vmalloc allocation failed for the rx desc ring\n");
                 goto alloc_failed;
         }
         memset(rx_ring->rx_buffer_info, 0, size);
@@ -2668,7 +2705,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
         if (!rx_ring->desc) {
                 DPRINTK(PROBE, ERR,
                         "Memory allocation failed for the rx desc ring\n");
                 vfree(rx_ring->rx_buffer_info);
                 goto alloc_failed;
         }
@@ -2685,14 +2722,40 @@ alloc_failed:
 }
 
 /**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+        int i, err = 0;
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+                if (!err)
+                        continue;
+                DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+                break;
+        }
+
+        return err;
+}
+
+/**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
 * Free all transmit software resources
 **/
-static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                                    struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *tx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
 
@@ -2727,8 +2790,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
  *
 * Free all receive software resources
 **/
-static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-                                    struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
 
@@ -2760,59 +2823,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
-        int i, err = 0;
-
-        for (i = 0; i < adapter->num_tx_queues; i++) {
-                err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-                if (err) {
-                        DPRINTK(PROBE, ERR,
-                                "Allocation for Tx Queue %u failed\n", i);
-                        break;
-                }
-        }
-
-        return err;
-}
-
-/**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
-        int i, err = 0;
-
-        for (i = 0; i < adapter->num_rx_queues; i++) {
-                err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-                if (err) {
-                        DPRINTK(PROBE, ERR,
-                                "Allocation for Rx Queue %u failed\n", i);
-                        break;
-                }
-        }
-
-        return err;
-}
-
-/**
  * ixgbe_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
  * @new_mtu: new value for maximum frame size
@@ -2824,12 +2834,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-        if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
-            (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+        /* MTU < 68 is an error and causes problems on some kernels */
+        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
                 return -EINVAL;
 
         DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
                 netdev->mtu, new_mtu);
         /* must set new MTU before calling down or up */
         netdev->mtu = new_mtu;
 
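
The new MTU check rejects anything under 68 bytes (the practical IPv4 minimum) instead of deriving the lower bound from ETH_ZLEN, while the upper bound still compares the full frame size against the jumbo limit. A compilable sketch; the 16128-byte limit is an assumed value standing in for IXGBE_MAX_JUMBO_FRAME_SIZE:

#include <stdio.h>

#define ETH_HLEN                14
#define ETH_FCS_LEN             4
#define MAX_JUMBO_FRAME_SIZE    16128   /* assumed jumbo limit */

static int mtu_valid(int new_mtu)
{
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        /* MTU < 68 is an error and causes problems on some kernels */
        return new_mtu >= 68 && max_frame <= MAX_JUMBO_FRAME_SIZE;
}

int main(void)
{
        printf("%d %d %d\n", mtu_valid(67), mtu_valid(1500),
               mtu_valid(16500));       /* prints: 0 1 0 */
        return 0;
}
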
@@ -2924,6 +2934,135 @@ static int ixgbe_close(struct net_device *netdev)
 }
 
 /**
+ * ixgbe_napi_add_all - prep napi structs for use
+ * @adapter: private struct
+ * helper function to napi_add each possible q_vector->napi
+ */
+static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+{
+        int q_idx, q_vectors;
+        int (*poll)(struct napi_struct *, int);
+
+        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                poll = &ixgbe_clean_rxonly;
+                /* Only enable as many vectors as we have rx queues. */
+                q_vectors = adapter->num_rx_queues;
+        } else {
+                poll = &ixgbe_poll;
+                /* only one q_vector for legacy modes */
+                q_vectors = 1;
+        }
+
+        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+                struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+                netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+        }
+}
+
+static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
+{
+        int q_idx;
+        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+        /* legacy and MSI only use one vector */
+        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+                q_vectors = 1;
+
+        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+                struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
+                if (!q_vector->rxr_count)
+                        continue;
+                netif_napi_del(&q_vector->napi);
+        }
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+        struct net_device *netdev = pci_get_drvdata(pdev);
+        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+        u32 err;
+
+        pci_set_power_state(pdev, PCI_D0);
+        pci_restore_state(pdev);
+        err = pci_enable_device(pdev);
+        if (err) {
+                printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
+                       "suspend\n");
+                return err;
+        }
+        pci_set_master(pdev);
+
+        pci_enable_wake(pdev, PCI_D3hot, 0);
+        pci_enable_wake(pdev, PCI_D3cold, 0);
+
+        err = ixgbe_init_interrupt_scheme(adapter);
+        if (err) {
+                printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
+                       "device\n");
+                return err;
+        }
+
+        ixgbe_napi_add_all(adapter);
+        ixgbe_reset(adapter);
+
+        if (netif_running(netdev)) {
+                err = ixgbe_open(adapter->netdev);
+                if (err)
+                        return err;
+        }
+
+        netif_device_attach(netdev);
+
+        return 0;
+}
+
+#endif /* CONFIG_PM */
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+        struct net_device *netdev = pci_get_drvdata(pdev);
+        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+        int retval = 0;
+#endif
+
+        netif_device_detach(netdev);
+
+        if (netif_running(netdev)) {
+                ixgbe_down(adapter);
+                ixgbe_free_irq(adapter);
+                ixgbe_free_all_tx_resources(adapter);
+                ixgbe_free_all_rx_resources(adapter);
+        }
+        ixgbe_reset_interrupt_capability(adapter);
+        ixgbe_napi_del_all(adapter);
+        kfree(adapter->tx_ring);
+        kfree(adapter->rx_ring);
+
+#ifdef CONFIG_PM
+        retval = pci_save_state(pdev);
+        if (retval)
+                return retval;
+#endif
+
+        pci_enable_wake(pdev, PCI_D3hot, 0);
+        pci_enable_wake(pdev, PCI_D3cold, 0);
+
+        ixgbe_release_hw_control(adapter);
+
+        pci_disable_device(pdev);
+
+        pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+        return 0;
+}
+
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+        ixgbe_suspend(pdev, PMSG_SUSPEND);
+}
+
+/**
  * ixgbe_update_stats - Update the board statistics counters.
  * @adapter: board private structure
  **/
@@ -2996,7 +3135,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
         /* Rx Errors */
         adapter->net_stats.rx_errors = adapter->stats.crcerrs +
                                        adapter->stats.rlec;
         adapter->net_stats.rx_dropped = 0;
         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3010,27 +3149,74 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 static void ixgbe_watchdog(unsigned long data)
 {
         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
-        struct net_device *netdev = adapter->netdev;
-        bool link_up;
-        u32 link_speed = 0;
+        struct ixgbe_hw *hw = &adapter->hw;
+
+        /* Do the watchdog outside of interrupt context due to the lovely
+         * delays that some of the newer hardware requires */
+        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+                /* Cause software interrupt to ensure rx rings are cleaned */
+                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+                        u32 eics =
+                         (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+                        IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
+                } else {
+                        /* For legacy and MSI interrupts don't set any bits that
+                         * are enabled for EIAM, because this operation would
+                         * set *both* EIMS and EICS for any bit in EIAM */
+                        IXGBE_WRITE_REG(hw, IXGBE_EICS,
+                                        (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+                }
+                /* Reset the timer */
+                mod_timer(&adapter->watchdog_timer,
+                          round_jiffies(jiffies + 2 * HZ));
+        }
 
-        adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
+        schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbe_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_watchdog_task(struct work_struct *work)
+{
+        struct ixgbe_adapter *adapter = container_of(work,
+                                                     struct ixgbe_adapter,
+                                                     watchdog_task);
+        struct net_device *netdev = adapter->netdev;
+        struct ixgbe_hw *hw = &adapter->hw;
+        u32 link_speed = adapter->link_speed;
+        bool link_up = adapter->link_up;
+
+        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+        if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+                if (link_up ||
+                    time_after(jiffies, (adapter->link_check_timeout +
+                                         IXGBE_TRY_LINK_TIMEOUT))) {
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+                        adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+                }
+                adapter->link_up = link_up;
+                adapter->link_speed = link_speed;
+        }
 
         if (link_up) {
                 if (!netif_carrier_ok(netdev)) {
-                        u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-                        u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
+                        u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                        u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
                         DPRINTK(LINK, INFO, "NIC Link is Up %s, "
                                 "Flow Control: %s\n",
                                 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                                  "10 Gbps" :
                                  (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
                                   "1 Gbps" : "unknown speed")),
                                 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
                                  (FLOW_RX ? "RX" :
                                   (FLOW_TX ? "TX" : "None"))));
 
                         netif_carrier_on(netdev);
                         netif_tx_wake_all_queues(netdev);
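
The restructuring above splits the watchdog in two: the timer callback only fires EICS and re-arms itself (it runs in timer context and must not sleep), while the link polling moves into a work item that may block on slow PHY reads. The division of labour, modeled without kernel APIs:

#include <stdio.h>

struct toy_adapter {
        int link_up;
        int work_scheduled;
};

static void watchdog_timer(struct toy_adapter *a)
{
        /* fast path: atomic context, just defer the slow work */
        a->work_scheduled = 1;          /* schedule_work(&watchdog_task) */
}

static void watchdog_task(struct toy_adapter *a)
{
        /* slow path: process context, free to sleep while checking link */
        a->link_up = 1;
}

int main(void)
{
        struct toy_adapter a = { 0, 0 };

        watchdog_timer(&a);
        if (a.work_scheduled)
                watchdog_task(&a);
        printf("link_up=%d\n", a.link_up);
        return 0;
}
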
@@ -3039,6 +3225,8 @@ static void ixgbe_watchdog(unsigned long data)
                         adapter->detect_tx_hung = true;
                 }
         } else {
+                adapter->link_up = false;
+                adapter->link_speed = 0;
                 if (netif_carrier_ok(netdev)) {
                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
                         netif_carrier_off(netdev);
@@ -3047,36 +3235,19 @@ static void ixgbe_watchdog(unsigned long data)
         }
 
         ixgbe_update_stats(adapter);
-
-        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-                /* Cause software interrupt to ensure rx rings are cleaned */
-                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                        u32 eics =
-                         (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
-                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
-                } else {
-                        /* for legacy and MSI interrupts don't set any bits that
-                         * are enabled for EIAM, because this operation would
-                         * set *both* EIMS and EICS for any bit in EIAM */
-                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                        (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-                }
-                /* Reset the timer */
-                mod_timer(&adapter->watchdog_timer,
-                          round_jiffies(jiffies + 2 * HZ));
-        }
+        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                      u32 tx_flags, u8 *hdr_len)
 {
         struct ixgbe_adv_tx_context_desc *context_desc;
         unsigned int i;
         int err;
         struct ixgbe_tx_buffer *tx_buffer_info;
-        u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-        u32 mss_l4len_idx = 0, l4len;
+        u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+        u32 mss_l4len_idx, l4len;
 
         if (skb_is_gso(skb)) {
                 if (skb_header_cloned(skb)) {
@@ -3092,16 +3263,16 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                         iph->tot_len = 0;
                         iph->check = 0;
                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                  iph->daddr, 0,
                                                                  IPPROTO_TCP,
                                                                  0);
                         adapter->hw_tso_ctxt++;
                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
                         ipv6_hdr(skb)->payload_len = 0;
                         tcp_hdr(skb)->check =
                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr,
                                              0, IPPROTO_TCP, 0);
                         adapter->hw_tso6_ctxt++;
                 }
 
@@ -3115,7 +3286,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                 vlan_macip_lens |=
                     (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
                                     IXGBE_ADVTXD_MACLEN_SHIFT);
                 *hdr_len += skb_network_offset(skb);
                 vlan_macip_lens |=
                     (skb_transport_header(skb) - skb_network_header(skb));
@@ -3125,8 +3296,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                 context_desc->seqnum_seed = 0;
 
                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-                type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+                type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                 if (skb->protocol == htons(ETH_P_IP))
                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3134,7 +3305,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 
                 /* MSS L4LEN IDX */
-                mss_l4len_idx |=
+                mss_l4len_idx =
                     (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
                 /* use index 1 for TSO */
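
The two small changes above (|= becoming = for type_tucmd_mlhl and mss_l4len_idx) let those variables start uninitialized and be assigned in one step, since each word is fully rebuilt here anyway. The field packing itself is plain shift-and-or; the shift widths below are assumed for illustration, the real ones live in ixgbe_type.h:

#include <stdio.h>

#define ADVTXD_MSS_SHIFT        16      /* assumed, illustrative */
#define ADVTXD_L4LEN_SHIFT      8       /* assumed, illustrative */

int main(void)
{
        unsigned int mss = 1448, l4len = 20;

        /* first store uses '=', later fields are OR'd in */
        unsigned int mss_l4len_idx = mss << ADVTXD_MSS_SHIFT;
        mss_l4len_idx |= l4len << ADVTXD_L4LEN_SHIFT;

        printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx);
        return 0;
}
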
@@ -3155,8 +3326,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3155} 3326}
3156 3327
3157static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 3328static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3158 struct ixgbe_ring *tx_ring, 3329 struct ixgbe_ring *tx_ring,
3159 struct sk_buff *skb, u32 tx_flags) 3330 struct sk_buff *skb, u32 tx_flags)
3160{ 3331{
3161 struct ixgbe_adv_tx_context_desc *context_desc; 3332 struct ixgbe_adv_tx_context_desc *context_desc;
3162 unsigned int i; 3333 unsigned int i;
@@ -3173,16 +3344,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3173 vlan_macip_lens |= 3344 vlan_macip_lens |=
3174 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3345 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3175 vlan_macip_lens |= (skb_network_offset(skb) << 3346 vlan_macip_lens |= (skb_network_offset(skb) <<
3176 IXGBE_ADVTXD_MACLEN_SHIFT); 3347 IXGBE_ADVTXD_MACLEN_SHIFT);
3177 if (skb->ip_summed == CHECKSUM_PARTIAL) 3348 if (skb->ip_summed == CHECKSUM_PARTIAL)
3178 vlan_macip_lens |= (skb_transport_header(skb) - 3349 vlan_macip_lens |= (skb_transport_header(skb) -
3179 skb_network_header(skb)); 3350 skb_network_header(skb));
3180 3351
3181 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3352 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3182 context_desc->seqnum_seed = 0; 3353 context_desc->seqnum_seed = 0;
3183 3354
3184 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3355 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3185 IXGBE_ADVTXD_DTYP_CTXT); 3356 IXGBE_ADVTXD_DTYP_CTXT);
3186 3357
3187 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3358 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3188 switch (skb->protocol) { 3359 switch (skb->protocol) {
@@ -3190,16 +3361,14 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3190 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3361 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3191 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3362 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3192 type_tucmd_mlhl |= 3363 type_tucmd_mlhl |=
3193 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3364 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3194 break; 3365 break;
3195
3196 case __constant_htons(ETH_P_IPV6): 3366 case __constant_htons(ETH_P_IPV6):
3197 /* XXX what about other V6 headers?? */ 3367 /* XXX what about other V6 headers?? */
3198 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3368 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3199 type_tucmd_mlhl |= 3369 type_tucmd_mlhl |=
3200 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3370 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3201 break; 3371 break;
3202
3203 default: 3372 default:
3204 if (unlikely(net_ratelimit())) { 3373 if (unlikely(net_ratelimit())) {
3205 DPRINTK(PROBE, WARNING, 3374 DPRINTK(PROBE, WARNING,
@@ -3216,6 +3385,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3216 3385
3217 tx_buffer_info->time_stamp = jiffies; 3386 tx_buffer_info->time_stamp = jiffies;
3218 tx_buffer_info->next_to_watch = i; 3387 tx_buffer_info->next_to_watch = i;
3388
3219 adapter->hw_csum_tx_good++; 3389 adapter->hw_csum_tx_good++;
3220 i++; 3390 i++;
3221 if (i == tx_ring->count) 3391 if (i == tx_ring->count)
@@ -3224,12 +3394,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3224 3394
3225 return true; 3395 return true;
3226 } 3396 }
3397
3227 return false; 3398 return false;
3228} 3399}
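
ixgbe_tx_csum returns true only when it actually consumed a context descriptor, which is how the caller decides whether to set IXGBE_TX_FLAGS_CSUM. The protocol switch above reduces to a small mapping from (EtherType, L4 protocol) to TUCMD bits; a sketch of that mapping, with stand-in bit values where the real ones live in ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define IPPROTO_TCP 6

/* Illustrative stand-ins for the IXGBE_ADVTXD_TUCMD_* values. */
#define TUCMD_IPV4    (1u << 0)
#define TUCMD_L4T_TCP (1u << 1)

static uint32_t tucmd_for(uint16_t eth_proto, uint8_t l4_proto)
{
	uint32_t tucmd = 0;

	switch (eth_proto) {
	case ETH_P_IP:
		tucmd |= TUCMD_IPV4;
		if (l4_proto == IPPROTO_TCP)
			tucmd |= TUCMD_L4T_TCP;
		break;
	case ETH_P_IPV6:
		/* mirrors the driver's "what about other V6 headers?"
		 * caveat: only a plain TCP next-header is recognized */
		if (l4_proto == IPPROTO_TCP)
			tucmd |= TUCMD_L4T_TCP;
		break;
	default:
		break;	/* unknown protocol: no L4 offload bits set */
	}
	return tucmd;
}

int main(void)
{
	printf("ipv4/tcp tucmd=0x%x\n", tucmd_for(ETH_P_IP, IPPROTO_TCP));
	return 0;
}
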
3229 3400
3230static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 3401static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3231 struct ixgbe_ring *tx_ring, 3402 struct ixgbe_ring *tx_ring,
3232 struct sk_buff *skb, unsigned int first) 3403 struct sk_buff *skb, unsigned int first)
3233{ 3404{
3234 struct ixgbe_tx_buffer *tx_buffer_info; 3405 struct ixgbe_tx_buffer *tx_buffer_info;
3235 unsigned int len = skb->len; 3406 unsigned int len = skb->len;
@@ -3247,8 +3418,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3247 3418
3248 tx_buffer_info->length = size; 3419 tx_buffer_info->length = size;
3249 tx_buffer_info->dma = pci_map_single(adapter->pdev, 3420 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3250 skb->data + offset, 3421 skb->data + offset,
3251 size, PCI_DMA_TODEVICE); 3422 size, PCI_DMA_TODEVICE);
3252 tx_buffer_info->time_stamp = jiffies; 3423 tx_buffer_info->time_stamp = jiffies;
3253 tx_buffer_info->next_to_watch = i; 3424 tx_buffer_info->next_to_watch = i;
3254 3425
@@ -3273,9 +3444,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3273 3444
3274 tx_buffer_info->length = size; 3445 tx_buffer_info->length = size;
3275 tx_buffer_info->dma = pci_map_page(adapter->pdev, 3446 tx_buffer_info->dma = pci_map_page(adapter->pdev,
3276 frag->page, 3447 frag->page,
3277 offset, 3448 offset,
3278 size, PCI_DMA_TODEVICE); 3449 size,
3450 PCI_DMA_TODEVICE);
3279 tx_buffer_info->time_stamp = jiffies; 3451 tx_buffer_info->time_stamp = jiffies;
3280 tx_buffer_info->next_to_watch = i; 3452 tx_buffer_info->next_to_watch = i;
3281 3453
@@ -3298,8 +3470,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3298} 3470}
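
ixgbe_tx_map walks the linear head and then each page fragment, slicing every buffer into chunks of at most IXGBE_MAX_DATA_PER_TXD before pci_map_single()/pci_map_page(). A sketch of that slicing loop, assuming the 16 KB per-descriptor limit from ixgbe.h:

#include <stddef.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD (1 << 14)	/* 16 KB per descriptor */

/* Return how many descriptors one buffer of 'len' bytes consumes and
 * print the chunk layout the mapping loop would produce. */
static int map_buffer(size_t len)
{
	size_t offset = 0;
	int descs = 0;

	while (len) {
		size_t size = len < MAX_DATA_PER_TXD ? len : MAX_DATA_PER_TXD;

		printf("  desc %d: offset=%zu size=%zu\n", descs, offset, size);
		/* the driver would DMA-map this chunk here */
		offset += size;
		len -= size;
		descs++;
	}
	return descs;
}

int main(void)
{
	printf("used %d descriptors\n", map_buffer(40000));
	return 0;
}
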
3299 3471
3300static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 3472static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3301 struct ixgbe_ring *tx_ring, 3473 struct ixgbe_ring *tx_ring,
3302 int tx_flags, int count, u32 paylen, u8 hdr_len) 3474 int tx_flags, int count, u32 paylen, u8 hdr_len)
3303{ 3475{
3304 union ixgbe_adv_tx_desc *tx_desc = NULL; 3476 union ixgbe_adv_tx_desc *tx_desc = NULL;
3305 struct ixgbe_tx_buffer *tx_buffer_info; 3477 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -3318,17 +3490,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3318 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3490 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3319 3491
3320 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3492 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3321 IXGBE_ADVTXD_POPTS_SHIFT; 3493 IXGBE_ADVTXD_POPTS_SHIFT;
3322 3494
3323 /* use index 1 context for tso */ 3495 /* use index 1 context for tso */
3324 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3496 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3325 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3497 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3326 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3498 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3327 IXGBE_ADVTXD_POPTS_SHIFT; 3499 IXGBE_ADVTXD_POPTS_SHIFT;
3328 3500
3329 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3501 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3330 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3502 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3331 IXGBE_ADVTXD_POPTS_SHIFT; 3503 IXGBE_ADVTXD_POPTS_SHIFT;
3332 3504
3333 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3505 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3334 3506
@@ -3338,9 +3510,8 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3338 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3510 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3339 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3511 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3340 tx_desc->read.cmd_type_len = 3512 tx_desc->read.cmd_type_len =
3341 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3513 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3342 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3514 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3343
3344 i++; 3515 i++;
3345 if (i == tx_ring->count) 3516 if (i == tx_ring->count)
3346 i = 0; 3517 i = 0;
@@ -3361,7 +3532,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3361} 3532}
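
ixgbe_tx_queue publishes the filled descriptors by advancing next_to_use (with wraparound, as above) and writing the new index to the hardware tail register; the descriptor stores must be visible to the device before the doorbell, which is what the driver's wmb() before the tail write guarantees. A stub-level sketch of the producer side:

#include <stdint.h>

struct desc { uint64_t addr; uint32_t cmd; uint32_t sts; };

struct ring {
	struct desc *descs;
	unsigned int count;
	unsigned int next_to_use;
	volatile uint32_t *tail;	/* memory-mapped tail register */
};

/* Placeholder for the kernel's wmb(): descriptor stores must reach
 * memory before the device sees the new tail. */
static void write_barrier(void) { __sync_synchronize(); }

static void post_desc(struct ring *r, uint64_t dma, uint32_t cmd)
{
	unsigned int i = r->next_to_use;

	r->descs[i].addr = dma;
	r->descs[i].cmd = cmd;

	i++;
	if (i == r->count)		/* wrap, as the driver does */
		i = 0;
	r->next_to_use = i;

	write_barrier();
	*r->tail = i;			/* doorbell: device fetches up to here */
}
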
3362 3533
3363static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 3534static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3364 struct ixgbe_ring *tx_ring, int size) 3535 struct ixgbe_ring *tx_ring, int size)
3365{ 3536{
3366 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3537 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3367 3538
@@ -3377,61 +3548,52 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3377 return -EBUSY; 3548 return -EBUSY;
3378 3549
3379 /* A reprieve! - use start_queue because it doesn't call schedule */ 3550 /* A reprieve! - use start_queue because it doesn't call schedule */
3380 netif_wake_subqueue(netdev, tx_ring->queue_index); 3551 netif_start_subqueue(netdev, tx_ring->queue_index);
3381 ++adapter->restart_queue; 3552 ++adapter->restart_queue;
3382 return 0; 3553 return 0;
3383} 3554}
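
The stop-then-recheck dance here avoids a race with the Tx cleanup path: stop the queue first, make the stop globally visible, then re-read the free-descriptor count; if cleanup freed space in that window, restart immediately with netif_start_subqueue(), which (unlike the wake variant the old code used) never schedules. A sketch of the pattern with stubbed queue operations:

#include <stdio.h>

static int queue_stopped;
static int unused_descs = 10;	/* imagine cleanup updating this concurrently */

static void stop_queue(void)  { queue_stopped = 1; }
static void start_queue(void) { queue_stopped = 0; }
static void barrier(void)     { __sync_synchronize(); /* kernel: smp_mb() */ }

static int maybe_stop_tx(int size)
{
	if (unused_descs >= size)
		return 0;		/* fast path: plenty of room */

	stop_queue();
	barrier();			/* publish the stop before re-checking */

	/* cleanup may have freed descriptors between check and stop */
	if (unused_descs < size)
		return -1;		/* genuinely full: stay stopped */

	start_queue();			/* a reprieve - no reschedule */
	return 0;
}

int main(void)
{
	printf("%d\n", maybe_stop_tx(4));
	return 0;
}
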
3384 3555
3385static int ixgbe_maybe_stop_tx(struct net_device *netdev, 3556static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3386 struct ixgbe_ring *tx_ring, int size) 3557 struct ixgbe_ring *tx_ring, int size)
3387{ 3558{
3388 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3559 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3389 return 0; 3560 return 0;
3390 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 3561 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3391} 3562}
3392 3563
3393
3394static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3564static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3395{ 3565{
3396 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3566 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3397 struct ixgbe_ring *tx_ring; 3567 struct ixgbe_ring *tx_ring;
3398 unsigned int len = skb->len;
3399 unsigned int first; 3568 unsigned int first;
3400 unsigned int tx_flags = 0; 3569 unsigned int tx_flags = 0;
3401 u8 hdr_len = 0; 3570 u8 hdr_len = 0;
3402 int r_idx = 0, tso; 3571 int r_idx = 0, tso;
3403 unsigned int mss = 0;
3404 int count = 0; 3572 int count = 0;
3405 unsigned int f; 3573 unsigned int f;
3406 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3574
3407 len -= skb->data_len;
3408 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 3575 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3409 tx_ring = &adapter->tx_ring[r_idx]; 3576 tx_ring = &adapter->tx_ring[r_idx];
3410 3577
3411 3578 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3412 if (skb->len <= 0) { 3579 tx_flags |= vlan_tx_tag_get(skb);
3413 dev_kfree_skb(skb); 3580 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3414 return NETDEV_TX_OK; 3581 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3415 } 3582 }
3416 mss = skb_shinfo(skb)->gso_size; 3583 /* three things can cause us to need a context descriptor */
3417 3584 if (skb_is_gso(skb) ||
3418 if (mss) 3585 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3419 count++; 3586 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3420 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3421 count++; 3587 count++;
3422 3588
3423 count += TXD_USE_COUNT(len); 3589 count += TXD_USE_COUNT(skb_headlen(skb));
3424 for (f = 0; f < nr_frags; f++) 3590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3425 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3591 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3426 3592
3427 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 3593 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3428 adapter->tx_busy++; 3594 adapter->tx_busy++;
3429 return NETDEV_TX_BUSY; 3595 return NETDEV_TX_BUSY;
3430 } 3596 }
3431 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3432 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3433 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3434 }
3435 3597
3436 if (skb->protocol == htons(ETH_P_IP)) 3598 if (skb->protocol == htons(ETH_P_IP))
3437 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3599 tx_flags |= IXGBE_TX_FLAGS_IPV4;
@@ -3445,12 +3607,12 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3445 if (tso) 3607 if (tso)
3446 tx_flags |= IXGBE_TX_FLAGS_TSO; 3608 tx_flags |= IXGBE_TX_FLAGS_TSO;
3447 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 3609 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3448 (skb->ip_summed == CHECKSUM_PARTIAL)) 3610 (skb->ip_summed == CHECKSUM_PARTIAL))
3449 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3611 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3450 3612
3451 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 3613 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3452 ixgbe_tx_map(adapter, tx_ring, skb, first), 3614 ixgbe_tx_map(adapter, tx_ring, skb, first),
3453 skb->len, hdr_len); 3615 skb->len, hdr_len);
3454 3616
3455 netdev->trans_start = jiffies; 3617 netdev->trans_start = jiffies;
3456 3618
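
The rewritten ixgbe_xmit_frame budgets descriptors up front: at most one context descriptor (needed whenever the frame is GSO, carries CHECKSUM_PARTIAL, or has a VLAN tag), plus TXD_USE_COUNT() worth for the linear head and for every fragment; only then does it call ixgbe_maybe_stop_tx() with the total. A sketch of the estimate, assuming the 16 KB per-descriptor limit noted earlier:

#include <stddef.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD (1 << 14)
#define TXD_USE_COUNT(len) (((len) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

static size_t tx_desc_budget(size_t headlen, const size_t *frag_sizes,
			     int nr_frags, int needs_context)
{
	size_t count = needs_context ? 1 : 0;
	int f;

	count += TXD_USE_COUNT(headlen);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_sizes[f]);
	return count;
}

int main(void)
{
	size_t frags[2] = { 4096, 20000 };

	/* 1 context + 1 head + 1 + 2 frag descriptors = 5 */
	printf("%zu descriptors\n", tx_desc_budget(1500, frags, 2, 1));
	return 0;
}
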
@@ -3484,15 +3646,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3484static int ixgbe_set_mac(struct net_device *netdev, void *p) 3646static int ixgbe_set_mac(struct net_device *netdev, void *p)
3485{ 3647{
3486 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3648 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3649 struct ixgbe_hw *hw = &adapter->hw;
3487 struct sockaddr *addr = p; 3650 struct sockaddr *addr = p;
3488 3651
3489 if (!is_valid_ether_addr(addr->sa_data)) 3652 if (!is_valid_ether_addr(addr->sa_data))
3490 return -EADDRNOTAVAIL; 3653 return -EADDRNOTAVAIL;
3491 3654
3492 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3655 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3493 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3656 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3494 3657
3495 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3658 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3496 3659
3497 return 0; 3660 return 0;
3498} 3661}
@@ -3516,28 +3679,19 @@ static void ixgbe_netpoll(struct net_device *netdev)
3516#endif 3679#endif
3517 3680
3518/** 3681/**
3519 * ixgbe_napi_add_all - prep napi structs for use 3682 * ixgbe_link_config - set up initial link with default speed and duplex
3520 * @adapter: private struct 3683 * @hw: pointer to private hardware struct
3521 * helper function to napi_add each possible q_vector->napi 3684 *
3522 */ 3685 * Returns 0 on success, negative on failure
3523static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3686 **/
3687static int ixgbe_link_config(struct ixgbe_hw *hw)
3524{ 3688{
3525 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3689 u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
3526 int (*poll)(struct napi_struct *, int);
3527 3690
3528 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3691 /* must always autoneg for both 1G and 10G link */
3529 poll = &ixgbe_clean_rxonly; 3692 hw->mac.autoneg = true;
3530 } else {
3531 poll = &ixgbe_poll;
3532 /* only one q_vector for legacy modes */
3533 q_vectors = 1;
3534 }
3535 3693
3536 for (i = 0; i < q_vectors; i++) { 3694 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3537 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3538 netif_napi_add(adapter->netdev, &q_vector->napi,
3539 (*poll), 64);
3540 }
3541} 3695}
3542 3696
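
This is the pattern the whole patch moves toward: instead of calling ixgbe_setup_link_speed() and friends directly, the driver fills per-MAC/PHY/EEPROM ops tables at probe time (the memcpy of ii->mac_ops and company below) and dispatches through hw->mac.ops.*, so 82598-specific and generic implementations can coexist behind one interface. A stripped-down sketch of that indirection:

#include <stdio.h>

struct hw;	/* forward declaration, as in ixgbe_type.h */

struct mac_ops {
	int (*setup_link_speed)(struct hw *hw, unsigned int speed,
				int autoneg, int wait);
};

struct hw {
	struct mac_ops ops;
};

/* One possible backend; a different MAC would supply its own. */
static int setup_link_speed_82598(struct hw *hw, unsigned int speed,
				  int autoneg, int wait)
{
	printf("82598: speed=%u autoneg=%d wait=%d\n", speed, autoneg, wait);
	return 0;
}

static int link_config(struct hw *hw)
{
	/* always autoneg for both 1G and 10G, per the driver comment */
	return hw->ops.setup_link_speed(hw, 10000, 1, 1);
}

int main(void)
{
	struct hw hw = { .ops = { .setup_link_speed = setup_link_speed_82598 } };

	return link_config(&hw);
}
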
3543/** 3697/**
@@ -3552,17 +3706,16 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3552 * and a hardware reset occur. 3706 * and a hardware reset occur.
3553 **/ 3707 **/
3554static int __devinit ixgbe_probe(struct pci_dev *pdev, 3708static int __devinit ixgbe_probe(struct pci_dev *pdev,
3555 const struct pci_device_id *ent) 3709 const struct pci_device_id *ent)
3556{ 3710{
3557 struct net_device *netdev; 3711 struct net_device *netdev;
3558 struct ixgbe_adapter *adapter = NULL; 3712 struct ixgbe_adapter *adapter = NULL;
3559 struct ixgbe_hw *hw; 3713 struct ixgbe_hw *hw;
3560 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 3714 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3561 unsigned long mmio_start, mmio_len;
3562 static int cards_found; 3715 static int cards_found;
3563 int i, err, pci_using_dac; 3716 int i, err, pci_using_dac;
3564 u16 link_status, link_speed, link_width; 3717 u16 link_status, link_speed, link_width;
3565 u32 part_num; 3718 u32 part_num, eec;
3566 3719
3567 err = pci_enable_device(pdev); 3720 err = pci_enable_device(pdev);
3568 if (err) 3721 if (err)
@@ -3577,7 +3730,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3577 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3730 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3578 if (err) { 3731 if (err) {
3579 dev_err(&pdev->dev, "No usable DMA " 3732 dev_err(&pdev->dev, "No usable DMA "
3580 "configuration, aborting\n"); 3733 "configuration, aborting\n");
3581 goto err_dma; 3734 goto err_dma;
3582 } 3735 }
3583 } 3736 }
@@ -3610,10 +3763,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3610 hw->back = adapter; 3763 hw->back = adapter;
3611 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3764 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3612 3765
3613 mmio_start = pci_resource_start(pdev, 0); 3766 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3614 mmio_len = pci_resource_len(pdev, 0); 3767 pci_resource_len(pdev, 0));
3615
3616 hw->hw_addr = ioremap(mmio_start, mmio_len);
3617 if (!hw->hw_addr) { 3768 if (!hw->hw_addr) {
3618 err = -EIO; 3769 err = -EIO;
3619 goto err_ioremap; 3770 goto err_ioremap;
@@ -3643,22 +3794,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3643#endif 3794#endif
3644 strcpy(netdev->name, pci_name(pdev)); 3795 strcpy(netdev->name, pci_name(pdev));
3645 3796
3646 netdev->mem_start = mmio_start;
3647 netdev->mem_end = mmio_start + mmio_len;
3648
3649 adapter->bd_number = cards_found; 3797 adapter->bd_number = cards_found;
3650 3798
3651 /* PCI config space info */
3652 hw->vendor_id = pdev->vendor;
3653 hw->device_id = pdev->device;
3654 hw->revision_id = pdev->revision;
3655 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3656 hw->subsystem_device_id = pdev->subsystem_device;
3657
3658 /* Setup hw api */ 3799 /* Setup hw api */
3659 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3800 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3660 hw->mac.type = ii->mac; 3801 hw->mac.type = ii->mac;
3661 3802
3803 /* EEPROM */
3804 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
3805 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 3806 /* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit bang */
3807 if (!(eec & (1 << 8)))
3808 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
3809
3810 /* PHY */
3811 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
3812 /* phy->sfp_type = ixgbe_sfp_type_unknown; */
3813
3662 err = ii->get_invariants(hw); 3814 err = ii->get_invariants(hw);
3663 if (err) 3815 if (err)
3664 goto err_hw_init; 3816 goto err_hw_init;
@@ -3668,11 +3820,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3668 if (err) 3820 if (err)
3669 goto err_sw_init; 3821 goto err_sw_init;
3670 3822
3823 /* reset_hw fills in the perm_addr as well */
3824 err = hw->mac.ops.reset_hw(hw);
3825 if (err) {
3826 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
3827 goto err_sw_init;
3828 }
3829
3671 netdev->features = NETIF_F_SG | 3830 netdev->features = NETIF_F_SG |
3672 NETIF_F_IP_CSUM | 3831 NETIF_F_IP_CSUM |
3673 NETIF_F_HW_VLAN_TX | 3832 NETIF_F_HW_VLAN_TX |
3674 NETIF_F_HW_VLAN_RX | 3833 NETIF_F_HW_VLAN_RX |
3675 NETIF_F_HW_VLAN_FILTER; 3834 NETIF_F_HW_VLAN_FILTER;
3676 3835
3677 netdev->features |= NETIF_F_IPV6_CSUM; 3836 netdev->features |= NETIF_F_IPV6_CSUM;
3678 netdev->features |= NETIF_F_TSO; 3837 netdev->features |= NETIF_F_TSO;
@@ -3688,7 +3847,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3688 netdev->features |= NETIF_F_HIGHDMA; 3847 netdev->features |= NETIF_F_HIGHDMA;
3689 3848
3690 /* make sure the EEPROM is good */ 3849 /* make sure the EEPROM is good */
3691 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3850 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
3692 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 3851 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3693 err = -EIO; 3852 err = -EIO;
3694 goto err_eeprom; 3853 goto err_eeprom;
@@ -3697,7 +3856,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3697 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 3856 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3698 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 3857 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3699 3858
3700 if (ixgbe_validate_mac_addr(netdev->dev_addr)) { 3859 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
3860 dev_err(&pdev->dev, "invalid MAC address\n");
3701 err = -EIO; 3861 err = -EIO;
3702 goto err_eeprom; 3862 goto err_eeprom;
3703 } 3863 }
@@ -3707,6 +3867,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3707 adapter->watchdog_timer.data = (unsigned long)adapter; 3867 adapter->watchdog_timer.data = (unsigned long)adapter;
3708 3868
3709 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 3869 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3870 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
3710 3871
3711 err = ixgbe_init_interrupt_scheme(adapter); 3872 err = ixgbe_init_interrupt_scheme(adapter);
3712 if (err) 3873 if (err)
@@ -3717,32 +3878,39 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3717 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 3878 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3718 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 3879 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3719 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 3880 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3720 "%02x:%02x:%02x:%02x:%02x:%02x\n", 3881 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3721 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3882 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3722 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3883 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3723 "Unknown"), 3884 "Unknown"),
3724 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3885 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3725 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3886 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3726 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3887 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3727 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 3888 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3728 "Unknown"), 3889 "Unknown"),
3729 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3890 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3730 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3891 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3731 ixgbe_read_part_num(hw, &part_num); 3892 ixgbe_read_pba_num_generic(hw, &part_num);
3732 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3893 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3733 hw->mac.type, hw->phy.type, 3894 hw->mac.type, hw->phy.type,
3734 (part_num >> 8), (part_num & 0xff)); 3895 (part_num >> 8), (part_num & 0xff));
3735 3896
3736 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 3897 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3737 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 3898 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3738 "this card is not sufficient for optimal " 3899 "this card is not sufficient for optimal "
3739 "performance.\n"); 3900 "performance.\n");
3740 dev_warn(&pdev->dev, "For optimal performance a x8 " 3901 dev_warn(&pdev->dev, "For optimal performance a x8 "
3741 "PCI-Express slot is required.\n"); 3902 "PCI-Express slot is required.\n");
3742 } 3903 }
3743 3904
3744 /* reset the hardware with the new settings */ 3905 /* reset the hardware with the new settings */
3745 ixgbe_start_hw(hw); 3906 hw->mac.ops.start_hw(hw);
3907
3908 /* link_config depends on start_hw being called at least once */
3909 err = ixgbe_link_config(hw);
3910 if (err) {
3911 dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
3912 goto err_register;
3913 }
3746 3914
3747 netif_carrier_off(netdev); 3915 netif_carrier_off(netdev);
3748 netif_tx_stop_all_queues(netdev); 3916 netif_tx_stop_all_queues(netdev);
@@ -3754,7 +3922,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3754 if (err) 3922 if (err)
3755 goto err_register; 3923 goto err_register;
3756 3924
3757#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3925#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3758 if (dca_add_requester(&pdev->dev) == 0) { 3926 if (dca_add_requester(&pdev->dev) == 0) {
3759 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 3927 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3760 /* always use CB2 mode, difference is masked 3928 /* always use CB2 mode, difference is masked
@@ -3804,7 +3972,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3804 3972
3805 flush_scheduled_work(); 3973 flush_scheduled_work();
3806 3974
3807#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3975#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3808 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3976 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3809 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 3977 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3810 dca_remove_requester(&pdev->dev); 3978 dca_remove_requester(&pdev->dev);
@@ -3822,6 +3990,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3822 pci_release_regions(pdev); 3990 pci_release_regions(pdev);
3823 3991
3824 DPRINTK(PROBE, INFO, "complete\n"); 3992 DPRINTK(PROBE, INFO, "complete\n");
3993 ixgbe_napi_del_all(adapter);
3825 kfree(adapter->tx_ring); 3994 kfree(adapter->tx_ring);
3826 kfree(adapter->rx_ring); 3995 kfree(adapter->rx_ring);
3827 3996
@@ -3839,7 +4008,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3839 * this device has been detected. 4008 * this device has been detected.
3840 */ 4009 */
3841static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 4010static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3842 pci_channel_state_t state) 4011 pci_channel_state_t state)
3843{ 4012{
3844 struct net_device *netdev = pci_get_drvdata(pdev); 4013 struct net_device *netdev = pci_get_drvdata(pdev);
3845 struct ixgbe_adapter *adapter = netdev->priv; 4014 struct ixgbe_adapter *adapter = netdev->priv;
@@ -3850,7 +4019,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3850 ixgbe_down(adapter); 4019 ixgbe_down(adapter);
3851 pci_disable_device(pdev); 4020 pci_disable_device(pdev);
3852 4021
3853 /* Request a slot slot reset. */ 4022 /* Request a slot reset. */
3854 return PCI_ERS_RESULT_NEED_RESET; 4023 return PCI_ERS_RESULT_NEED_RESET;
3855} 4024}
3856 4025
@@ -3867,7 +4036,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3867 4036
3868 if (pci_enable_device(pdev)) { 4037 if (pci_enable_device(pdev)) {
3869 DPRINTK(PROBE, ERR, 4038 DPRINTK(PROBE, ERR,
3870 "Cannot re-enable PCI device after reset.\n"); 4039 "Cannot re-enable PCI device after reset.\n");
3871 return PCI_ERS_RESULT_DISCONNECT; 4040 return PCI_ERS_RESULT_DISCONNECT;
3872 } 4041 }
3873 pci_set_master(pdev); 4042 pci_set_master(pdev);
@@ -3901,7 +4070,6 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
3901 } 4070 }
3902 4071
3903 netif_device_attach(netdev); 4072 netif_device_attach(netdev);
3904
3905} 4073}
3906 4074
3907static struct pci_error_handlers ixgbe_err_handler = { 4075static struct pci_error_handlers ixgbe_err_handler = {
@@ -3937,13 +4105,14 @@ static int __init ixgbe_init_module(void)
3937 4105
3938 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 4106 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
3939 4107
3940#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 4108#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3941 dca_register_notify(&dca_notifier); 4109 dca_register_notify(&dca_notifier);
3942 4110
3943#endif 4111#endif
3944 ret = pci_register_driver(&ixgbe_driver); 4112 ret = pci_register_driver(&ixgbe_driver);
3945 return ret; 4113 return ret;
3946} 4114}
4115
3947module_init(ixgbe_init_module); 4116module_init(ixgbe_init_module);
3948 4117
3949/** 4118/**
@@ -3954,20 +4123,20 @@ module_init(ixgbe_init_module);
3954 **/ 4123 **/
3955static void __exit ixgbe_exit_module(void) 4124static void __exit ixgbe_exit_module(void)
3956{ 4125{
3957#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 4126#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3958 dca_unregister_notify(&dca_notifier); 4127 dca_unregister_notify(&dca_notifier);
3959#endif 4128#endif
3960 pci_unregister_driver(&ixgbe_driver); 4129 pci_unregister_driver(&ixgbe_driver);
3961} 4130}
3962 4131
3963#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 4132#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3964static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 4133static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
3965 void *p) 4134 void *p)
3966{ 4135{
3967 int ret_val; 4136 int ret_val;
3968 4137
3969 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 4138 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
3970 __ixgbe_notify_dca); 4139 __ixgbe_notify_dca);
3971 4140
3972 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 4141 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3973} 4142}
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931ae823..764035a8c9a1 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -33,32 +32,36 @@
33#include "ixgbe_common.h" 32#include "ixgbe_common.h"
34#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
35 34
35static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
38static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
39static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
40 u32 device_type, u16 phy_data);
41 38
42/** 39/**
43 * ixgbe_identify_phy - Get physical layer module 40 * ixgbe_identify_phy_generic - Get physical layer module
44 * @hw: pointer to hardware structure 41 * @hw: pointer to hardware structure
45 * 42 *
46 * Determines the physical layer module found on the current adapter. 43 * Determines the physical layer module found on the current adapter.
47 **/ 44 **/
48s32 ixgbe_identify_phy(struct ixgbe_hw *hw) 45s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
49{ 46{
50 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 47 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
51 u32 phy_addr; 48 u32 phy_addr;
52 49
53 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 50 if (hw->phy.type == ixgbe_phy_unknown) {
54 if (ixgbe_validate_phy_addr(hw, phy_addr)) { 51 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
55 hw->phy.addr = phy_addr; 52 if (ixgbe_validate_phy_addr(hw, phy_addr)) {
56 ixgbe_get_phy_id(hw); 53 hw->phy.addr = phy_addr;
57 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); 54 ixgbe_get_phy_id(hw);
58 status = 0; 55 hw->phy.type =
59 break; 56 ixgbe_get_phy_type_from_id(hw->phy.id);
57 status = 0;
58 break;
59 }
60 } 60 }
61 } else {
62 status = 0;
61 } 63 }
64
62 return status; 65 return status;
63} 66}
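
ixgbe_identify_phy_generic now skips the scan entirely when the PHY type is already known; otherwise it probes all 32 MDIO addresses and accepts the first one whose ID register reads back as something other than all-zeros or all-ones. A sketch of the scan against a stubbed MDIO bus:

#include <stdint.h>
#include <stdio.h>

#define MAX_PHY_ADDR 32

/* Stub MDIO read: pretend a PHY answers at address 7. */
static uint16_t mdio_read_id(uint32_t phy_addr)
{
	return phy_addr == 7 ? 0x1410 : 0xFFFF;	/* 0xFFFF = no device */
}

static int identify_phy(uint32_t *found_addr)
{
	uint32_t addr;

	for (addr = 0; addr < MAX_PHY_ADDR; addr++) {
		uint16_t id = mdio_read_id(addr);

		if (id != 0xFFFF && id != 0x0) {
			*found_addr = addr;
			return 0;
		}
	}
	return -1;	/* IXGBE_ERR_PHY_ADDR_INVALID in the driver */
}

int main(void)
{
	uint32_t addr;

	if (identify_phy(&addr) == 0)
		printf("PHY at address %u\n", addr);
	return 0;
}
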
64 67
@@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
73 bool valid = false; 76 bool valid = false;
74 77
75 hw->phy.addr = phy_addr; 78 hw->phy.addr = phy_addr;
76 ixgbe_read_phy_reg(hw, 79 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
77 IXGBE_MDIO_PHY_ID_HIGH, 80 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
78 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
79 &phy_id);
80 81
81 if (phy_id != 0xFFFF && phy_id != 0x0) 82 if (phy_id != 0xFFFF && phy_id != 0x0)
82 valid = true; 83 valid = true;
@@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
95 u16 phy_id_high = 0; 96 u16 phy_id_high = 0;
96 u16 phy_id_low = 0; 97 u16 phy_id_low = 0;
97 98
98 status = ixgbe_read_phy_reg(hw, 99 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
99 IXGBE_MDIO_PHY_ID_HIGH, 100 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
100 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 101 &phy_id_high);
101 &phy_id_high);
102 102
103 if (status == 0) { 103 if (status == 0) {
104 hw->phy.id = (u32)(phy_id_high << 16); 104 hw->phy.id = (u32)(phy_id_high << 16);
105 status = ixgbe_read_phy_reg(hw, 105 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
106 IXGBE_MDIO_PHY_ID_LOW, 106 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
107 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 107 &phy_id_low);
108 &phy_id_low);
109 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 108 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
110 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 109 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
111 } 110 }
112
113 return status; 111 return status;
114} 112}
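
The 32-bit PHY identifier is assembled from two 16-bit MDIO registers; the low word is then split by IXGBE_PHY_REVISION_MASK so the revision bits stay out of the ID and cannot perturb type matching. A sketch of the assembly, assuming a 4-bit revision field in the low nibble:

#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u	/* assumed: low 4 bits = revision */

static void assemble_phy_id(uint16_t id_high, uint16_t id_low,
			    uint32_t *id, uint32_t *rev)
{
	*id  = (uint32_t)id_high << 16;
	*id |= id_low & PHY_REVISION_MASK;	/* revision bits masked out */
	*rev = id_low & ~PHY_REVISION_MASK;
}

int main(void)
{
	uint32_t id, rev;

	assemble_phy_id(0x0143, 0xBCA2, &id, &rev);
	printf("id=0x%08x rev=%u\n", id, rev);	/* id=0x0143bca0 rev=2 */
	return 0;
}
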
115 113
@@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
123 enum ixgbe_phy_type phy_type; 121 enum ixgbe_phy_type phy_type;
124 122
125 switch (phy_id) { 123 switch (phy_id) {
126 case TN1010_PHY_ID:
127 phy_type = ixgbe_phy_tn;
128 break;
129 case QT2022_PHY_ID: 124 case QT2022_PHY_ID:
130 phy_type = ixgbe_phy_qt; 125 phy_type = ixgbe_phy_qt;
131 break; 126 break;
@@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138} 133}
139 134
140/** 135/**
141 * ixgbe_reset_phy - Performs a PHY reset 136 * ixgbe_reset_phy_generic - Performs a PHY reset
142 * @hw: pointer to hardware structure 137 * @hw: pointer to hardware structure
143 **/ 138 **/
144s32 ixgbe_reset_phy(struct ixgbe_hw *hw) 139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
145{ 140{
146 /* 141 /*
147 * Perform soft PHY reset to the PHY_XS. 142 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 143 * This will cause a soft reset to the PHY
149 */ 144 */
150 return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, 145 return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
151 IXGBE_MDIO_PHY_XS_DEV_TYPE, 146 IXGBE_MDIO_PHY_XS_DEV_TYPE,
152 IXGBE_MDIO_PHY_XS_RESET); 147 IXGBE_MDIO_PHY_XS_RESET);
153} 148}
154 149
155/** 150/**
156 * ixgbe_read_phy_reg - Reads a value from a specified PHY register 151 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
157 * @hw: pointer to hardware structure 152 * @hw: pointer to hardware structure
158 * @reg_addr: 32 bit address of PHY register to read 153 * @reg_addr: 32 bit address of PHY register to read
159 * @phy_data: Pointer to read data from PHY register 154 * @phy_data: Pointer to read data from PHY register
160 **/ 155 **/
161s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 156s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
162 u32 device_type, u16 *phy_data) 157 u32 device_type, u16 *phy_data)
163{ 158{
164 u32 command; 159 u32 command;
165 u32 i; 160 u32 i;
166 u32 timeout = 10;
167 u32 data; 161 u32 data;
168 s32 status = 0; 162 s32 status = 0;
169 u16 gssr; 163 u16 gssr;
@@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
179 if (status == 0) { 173 if (status == 0) {
180 /* Setup and write the address cycle command */ 174 /* Setup and write the address cycle command */
181 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 175 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
182 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 176 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
183 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 177 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
184 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 178 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
185 179
186 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 180 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
187 181
@@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
190 * The MDI Command bit will clear when the operation is 184 * The MDI Command bit will clear when the operation is
191 * complete 185 * complete
192 */ 186 */
193 for (i = 0; i < timeout; i++) { 187 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
194 udelay(10); 188 udelay(10);
195 189
196 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 190 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
210 * command 204 * command
211 */ 205 */
212 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 206 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
213 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 207 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
214 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 208 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
215 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); 209 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
216 210
217 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 211 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
218 212
@@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
221 * completed. The MDI Command bit will clear when the 215 * completed. The MDI Command bit will clear when the
222 * operation is complete 216 * operation is complete
223 */ 217 */
224 for (i = 0; i < timeout; i++) { 218 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
225 udelay(10); 219 udelay(10);
226 220
227 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 221 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
231 } 225 }
232 226
233 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 227 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
234 hw_dbg(hw, 228 hw_dbg(hw, "PHY read command didn't complete\n");
235 "PHY read command didn't complete\n");
236 status = IXGBE_ERR_PHY; 229 status = IXGBE_ERR_PHY;
237 } else { 230 } else {
238 /* 231 /*
@@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
247 240
248 ixgbe_release_swfw_sync(hw, gssr); 241 ixgbe_release_swfw_sync(hw, gssr);
249 } 242 }
243
250 return status; 244 return status;
251} 245}
252 246
253/** 247/**
254 * ixgbe_write_phy_reg - Writes a value to specified PHY register 248 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
255 * @hw: pointer to hardware structure 249 * @hw: pointer to hardware structure
256 * @reg_addr: 32 bit PHY register to write 250 * @reg_addr: 32 bit PHY register to write
257 * @device_type: 5 bit device type 251 * @device_type: 5 bit device type
258 * @phy_data: Data to write to the PHY register 252 * @phy_data: Data to write to the PHY register
259 **/ 253 **/
260static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 254s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
261 u32 device_type, u16 phy_data) 255 u32 device_type, u16 phy_data)
262{ 256{
263 u32 command; 257 u32 command;
264 u32 i; 258 u32 i;
265 u32 timeout = 10;
266 s32 status = 0; 259 s32 status = 0;
267 u16 gssr; 260 u16 gssr;
268 261
@@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
280 273
281 /* Setup and write the address cycle command */ 274 /* Setup and write the address cycle command */
282 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 275 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
283 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 276 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
284 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 277 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
285 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 278 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
286 279
287 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 280 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
288 281
@@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
291 * The MDI Command bit will clear when the operation is 284 * The MDI Command bit will clear when the operation is
292 * complete 285 * complete
293 */ 286 */
294 for (i = 0; i < timeout; i++) { 287 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
295 udelay(10); 288 udelay(10);
296 289
297 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 290 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
298 291
299 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 292 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
300 hw_dbg(hw, "PHY address cmd didn't complete\n");
301 break; 293 break;
302 }
303 } 294 }
304 295
305 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 296 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
297 hw_dbg(hw, "PHY address cmd didn't complete\n");
306 status = IXGBE_ERR_PHY; 298 status = IXGBE_ERR_PHY;
299 }
307 300
308 if (status == 0) { 301 if (status == 0) {
309 /* 302 /*
@@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
311 * command 304 * command
312 */ 305 */
313 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 306 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
314 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 307 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
315 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 308 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
316 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); 309 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
317 310
318 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 311 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
319 312
@@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
322 * completed. The MDI Command bit will clear when the 315 * completed. The MDI Command bit will clear when the
323 * operation is complete 316 * operation is complete
324 */ 317 */
325 for (i = 0; i < timeout; i++) { 318 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
326 udelay(10); 319 udelay(10);
327 320
328 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 321 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
329 322
330 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 323 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
331 hw_dbg(hw, "PHY write command did not "
332 "complete.\n");
333 break; 324 break;
334 }
335 } 325 }
336 326
337 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 327 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
 328 hw_dbg(hw, "PHY write cmd didn't complete\n");
338 status = IXGBE_ERR_PHY; 329 status = IXGBE_ERR_PHY;
330 }
339 } 331 }
340 332
341 ixgbe_release_swfw_sync(hw, gssr); 333 ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
345} 337}
346 338
347/** 339/**
348 * ixgbe_setup_tnx_phy_link - Set and restart autoneg 340 * ixgbe_setup_phy_link_generic - Set and restart autoneg
349 * @hw: pointer to hardware structure 341 * @hw: pointer to hardware structure
350 * 342 *
351 * Restart autonegotiation and PHY and waits for completion. 343 * Restart autonegotiation and PHY and waits for completion.
352 **/ 344 **/
353s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) 345s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
354{ 346{
355 s32 status = IXGBE_NOT_IMPLEMENTED; 347 s32 status = IXGBE_NOT_IMPLEMENTED;
356 u32 time_out; 348 u32 time_out;
357 u32 max_time_out = 10; 349 u32 max_time_out = 10;
358 u16 autoneg_speed_selection_register = 0x10; 350 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
359 u16 autoneg_restart_mask = 0x0200;
360 u16 autoneg_complete_mask = 0x0020;
361 u16 autoneg_reg = 0;
362 351
363 /* 352 /*
364 * Set advertisement settings in PHY based on autoneg_advertised 353 * Set advertisement settings in PHY based on autoneg_advertised
365 * settings. If autoneg_advertised = 0, then advertise default values 354 * settings. If autoneg_advertised = 0, then advertise default values
367 * txn devices cannot be "forced" to a autoneg 10G and fail. But can 355 * tnx devices cannot be "forced" to autoneg 10G and fail. But can
367 * for a 1G. 356 * for a 1G.
368 */ 357 */
369 ixgbe_read_phy_reg(hw, 358 hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
370 autoneg_speed_selection_register, 359 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
372 &autoneg_reg);
373 360
374 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) 361 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
375 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ 362 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
376 else 363 else
377 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ 364 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
378 365
379 ixgbe_write_phy_reg(hw, 366 hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
380 autoneg_speed_selection_register, 367 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
381 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
382 autoneg_reg);
383
384 368
385 /* Restart PHY autonegotiation and wait for completion */ 369 /* Restart PHY autonegotiation and wait for completion */
386 ixgbe_read_phy_reg(hw, 370 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
387 IXGBE_MDIO_AUTO_NEG_CONTROL, 371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
388 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
389 &autoneg_reg);
390 372
391 autoneg_reg |= autoneg_restart_mask; 373 autoneg_reg |= IXGBE_MII_RESTART;
392 374
393 ixgbe_write_phy_reg(hw, 375 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
394 IXGBE_MDIO_AUTO_NEG_CONTROL, 376 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
395 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
396 autoneg_reg);
397 377
398 /* Wait for autonegotiation to finish */ 378 /* Wait for autonegotiation to finish */
399 for (time_out = 0; time_out < max_time_out; time_out++) { 379 for (time_out = 0; time_out < max_time_out; time_out++) {
400 udelay(10); 380 udelay(10);
401 /* Restart PHY autonegotiation and wait for completion */ 381 /* Restart PHY autonegotiation and wait for completion */
402 status = ixgbe_read_phy_reg(hw, 382 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
403 IXGBE_MDIO_AUTO_NEG_STATUS, 383 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
404 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 384 &autoneg_reg);
405 &autoneg_reg);
406 385
407 autoneg_reg &= autoneg_complete_mask; 386 autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
408 if (autoneg_reg == autoneg_complete_mask) { 387 if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
409 status = 0; 388 status = 0;
410 break; 389 break;
411 } 390 }
@@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
418} 397}
419 398
420/** 399/**
421 * ixgbe_check_tnx_phy_link - Determine link and speed status 400 * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
422 * @hw: pointer to hardware structure
423 *
424 * Reads the VS1 register to determine if link is up and the current speed for
425 * the PHY.
426 **/
427s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
428 bool *link_up)
429{
430 s32 status = 0;
431 u32 time_out;
432 u32 max_time_out = 10;
433 u16 phy_link = 0;
434 u16 phy_speed = 0;
435 u16 phy_data = 0;
436
437 /* Initialize speed and link to default case */
438 *link_up = false;
439 *speed = IXGBE_LINK_SPEED_10GB_FULL;
440
441 /*
442 * Check current speed and link status of the PHY register.
443 * This is a vendor specific register and may have to
444 * be changed for other copper PHYs.
445 */
446 for (time_out = 0; time_out < max_time_out; time_out++) {
447 udelay(10);
448 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
449 *link_up = true;
450 if (phy_speed ==
451 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
452 *speed = IXGBE_LINK_SPEED_1GB_FULL;
453 break;
454 } else {
455 status = ixgbe_read_phy_reg(hw,
456 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
457 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
458 &phy_data);
459 phy_link = phy_data &
460 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
461 phy_speed = phy_data &
462 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
463 }
464 }
465
466 return status;
467}
468
469/**
470 * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
471 * @hw: pointer to hardware structure 401 * @hw: pointer to hardware structure
472 * @speed: new link speed 402 * @speed: new link speed
473 * @autoneg: true if autonegotiation enabled 403 * @autoneg: true if autonegotiation enabled
474 **/ 404 **/
475s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, 405s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
476 bool autoneg, 406 ixgbe_link_speed speed,
477 bool autoneg_wait_to_complete) 407 bool autoneg,
408 bool autoneg_wait_to_complete)
478{ 409{
410
479 /* 411 /*
480 * Clear autoneg_advertised and set new values based on input link 412 * Clear autoneg_advertised and set new values based on input link
481 * speed. 413 * speed.
@@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
484 416
485 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 417 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
486 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 418 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
419
487 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 420 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
488 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 421 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
489 422
490 /* Setup link based on the new speed settings */ 423 /* Setup link based on the new speed settings */
491 ixgbe_setup_tnx_phy_link(hw); 424 hw->phy.ops.setup_link(hw);
492 425
493 return 0; 426 return 0;
494} 427}
428
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index aa3ea72e678e..9bfe3f2b1d8f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -30,20 +29,52 @@
30#define _IXGBE_PHY_H_ 29#define _IXGBE_PHY_H_
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
33 33
34s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); 34/* EEPROM byte offsets */
35s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 35#define IXGBE_SFF_IDENTIFIER 0x0
36s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 36#define IXGBE_SFF_IDENTIFIER_SFP 0x3
37 bool autoneg_wait_to_complete); 37#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
38s32 ixgbe_identify_phy(struct ixgbe_hw *hw); 38#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
39s32 ixgbe_reset_phy(struct ixgbe_hw *hw); 39#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
40s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 40#define IXGBE_SFF_1GBE_COMP_CODES 0x6
41 u32 device_type, u16 *phy_data); 41#define IXGBE_SFF_10GBE_COMP_CODES 0x3
42 42#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
43/* PHY specific */ 43
44s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); 44/* Bitmasks */
45s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 45#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
46s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47 bool autoneg_wait_to_complete); 47#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
48#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
49#define IXGBE_I2C_EEPROM_READ_MASK 0x100
50#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
51#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
52#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
53#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
54#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
55
56/* Bit-shift macros */
57#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
58#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
59#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
60
61/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
62#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
63#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
64#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
65
66
67s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
68s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
69s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
70s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
71 u32 device_type, u16 *phy_data);
72s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
73 u32 device_type, u16 phy_data);
74s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
75s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
76 ixgbe_link_speed speed,
77 bool autoneg,
78 bool autoneg_wait_to_complete);
48 79
49#endif /* _IXGBE_PHY_H_ */ 80#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 3e9c483ad8e6..c6f8fa1c4e59 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -37,9 +36,9 @@
37/* Device IDs */ 36/* Device IDs */
38#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
39#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
40#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD 39#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
42#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 40#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
41#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
43 42
44/* General Registers */ 43/* General Registers */
45#define IXGBE_CTRL 0x00000 44#define IXGBE_CTRL 0x00000
@@ -70,11 +69,11 @@
 #define IXGBE_EIMC 0x00888
 #define IXGBE_EIAC 0x00810
 #define IXGBE_EIAM 0x00890
-#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
 #define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
 #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
 #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL 0x11068
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
 #define IXGBE_GPIE 0x00898
 
 /* Flow Control Registers */
@@ -86,20 +85,33 @@
 #define IXGBE_TFCS 0x0CE00
 
 /* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/
-#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40))
-#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40))
-#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40))
-#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40))
-#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40))
-#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40))
-#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4))
-                    /* array of 16 (0x02100-0x0213C) */
-#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4))
-                    /* array of 16 (0x02200-0x0223C) */
-#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-64 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+                          (0x0D014 + ((_i - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-64 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+                              (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+                              (0x0D00C + ((_i - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
                     /* 8 of these 0x03C00 - 0x03C1C */
 #define IXGBE_RXCTRL 0x03000
 #define IXGBE_DROPEN 0x03D04
 #define IXGBE_RXPBSIZE_SHIFT 10
@@ -107,29 +119,32 @@
 /* Receive Registers */
 #define IXGBE_RXCSUM 0x05000
 #define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+/* Multicast Table Array - 128 entries */
 #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-                    /* Multicast Table Array - 128 entries */
-#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */
-#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */
-#define IXGBE_PSRTYPE 0x05480
-                    /* 0x5480-0x54BC Packet split receive type */
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
 #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
-                    /* array of 4096 1-bit vlan filters */
+/*array of 4096 4-bit vlan vmdq indices */
 #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-                    /*array of 4096 4-bit vlan vmdq indicies */
 #define IXGBE_FCTRL 0x05080
 #define IXGBE_VLNCTRL 0x05088
 #define IXGBE_MCSTCTRL 0x05090
 #define IXGBE_MRQC 0x05818
-#define IXGBE_VMD_CTL 0x0581C
 #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
 #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
 
+
 /* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
 #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
 #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
@@ -138,11 +153,10 @@
 #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
 #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
 #define IXGBE_DTXCTL 0x07E00
-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
-                    /* there are 16 of these (0-15) */
+
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
 #define IXGBE_TIPG 0x0CB00
-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04))
-                    /* there are 8 of these */
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_MNGTXMAP 0x0CD10
 #define IXGBE_TIPG_FIBER_DEFAULT 3
 #define IXGBE_TXPBSIZE_SHIFT 10
@@ -154,6 +168,7 @@
 #define IXGBE_IPAV 0x05838
 #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
 #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
 #define IXGBE_WUPL 0x05900
 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
 #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
@@ -170,6 +185,8 @@
 #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
+
+
 /* Stats registers */
 #define IXGBE_CRCERRS 0x04000
 #define IXGBE_ILLERRC 0x04004
@@ -224,7 +241,7 @@
 #define IXGBE_XEC 0x04120
 
 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
-#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
 
 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
@@ -275,23 +292,17 @@
 #define IXGBE_DCA_CTRL 0x11074
 
 /* Diagnostic Registers */
 #define IXGBE_RDSTATCTL 0x02C20
 #define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
 #define IXGBE_RDHMPN 0x02F08
-#define IXGBE_RIC_DW0 0x02F10
-#define IXGBE_RIC_DW1 0x02F14
-#define IXGBE_RIC_DW2 0x02F18
-#define IXGBE_RIC_DW3 0x02F1C
-#define IXGBE_RDPROBE 0x02F20
-#define IXGBE_TDSTATCTL 0x07C20
-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN 0x07F08
-#define IXGBE_TIC_DW0 0x07F10
-#define IXGBE_TIC_DW1 0x07F14
-#define IXGBE_TIC_DW2 0x07F18
-#define IXGBE_TIC_DW3 0x07F1C
-#define IXGBE_TDPROBE 0x07F20
-#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
 #define IXGBE_TXBUFDATA0 0x0C610
 #define IXGBE_TXBUFDATA1 0x0C614
 #define IXGBE_TXBUFDATA2 0x0C618
@@ -392,7 +403,7 @@
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
 #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
@@ -416,10 +427,10 @@
 #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
 
 /* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
 
 /* Atlas registers */
 #define IXGBE_ATLAS_PDN_LPBK 0x24
@@ -434,6 +445,7 @@
 #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
 
+
 /* Device Type definitions for new protocol MDIO commands */
 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3
@@ -441,6 +453,8 @@
 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
 
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
@@ -454,23 +468,39 @@
 #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
 #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
 #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
 #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
 #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
 
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MII_SPEED_SELECTION_REG 0x10
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_REG 0x0
+
 #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
 #define IXGBE_MAX_PHY_ADDR 32
 
 /* PHY IDs*/
-#define TN1010_PHY_ID 0x00A19410
 #define QT2022_PHY_ID 0x0043A400
 
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
 /* General purpose Interrupt Enable */
-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME 0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
 
 /* Transmit Flow Control status */
 #define IXGBE_TFCS_TXOFF 0x00000001
@@ -531,7 +561,7 @@
 #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
 
 /* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */
+#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
 #define IXGBE_RMCS_RAC 0x00000004
 #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
@@ -539,12 +569,15 @@
 #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
 #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
 
+
 /* Interrupt register bitmasks */
 
 /* Extended Interrupt Cause Read */
 #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
 #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
 #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
 #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
 #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -552,11 +585,12 @@
 
 /* Extended Interrupt Cause Set */
 #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
 #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
 
@@ -564,7 +598,9 @@
 #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
 #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
 #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
 #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
@@ -573,18 +609,20 @@
 #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
 #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
 #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
 
-#define IXGBE_EIMS_ENABLE_MASK (\
+#define IXGBE_EIMS_ENABLE_MASK ( \
 	IXGBE_EIMS_RTX_QUEUE | \
 	IXGBE_EIMS_LSC | \
 	IXGBE_EIMS_TCP_TIMER | \
 	IXGBE_EIMS_OTHER)
 
-/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
 #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
 #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
 #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -621,6 +659,7 @@
 #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
 #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
 
+
 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
 
 /* STATUS Bit Masks */
@@ -668,16 +707,16 @@
 #define IXGBE_AUTOC_AN_RESTART 0x00001000
 #define IXGBE_AUTOC_FLU 0x00000001
 #define IXGBE_AUTOC_LMS_SHIFT 13
-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
 
-#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
-#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
+#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
 #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
@@ -703,6 +742,7 @@
 #define IXGBE_LINKS_TL_FAULT 0x00001000
 #define IXGBE_LINKS_SIGNAL 0x00000F00
 
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
 
 /* SW Semaphore Register bitmasks */
@@ -757,6 +797,11 @@
 #define IXGBE_PBANUM0_PTR 0x15
 #define IXGBE_PBANUM1_PTR 0x16
 
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
 /* EEPROM Commands - SPI */
 #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
 #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
@@ -764,7 +809,7 @@
 #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
 #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
 #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
-/* EEPROM reset Write Enbale latch */
+/* EEPROM reset Write Enable latch */
 #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
 #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
 #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
@@ -803,22 +848,20 @@
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
 
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-
 /* Check whether address is multicast. This is little-endian specific check.*/
 #define IXGBE_IS_MULTICAST(Address) \
 	(bool)(((u8 *)(Address))[0] & ((u8)0x01))
 
 /* Check whether an address is broadcast. */
 #define IXGBE_IS_BROADCAST(Address) \
 	((((u8 *)(Address))[0] == ((u8)0xff)) && \
 	(((u8 *)(Address))[1] == ((u8)0xff)))
 
 /* RAH */
 #define IXGBE_RAH_VIND_MASK 0x003C0000
 #define IXGBE_RAH_VIND_SHIFT 18
 #define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
 
 /* Header split receive */
 #define IXGBE_RFCTL_ISCSI_DIS 0x00000001
@@ -847,7 +890,7 @@
 #define IXGBE_MAX_FRAME_SZ 0x40040000
 
 #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
 
 /* Receive Config masks */
 #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
@@ -860,7 +903,7 @@
 #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
 #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
 #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
-/* Receive Priority Flow Control Enbale */
+/* Receive Priority Flow Control Enable */
 #define IXGBE_FCTRL_RPFCE 0x00004000
 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
 
@@ -890,9 +933,8 @@
 /* Receive Descriptor bit definitions */
 #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
 #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
-#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */
 #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
 #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
 #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
 #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
@@ -908,7 +950,7 @@
 #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
 #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
 #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_HBO 0x00800000
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
 #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
 #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
 #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
@@ -922,15 +964,17 @@
 #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
 #define IXGBE_RXD_CFI_SHIFT 12
 
+
 /* SRRCTL bit definitions */
 #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
 #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
 #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
 #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
 
 #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
 #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
@@ -964,21 +1008,20 @@
 #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-
 /* Masks to determine if packets should be dropped due to frame errors */
-#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
 	IXGBE_RXD_ERR_CE | \
 	IXGBE_RXD_ERR_LE | \
 	IXGBE_RXD_ERR_PE | \
 	IXGBE_RXD_ERR_OSE | \
 	IXGBE_RXD_ERR_USE)
 
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
 	IXGBE_RXDADV_ERR_CE | \
 	IXGBE_RXDADV_ERR_LE | \
 	IXGBE_RXDADV_ERR_PE | \
 	IXGBE_RXDADV_ERR_OSE | \
 	IXGBE_RXDADV_ERR_USE)
 
 /* Multicast bit mask */
 #define IXGBE_MCSTCTRL_MFE 0x4
@@ -994,6 +1037,7 @@
 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
 
+
 /* Transmit Descriptor - Legacy */
 struct ixgbe_legacy_tx_desc {
 	u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1008,8 +1052,8 @@ struct ixgbe_legacy_tx_desc {
 	union {
 		__le32 data;
 		struct {
-			u8 status; /* Descriptor status */
-			u8 css; /* Checksum start */
+			u8 status; /* Descriptor status */
+			u8 css; /* Checksum start */
 			__le16 vlan;
 		} fields;
 	} upper;
@@ -1018,7 +1062,7 @@ struct ixgbe_legacy_tx_desc {
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
 	struct {
-		__le64 buffer_addr; /* Address of descriptor's data buf */
+		__le64 buffer_addr; /* Address of descriptor's data buf */
 		__le32 cmd_type_len;
 		__le32 olinfo_status;
 	} read;
@@ -1050,8 +1094,8 @@ union ixgbe_adv_rx_desc {
 	union {
 		__le32 data;
 		struct {
-			__le16 pkt_info; /* RSS type, Packet type */
-			__le16 hdr_info; /* Split Header, header len */
+			__le16 pkt_info; /* RSS, Pkt type */
+			__le16 hdr_info; /* Splithdr, hdrlen */
 		} hs_rss;
 	} lo_dword;
 	union {
@@ -1079,49 +1123,69 @@ struct ixgbe_adv_tx_context_desc {
 };
 
 /* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
 #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
 #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
 #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
 #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
 #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */
 #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
 #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
 #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
 #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
 #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
 #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
 #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
 #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
 #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
                                  IXGBE_ADVTXD_POPTS_SHIFT)
 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
                                  IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
-#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
 
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
 /* Link speed */
+typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_UNKNOWN 0
 #define IXGBE_LINK_SPEED_100_FULL 0x0008
 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
 
 
 enum ixgbe_eeprom_type {
@@ -1138,16 +1202,38 @@ enum ixgbe_mac_type {
 
 enum ixgbe_phy_type {
 	ixgbe_phy_unknown = 0,
-	ixgbe_phy_tn,
 	ixgbe_phy_qt,
-	ixgbe_phy_xaui
+	ixgbe_phy_xaui,
+	ixgbe_phy_tw_tyco,
+	ixgbe_phy_tw_unknown,
+	ixgbe_phy_sfp_avago,
+	ixgbe_phy_sfp_ftl,
+	ixgbe_phy_sfp_unknown,
+	ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID	Module Type
+ * =============
+ * 0	SFP_DA_CU
+ * 1	SFP_SR
+ * 2	SFP_LR
+ */
+enum ixgbe_sfp_type {
+	ixgbe_sfp_type_da_cu = 0,
+	ixgbe_sfp_type_sr = 1,
+	ixgbe_sfp_type_lr = 2,
+	ixgbe_sfp_type_unknown = 0xFFFF
 };
 
 enum ixgbe_media_type {
 	ixgbe_media_type_unknown = 0,
 	ixgbe_media_type_fiber,
 	ixgbe_media_type_copper,
-	ixgbe_media_type_backplane
+	ixgbe_media_type_backplane,
+	ixgbe_media_type_virtual
 };
 
 /* Flow Control Settings */
@@ -1245,59 +1331,114 @@ struct ixgbe_hw;
 typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
                                   u32 *vmdq);
 
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+	s32 (*init_params)(struct ixgbe_hw *);
+	s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+	s32 (*write)(struct ixgbe_hw *, u16, u16);
+	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+	s32 (*update_checksum)(struct ixgbe_hw *);
+};
+
 struct ixgbe_mac_operations {
-	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
 	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);
+	s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+	s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+
+	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
-	s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+	                        bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+	                             bool *);
+
+	/* LED */
+	s32 (*led_on)(struct ixgbe_hw *, u32);
+	s32 (*led_off)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+	s32 (*clear_rar)(struct ixgbe_hw *, u32);
+	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+	                           ixgbe_mc_addr_itr);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+	                           ixgbe_mc_addr_itr);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+	s32 (*init_uta_tables)(struct ixgbe_hw *);
+
+	/* Flow Control */
+	s32 (*setup_fc)(struct ixgbe_hw *, s32);
 };
 
 struct ixgbe_phy_operations {
+	s32 (*identify)(struct ixgbe_hw *);
+	s32 (*identify_sfp)(struct ixgbe_hw *);
+	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
 	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
-};
-
-struct ixgbe_mac_info {
-	struct ixgbe_mac_operations ops;
-	enum ixgbe_mac_type type;
-	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	s32 mc_filter_type;
-	u32 mcft_size;
-	u32 vft_size;
-	u32 num_rar_entries;
-	u32 num_rx_queues;
-	u32 num_tx_queues;
-	u32 link_attach_type;
-	u32 link_mode_select;
-	bool link_settings_loaded;
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+	                        bool);
+	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
 };
 
 struct ixgbe_eeprom_info {
-	enum ixgbe_eeprom_type type;
-	u16 word_size;
-	u16 address_bits;
+	struct ixgbe_eeprom_operations ops;
+	enum ixgbe_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
 };
 
-struct ixgbe_phy_info {
-	struct ixgbe_phy_operations ops;
-
-	enum ixgbe_phy_type type;
-	u32 addr;
-	u32 id;
-	u32 revision;
-	enum ixgbe_media_type media_type;
-	u32 autoneg_advertised;
-	bool autoneg_wait_to_complete;
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	enum ixgbe_mac_type type;
+	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 link_attach_type;
+	u32 link_mode_select;
+	bool link_settings_loaded;
+	bool autoneg;
+	bool autoneg_failed;
 };
 
-struct ixgbe_info {
-	enum ixgbe_mac_type mac;
-	s32 (*get_invariants)(struct ixgbe_hw *);
-	struct ixgbe_mac_operations *mac_ops;
+struct ixgbe_phy_info {
+	struct ixgbe_phy_operations ops;
+	enum ixgbe_phy_type type;
+	u32 addr;
+	u32 id;
+	enum ixgbe_sfp_type sfp_type;
+	u32 revision;
+	enum ixgbe_media_type media_type;
+	bool reset_disable;
+	ixgbe_autoneg_advertised autoneg_advertised;
+	bool autoneg_wait_to_complete;
 };
 
 struct ixgbe_hw {
@@ -1316,6 +1457,15 @@ struct ixgbe_hw {
 	bool adapter_stopped;
 };
 
+struct ixgbe_info {
+	enum ixgbe_mac_type mac;
+	s32 (*get_invariants)(struct ixgbe_hw *);
+	struct ixgbe_mac_operations *mac_ops;
+	struct ixgbe_eeprom_operations *eeprom_ops;
+	struct ixgbe_phy_operations *phy_ops;
+};
+
+
 /* Error Codes */
 #define IXGBE_ERR_EEPROM -1
 #define IXGBE_ERR_EEPROM_CHECKSUM -2
@@ -1334,6 +1484,8 @@ struct ixgbe_hw {
 #define IXGBE_ERR_RESET_FAILED -15
 #define IXGBE_ERR_SWFW_SYNC -16
 #define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
 
 #endif /* _IXGBE_TYPE_H_ */
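
Several of the reworked macros above (IXGBE_RDBAL, IXGBE_SRRCTL, IXGBE_DCA_RXCTRL, IXGBE_EITR, ...) fold two register banks into one queue index: indices below 64 land in the low block, 64-127 in the 0x0D000 block. A self-contained sketch that just evaluates the IXGBE_RDBAL arithmetic at the bank boundaries (macro copied from the hunk above; this computes offsets, it does not touch hardware):

	#include <stdio.h>

	/* Copied from the ixgbe_type.h hunk above. */
	#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
	                         (0x0D000 + ((_i - 64) * 0x40)))

	int main(void)
	{
		int q[] = { 0, 63, 64, 127 };
		unsigned int i;

		for (i = 0; i < sizeof(q) / sizeof(q[0]); i++)
			printf("RDBAL(%3d) = 0x%05X\n", q[i],
			       (unsigned int)IXGBE_RDBAL(q[i]));
		return 0;	/* prints 0x01000, 0x01FC0, 0x0D000, 0x0DFC0 */
	}
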
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0a97c26df6ab..a1e22ed1f6ee 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -41,7 +41,7 @@
 #endif
 
 #if MFE_DEBUG>=1
-#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args)
+#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
 #define MFE_RX_DEBUG 2
 #else
 #define DPRINTK(str,args...)
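
The meth.c change (and the identical ones in mipsnet.c and netx-eth.c below) swaps the GNU-specific __FUNCTION__ for C99's __func__. Both name the enclosing function, so the printk call sites are otherwise unchanged; __func__ is a predefined identifier rather than a macro. A standalone illustration:

	#include <stdio.h>

	static void probe(void)
	{
		/* __func__ is a C99 predefined identifier naming the enclosing function. */
		printf("%s: called\n", __func__);
	}

	int main(void)
	{
		probe();	/* prints "probe: called" */
		return 0;
	}
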
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 6d343efb2717..4e7a5faf0351 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -203,7 +203,7 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
 
 out_badirq:
 	printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
-	       dev->name, __FUNCTION__, irq);
+	       dev->name, __func__, irq);
 	return ret;
 }
 
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 096bca54bcf7..b411b79d72ad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -33,6 +33,7 @@
 
 #include <linux/errno.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/bitmap.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d6524db321af..005f2aa75019 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -183,7 +183,7 @@ struct myri10ge_slice_state {
 	dma_addr_t fw_stats_bus;
 	int watchdog_tx_done;
 	int watchdog_tx_req;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	int cached_dca_tag;
 	int cpu;
 	__be32 __iomem *dca_tag;
@@ -215,7 +215,7 @@ struct myri10ge_priv {
 	int msi_enabled;
 	int msix_enabled;
 	struct msix_entry *msix_vectors;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	int dca_enabled;
 #endif
 	u32 link_state;
@@ -891,7 +891,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	struct myri10ge_slice_state *ss;
 	int i, status;
 	size_t bytes;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	unsigned long dca_tag_off;
 #endif
 
@@ -986,7 +986,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	}
 	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
 	dca_tag_off = cmd.data0;
 	for (i = 0; i < mgp->num_slices; i++) {
@@ -1025,7 +1025,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	return status;
 }
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 static void
 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
 {
@@ -1060,8 +1060,9 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
 	}
 	err = dca_add_requester(&pdev->dev);
 	if (err) {
-		dev_err(&pdev->dev,
-			"dca_add_requester() failed, err=%d\n", err);
+		if (err != -ENODEV)
+			dev_err(&pdev->dev,
+				"dca_add_requester() failed, err=%d\n", err);
 		return;
 	}
 	mgp->dca_enabled = 1;
@@ -1457,7 +1458,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
 	struct net_device *netdev = ss->mgp->dev;
 	int work_done;
 
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	if (ss->mgp->dca_enabled)
 		myri10ge_update_dca(ss);
 #endif
@@ -1686,8 +1687,8 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1686 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1687 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1687 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1688 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1688 "serial_number", "watchdog_resets", 1689 "serial_number", "watchdog_resets",
1689#ifdef CONFIG_DCA 1690#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1690 "dca_capable", "dca_enabled", 1691 "dca_capable_firmware", "dca_device_present",
1691#endif 1692#endif
1692 "link_changes", "link_up", "dropped_link_overflow", 1693 "link_changes", "link_up", "dropped_link_overflow",
1693 "dropped_link_error_or_filtered", 1694 "dropped_link_error_or_filtered",
@@ -1765,7 +1766,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
 	data[i++] = (unsigned int)mgp->read_write_dma;
 	data[i++] = (unsigned int)mgp->serial_number;
 	data[i++] = (unsigned int)mgp->watchdog_resets;
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
 	data[i++] = (unsigned int)(mgp->dca_enabled);
 #endif
@@ -3763,7 +3764,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_err(&pdev->dev, "failed reset\n");
 		goto abort_with_slices;
 	}
-#ifdef CONFIG_DCA
+#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
 	myri10ge_setup_dca(mgp);
 #endif
 	pci_set_drvdata(pdev, mgp);
@@ -3866,7 +3867,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
3866 netdev = mgp->dev; 3867 netdev = mgp->dev;
3867 unregister_netdev(netdev); 3868 unregister_netdev(netdev);
3868 3869
3869#ifdef CONFIG_DCA 3870#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3870 myri10ge_teardown_dca(mgp); 3871 myri10ge_teardown_dca(mgp);
3871#endif 3872#endif
3872 myri10ge_dummy_rdma(mgp, 0); 3873 myri10ge_dummy_rdma(mgp, 0);
@@ -3911,7 +3912,7 @@ static struct pci_driver myri10ge_driver = {
3911#endif 3912#endif
3912}; 3913};
3913 3914
3914#ifdef CONFIG_DCA 3915#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3915static int 3916static int
3916myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) 3917myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
3917{ 3918{
@@ -3943,7 +3944,7 @@ static __init int myri10ge_init_module(void)
3943 myri10ge_driver.name, myri10ge_rss_hash); 3944 myri10ge_driver.name, myri10ge_rss_hash);
3944 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; 3945 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
3945 } 3946 }
3946#ifdef CONFIG_DCA 3947#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3947 dca_register_notify(&myri10ge_dca_notifier); 3948 dca_register_notify(&myri10ge_dca_notifier);
3948#endif 3949#endif
3949 3950
@@ -3954,7 +3955,7 @@ module_init(myri10ge_init_module);
3954 3955
3955static __exit void myri10ge_cleanup_module(void) 3956static __exit void myri10ge_cleanup_module(void)
3956{ 3957{
3957#ifdef CONFIG_DCA 3958#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3958 dca_unregister_notify(&myri10ge_dca_notifier); 3959 dca_unregister_notify(&myri10ge_dca_notifier);
3959#endif 3960#endif
3960 pci_unregister_driver(&myri10ge_driver); 3961 pci_unregister_driver(&myri10ge_driver);
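The recurring guard rewrite in the myri10ge hunks above is the standard fix for tristate Kconfig symbols: when DCA support is built as a module, the preprocessor sees CONFIG_DCA_MODULE rather than CONFIG_DCA, so the old bare #ifdef compiled the DCA hooks out of the driver whenever dca was modular. A minimal sketch of the convention; MYRI10GE_HAVE_DCA is a hypothetical helper macro for illustration, not part of the patch:

    /* Kconfig tristate convention: FOO=y defines CONFIG_FOO, FOO=m defines
     * CONFIG_FOO_MODULE, and FOO=n defines neither. */
    #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
    #define MYRI10GE_HAVE_DCA 1     /* hypothetical, for illustration */
    #else
    #define MYRI10GE_HAVE_DCA 0
    #endif

The other two myri10ge hunks are related polish: dca_add_requester() returning -ENODEV merely means no DCA provider exists on this system, which is routine and so no longer logged as an error, and the two ethtool strings are renamed to say what the values actually report, firmware DCA capability and the presence of a DCA device.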
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index fa3ceca4e15c..79599900c4b5 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -844,8 +844,12 @@ static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state)
844{ 844{
845 struct net_device *dev = platform_get_drvdata(pdev); 845 struct net_device *dev = platform_get_drvdata(pdev);
846 846
847 if (netif_running(dev)) 847 if (netif_running(dev)) {
848 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
848 netif_device_detach(dev); 849 netif_device_detach(dev);
850 if (idev)
851 pnp_stop_dev(idev);
852 }
849 return 0; 853 return 0;
850} 854}
851 855
@@ -854,6 +858,9 @@ static int ne_drv_resume(struct platform_device *pdev)
854 struct net_device *dev = platform_get_drvdata(pdev); 858 struct net_device *dev = platform_get_drvdata(pdev);
855 859
856 if (netif_running(dev)) { 860 if (netif_running(dev)) {
861 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
862 if (idev)
863 pnp_start_dev(idev);
857 ne_reset_8390(dev); 864 ne_reset_8390(dev);
858 NS8390p_init(dev, 1); 865 NS8390p_init(dev, 1);
859 netif_device_attach(dev); 866 netif_device_attach(dev);
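The ne.c hunks make suspend and resume symmetric for cards discovered through PnP: ei_status.priv carries the pnp_dev in that case (and is presumably left NULL by the legacy ISA probe paths, hence the checks). The ordering contract, sketched with the names the hunks use:

    /* suspend: detach from the network stack, then power the card down */
    netif_device_detach(dev);
    if (idev)
            pnp_stop_dev(idev);

    /* resume: power up first, reinitialise the 8390 core, then reattach */
    if (idev)
            pnp_start_dev(idev);
    ne_reset_8390(dev);
    NS8390p_init(dev, 1);
    netif_device_attach(dev);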
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 3f9af759cb90..b9bed82e1d21 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -189,7 +189,7 @@ netx_eth_interrupt(int irq, void *dev_id)
189 189
190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) 190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
191 printk("%s: unexpected status: 0x%08x\n", 191 printk("%s: unexpected status: 0x%08x\n",
192 __FUNCTION__, status); 192 __func__, status);
193 193
194 fill_level = 194 fill_level =
195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id))); 195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 244ab49c4337..f8e601c51da7 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -742,7 +742,7 @@ extern char netxen_nic_driver_name[];
742 } while (0) 742 } while (0)
743#else 743#else
744#define DPRINTK(klevel, fmt, args...) do { \ 744#define DPRINTK(klevel, fmt, args...) do { \
745 printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\ 745 printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
746 (adapter != NULL && adapter->netdev != NULL) ? \ 746 (adapter != NULL && adapter->netdev != NULL) ? \
747 adapter->netdev->name : NULL, \ 747 adapter->netdev->name : NULL, \
748 ## args); } while(0) 748 ## args); } while(0)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 008fd6618a5f..6ef3f0d84bcf 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -77,18 +77,18 @@ static irqreturn_t netxen_msi_intr(int irq, void *data);
77 77
78/* PCI Device ID Table */ 78/* PCI Device ID Table */
79#define ENTRY(device) \ 79#define ENTRY(device) \
80 {PCI_DEVICE(0x4040, (device)), \ 80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
82 82
83static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 83static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
84 ENTRY(0x0001), 84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
85 ENTRY(0x0002), 85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
86 ENTRY(0x0003), 86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
87 ENTRY(0x0004), 87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
88 ENTRY(0x0005), 88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
89 ENTRY(0x0024), 89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
90 ENTRY(0x0025), 90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
91 ENTRY(0x0100), 91 ENTRY(PCI_DEVICE_ID_NX3031),
92 {0,} 92 {0,}
93}; 93};
94 94
@@ -241,7 +241,7 @@ static void netxen_check_options(struct netxen_adapter *adapter)
241 case NETXEN_BRDTYPE_P3_REF_QG: 241 case NETXEN_BRDTYPE_P3_REF_QG:
242 case NETXEN_BRDTYPE_P3_4_GB: 242 case NETXEN_BRDTYPE_P3_4_GB:
243 case NETXEN_BRDTYPE_P3_4_GB_MM: 243 case NETXEN_BRDTYPE_P3_4_GB_MM:
244 adapter->msix_supported = 0; 244 adapter->msix_supported = !!use_msi_x;
245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
246 break; 246 break;
247 247
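Two netxen cleanups: the ID table now spells out the PCI_VENDOR_ID_NETXEN and PCI_DEVICE_ID_NX* constants from pci_ids.h instead of bare hex, and these board types now honour the driver's use_msi_x module parameter rather than forcing MSI-X off. The !! is the usual C idiom for normalising an arbitrary integer to a strict 0 or 1; a standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            int use_msi_x = 5;                /* e.g. a parameter set to an odd value */
            int msix_supported = !!use_msi_x; /* any non-zero value collapses to 1 */

            printf("%d\n", msix_supported);   /* prints 1 */
            return 0;
    }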
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 53451c3b2c0d..0a575fef29e6 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -119,7 +119,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
119 119
120#ifdef NETDRV_DEBUG 120#ifdef NETDRV_DEBUG
121/* note: prints function name for you */ 121/* note: prints function name for you */
122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) 122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
123#else 123#else
124# define DPRINTK(fmt, args...) 124# define DPRINTK(fmt, args...)
125#endif 125#endif
@@ -130,7 +130,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
130# define assert(expr) \ 130# define assert(expr) \
131 if(!(expr)) { \ 131 if(!(expr)) { \
132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
133 #expr,__FILE__,__FUNCTION__,__LINE__); \ 133 #expr,__FILE__,__func__,__LINE__); \
134 } 134 }
135#endif 135#endif
136 136
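The __FUNCTION__ to __func__ conversions scattered through this merge (netx-eth, netxen, the PCI skeleton, r8169, s2io) replace a GCC-specific spelling with the identifier that C99 predefines inside every function body; behaviour under GCC is identical, the standard form is simply portable. A standalone sketch:

    #include <stdio.h>

    static void report(void)
    {
            /* __func__ expands to the enclosing function's name (C99 6.4.2.2) */
            printf("in %s\n", __func__);
    }

    int main(void)
    {
            report();       /* prints "in report" */
            return 0;
    }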
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281d9363..025f526558bc 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -370,7 +370,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
370 /* Reset internal state machine */ 370 /* Reset internal state machine */
371 iowrite16(2, ioaddr + MAC_SM); 371 iowrite16(2, ioaddr + MAC_SM);
372 iowrite16(0, ioaddr + MAC_SM); 372 iowrite16(0, ioaddr + MAC_SM);
373 udelay(5000); 373 mdelay(5);
374 374
375 /* MAC Bus Control Register */ 375 /* MAC Bus Control Register */
376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -806,7 +806,7 @@ static void r6040_mac_address(struct net_device *dev)
806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ 806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ 807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
808 iowrite16(0, ioaddr + MAC_SM); 808 iowrite16(0, ioaddr + MAC_SM);
809 udelay(5000); 809 mdelay(5);
810 810
811 /* Restore MAC Address */ 811 /* Restore MAC Address */
812 adrp = (u16 *) dev->dev_addr; 812 adrp = (u16 *) dev->dev_addr;
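udelay() is intended for microsecond-scale busy-waits, and on several architectures its loop arithmetic can overflow for arguments this large, so a 5000 microsecond wait belongs to mdelay(5), which spins in one-millisecond steps. Roughly how the generic fallback behaves, as a sketch of the idea rather than the kernel's exact definition (udelay() from linux/delay.h assumed):

    #define sketch_mdelay(n)                                    \
            do {                                                \
                    unsigned long __ms = (n);                   \
                    while (__ms--)                              \
                            udelay(1000);   /* 1 ms per spin */ \
            } while (0)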
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 1822491f19cb..fb899c675f47 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -36,7 +36,7 @@
36#define assert(expr) \ 36#define assert(expr) \
37 if (!(expr)) { \ 37 if (!(expr)) { \
38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
39 #expr,__FILE__,__FUNCTION__,__LINE__); \ 39 #expr,__FILE__,__func__,__LINE__); \
40 } 40 }
41#define dprintk(fmt, args...) \ 41#define dprintk(fmt, args...) \
42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) 42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
@@ -2286,8 +2286,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
2286 2286
2287 RTL_R8(IntrMask); 2287 RTL_R8(IntrMask);
2288 2288
2289 RTL_W32(RxMissed, 0);
2290
2291 rtl_set_rx_mode(dev); 2289 rtl_set_rx_mode(dev);
2292 2290
2293 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2291 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2412,8 +2410,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
2412 2410
2413 RTL_R8(IntrMask); 2411 RTL_R8(IntrMask);
2414 2412
2415 RTL_W32(RxMissed, 0);
2416
2417 rtl_set_rx_mode(dev); 2413 rtl_set_rx_mode(dev);
2418 2414
2419 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2415 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -3191,6 +3187,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
3191 return work_done; 3187 return work_done;
3192} 3188}
3193 3189
3190static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
3191{
3192 struct rtl8169_private *tp = netdev_priv(dev);
3193
3194 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
3195 return;
3196
3197 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
3198 RTL_W32(RxMissed, 0);
3199}
3200
3194static void rtl8169_down(struct net_device *dev) 3201static void rtl8169_down(struct net_device *dev)
3195{ 3202{
3196 struct rtl8169_private *tp = netdev_priv(dev); 3203 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3208,9 +3215,7 @@ core_down:
3208 3215
3209 rtl8169_asic_down(ioaddr); 3216 rtl8169_asic_down(ioaddr);
3210 3217
3211 /* Update the error counts. */ 3218 rtl8169_rx_missed(dev, ioaddr);
3212 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
3213 RTL_W32(RxMissed, 0);
3214 3219
3215 spin_unlock_irq(&tp->lock); 3220 spin_unlock_irq(&tp->lock);
3216 3221
@@ -3332,8 +3337,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3332 3337
3333 if (netif_running(dev)) { 3338 if (netif_running(dev)) {
3334 spin_lock_irqsave(&tp->lock, flags); 3339 spin_lock_irqsave(&tp->lock, flags);
3335 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3340 rtl8169_rx_missed(dev, ioaddr);
3336 RTL_W32(RxMissed, 0);
3337 spin_unlock_irqrestore(&tp->lock, flags); 3341 spin_unlock_irqrestore(&tp->lock, flags);
3338 } 3342 }
3339 3343
@@ -3358,8 +3362,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3358 3362
3359 rtl8169_asic_down(ioaddr); 3363 rtl8169_asic_down(ioaddr);
3360 3364
3361 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3365 rtl8169_rx_missed(dev, ioaddr);
3362 RTL_W32(RxMissed, 0);
3363 3366
3364 spin_unlock_irq(&tp->lock); 3367 spin_unlock_irq(&tp->lock);
3365 3368
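The new rtl8169_rx_missed() helper gathers the read-and-clear of the RxMissed counter into one place and carries two fixes with it: the register is only touched on MAC versions up to RTL_GIGA_MAC_VER_06 (which is also why the unconditional RTL_W32(RxMissed, 0) disappears from the 8168 and 8101 start paths above), and the value is masked with 0xffffff, i.e. treated as a 24-bit counter, before being accumulated. A call site, sketched under the lock the driver already takes there:

    unsigned long flags;

    spin_lock_irqsave(&tp->lock, flags);
    rtl8169_rx_missed(dev, ioaddr);    /* no-op on MAC versions after VER_06 */
    spin_unlock_irqrestore(&tp->lock, flags);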
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 243db33042a8..6a1375f9cbb8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -371,9 +371,6 @@ static void s2io_vlan_rx_register(struct net_device *dev,
371 flags[i]); 371 flags[i]);
372} 372}
373 373
374/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375static int vlan_strip_flag;
376
377/* Unregister the vlan */ 374/* Unregister the vlan */
378static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 375static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379{ 376{
@@ -2303,7 +2300,7 @@ static int start_nic(struct s2io_nic *nic)
2303 val64 = readq(&bar0->rx_pa_cfg); 2300 val64 = readq(&bar0->rx_pa_cfg);
2304 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 2301 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2305 writeq(val64, &bar0->rx_pa_cfg); 2302 writeq(val64, &bar0->rx_pa_cfg);
2306 vlan_strip_flag = 0; 2303 nic->vlan_strip_flag = 0;
2307 } 2304 }
2308 2305
2309 /* 2306 /*
@@ -3136,7 +3133,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3136 if (skb == NULL) { 3133 if (skb == NULL) {
3137 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3134 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3138 DBG_PRINT(ERR_DBG, "%s: Null skb ", 3135 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3139 __FUNCTION__); 3136 __func__);
3140 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 3137 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3141 return; 3138 return;
3142 } 3139 }
@@ -3496,7 +3493,7 @@ static void s2io_reset(struct s2io_nic * sp)
3496 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; 3493 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3497 3494
3498 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n", 3495 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3499 __FUNCTION__, sp->dev->name); 3496 __func__, sp->dev->name);
3500 3497
3501 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3498 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3502 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3499 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
@@ -3518,7 +3515,7 @@ static void s2io_reset(struct s2io_nic * sp)
3518 } 3515 }
3519 3516
3520 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) { 3517 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3521 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__); 3518 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3522 } 3519 }
3523 3520
3524 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); 3521 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
@@ -3768,7 +3765,7 @@ static void restore_xmsi_data(struct s2io_nic *nic)
3768 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3765 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3769 writeq(val64, &bar0->xmsi_access); 3766 writeq(val64, &bar0->xmsi_access);
3770 if (wait_for_msix_trans(nic, msix_index)) { 3767 if (wait_for_msix_trans(nic, msix_index)) {
3771 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3768 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3772 continue; 3769 continue;
3773 } 3770 }
3774 } 3771 }
@@ -3789,7 +3786,7 @@ static void store_xmsi_data(struct s2io_nic *nic)
3789 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3786 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3790 writeq(val64, &bar0->xmsi_access); 3787 writeq(val64, &bar0->xmsi_access);
3791 if (wait_for_msix_trans(nic, msix_index)) { 3788 if (wait_for_msix_trans(nic, msix_index)) {
3792 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3789 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3793 continue; 3790 continue;
3794 } 3791 }
3795 addr = readq(&bar0->xmsi_address); 3792 addr = readq(&bar0->xmsi_address);
@@ -3812,7 +3809,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3812 GFP_KERNEL); 3809 GFP_KERNEL);
3813 if (!nic->entries) { 3810 if (!nic->entries) {
3814 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3815 __FUNCTION__); 3812 __func__);
3816 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3813 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3817 return -ENOMEM; 3814 return -ENOMEM;
3818 } 3815 }
@@ -3826,7 +3823,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3826 GFP_KERNEL); 3823 GFP_KERNEL);
3827 if (!nic->s2io_entries) { 3824 if (!nic->s2io_entries) {
3828 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3825 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3829 __FUNCTION__); 3826 __func__);
3830 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3827 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3831 kfree(nic->entries); 3828 kfree(nic->entries);
3832 nic->mac_control.stats_info->sw_stat.mem_freed 3829 nic->mac_control.stats_info->sw_stat.mem_freed
@@ -5010,7 +5007,7 @@ static void s2io_set_multicast(struct net_device *dev)
5010 val64 = readq(&bar0->rx_pa_cfg); 5007 val64 = readq(&bar0->rx_pa_cfg);
5011 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 5008 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5012 writeq(val64, &bar0->rx_pa_cfg); 5009 writeq(val64, &bar0->rx_pa_cfg);
5013 vlan_strip_flag = 0; 5010 sp->vlan_strip_flag = 0;
5014 } 5011 }
5015 5012
5016 val64 = readq(&bar0->mac_cfg); 5013 val64 = readq(&bar0->mac_cfg);
@@ -5032,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev)
5032 val64 = readq(&bar0->rx_pa_cfg); 5029 val64 = readq(&bar0->rx_pa_cfg);
5033 val64 |= RX_PA_CFG_STRIP_VLAN_TAG; 5030 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5034 writeq(val64, &bar0->rx_pa_cfg); 5031 writeq(val64, &bar0->rx_pa_cfg);
5035 vlan_strip_flag = 1; 5032 sp->vlan_strip_flag = 1;
5036 } 5033 }
5037 5034
5038 val64 = readq(&bar0->mac_cfg); 5035 val64 = readq(&bar0->mac_cfg);
@@ -6746,7 +6743,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6746 ret = s2io_card_up(sp); 6743 ret = s2io_card_up(sp);
6747 if (ret) { 6744 if (ret) {
6748 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6745 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6749 __FUNCTION__); 6746 __func__);
6750 return ret; 6747 return ret;
6751 } 6748 }
6752 s2io_wake_all_tx_queue(sp); 6749 s2io_wake_all_tx_queue(sp);
@@ -7530,7 +7527,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7530 default: 7527 default:
7531 DBG_PRINT(ERR_DBG, 7528 DBG_PRINT(ERR_DBG,
7532 "%s: Samadhana!!\n", 7529 "%s: Samadhana!!\n",
7533 __FUNCTION__); 7530 __func__);
7534 BUG(); 7531 BUG();
7535 } 7532 }
7536 } 7533 }
@@ -7781,7 +7778,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7781 return -ENOMEM; 7778 return -ENOMEM;
7782 } 7779 }
7783 if ((ret = pci_request_regions(pdev, s2io_driver_name))) { 7780 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7784 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); 7781 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7785 pci_disable_device(pdev); 7782 pci_disable_device(pdev);
7786 return -ENODEV; 7783 return -ENODEV;
7787 } 7784 }
@@ -7998,7 +7995,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7998 if (sp->device_type & XFRAME_II_DEVICE) { 7995 if (sp->device_type & XFRAME_II_DEVICE) {
7999 mode = s2io_verify_pci_mode(sp); 7996 mode = s2io_verify_pci_mode(sp);
8000 if (mode < 0) { 7997 if (mode < 0) {
8001 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__); 7998 DBG_PRINT(ERR_DBG, "%s: ", __func__);
8002 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n"); 7999 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8003 ret = -EBADSLT; 8000 ret = -EBADSLT;
8004 goto set_swap_failed; 8001 goto set_swap_failed;
@@ -8206,6 +8203,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8206 /* Initialize device name */ 8203 /* Initialize device name */
8207 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 8204 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8208 8205
8206 if (vlan_tag_strip)
8207 sp->vlan_strip_flag = 1;
8208 else
8209 sp->vlan_strip_flag = 0;
8210
8209 /* 8211 /*
8210 * Make Link state as off at this point, when the Link change 8212 * Make Link state as off at this point, when the Link change
8211 * interrupt comes the state will be automatically changed to 8213 * interrupt comes the state will be automatically changed to
@@ -8299,7 +8301,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8299 8301
8300 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { 8302 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8301 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", 8303 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8302 __FUNCTION__); 8304 __func__);
8303 return -1; 8305 return -1;
8304 } 8306 }
8305 8307
@@ -8311,7 +8313,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8311 * If vlan stripping is disabled and the frame is VLAN tagged, 8313 * If vlan stripping is disabled and the frame is VLAN tagged,
8312 * shift the offset by the VLAN header size bytes. 8314 * shift the offset by the VLAN header size bytes.
8313 */ 8315 */
8314 if ((!vlan_strip_flag) && 8316 if ((!sp->vlan_strip_flag) &&
8315 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) 8317 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8316 ip_off += HEADER_VLAN_SIZE; 8318 ip_off += HEADER_VLAN_SIZE;
8317 } else { 8319 } else {
@@ -8330,7 +8332,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8330static int check_for_socket_match(struct lro *lro, struct iphdr *ip, 8332static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8331 struct tcphdr *tcp) 8333 struct tcphdr *tcp)
8332{ 8334{
8333 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8335 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8334 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || 8336 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8335 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) 8337 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8336 return -1; 8338 return -1;
@@ -8345,7 +8347,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8345static void initiate_new_session(struct lro *lro, u8 *l2h, 8347static void initiate_new_session(struct lro *lro, u8 *l2h,
8346 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag) 8348 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8347{ 8349{
8348 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8350 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8349 lro->l2h = l2h; 8351 lro->l2h = l2h;
8350 lro->iph = ip; 8352 lro->iph = ip;
8351 lro->tcph = tcp; 8353 lro->tcph = tcp;
@@ -8375,7 +8377,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8375 struct tcphdr *tcp = lro->tcph; 8377 struct tcphdr *tcp = lro->tcph;
8376 __sum16 nchk; 8378 __sum16 nchk;
8377 struct stat_block *statinfo = sp->mac_control.stats_info; 8379 struct stat_block *statinfo = sp->mac_control.stats_info;
8378 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8380 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8379 8381
8380 /* Update L3 header */ 8382 /* Update L3 header */
8381 ip->tot_len = htons(lro->total_len); 8383 ip->tot_len = htons(lro->total_len);
@@ -8403,7 +8405,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8403static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, 8405static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8404 struct tcphdr *tcp, u32 l4_pyld) 8406 struct tcphdr *tcp, u32 l4_pyld)
8405{ 8407{
8406 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8408 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8407 lro->total_len += l4_pyld; 8409 lro->total_len += l4_pyld;
8408 lro->frags_len += l4_pyld; 8410 lro->frags_len += l4_pyld;
8409 lro->tcp_next_seq += l4_pyld; 8411 lro->tcp_next_seq += l4_pyld;
@@ -8427,7 +8429,7 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8427{ 8429{
8428 u8 *ptr; 8430 u8 *ptr;
8429 8431
8430 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8432 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8431 8433
8432 if (!tcp_pyld_len) { 8434 if (!tcp_pyld_len) {
8433 /* Runt frame or a pure ack */ 8435 /* Runt frame or a pure ack */
@@ -8509,7 +8511,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8509 8511
8510 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { 8512 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8511 DBG_PRINT(INFO_DBG, "%s:Out of order. expected " 8513 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8512 "0x%x, actual 0x%x\n", __FUNCTION__, 8514 "0x%x, actual 0x%x\n", __func__,
8513 (*lro)->tcp_next_seq, 8515 (*lro)->tcp_next_seq,
8514 ntohl(tcph->seq)); 8516 ntohl(tcph->seq));
8515 8517
@@ -8549,7 +8551,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8549 8551
8550 if (ret == 0) { /* sessions exceeded */ 8552 if (ret == 0) { /* sessions exceeded */
8551 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", 8553 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8552 __FUNCTION__); 8554 __func__);
8553 *lro = NULL; 8555 *lro = NULL;
8554 return ret; 8556 return ret;
8555 } 8557 }
@@ -8571,7 +8573,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8571 break; 8573 break;
8572 default: 8574 default:
8573 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", 8575 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8574 __FUNCTION__); 8576 __func__);
8575 break; 8577 break;
8576 } 8578 }
8577 8579
@@ -8592,7 +8594,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8592 8594
8593 skb->protocol = eth_type_trans(skb, dev); 8595 skb->protocol = eth_type_trans(skb, dev);
8594 if (sp->vlgrp && vlan_tag 8596 if (sp->vlgrp && vlan_tag
8595 && (vlan_strip_flag)) { 8597 && (sp->vlan_strip_flag)) {
8596 /* Queueing the vlan frame to the upper layer */ 8598 /* Queueing the vlan frame to the upper layer */
8597 if (sp->config.napi) 8599 if (sp->config.napi)
8598 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag); 8600 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 6722a2f7d091..55cb943f23f8 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -962,6 +962,7 @@ struct s2io_nic {
962 int task_flag; 962 int task_flag;
963 unsigned long long start_time; 963 unsigned long long start_time;
964 struct vlan_group *vlgrp; 964 struct vlan_group *vlgrp;
965 int vlan_strip_flag;
965#define MSIX_FLG 0xA5 966#define MSIX_FLG 0xA5
966 int num_entries; 967 int num_entries;
967 struct msix_entry *entries; 968 struct msix_entry *entries;
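The s2io conversion retires a classic multi-adapter bug: vlan_strip_flag was a file-scope static, so every Xframe NIC the driver bound shared a single flag and the most recent write won. The flag now lives in struct s2io_nic (the s2io.h hunk above) and is seeded from the vlan_tag_strip module parameter at probe time. The pattern in miniature:

    /* before (sketch): one flag shared by the whole driver */
    static int vlan_strip_flag;

    /* after (sketch): one flag per device instance */
    struct example_nic {
            int vlan_strip_flag;
    };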
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 0d47d6ffe68a..06ea71c7e34e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -445,10 +445,17 @@ static void efx_fini_channels(struct efx_nic *efx)
445 struct efx_channel *channel; 445 struct efx_channel *channel;
446 struct efx_tx_queue *tx_queue; 446 struct efx_tx_queue *tx_queue;
447 struct efx_rx_queue *rx_queue; 447 struct efx_rx_queue *rx_queue;
448 int rc;
448 449
449 EFX_ASSERT_RESET_SERIALISED(efx); 450 EFX_ASSERT_RESET_SERIALISED(efx);
450 BUG_ON(efx->port_enabled); 451 BUG_ON(efx->port_enabled);
451 452
453 rc = falcon_flush_queues(efx);
454 if (rc)
455 EFX_ERR(efx, "failed to flush queues\n");
456 else
457 EFX_LOG(efx, "successfully flushed all queues\n");
458
452 efx_for_each_channel(channel, efx) { 459 efx_for_each_channel(channel, efx) {
453 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 460 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
454 461
@@ -456,13 +463,6 @@ static void efx_fini_channels(struct efx_nic *efx)
456 efx_fini_rx_queue(rx_queue); 463 efx_fini_rx_queue(rx_queue);
457 efx_for_each_channel_tx_queue(tx_queue, channel) 464 efx_for_each_channel_tx_queue(tx_queue, channel)
458 efx_fini_tx_queue(tx_queue); 465 efx_fini_tx_queue(tx_queue);
459 }
460
461 /* Do the event queues last so that we can handle flush events
462 * for all DMA queues. */
463 efx_for_each_channel(channel, efx) {
464 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
465
466 efx_fini_eventq(channel); 466 efx_fini_eventq(channel);
467 } 467 }
468} 468}
@@ -780,7 +780,7 @@ static int efx_init_io(struct efx_nic *efx)
780 return 0; 780 return 0;
781 781
782 fail4: 782 fail4:
783 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 783 pci_release_region(efx->pci_dev, efx->type->mem_bar);
784 fail3: 784 fail3:
785 efx->membase_phys = 0; 785 efx->membase_phys = 0;
786 fail2: 786 fail2:
@@ -1092,7 +1092,6 @@ static void efx_stop_all(struct efx_nic *efx)
1092 1092
1093 /* Isolate the MAC from the TX and RX engines, so that queue 1093 /* Isolate the MAC from the TX and RX engines, so that queue
1094 * flushes will complete in a timely fashion. */ 1094 * flushes will complete in a timely fashion. */
1095 falcon_deconfigure_mac_wrapper(efx);
1096 falcon_drain_tx_fifo(efx); 1095 falcon_drain_tx_fifo(efx);
1097 1096
1098 /* Stop the kernel transmit interface late, so the watchdog 1097 /* Stop the kernel transmit interface late, so the watchdog
@@ -1750,7 +1749,6 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
1750 .check_hw = efx_port_dummy_op_int, 1749 .check_hw = efx_port_dummy_op_int,
1751 .fini = efx_port_dummy_op_void, 1750 .fini = efx_port_dummy_op_void,
1752 .clear_interrupt = efx_port_dummy_op_void, 1751 .clear_interrupt = efx_port_dummy_op_void,
1753 .reset_xaui = efx_port_dummy_op_void,
1754}; 1752};
1755 1753
1756static struct efx_board efx_dummy_board_info = { 1754static struct efx_board efx_dummy_board_info = {
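Three efx.c changes travel together. First, falcon_flush_queues() (added in falcon.c below) is invoked once up front, so efx_fini_channels() can tear down each channel's RX queues, TX queues and event queue in a single pass; the old second loop that saved event queues for last, in order to catch straggling flush events, is no longer needed because the flush completions have already been consumed. Second, the fail4 error path in efx_init_io() releases the BAR with pci_release_region(), pairing with the pci_request_* call that presumably claimed it, instead of a raw release_mem_region(). Third, efx_stop_all() drops the falcon_deconfigure_mac_wrapper() call and relies on draining the TX FIFO alone. The request/release pairing, sketched:

    /* sketch: release a PCI BAR with the helper matching the request */
    static int example_map_bar(struct pci_dev *pci_dev, int bar)
    {
            int rc = pci_request_region(pci_dev, bar, "sfc");

            if (rc)
                    return rc;
            /* ioremap and use the BAR here */
            pci_release_region(pci_dev, bar);  /* not release_mem_region() */
            return 0;
    }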
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index e0c0b23f94ef..31ed1f49de00 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
108/* Max number of internal errors. After this resets will not be performed */ 108/* Max number of internal errors. After this resets will not be performed */
109#define FALCON_MAX_INT_ERRORS 4 109#define FALCON_MAX_INT_ERRORS 4
110 110
111/* Maximum period that we wait for flush events. If the flush event 111/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
112 * doesn't arrive in this period of time then we check if the queue 112 */
113 * was disabled anyway. */ 113#define FALCON_FLUSH_INTERVAL 10
114#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */ 114#define FALCON_FLUSH_POLL_COUNT 100
115 115
116/************************************************************************** 116/**************************************************************************
117 * 117 *
@@ -452,6 +452,8 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
452 efx_oword_t tx_desc_ptr; 452 efx_oword_t tx_desc_ptr;
453 struct efx_nic *efx = tx_queue->efx; 453 struct efx_nic *efx = tx_queue->efx;
454 454
455 tx_queue->flushed = false;
456
455 /* Pin TX descriptor ring */ 457 /* Pin TX descriptor ring */
456 falcon_init_special_buffer(efx, &tx_queue->txd); 458 falcon_init_special_buffer(efx, &tx_queue->txd);
457 459
@@ -492,60 +494,16 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
492 } 494 }
493} 495}
494 496
495static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) 497static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
496{ 498{
497 struct efx_nic *efx = tx_queue->efx; 499 struct efx_nic *efx = tx_queue->efx;
498 struct efx_channel *channel = &efx->channel[0];
499 efx_oword_t tx_flush_descq; 500 efx_oword_t tx_flush_descq;
500 unsigned int read_ptr, i;
501 501
502 /* Post a flush command */ 502 /* Post a flush command */
503 EFX_POPULATE_OWORD_2(tx_flush_descq, 503 EFX_POPULATE_OWORD_2(tx_flush_descq,
504 TX_FLUSH_DESCQ_CMD, 1, 504 TX_FLUSH_DESCQ_CMD, 1,
505 TX_FLUSH_DESCQ, tx_queue->queue); 505 TX_FLUSH_DESCQ, tx_queue->queue);
506 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 506 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
507 msleep(FALCON_FLUSH_TIMEOUT);
508
509 if (EFX_WORKAROUND_7803(efx))
510 return 0;
511
512 /* Look for a flush completed event */
513 read_ptr = channel->eventq_read_ptr;
514 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
515 efx_qword_t *event = falcon_event(channel, read_ptr);
516 int ev_code, ev_sub_code, ev_queue;
517 if (!falcon_event_present(event))
518 break;
519
520 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
521 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
522 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
523 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
524 (ev_queue == tx_queue->queue)) {
525 EFX_LOG(efx, "tx queue %d flush command succesful\n",
526 tx_queue->queue);
527 return 0;
528 }
529
530 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
531 }
532
533 if (EFX_WORKAROUND_11557(efx)) {
534 efx_oword_t reg;
535 bool enabled;
536
537 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
538 tx_queue->queue);
539 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
540 if (!enabled) {
541 EFX_LOG(efx, "tx queue %d disabled without a "
542 "flush event seen\n", tx_queue->queue);
543 return 0;
544 }
545 }
546
547 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
548 return -ETIMEDOUT;
549} 507}
550 508
551void falcon_fini_tx(struct efx_tx_queue *tx_queue) 509void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -553,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
553 struct efx_nic *efx = tx_queue->efx; 511 struct efx_nic *efx = tx_queue->efx;
554 efx_oword_t tx_desc_ptr; 512 efx_oword_t tx_desc_ptr;
555 513
556 /* Stop the hardware using the queue */ 514 /* The queue should have been flushed */
557 if (falcon_flush_tx_queue(tx_queue)) 515 WARN_ON(!tx_queue->flushed);
558 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
559 516
560 /* Remove TX descriptor ring from card */ 517 /* Remove TX descriptor ring from card */
561 EFX_ZERO_OWORD(tx_desc_ptr); 518 EFX_ZERO_OWORD(tx_desc_ptr);
@@ -643,6 +600,8 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
643 rx_queue->queue, rx_queue->rxd.index, 600 rx_queue->queue, rx_queue->rxd.index,
644 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 601 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
645 602
603 rx_queue->flushed = false;
604
646 /* Pin RX descriptor ring */ 605 /* Pin RX descriptor ring */
647 falcon_init_special_buffer(efx, &rx_queue->rxd); 606 falcon_init_special_buffer(efx, &rx_queue->rxd);
648 607
@@ -663,11 +622,9 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
663 rx_queue->queue); 622 rx_queue->queue);
664} 623}
665 624
666static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 625static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
667{ 626{
668 struct efx_nic *efx = rx_queue->efx; 627 struct efx_nic *efx = rx_queue->efx;
669 struct efx_channel *channel = &efx->channel[0];
670 unsigned int read_ptr, i;
671 efx_oword_t rx_flush_descq; 628 efx_oword_t rx_flush_descq;
672 629
673 /* Post a flush command */ 630 /* Post a flush command */
@@ -675,76 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
675 RX_FLUSH_DESCQ_CMD, 1, 632 RX_FLUSH_DESCQ_CMD, 1,
676 RX_FLUSH_DESCQ, rx_queue->queue); 633 RX_FLUSH_DESCQ, rx_queue->queue);
677 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 634 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
678 msleep(FALCON_FLUSH_TIMEOUT);
679
680 if (EFX_WORKAROUND_7803(efx))
681 return 0;
682
683 /* Look for a flush completed event */
684 read_ptr = channel->eventq_read_ptr;
685 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
686 efx_qword_t *event = falcon_event(channel, read_ptr);
687 int ev_code, ev_sub_code, ev_queue;
688 bool ev_failed;
689 if (!falcon_event_present(event))
690 break;
691
692 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
693 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
694 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
695 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
696
697 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
698 (ev_queue == rx_queue->queue)) {
699 if (ev_failed) {
700 EFX_INFO(efx, "rx queue %d flush command "
701 "failed\n", rx_queue->queue);
702 return -EAGAIN;
703 } else {
704 EFX_LOG(efx, "rx queue %d flush command "
705 "succesful\n", rx_queue->queue);
706 return 0;
707 }
708 }
709
710 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
711 }
712
713 if (EFX_WORKAROUND_11557(efx)) {
714 efx_oword_t reg;
715 bool enabled;
716
717 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
718 rx_queue->queue);
719 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
720 if (!enabled) {
721 EFX_LOG(efx, "rx queue %d disabled without a "
722 "flush event seen\n", rx_queue->queue);
723 return 0;
724 }
725 }
726
727 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
728 return -ETIMEDOUT;
729} 635}
730 636
731void falcon_fini_rx(struct efx_rx_queue *rx_queue) 637void falcon_fini_rx(struct efx_rx_queue *rx_queue)
732{ 638{
733 efx_oword_t rx_desc_ptr; 639 efx_oword_t rx_desc_ptr;
734 struct efx_nic *efx = rx_queue->efx; 640 struct efx_nic *efx = rx_queue->efx;
735 int i, rc;
736 641
737 /* Try and flush the rx queue. This may need to be repeated */ 642 /* The queue should already have been flushed */
738 for (i = 0; i < 5; i++) { 643 WARN_ON(!rx_queue->flushed);
739 rc = falcon_flush_rx_queue(rx_queue);
740 if (rc == -EAGAIN)
741 continue;
742 break;
743 }
744 if (rc) {
745 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
746 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
747 }
748 644
749 /* Remove RX descriptor ring from card */ 645 /* Remove RX descriptor ring from card */
750 EFX_ZERO_OWORD(rx_desc_ptr); 646 EFX_ZERO_OWORD(rx_desc_ptr);
@@ -1007,7 +903,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1007 is_phy_event = true; 903 is_phy_event = true;
1008 904
1009 if ((falcon_rev(efx) >= FALCON_REV_B0) && 905 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
1010 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 906 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
1011 is_phy_event = true; 907 is_phy_event = true;
1012 908
1013 if (is_phy_event) { 909 if (is_phy_event) {
@@ -1255,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1255 falcon_generate_event(channel, &test_event); 1151 falcon_generate_event(channel, &test_event);
1256} 1152}
1257 1153
1154/**************************************************************************
1155 *
1156 * Flush handling
1157 *
1158 **************************************************************************/
1159
1160
1161static void falcon_poll_flush_events(struct efx_nic *efx)
1162{
1163 struct efx_channel *channel = &efx->channel[0];
1164 struct efx_tx_queue *tx_queue;
1165 struct efx_rx_queue *rx_queue;
1166 unsigned int read_ptr, i;
1167
1168 read_ptr = channel->eventq_read_ptr;
1169 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
1170 efx_qword_t *event = falcon_event(channel, read_ptr);
1171 int ev_code, ev_sub_code, ev_queue;
1172 bool ev_failed;
1173 if (!falcon_event_present(event))
1174 break;
1175
1176 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
1177 if (ev_code != DRIVER_EV_DECODE)
1178 continue;
1179
1180 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1181 switch (ev_sub_code) {
1182 case TX_DESCQ_FLS_DONE_EV_DECODE:
1183 ev_queue = EFX_QWORD_FIELD(*event,
1184 DRIVER_EV_TX_DESCQ_ID);
1185 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1186 tx_queue = efx->tx_queue + ev_queue;
1187 tx_queue->flushed = true;
1188 }
1189 break;
1190 case RX_DESCQ_FLS_DONE_EV_DECODE:
1191 ev_queue = EFX_QWORD_FIELD(*event,
1192 DRIVER_EV_RX_DESCQ_ID);
1193 ev_failed = EFX_QWORD_FIELD(*event,
1194 DRIVER_EV_RX_FLUSH_FAIL);
1195 if (ev_queue < efx->n_rx_queues) {
1196 rx_queue = efx->rx_queue + ev_queue;
1197
1198 /* retry the rx flush */
1199 if (ev_failed)
1200 falcon_flush_rx_queue(rx_queue);
1201 else
1202 rx_queue->flushed = true;
1203 }
1204 break;
1205 }
1206
1207 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1208 }
1209}
1210
1211/* Handle tx and rx flushes at the same time, since they run in
1212 * parallel in the hardware and there's no reason for us to
1213 * serialise them */
1214int falcon_flush_queues(struct efx_nic *efx)
1215{
1216 struct efx_rx_queue *rx_queue;
1217 struct efx_tx_queue *tx_queue;
1218 int i;
1219 bool outstanding;
1220
1221 /* Issue flush requests */
1222 efx_for_each_tx_queue(tx_queue, efx) {
1223 tx_queue->flushed = false;
1224 falcon_flush_tx_queue(tx_queue);
1225 }
1226 efx_for_each_rx_queue(rx_queue, efx) {
1227 rx_queue->flushed = false;
1228 falcon_flush_rx_queue(rx_queue);
1229 }
1230
1231 /* Poll the evq looking for flush completions. Since we're not pushing
1232 * any more rx or tx descriptors at this point, we're in no danger of
1233 * overflowing the evq whilst we wait */
1234 for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
1235 msleep(FALCON_FLUSH_INTERVAL);
1236 falcon_poll_flush_events(efx);
1237
 1238 /* Check if every queue has been successfully flushed */
1239 outstanding = false;
1240 efx_for_each_tx_queue(tx_queue, efx)
1241 outstanding |= !tx_queue->flushed;
1242 efx_for_each_rx_queue(rx_queue, efx)
1243 outstanding |= !rx_queue->flushed;
1244 if (!outstanding)
1245 return 0;
1246 }
1247
1248 /* Mark the queues as all flushed. We're going to return failure
1249 * leading to a reset, or fake up success anyway. "flushed" now
1250 * indicates that we tried to flush. */
1251 efx_for_each_tx_queue(tx_queue, efx) {
1252 if (!tx_queue->flushed)
1253 EFX_ERR(efx, "tx queue %d flush command timed out\n",
1254 tx_queue->queue);
1255 tx_queue->flushed = true;
1256 }
1257 efx_for_each_rx_queue(rx_queue, efx) {
1258 if (!rx_queue->flushed)
1259 EFX_ERR(efx, "rx queue %d flush command timed out\n",
1260 rx_queue->queue);
1261 rx_queue->flushed = true;
1262 }
1263
1264 if (EFX_WORKAROUND_7803(efx))
1265 return 0;
1266
1267 return -ETIMEDOUT;
1268}
1258 1269
1259/************************************************************************** 1270/**************************************************************************
1260 * 1271 *
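The flush rework above replaces the old per-queue wait-and-scan with one batched pass: post every TX and RX flush request, then poll channel 0's event queue up to FALCON_FLUSH_POLL_COUNT times at FALCON_FLUSH_INTERVAL ms intervals, about one second in total, retrying any RX flush the hardware reports as failed. Because no new descriptors are issued while waiting, the event queue cannot overflow during the poll. The bounded-poll skeleton it follows (all_queues_flushed() is a hypothetical predicate standing in for the two efx_for_each_*_queue scans):

    int i;

    for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
            msleep(FALCON_FLUSH_INTERVAL);
            falcon_poll_flush_events(efx);
            if (all_queues_flushed(efx))    /* hypothetical predicate */
                    return 0;
    }
    return -ETIMEDOUT;    /* the real code first marks every queue flushed */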
@@ -1363,10 +1374,11 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1363 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1374 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1364 } 1375 }
1365 1376
1366 /* Disable DMA bus mastering on both devices */ 1377 /* Disable both devices */
1367 pci_disable_device(efx->pci_dev); 1378 pci_disable_device(efx->pci_dev);
1368 if (FALCON_IS_DUAL_FUNC(efx)) 1379 if (FALCON_IS_DUAL_FUNC(efx))
1369 pci_disable_device(nic_data->pci_dev2); 1380 pci_disable_device(nic_data->pci_dev2);
1381 falcon_disable_interrupts(efx);
1370 1382
1371 if (++n_int_errors < FALCON_MAX_INT_ERRORS) { 1383 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1372 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1384 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1593,7 +1605,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1593 ************************************************************************** 1605 **************************************************************************
1594 */ 1606 */
1595 1607
1596#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) 1608#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
1597 1609
1598/* Wait for SPI command completion */ 1610/* Wait for SPI command completion */
1599static int falcon_spi_wait(struct efx_nic *efx) 1611static int falcon_spi_wait(struct efx_nic *efx)
@@ -1942,8 +1954,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1942 1954
1943 /* Wait for transfer to complete */ 1955 /* Wait for transfer to complete */
1944 for (i = 0; i < 400; i++) { 1956 for (i = 0; i < 400; i++) {
1945 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) 1957 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
1958 rmb(); /* Ensure the stats are valid. */
1946 return 0; 1959 return 0;
1960 }
1947 udelay(10); 1961 udelay(10);
1948 } 1962 }
1949 1963
@@ -2758,6 +2772,8 @@ int falcon_probe_nic(struct efx_nic *efx)
2758 2772
2759 /* Allocate storage for hardware specific data */ 2773 /* Allocate storage for hardware specific data */
2760 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2774 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2775 if (!nic_data)
2776 return -ENOMEM;
2761 efx->nic_data = nic_data; 2777 efx->nic_data = nic_data;
2762 2778
2763 /* Determine number of ports etc. */ 2779 /* Determine number of ports etc. */
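The kzalloc() hunk is a plain robustness fix: the allocation's result was previously stored into efx->nic_data and later dereferenced without a NULL check, so an allocation failure during probe now fails cleanly with -ENOMEM instead.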
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 30d61e48ac60..be025ba7a6c6 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -86,6 +86,7 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
86extern int falcon_probe_nic(struct efx_nic *efx); 86extern int falcon_probe_nic(struct efx_nic *efx);
87extern int falcon_probe_resources(struct efx_nic *efx); 87extern int falcon_probe_resources(struct efx_nic *efx);
88extern int falcon_init_nic(struct efx_nic *efx); 88extern int falcon_init_nic(struct efx_nic *efx);
89extern int falcon_flush_queues(struct efx_nic *efx);
89extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 90extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
90extern void falcon_remove_resources(struct efx_nic *efx); 91extern void falcon_remove_resources(struct efx_nic *efx);
91extern void falcon_remove_nic(struct efx_nic *efx); 92extern void falcon_remove_nic(struct efx_nic *efx);
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index e319fd64d07c..5d584b0dbb51 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -117,7 +117,6 @@
117#define SF_PRST_WIDTH 1 117#define SF_PRST_WIDTH 1
118#define EE_PRST_LBN 8 118#define EE_PRST_LBN 8
119#define EE_PRST_WIDTH 1 119#define EE_PRST_WIDTH 1
120/* See pic_mode_t for decoding of this field */
121/* These bit definitions are extrapolated from the list of numerical 120/* These bit definitions are extrapolated from the list of numerical
122 * values for STRAP_PINS. 121 * values for STRAP_PINS.
123 */ 122 */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index 6670cdfc41ab..c16da3149fa9 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -13,7 +13,6 @@
13 13
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include "net_driver.h"
17 16
18/************************************************************************** 17/**************************************************************************
19 * 18 *
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 0d9f68ff71e7..d4012314dd01 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -78,79 +78,7 @@ static void falcon_setup_xaui(struct efx_nic *efx)
78 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 78 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
79} 79}
80 80
81static void falcon_hold_xaui_in_rst(struct efx_nic *efx) 81int falcon_reset_xaui(struct efx_nic *efx)
82{
83 efx_oword_t reg;
84
85 EFX_ZERO_OWORD(reg);
86 EFX_SET_OWORD_FIELD(reg, XX_PWRDNA_EN, 1);
87 EFX_SET_OWORD_FIELD(reg, XX_PWRDNB_EN, 1);
88 EFX_SET_OWORD_FIELD(reg, XX_PWRDNC_EN, 1);
89 EFX_SET_OWORD_FIELD(reg, XX_PWRDND_EN, 1);
90 EFX_SET_OWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
91 EFX_SET_OWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
92 EFX_SET_OWORD_FIELD(reg, XX_RESETA_EN, 1);
93 EFX_SET_OWORD_FIELD(reg, XX_RESETB_EN, 1);
94 EFX_SET_OWORD_FIELD(reg, XX_RESETC_EN, 1);
95 EFX_SET_OWORD_FIELD(reg, XX_RESETD_EN, 1);
96 EFX_SET_OWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
97 EFX_SET_OWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
98 falcon_write(efx, &reg, XX_PWR_RST_REG);
99 udelay(10);
100}
101
102static int _falcon_reset_xaui_a(struct efx_nic *efx)
103{
104 efx_oword_t reg;
105
106 falcon_hold_xaui_in_rst(efx);
107 falcon_read(efx, &reg, XX_PWR_RST_REG);
108
109 /* Follow the RAMBUS XAUI data reset sequencing
110 * Channels A and B first: power down, reset PLL, reset, clear
111 */
112 EFX_SET_OWORD_FIELD(reg, XX_PWRDNA_EN, 0);
113 EFX_SET_OWORD_FIELD(reg, XX_PWRDNB_EN, 0);
114 falcon_write(efx, &reg, XX_PWR_RST_REG);
115 udelay(10);
116
117 EFX_SET_OWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
118 falcon_write(efx, &reg, XX_PWR_RST_REG);
119 udelay(10);
120
121 EFX_SET_OWORD_FIELD(reg, XX_RESETA_EN, 0);
122 EFX_SET_OWORD_FIELD(reg, XX_RESETB_EN, 0);
123 falcon_write(efx, &reg, XX_PWR_RST_REG);
124 udelay(10);
125
126 /* Channels C and D: power down, reset PLL, reset, clear */
127 EFX_SET_OWORD_FIELD(reg, XX_PWRDNC_EN, 0);
128 EFX_SET_OWORD_FIELD(reg, XX_PWRDND_EN, 0);
129 falcon_write(efx, &reg, XX_PWR_RST_REG);
130 udelay(10);
131
132 EFX_SET_OWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
133 falcon_write(efx, &reg, XX_PWR_RST_REG);
134 udelay(10);
135
136 EFX_SET_OWORD_FIELD(reg, XX_RESETC_EN, 0);
137 EFX_SET_OWORD_FIELD(reg, XX_RESETD_EN, 0);
138 falcon_write(efx, &reg, XX_PWR_RST_REG);
139 udelay(10);
140
141 /* Setup XAUI */
142 falcon_setup_xaui(efx);
143 udelay(10);
144
145 /* Take XGXS out of reset */
146 EFX_ZERO_OWORD(reg);
147 falcon_write(efx, &reg, XX_PWR_RST_REG);
148 udelay(10);
149
150 return 0;
151}
152
153static int _falcon_reset_xaui_b(struct efx_nic *efx)
154{ 82{
155 efx_oword_t reg; 83 efx_oword_t reg;
156 int count; 84 int count;
@@ -171,20 +99,6 @@ static int _falcon_reset_xaui_b(struct efx_nic *efx)
171 return -ETIMEDOUT; 99 return -ETIMEDOUT;
172} 100}
173 101
174int falcon_reset_xaui(struct efx_nic *efx)
175{
176 int rc;
177
178 if (EFX_WORKAROUND_9388(efx)) {
179 falcon_hold_xaui_in_rst(efx);
180 efx->phy_op->reset_xaui(efx);
181 rc = _falcon_reset_xaui_a(efx);
182 } else {
183 rc = _falcon_reset_xaui_b(efx);
184 }
185 return rc;
186}
187
188static bool falcon_xgmii_status(struct efx_nic *efx) 102static bool falcon_xgmii_status(struct efx_nic *efx)
189{ 103{
190 efx_oword_t reg; 104 efx_oword_t reg;
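The large falcon_xmac.c removal is the payoff of retiring workaround 9388: falcon_reset_xaui() formerly chose between a software-sequenced reset (_falcon_reset_xaui_a, with falcon_hold_xaui_in_rst() and a per-PHY reset_xaui hook) and a register-driven one (_falcon_reset_xaui_b). With the workaround gone, the _b variant is simply renamed falcon_reset_xaui(), and everything that existed only for the _a path, including the reset_xaui member of efx_phy_operations and its tenxpress implementation below, goes with it.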
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 567df00090fb..cdb11fad6050 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -160,6 +160,7 @@ struct efx_tx_buffer {
160 * @channel: The associated channel 160 * @channel: The associated channel
161 * @buffer: The software buffer ring 161 * @buffer: The software buffer ring
162 * @txd: The hardware descriptor ring 162 * @txd: The hardware descriptor ring
163 * @flushed: Used when handling queue flushing
163 * @read_count: Current read pointer. 164 * @read_count: Current read pointer.
164 * This is the number of buffers that have been removed from both rings. 165 * This is the number of buffers that have been removed from both rings.
165 * @stopped: Stopped count. 166 * @stopped: Stopped count.
@@ -192,6 +193,7 @@ struct efx_tx_queue {
192 struct efx_nic *nic; 193 struct efx_nic *nic;
193 struct efx_tx_buffer *buffer; 194 struct efx_tx_buffer *buffer;
194 struct efx_special_buffer txd; 195 struct efx_special_buffer txd;
196 bool flushed;
195 197
196 /* Members used mainly on the completion path */ 198 /* Members used mainly on the completion path */
197 unsigned int read_count ____cacheline_aligned_in_smp; 199 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -260,6 +262,7 @@ struct efx_rx_buffer {
260 * the remaining space in the allocation. 262 * the remaining space in the allocation.
261 * @buf_dma_addr: Page's DMA address. 263 * @buf_dma_addr: Page's DMA address.
262 * @buf_data: Page's host address. 264 * @buf_data: Page's host address.
 265 * @flushed: Used when handling queue flushing
263 */ 266 */
264struct efx_rx_queue { 267struct efx_rx_queue {
265 struct efx_nic *efx; 268 struct efx_nic *efx;
@@ -285,6 +288,7 @@ struct efx_rx_queue {
285 struct page *buf_page; 288 struct page *buf_page;
286 dma_addr_t buf_dma_addr; 289 dma_addr_t buf_dma_addr;
287 char *buf_data; 290 char *buf_data;
291 bool flushed;
288}; 292};
289 293
290/** 294/**
@@ -470,7 +474,7 @@ enum nic_state {
470 * This is the equivalent of NET_IP_ALIGN [which controls the alignment 474 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
471 * of the skb->head for hardware DMA]. 475 * of the skb->head for hardware DMA].
472 */ 476 */
473#if defined(__i386__) || defined(__x86_64__) 477#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
474#define EFX_PAGE_IP_ALIGN 0 478#define EFX_PAGE_IP_ALIGN 0
475#else 479#else
476#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN 480#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
@@ -503,7 +507,6 @@ enum efx_fc_type {
503 * @clear_interrupt: Clear down interrupt 507 * @clear_interrupt: Clear down interrupt
504 * @blink: Blink LEDs 508 * @blink: Blink LEDs
505 * @check_hw: Check hardware 509 * @check_hw: Check hardware
506 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
507 * @mmds: MMD presence mask 510 * @mmds: MMD presence mask
508 * @loopbacks: Supported loopback modes mask 511 * @loopbacks: Supported loopback modes mask
509 */ 512 */
@@ -513,7 +516,6 @@ struct efx_phy_operations {
513 void (*reconfigure) (struct efx_nic *efx); 516 void (*reconfigure) (struct efx_nic *efx);
514 void (*clear_interrupt) (struct efx_nic *efx); 517 void (*clear_interrupt) (struct efx_nic *efx);
515 int (*check_hw) (struct efx_nic *efx); 518 int (*check_hw) (struct efx_nic *efx);
516 void (*reset_xaui) (struct efx_nic *efx);
517 int (*test) (struct efx_nic *efx); 519 int (*test) (struct efx_nic *efx);
518 int mmds; 520 int mmds;
519 unsigned loopbacks; 521 unsigned loopbacks;
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index b7005da55d5e..fe4e3fd22330 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -129,18 +129,6 @@ static int sfe4001_poweron(struct efx_nic *efx)
129 unsigned int i, j; 129 unsigned int i, j;
130 int rc; 130 int rc;
131 u8 out; 131 u8 out;
132 efx_oword_t reg;
133
134 /* Ensure that XGXS and XAUI SerDes are held in reset */
135 EFX_POPULATE_OWORD_7(reg, XX_PWRDNA_EN, 1,
136 XX_PWRDNB_EN, 1,
137 XX_RSTPLLAB_EN, 1,
138 XX_RESETA_EN, 1,
139 XX_RESETB_EN, 1,
140 XX_RSTXGXSRX_EN, 1,
141 XX_RSTXGXSTX_EN, 1);
142 falcon_write(efx, &reg, XX_PWR_RST_REG);
143 udelay(10);
144 132
145 /* Clear any previous over-temperature alert */ 133 /* Clear any previous over-temperature alert */
146 rc = i2c_smbus_read_byte_data(hwmon_client, RSL); 134 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 8412dbe1e8fb..d507c93d666e 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -146,8 +146,6 @@ static int tenxpress_phy_check(struct efx_nic *efx)
146 return 0; 146 return 0;
147} 147}
148 148
149static void tenxpress_reset_xaui(struct efx_nic *efx);
150
151static int tenxpress_init(struct efx_nic *efx) 149static int tenxpress_init(struct efx_nic *efx)
152{ 150{
153 int rc, reg; 151 int rc, reg;
@@ -216,7 +214,10 @@ static int tenxpress_special_reset(struct efx_nic *efx)
216{ 214{
217 int rc, reg; 215 int rc, reg;
218 216
219 EFX_TRACE(efx, "%s\n", __func__); 217 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
218 * a special software reset can glitch the XGMAC sufficiently for stats
 219 * requests to fail. Since we don't often special_reset, just lock. */
220 spin_lock(&efx->stats_lock);
220 221
221 /* Initiate reset */ 222 /* Initiate reset */
222 reg = mdio_clause45_read(efx, efx->mii.phy_id, 223 reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -225,20 +226,22 @@ static int tenxpress_special_reset(struct efx_nic *efx)
225 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 226 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
226 PMA_PMD_EXT_CTRL_REG, reg); 227 PMA_PMD_EXT_CTRL_REG, reg);
227 228
228 msleep(200); 229 mdelay(200);
229 230
230 /* Wait for the blocks to come out of reset */ 231 /* Wait for the blocks to come out of reset */
231 rc = mdio_clause45_wait_reset_mmds(efx, 232 rc = mdio_clause45_wait_reset_mmds(efx,
232 TENXPRESS_REQUIRED_DEVS); 233 TENXPRESS_REQUIRED_DEVS);
233 if (rc < 0) 234 if (rc < 0)
234 return rc; 235 goto unlock;
235 236
236 /* Try and reconfigure the device */ 237 /* Try and reconfigure the device */
237 rc = tenxpress_init(efx); 238 rc = tenxpress_init(efx);
238 if (rc < 0) 239 if (rc < 0)
239 return rc; 240 goto unlock;
240 241
241 return 0; 242unlock:
243 spin_unlock(&efx->stats_lock);
244 return rc;
242} 245}
243 246
244static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp) 247static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp)
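Two constraints interact in the special-reset hunks. Holding efx->stats_lock across the reset keeps statistics requests from racing with a reset that can glitch the XGMAC, but a spinlock holder must not sleep, so the msleep(200) has to become a busy-waiting mdelay(200), and the error returns are restructured to fall through a common unlock label. The constraint in miniature:

    spin_lock(&efx->stats_lock);
    mdelay(200);          /* fine: busy-waits without scheduling */
    /* msleep(200);          a bug here: may sleep while holding a spinlock */
    spin_unlock(&efx->stats_lock);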
@@ -374,8 +377,7 @@ static int tenxpress_phy_check_hw(struct efx_nic *efx)
374 struct tenxpress_phy_data *phy_data = efx->phy_data; 377 struct tenxpress_phy_data *phy_data = efx->phy_data;
375 bool link_ok; 378 bool link_ok;
376 379
377 link_ok = (phy_data->phy_mode == PHY_MODE_NORMAL && 380 link_ok = tenxpress_link_ok(efx, true);
378 tenxpress_link_ok(efx, true));
379 381
380 if (link_ok != efx->link_up) 382 if (link_ok != efx->link_up)
381 falcon_xmac_sim_phy_event(efx); 383 falcon_xmac_sim_phy_event(efx);
@@ -428,54 +430,6 @@ void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
428 PMA_PMD_LED_OVERR_REG, reg); 430 PMA_PMD_LED_OVERR_REG, reg);
429} 431}
430 432
431static void tenxpress_reset_xaui(struct efx_nic *efx)
432{
433 int phy = efx->mii.phy_id;
434 int clk_ctrl, test_select, soft_rst2;
435
436 /* Real work is done on clock_ctrl; other resets are thought to be
437 * optional, but they make the reset more reliable
438 */
439
440 /* Read */
441 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
442 PCS_CLOCK_CTRL_REG);
443 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
444 PCS_TEST_SELECT_REG);
445 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
446 PCS_SOFT_RST2_REG);
447
448 /* Put in reset */
449 test_select &= ~(1 << CLK312_EN_LBN);
450 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
451 PCS_TEST_SELECT_REG, test_select);
452
453 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
454 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
455 PCS_SOFT_RST2_REG, soft_rst2);
456
457 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
458 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
459 PCS_CLOCK_CTRL_REG, clk_ctrl);
460 udelay(10);
461
462 /* Remove reset */
463 clk_ctrl |= (1 << PLL312_RST_N_LBN);
464 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
465 PCS_CLOCK_CTRL_REG, clk_ctrl);
466 udelay(10);
467
468 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
469 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
470 PCS_SOFT_RST2_REG, soft_rst2);
471 udelay(10);
472
473 test_select |= (1 << CLK312_EN_LBN);
474 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
475 PCS_TEST_SELECT_REG, test_select);
476 udelay(10);
477}
478
479static int tenxpress_phy_test(struct efx_nic *efx) 433static int tenxpress_phy_test(struct efx_nic *efx)
480{ 434{
481 /* BIST is automatically run after a special software reset */ 435 /* BIST is automatically run after a special software reset */
@@ -488,7 +442,6 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
488 .check_hw = tenxpress_phy_check_hw, 442 .check_hw = tenxpress_phy_check_hw,
489 .fini = tenxpress_phy_fini, 443 .fini = tenxpress_phy_fini,
490 .clear_interrupt = tenxpress_phy_clear_interrupt, 444 .clear_interrupt = tenxpress_phy_clear_interrupt,
491 .reset_xaui = tenxpress_reset_xaui,
492 .test = tenxpress_phy_test, 445 .test = tenxpress_phy_test,
493 .mmds = TENXPRESS_REQUIRED_DEVS, 446 .mmds = TENXPRESS_REQUIRED_DEVS,
494 .loopbacks = TENXPRESS_LOOPBACKS, 447 .loopbacks = TENXPRESS_LOOPBACKS,
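
The special-reset rework above is the classic single-exit locking idiom: take the lock, turn every early "return rc" between lock and unlock into a "goto unlock", and release on one path. It also swaps msleep() for mdelay(), since sleeping is not allowed while a spinlock is held. A minimal userspace sketch of the idiom (a pthread mutex stands in for efx->stats_lock; the helper names are illustrative, not the driver's API):

/* Single-exit lock/goto-unlock idiom, modelled in userspace. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static int initiate_reset(void)  { return 0; }  /* stand-ins for the  */
static int wait_reset_done(void) { return 0; }  /* MDIO helpers used  */
static int reconfigure(void)     { return 0; }  /* by the driver      */

static int special_reset(void)
{
	int rc;

	pthread_mutex_lock(&stats_lock);	/* block stats requests */

	rc = initiate_reset();
	if (rc < 0)
		goto unlock;

	rc = wait_reset_done();
	if (rc < 0)
		goto unlock;

	rc = reconfigure();
unlock:
	pthread_mutex_unlock(&stats_lock);	/* always released */
	return rc;
}

int main(void)
{
	printf("special_reset -> %d\n", special_reset());
	return 0;
}
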
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index cdee7c200d63..da3e9ff339f5 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -516,7 +516,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
516/* Number of bytes inserted at the start of a TSO header buffer, 516/* Number of bytes inserted at the start of a TSO header buffer,
517 * similar to NET_IP_ALIGN. 517 * similar to NET_IP_ALIGN.
518 */ 518 */
519#if defined(__i386__) || defined(__x86_64__) 519#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
520#define TSOH_OFFSET 0 520#define TSOH_OFFSET 0
521#else 521#else
522#define TSOH_OFFSET NET_IP_ALIGN 522#define TSOH_OFFSET NET_IP_ALIGN
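
The tx.c hunk replaces a hard-coded architecture list with the generic CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS symbol: any platform that declares cheap unaligned loads gets a zero offset, everything else keeps NET_IP_ALIGN bytes of padding so the IP header stays 4-byte aligned behind the 14-byte Ethernet header. A stand-alone model of the selection (not the driver source; NET_IP_ALIGN is hard-coded here to its usual value of 2):

#include <stdio.h>
#include <string.h>

#define NET_IP_ALIGN 2
/* #define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

int main(void)
{
	unsigned char buf[64];
	const unsigned char hdr[14 + 20] = { 0 };	/* Ethernet + IP */

	memcpy(buf + TSOH_OFFSET, hdr, sizeof(hdr));
	/* 2-byte pad + 14-byte Ethernet header puts the IP header at
	 * offset 16, which is 4-byte aligned. */
	printf("IP header at offset %d\n", TSOH_OFFSET + 14);
	return 0;
}
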
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index a824f5998c04..fa7b49d69288 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -24,8 +24,6 @@
24#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 24#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
25/* TX pkt parser problem with <= 16 byte TXes */ 25/* TX pkt parser problem with <= 16 byte TXes */
26#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 26#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
27/* XGXS and XAUI reset sequencing in SW */
28#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
29/* Low rate CRC errors require XAUI reset */ 27/* Low rate CRC errors require XAUI reset */
30#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS 28#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
31/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 29/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index f6edecc2e588..276151df3a70 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -165,7 +165,6 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
165 .check_hw = xfp_phy_check_hw, 165 .check_hw = xfp_phy_check_hw,
166 .fini = xfp_phy_fini, 166 .fini = xfp_phy_fini,
167 .clear_interrupt = xfp_phy_clear_interrupt, 167 .clear_interrupt = xfp_phy_clear_interrupt,
168 .reset_xaui = efx_port_dummy_op_void,
169 .mmds = XFP_REQUIRED_DEVS, 168 .mmds = XFP_REQUIRED_DEVS,
170 .loopbacks = XFP_LOOPBACKS, 169 .loopbacks = XFP_LOOPBACKS,
171}; 170};
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index ea85de918233..79e665e0853d 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -44,17 +44,10 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
44 int set, int local); 44 int set, int local);
45static int port_to_mib(struct s_smc *smc, int p); 45static int port_to_mib(struct s_smc *smc, int p);
46 46
47#define MOFFSS(e) ((int)&(((struct fddi_mib *)0)->e)) 47#define MOFFSS(e) offsetof(struct fddi_mib, e)
48#define MOFFSA(e) ((int) (((struct fddi_mib *)0)->e)) 48#define MOFFMS(e) offsetof(struct fddi_mib_m, e)
49 49#define MOFFAS(e) offsetof(struct fddi_mib_a, e)
50#define MOFFMS(e) ((int)&(((struct fddi_mib_m *)0)->e)) 50#define MOFFPS(e) offsetof(struct fddi_mib_p, e)
51#define MOFFMA(e) ((int) (((struct fddi_mib_m *)0)->e))
52
53#define MOFFAS(e) ((int)&(((struct fddi_mib_a *)0)->e))
54#define MOFFAA(e) ((int) (((struct fddi_mib_a *)0)->e))
55
56#define MOFFPS(e) ((int)&(((struct fddi_mib_p *)0)->e))
57#define MOFFPA(e) ((int) (((struct fddi_mib_p *)0)->e))
58 51
59 52
60#define AC_G 0x01 /* Get */ 53#define AC_G 0x01 /* Get */
@@ -87,8 +80,8 @@ static const struct s_p_tab {
87 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } , 80 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } ,
88 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } , 81 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } ,
89 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } , 82 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } ,
90 { SMT_P1010,AC_G, MOFFSA(fddiSMTManufacturerData), "D" } , 83 { SMT_P1010,AC_G, MOFFSS(fddiSMTManufacturerData), "D" } ,
91 { SMT_P1011,AC_GR, MOFFSA(fddiSMTUserData), "D" } , 84 { SMT_P1011,AC_GR, MOFFSS(fddiSMTUserData), "D" } ,
92 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } , 85 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } ,
93 86
94 /* StationConfigGrp */ 87 /* StationConfigGrp */
@@ -103,7 +96,7 @@ static const struct s_p_tab {
103 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } , 96 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } ,
104 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } , 97 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } ,
105 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } , 98 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } ,
106 { SMT_P1020,AC_G, MOFFSA(fddiSMTPORTIndexes), "II" } , 99 { SMT_P1020,AC_G, MOFFSS(fddiSMTPORTIndexes), "II" } ,
107 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } , 100 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } ,
108 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } , 101 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } ,
109 102
@@ -117,8 +110,8 @@ static const struct s_p_tab {
117 110
118 /* MIBOperationGrp */ 111 /* MIBOperationGrp */
119 { SMT_P1032,AC_GROUP } , 112 { SMT_P1032,AC_GROUP } ,
120 { SMT_P1033,AC_G, MOFFSA(fddiSMTTimeStamp),"P" } , 113 { SMT_P1033,AC_G, MOFFSS(fddiSMTTimeStamp),"P" } ,
121 { SMT_P1034,AC_G, MOFFSA(fddiSMTTransitionTimeStamp),"P" } , 114 { SMT_P1034,AC_G, MOFFSS(fddiSMTTransitionTimeStamp),"P" } ,
122 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */ 115 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
123 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } , 116 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } ,
124 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } , 117 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } ,
@@ -129,7 +122,7 @@ static const struct s_p_tab {
129 * PRIVATE EXTENSIONS 122 * PRIVATE EXTENSIONS
130 * only accessible locally to get/set passwd 123 * only accessible locally to get/set passwd
131 */ 124 */
132 { SMT_P10F0,AC_GR, MOFFSA(fddiPRPMFPasswd), "8" } , 125 { SMT_P10F0,AC_GR, MOFFSS(fddiPRPMFPasswd), "8" } ,
133 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } , 126 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } ,
134#ifdef ESS 127#ifdef ESS
135 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } , 128 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } ,
@@ -245,7 +238,7 @@ static const struct s_p_tab {
245 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } , 238 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } ,
246 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } , 239 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } ,
247 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } , 240 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } ,
248 { SMT_P4011,AC_GR, MOFFPA(fddiPORTRequestedPaths), "l4" } , 241 { SMT_P4011,AC_GR, MOFFPS(fddiPORTRequestedPaths), "l4" } ,
249 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } , 242 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } ,
250 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } , 243 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } ,
251 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } , 244 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
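
The pmf.c cleanup collapses eight hand-rolled offset macros into offsetof(). The old scalar forms took the address of a member through a null pointer and cast it to int; the old array forms (MOFFSA and friends) dropped the & and relied on array-to-pointer decay. Both yield the same offset, which is why a single offsetof() now serves and the MOFFSA call sites above simply become MOFFSS. A small demonstration with an illustrative struct (the real fddi_mib layout lives in the SMT headers); the old forms are formally undefined behaviour, offsetof() is well-defined C:

#include <stddef.h>
#include <stdio.h>

struct demo_mib {
	int  scalar;
	char blob[8];		/* array member, like fddiSMTUserData */
};

/* Old hand-rolled forms ((long) used here to keep 64-bit builds
 * warning-free; the originals cast to int): */
#define OLD_MOFFSS(e) ((long)&(((struct demo_mib *)0)->e))
#define OLD_MOFFSA(e) ((long) (((struct demo_mib *)0)->e))	/* decay */
#define NEW(e)        offsetof(struct demo_mib, e)

int main(void)
{
	printf("scalar: old=%ld new=%zu\n", OLD_MOFFSS(scalar), NEW(scalar));
	printf("blob:   old=%ld new=%zu\n", OLD_MOFFSA(blob),   NEW(blob));
	return 0;
}
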
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index c5871624f972..02cc064c2c8b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -183,7 +183,7 @@ static void smc911x_reset(struct net_device *dev)
183 unsigned int reg, timeout=0, resets=1; 183 unsigned int reg, timeout=0, resets=1;
184 unsigned long flags; 184 unsigned long flags;
185 185
186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
187 187
188 /* Take out of PM setting first */ 188 /* Take out of PM setting first */
189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { 189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -272,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
272 unsigned mask, cfg, cr; 272 unsigned mask, cfg, cr;
273 unsigned long flags; 273 unsigned long flags;
274 274
275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
276 276
277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 277 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
278 278
@@ -329,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
329 unsigned cr; 329 unsigned cr;
330 unsigned long flags; 330 unsigned long flags;
331 331
332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); 332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
333 333
334 /* Disable IRQ's */ 334 /* Disable IRQ's */
335 SMC_SET_INT_EN(lp, 0); 335 SMC_SET_INT_EN(lp, 0);
@@ -348,7 +348,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
348 struct smc911x_local *lp = netdev_priv(dev); 348 struct smc911x_local *lp = netdev_priv(dev);
349 unsigned int fifo_count, timeout, reg; 349 unsigned int fifo_count, timeout, reg;
350 350
351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); 351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; 352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
353 if (fifo_count <= 4) { 353 if (fifo_count <= 4) {
354 /* Manually dump the packet data */ 354 /* Manually dump the packet data */
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
382 unsigned char *data; 382 unsigned char *data;
383 383
384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
385 dev->name, __FUNCTION__); 385 dev->name, __func__);
386 status = SMC_GET_RX_STS_FIFO(lp); 386 status = SMC_GET_RX_STS_FIFO(lp);
387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
@@ -460,7 +460,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
460 unsigned char *buf; 460 unsigned char *buf;
461 unsigned long flags; 461 unsigned long flags;
462 462
463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__); 463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
464 BUG_ON(lp->pending_tx_skb == NULL); 464 BUG_ON(lp->pending_tx_skb == NULL);
465 465
466 skb = lp->pending_tx_skb; 466 skb = lp->pending_tx_skb;
@@ -524,7 +524,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
524 unsigned long flags; 524 unsigned long flags;
525 525
526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
527 dev->name, __FUNCTION__); 527 dev->name, __func__);
528 528
529 BUG_ON(lp->pending_tx_skb != NULL); 529 BUG_ON(lp->pending_tx_skb != NULL);
530 530
@@ -596,7 +596,7 @@ static void smc911x_tx(struct net_device *dev)
596 unsigned int tx_status; 596 unsigned int tx_status;
597 597
598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
599 dev->name, __FUNCTION__); 599 dev->name, __func__);
600 600
601 /* Collect the TX status */ 601 /* Collect the TX status */
602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { 602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
@@ -647,7 +647,7 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
647 SMC_GET_MII(lp, phyreg, phyaddr, phydata); 647 SMC_GET_MII(lp, phyreg, phyaddr, phydata);
648 648
649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", 649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
650 __FUNCTION__, phyaddr, phyreg, phydata); 650 __func__, phyaddr, phyreg, phydata);
651 return phydata; 651 return phydata;
652} 652}
653 653
@@ -661,7 +661,7 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
661 struct smc911x_local *lp = netdev_priv(dev); 661 struct smc911x_local *lp = netdev_priv(dev);
662 662
663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
664 __FUNCTION__, phyaddr, phyreg, phydata); 664 __func__, phyaddr, phyreg, phydata);
665 665
666 SMC_SET_MII(lp, phyreg, phyaddr, phydata); 666 SMC_SET_MII(lp, phyreg, phyaddr, phydata);
667} 667}
@@ -676,7 +676,7 @@ static void smc911x_phy_detect(struct net_device *dev)
676 int phyaddr; 676 int phyaddr;
677 unsigned int cfg, id1, id2; 677 unsigned int cfg, id1, id2;
678 678
679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
680 680
681 lp->phy_type = 0; 681 lp->phy_type = 0;
682 682
@@ -746,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
746 int phyaddr = lp->mii.phy_id; 746 int phyaddr = lp->mii.phy_id;
747 int bmcr; 747 int bmcr;
748 748
749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
750 750
751 /* Enter Link Disable state */ 751 /* Enter Link Disable state */
752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); 752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -793,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
793 unsigned long flags; 793 unsigned long flags;
794 unsigned int reg; 794 unsigned int reg;
795 795
796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
797 797
798 spin_lock_irqsave(&lp->lock, flags); 798 spin_lock_irqsave(&lp->lock, flags);
799 reg = SMC_GET_PMT_CTRL(lp); 799 reg = SMC_GET_PMT_CTRL(lp);
@@ -852,7 +852,7 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
852 int phyaddr = lp->mii.phy_id; 852 int phyaddr = lp->mii.phy_id;
853 unsigned int bmcr, cr; 853 unsigned int bmcr, cr;
854 854
855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
856 856
857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { 857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
858 /* duplex state has changed */ 858 /* duplex state has changed */
@@ -892,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
892 int status; 892 int status;
893 unsigned long flags; 893 unsigned long flags;
894 894
895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
896 896
897 /* 897 /*
898 * We should not be called if phy_type is zero. 898 * We should not be called if phy_type is zero.
@@ -985,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
985 int phyaddr = lp->mii.phy_id; 985 int phyaddr = lp->mii.phy_id;
986 int status; 986 int status;
987 987
988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
989 989
990 if (lp->phy_type == 0) 990 if (lp->phy_type == 0)
991 return; 991 return;
@@ -1013,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1013 unsigned int rx_overrun=0, cr, pkts; 1013 unsigned int rx_overrun=0, cr, pkts;
1014 unsigned long flags; 1014 unsigned long flags;
1015 1015
1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1017 1017
1018 spin_lock_irqsave(&lp->lock, flags); 1018 spin_lock_irqsave(&lp->lock, flags);
1019 1019
@@ -1174,8 +1174,6 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1174 1174
1175 spin_unlock_irqrestore(&lp->lock, flags); 1175 spin_unlock_irqrestore(&lp->lock, flags);
1176 1176
1177 DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
1178
1179 return IRQ_HANDLED; 1177 return IRQ_HANDLED;
1180} 1178}
1181 1179
@@ -1188,7 +1186,7 @@ smc911x_tx_dma_irq(int dma, void *data)
1188 struct sk_buff *skb = lp->current_tx_skb; 1186 struct sk_buff *skb = lp->current_tx_skb;
1189 unsigned long flags; 1187 unsigned long flags;
1190 1188
1191 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1189 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1192 1190
1193 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); 1191 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
1194 /* Clear the DMA interrupt sources */ 1192 /* Clear the DMA interrupt sources */
@@ -1224,7 +1222,7 @@ smc911x_rx_dma_irq(int dma, void *data)
1224 unsigned long flags; 1222 unsigned long flags;
1225 unsigned int pkts; 1223 unsigned int pkts;
1226 1224
1227 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1225 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1228 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); 1226 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
1229 /* Clear the DMA interrupt sources */ 1227 /* Clear the DMA interrupt sources */
1230 SMC_DMA_ACK_IRQ(dev, dma); 1228 SMC_DMA_ACK_IRQ(dev, dma);
@@ -1272,7 +1270,7 @@ static void smc911x_timeout(struct net_device *dev)
1272 int status, mask; 1270 int status, mask;
1273 unsigned long flags; 1271 unsigned long flags;
1274 1272
1275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1273 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1276 1274
1277 spin_lock_irqsave(&lp->lock, flags); 1275 spin_lock_irqsave(&lp->lock, flags);
1278 status = SMC_GET_INT(lp); 1276 status = SMC_GET_INT(lp);
@@ -1310,7 +1308,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1310 unsigned int mcr, update_multicast = 0; 1308 unsigned int mcr, update_multicast = 0;
1311 unsigned long flags; 1309 unsigned long flags;
1312 1310
1313 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1311 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1314 1312
1315 spin_lock_irqsave(&lp->lock, flags); 1313 spin_lock_irqsave(&lp->lock, flags);
1316 SMC_GET_MAC_CR(lp, mcr); 1314 SMC_GET_MAC_CR(lp, mcr);
@@ -1412,7 +1410,7 @@ smc911x_open(struct net_device *dev)
1412{ 1410{
1413 struct smc911x_local *lp = netdev_priv(dev); 1411 struct smc911x_local *lp = netdev_priv(dev);
1414 1412
1415 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1413 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1416 1414
1417 /* 1415 /*
1418 * Check that the address is valid. If it's not, refuse 1416 * Check that the address is valid. If it's not, refuse
@@ -1420,7 +1418,7 @@ smc911x_open(struct net_device *dev)
1420 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1418 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1421 */ 1419 */
1422 if (!is_valid_ether_addr(dev->dev_addr)) { 1420 if (!is_valid_ether_addr(dev->dev_addr)) {
1423 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1421 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1424 return -EINVAL; 1422 return -EINVAL;
1425 } 1423 }
1426 1424
@@ -1449,7 +1447,7 @@ static int smc911x_close(struct net_device *dev)
1449{ 1447{
1450 struct smc911x_local *lp = netdev_priv(dev); 1448 struct smc911x_local *lp = netdev_priv(dev);
1451 1449
1452 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1450 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1453 1451
1454 netif_stop_queue(dev); 1452 netif_stop_queue(dev);
1455 netif_carrier_off(dev); 1453 netif_carrier_off(dev);
@@ -1483,7 +1481,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1483 int ret, status; 1481 int ret, status;
1484 unsigned long flags; 1482 unsigned long flags;
1485 1483
1486 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1484 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1487 cmd->maxtxpkt = 1; 1485 cmd->maxtxpkt = 1;
1488 cmd->maxrxpkt = 1; 1486 cmd->maxrxpkt = 1;
1489 1487
@@ -1621,7 +1619,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1621 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { 1619 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
1622 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { 1620 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
1623 PRINTK("%s: %s timeout waiting for EEPROM to respond\n", 1621 PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
1624 dev->name, __FUNCTION__); 1622 dev->name, __func__);
1625 return -EFAULT; 1623 return -EFAULT;
1626 } 1624 }
1627 mdelay(1); 1625 mdelay(1);
@@ -1629,7 +1627,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1629 } 1627 }
1630 if (timeout == 0) { 1628 if (timeout == 0) {
1631 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", 1629 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
1632 dev->name, __FUNCTION__); 1630 dev->name, __func__);
1633 return -ETIMEDOUT; 1631 return -ETIMEDOUT;
1634 } 1632 }
1635 return 0; 1633 return 0;
@@ -1742,7 +1740,7 @@ static int __init smc911x_findirq(struct net_device *dev)
1742 int timeout = 20; 1740 int timeout = 20;
1743 unsigned long cookie; 1741 unsigned long cookie;
1744 1742
1745 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 1743 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
1746 1744
1747 cookie = probe_irq_on(); 1745 cookie = probe_irq_on();
1748 1746
@@ -1808,7 +1806,7 @@ static int __init smc911x_probe(struct net_device *dev)
1808 const char *version_string; 1806 const char *version_string;
1809 unsigned long irq_flags; 1807 unsigned long irq_flags;
1810 1808
1811 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1809 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1812 1810
1813 /* First, see if the endian word is recognized */ 1811 /* First, see if the endian word is recognized */
1814 val = SMC_GET_BYTE_TEST(lp); 1812 val = SMC_GET_BYTE_TEST(lp);
@@ -2058,7 +2056,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2058 unsigned int *addr; 2056 unsigned int *addr;
2059 int ret; 2057 int ret;
2060 2058
2061 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2059 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2062 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2060 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2063 if (!res) { 2061 if (!res) {
2064 ret = -ENODEV; 2062 ret = -ENODEV;
@@ -2129,7 +2127,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2129 struct smc911x_local *lp = netdev_priv(ndev); 2127 struct smc911x_local *lp = netdev_priv(ndev);
2130 struct resource *res; 2128 struct resource *res;
2131 2129
2132 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2130 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2133 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
2134 2132
2135 unregister_netdev(ndev); 2133 unregister_netdev(ndev);
@@ -2159,7 +2157,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
2159 struct net_device *ndev = platform_get_drvdata(dev); 2157 struct net_device *ndev = platform_get_drvdata(dev);
2160 struct smc911x_local *lp = netdev_priv(ndev); 2158 struct smc911x_local *lp = netdev_priv(ndev);
2161 2159
2162 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2160 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2163 if (ndev) { 2161 if (ndev) {
2164 if (netif_running(ndev)) { 2162 if (netif_running(ndev)) {
2165 netif_device_detach(ndev); 2163 netif_device_detach(ndev);
@@ -2177,7 +2175,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2177{ 2175{
2178 struct net_device *ndev = platform_get_drvdata(dev); 2176 struct net_device *ndev = platform_get_drvdata(dev);
2179 2177
2180 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2178 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2181 if (ndev) { 2179 if (ndev) {
2182 struct smc911x_local *lp = netdev_priv(ndev); 2180 struct smc911x_local *lp = netdev_priv(ndev);
2183 2181
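
The smc911x changes above (and the smc91x, tehuti, tsi108 and ucc_geth hunks that follow) are one mechanical substitution: __FUNCTION__ is a GCC extension, __func__ is the C99 predefined identifier that replaced it, and both yield the enclosing function's name, so nothing changes in the generated output:

#include <stdio.h>

static void probe(void)
{
	/* __func__ is not a macro but a predefined identifier;
	 * it prints "probe" here. */
	printf("%s: --> %s\n", "eth0", __func__);
}

int main(void)
{
	probe();
	return 0;
}
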
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 24768c10cadb..ef5ce8845c9d 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -270,7 +270,7 @@ static void smc_reset(struct net_device *dev)
270 unsigned int ctl, cfg; 270 unsigned int ctl, cfg;
271 struct sk_buff *pending_skb; 271 struct sk_buff *pending_skb;
272 272
273 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 273 DBG(2, "%s: %s\n", dev->name, __func__);
274 274
275 /* Disable all interrupts, block TX tasklet */ 275 /* Disable all interrupts, block TX tasklet */
276 spin_lock_irq(&lp->lock); 276 spin_lock_irq(&lp->lock);
@@ -363,7 +363,7 @@ static void smc_enable(struct net_device *dev)
363 void __iomem *ioaddr = lp->base; 363 void __iomem *ioaddr = lp->base;
364 int mask; 364 int mask;
365 365
366 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 366 DBG(2, "%s: %s\n", dev->name, __func__);
367 367
368 /* see the header file for options in TCR/RCR DEFAULT */ 368 /* see the header file for options in TCR/RCR DEFAULT */
369 SMC_SELECT_BANK(lp, 0); 369 SMC_SELECT_BANK(lp, 0);
@@ -397,7 +397,7 @@ static void smc_shutdown(struct net_device *dev)
397 void __iomem *ioaddr = lp->base; 397 void __iomem *ioaddr = lp->base;
398 struct sk_buff *pending_skb; 398 struct sk_buff *pending_skb;
399 399
400 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 400 DBG(2, "%s: %s\n", CARDNAME, __func__);
401 401
402 /* no more interrupts for me */ 402 /* no more interrupts for me */
403 spin_lock_irq(&lp->lock); 403 spin_lock_irq(&lp->lock);
@@ -430,7 +430,7 @@ static inline void smc_rcv(struct net_device *dev)
430 void __iomem *ioaddr = lp->base; 430 void __iomem *ioaddr = lp->base;
431 unsigned int packet_number, status, packet_len; 431 unsigned int packet_number, status, packet_len;
432 432
433 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 433 DBG(3, "%s: %s\n", dev->name, __func__);
434 434
435 packet_number = SMC_GET_RXFIFO(lp); 435 packet_number = SMC_GET_RXFIFO(lp);
436 if (unlikely(packet_number & RXFIFO_REMPTY)) { 436 if (unlikely(packet_number & RXFIFO_REMPTY)) {
@@ -577,7 +577,7 @@ static void smc_hardware_send_pkt(unsigned long data)
577 unsigned int packet_no, len; 577 unsigned int packet_no, len;
578 unsigned char *buf; 578 unsigned char *buf;
579 579
580 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 580 DBG(3, "%s: %s\n", dev->name, __func__);
581 581
582 if (!smc_special_trylock(&lp->lock)) { 582 if (!smc_special_trylock(&lp->lock)) {
583 netif_stop_queue(dev); 583 netif_stop_queue(dev);
@@ -662,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
662 void __iomem *ioaddr = lp->base; 662 void __iomem *ioaddr = lp->base;
663 unsigned int numPages, poll_count, status; 663 unsigned int numPages, poll_count, status;
664 664
665 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 665 DBG(3, "%s: %s\n", dev->name, __func__);
666 666
667 BUG_ON(lp->pending_tx_skb != NULL); 667 BUG_ON(lp->pending_tx_skb != NULL);
668 668
@@ -734,7 +734,7 @@ static void smc_tx(struct net_device *dev)
734 void __iomem *ioaddr = lp->base; 734 void __iomem *ioaddr = lp->base;
735 unsigned int saved_packet, packet_no, tx_status, pkt_len; 735 unsigned int saved_packet, packet_no, tx_status, pkt_len;
736 736
737 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 737 DBG(3, "%s: %s\n", dev->name, __func__);
738 738
739 /* If the TX FIFO is empty then nothing to do */ 739 /* If the TX FIFO is empty then nothing to do */
740 packet_no = SMC_GET_TXFIFO(lp); 740 packet_no = SMC_GET_TXFIFO(lp);
@@ -856,7 +856,7 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
857 857
858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
859 __FUNCTION__, phyaddr, phyreg, phydata); 859 __func__, phyaddr, phyreg, phydata);
860 860
861 SMC_SELECT_BANK(lp, 2); 861 SMC_SELECT_BANK(lp, 2);
862 return phydata; 862 return phydata;
@@ -883,7 +883,7 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
884 884
885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
886 __FUNCTION__, phyaddr, phyreg, phydata); 886 __func__, phyaddr, phyreg, phydata);
887 887
888 SMC_SELECT_BANK(lp, 2); 888 SMC_SELECT_BANK(lp, 2);
889} 889}
@@ -896,7 +896,7 @@ static void smc_phy_detect(struct net_device *dev)
896 struct smc_local *lp = netdev_priv(dev); 896 struct smc_local *lp = netdev_priv(dev);
897 int phyaddr; 897 int phyaddr;
898 898
899 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 899 DBG(2, "%s: %s\n", dev->name, __func__);
900 900
901 lp->phy_type = 0; 901 lp->phy_type = 0;
902 902
@@ -935,7 +935,7 @@ static int smc_phy_fixed(struct net_device *dev)
935 int phyaddr = lp->mii.phy_id; 935 int phyaddr = lp->mii.phy_id;
936 int bmcr, cfg1; 936 int bmcr, cfg1;
937 937
938 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 938 DBG(3, "%s: %s\n", dev->name, __func__);
939 939
940 /* Enter Link Disable state */ 940 /* Enter Link Disable state */
941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); 941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1168,7 +1168,7 @@ static void smc_phy_interrupt(struct net_device *dev)
1168 int phyaddr = lp->mii.phy_id; 1168 int phyaddr = lp->mii.phy_id;
1169 int phy18; 1169 int phy18;
1170 1170
1171 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1171 DBG(2, "%s: %s\n", dev->name, __func__);
1172 1172
1173 if (lp->phy_type == 0) 1173 if (lp->phy_type == 0)
1174 return; 1174 return;
@@ -1236,7 +1236,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1236 int status, mask, timeout, card_stats; 1236 int status, mask, timeout, card_stats;
1237 int saved_pointer; 1237 int saved_pointer;
1238 1238
1239 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 1239 DBG(3, "%s: %s\n", dev->name, __func__);
1240 1240
1241 spin_lock(&lp->lock); 1241 spin_lock(&lp->lock);
1242 1242
@@ -1358,7 +1358,7 @@ static void smc_timeout(struct net_device *dev)
1358 void __iomem *ioaddr = lp->base; 1358 void __iomem *ioaddr = lp->base;
1359 int status, mask, eph_st, meminfo, fifo; 1359 int status, mask, eph_st, meminfo, fifo;
1360 1360
1361 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1361 DBG(2, "%s: %s\n", dev->name, __func__);
1362 1362
1363 spin_lock_irq(&lp->lock); 1363 spin_lock_irq(&lp->lock);
1364 status = SMC_GET_INT(lp); 1364 status = SMC_GET_INT(lp);
@@ -1402,7 +1402,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1402 unsigned char multicast_table[8]; 1402 unsigned char multicast_table[8];
1403 int update_multicast = 0; 1403 int update_multicast = 0;
1404 1404
1405 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1405 DBG(2, "%s: %s\n", dev->name, __func__);
1406 1406
1407 if (dev->flags & IFF_PROMISC) { 1407 if (dev->flags & IFF_PROMISC) {
1408 DBG(2, "%s: RCR_PRMS\n", dev->name); 1408 DBG(2, "%s: RCR_PRMS\n", dev->name);
@@ -1505,7 +1505,7 @@ smc_open(struct net_device *dev)
1505{ 1505{
1506 struct smc_local *lp = netdev_priv(dev); 1506 struct smc_local *lp = netdev_priv(dev);
1507 1507
1508 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1508 DBG(2, "%s: %s\n", dev->name, __func__);
1509 1509
1510 /* 1510 /*
1511 * Check that the address is valid. If it's not, refuse 1511 * Check that the address is valid. If it's not, refuse
@@ -1513,7 +1513,7 @@ smc_open(struct net_device *dev)
1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1514 */ 1514 */
1515 if (!is_valid_ether_addr(dev->dev_addr)) { 1515 if (!is_valid_ether_addr(dev->dev_addr)) {
1516 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1516 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1517 return -EINVAL; 1517 return -EINVAL;
1518 } 1518 }
1519 1519
@@ -1557,7 +1557,7 @@ static int smc_close(struct net_device *dev)
1557{ 1557{
1558 struct smc_local *lp = netdev_priv(dev); 1558 struct smc_local *lp = netdev_priv(dev);
1559 1559
1560 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1560 DBG(2, "%s: %s\n", dev->name, __func__);
1561 1561
1562 netif_stop_queue(dev); 1562 netif_stop_queue(dev);
1563 netif_carrier_off(dev); 1563 netif_carrier_off(dev);
@@ -1700,7 +1700,7 @@ static int __init smc_findirq(struct smc_local *lp)
1700 int timeout = 20; 1700 int timeout = 20;
1701 unsigned long cookie; 1701 unsigned long cookie;
1702 1702
1703 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1703 DBG(2, "%s: %s\n", CARDNAME, __func__);
1704 1704
1705 cookie = probe_irq_on(); 1705 cookie = probe_irq_on();
1706 1706
@@ -1778,7 +1778,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1778 const char *version_string; 1778 const char *version_string;
1779 DECLARE_MAC_BUF(mac); 1779 DECLARE_MAC_BUF(mac);
1780 1780
1781 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1781 DBG(2, "%s: %s\n", CARDNAME, __func__);
1782 1782
1783 /* First, see if the high byte is 0x33 */ 1783 /* First, see if the high byte is 0x33 */
1784 val = SMC_CURRENT_BANK(lp); 1784 val = SMC_CURRENT_BANK(lp);
@@ -1961,7 +1961,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1961 if (dev->dma != (unsigned char)-1) 1961 if (dev->dma != (unsigned char)-1)
1962 printk(" DMA %d", dev->dma); 1962 printk(" DMA %d", dev->dma);
1963 1963
1964 printk("%s%s\n", nowait ? " [nowait]" : "", 1964 printk("%s%s\n",
1965 lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
1965 THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); 1966 THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
1966 1967
1967 if (!is_valid_ether_addr(dev->dev_addr)) { 1968 if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 997e7f1d5c6e..edea0732f145 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -446,6 +446,8 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
446#define SMC_CAN_USE_32BIT 1 446#define SMC_CAN_USE_32BIT 1
447#define SMC_NOWAIT 1 447#define SMC_NOWAIT 1
448 448
449#define SMC_IO_SHIFT (lp->io_shift)
450
449#define SMC_inb(a, r) readb((a) + (r)) 451#define SMC_inb(a, r) readb((a) + (r))
450#define SMC_inw(a, r) readw((a) + (r)) 452#define SMC_inw(a, r) readw((a) + (r))
451#define SMC_inl(a, r) readl((a) + (r)) 453#define SMC_inl(a, r) readl((a) + (r))
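
The smc91x.h addition turns SMC_IO_SHIFT from a build-time constant into a per-device value read from lp->io_shift, so one kernel image can drive boards that wire the chip at different byte strides. A sketch of how such accessors scale the register offset at run time (types and the parameterised macro are illustrative; the real macro expands to lp->io_shift in place, picking up a local lp):

#include <stdint.h>
#include <stdio.h>

struct smc_local_demo {
	uint8_t *base;
	int      io_shift;	/* 0, 1 or 2 depending on board wiring */
};

#define SMC_IO_SHIFT(lp)  ((lp)->io_shift)
#define SMC_REG(lp, reg)  ((lp)->base + ((reg) << SMC_IO_SHIFT(lp)))

int main(void)
{
	static uint8_t regs[64];
	struct smc_local_demo lp = { .base = regs, .io_shift = 1 };

	/* Register 4 lands at byte offset 8 on a 16-bit-strided bus. */
	printf("reg 4 -> offset %td\n", SMC_REG(&lp, 4) - lp.base);
	return 0;
}
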
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 7d5561b8241c..f860ea150395 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -409,6 +409,7 @@ static int change_mtu(struct net_device *dev, int new_mtu);
409static int eeprom_read(void __iomem *ioaddr, int location); 409static int eeprom_read(void __iomem *ioaddr, int location);
410static int mdio_read(struct net_device *dev, int phy_id, int location); 410static int mdio_read(struct net_device *dev, int phy_id, int location);
411static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 411static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412static int mdio_wait_link(struct net_device *dev, int wait);
412static int netdev_open(struct net_device *dev); 413static int netdev_open(struct net_device *dev);
413static void check_duplex(struct net_device *dev); 414static void check_duplex(struct net_device *dev);
414static void netdev_timer(unsigned long data); 415static void netdev_timer(unsigned long data);
@@ -785,6 +786,24 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
785 return; 786 return;
786} 787}
787 788
789static int mdio_wait_link(struct net_device *dev, int wait)
790{
791 int bmsr;
792 int phy_id;
793 struct netdev_private *np;
794
795 np = netdev_priv(dev);
796 phy_id = np->phys[0];
797
798 do {
799 bmsr = mdio_read(dev, phy_id, MII_BMSR);
800 if (bmsr & 0x0004)
801 return 0;
802 mdelay(1);
803 } while (--wait > 0);
804 return -1;
805}
806
788static int netdev_open(struct net_device *dev) 807static int netdev_open(struct net_device *dev)
789{ 808{
790 struct netdev_private *np = netdev_priv(dev); 809 struct netdev_private *np = netdev_priv(dev);
@@ -1393,41 +1412,51 @@ static void netdev_error(struct net_device *dev, int intr_status)
1393 int speed; 1412 int speed;
1394 1413
1395 if (intr_status & LinkChange) { 1414 if (intr_status & LinkChange) {
1396 if (np->an_enable) { 1415 if (mdio_wait_link(dev, 10) == 0) {
1397 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE); 1416 printk(KERN_INFO "%s: Link up\n", dev->name);
1398 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA); 1417 if (np->an_enable) {
1399 mii_advertise &= mii_lpa; 1418 mii_advertise = mdio_read(dev, np->phys[0],
1400 printk (KERN_INFO "%s: Link changed: ", dev->name); 1419 MII_ADVERTISE);
1401 if (mii_advertise & ADVERTISE_100FULL) { 1420 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1402 np->speed = 100; 1421 mii_advertise &= mii_lpa;
1403 printk ("100Mbps, full duplex\n"); 1422 printk(KERN_INFO "%s: Link changed: ",
1404 } else if (mii_advertise & ADVERTISE_100HALF) { 1423 dev->name);
1405 np->speed = 100; 1424 if (mii_advertise & ADVERTISE_100FULL) {
1406 printk ("100Mbps, half duplex\n"); 1425 np->speed = 100;
1407 } else if (mii_advertise & ADVERTISE_10FULL) { 1426 printk("100Mbps, full duplex\n");
1408 np->speed = 10; 1427 } else if (mii_advertise & ADVERTISE_100HALF) {
1409 printk ("10Mbps, full duplex\n"); 1428 np->speed = 100;
1410 } else if (mii_advertise & ADVERTISE_10HALF) { 1429 printk("100Mbps, half duplex\n");
1411 np->speed = 10; 1430 } else if (mii_advertise & ADVERTISE_10FULL) {
1412 printk ("10Mbps, half duplex\n"); 1431 np->speed = 10;
1413 } else 1432 printk("10Mbps, full duplex\n");
1414 printk ("\n"); 1433 } else if (mii_advertise & ADVERTISE_10HALF) {
1434 np->speed = 10;
1435 printk("10Mbps, half duplex\n");
1436 } else
1437 printk("\n");
1415 1438
1439 } else {
1440 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1441 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1442 np->speed = speed;
1443 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1444 dev->name, speed);
1445 printk("%s duplex.\n",
1446 (mii_ctl & BMCR_FULLDPLX) ?
1447 "full" : "half");
1448 }
1449 check_duplex(dev);
1450 if (np->flowctrl && np->mii_if.full_duplex) {
1451 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1452 ioaddr + MulticastFilter1+2);
1453 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1454 ioaddr + MACCtrl0);
1455 }
1456 netif_carrier_on(dev);
1416 } else { 1457 } else {
1417 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR); 1458 printk(KERN_INFO "%s: Link down\n", dev->name);
1418 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; 1459 netif_carrier_off(dev);
1419 np->speed = speed;
1420 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1421 dev->name, speed);
1422 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1423 "full" : "half");
1424 }
1425 check_duplex (dev);
1426 if (np->flowctrl && np->mii_if.full_duplex) {
1427 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1428 ioaddr + MulticastFilter1+2);
1429 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1430 ioaddr + MACCtrl0);
1431 } 1460 }
1432 } 1461 }
1433 if (intr_status & StatsMax) { 1462 if (intr_status & StatsMax) {
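
The netdev_error() rework hinges on the new mdio_wait_link() helper shown earlier: on a LinkChange interrupt the driver now polls the PHY's BMSR for the link-status bit (0x0004, i.e. BMSR_LSTATUS) for up to ten 1 ms intervals, decodes speed and duplex and calls netif_carrier_on() only when the link really is up, and reports "Link down" with netif_carrier_off() otherwise. The helper in isolation, with the bit-banged MII read stubbed out:

#include <stdio.h>

#define BMSR_LSTATUS 0x0004

static int mdio_read_stub(int phy_id, int reg)
{
	(void)phy_id; (void)reg;
	return BMSR_LSTATUS;		/* pretend the link came up */
}

static int wait_link(int phy_id, int wait_ms)
{
	do {
		if (mdio_read_stub(phy_id, 1 /* MII_BMSR */) & BMSR_LSTATUS)
			return 0;	/* link is up */
		/* mdelay(1) in the driver */
	} while (--wait_ms > 0);
	return -1;			/* still down: carrier off */
}

int main(void)
{
	printf("link %s\n", wait_link(0, 10) == 0 ? "up" : "down");
	return 0;
}
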
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 7db48f1cd949..efaf84d9757d 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -539,22 +539,22 @@ struct txd_desc {
539 539
540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) 540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
541#define DBG2(fmt, args...) \ 541#define DBG2(fmt, args...) \
542 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 542 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
543 543
544#define BDX_ASSERT(x) BUG_ON(x) 544#define BDX_ASSERT(x) BUG_ON(x)
545 545
546#ifdef DEBUG 546#ifdef DEBUG
547 547
548#define ENTER do { \ 548#define ENTER do { \
549 printk(KERN_ERR "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \ 549 printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
550} while (0) 550} while (0)
551 551
552#define RET(args...) do { \ 552#define RET(args...) do { \
553 printk(KERN_ERR "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \ 553 printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
554return args; } while (0) 554return args; } while (0)
555 555
556#define DBG(fmt, args...) \ 556#define DBG(fmt, args...) \
557 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 557 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
558#else 558#else
559#define ENTER do { } while (0) 559#define ENTER do { } while (0)
560#define RET(args...) return args 560#define RET(args...) return args
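
tehuti.h keeps its GNU-style named variadic macros; only the function-name identifier changes. The "## args" paste is what lets the same macro accept a bare format string, swallowing the trailing comma when no arguments follow. A userspace model:

#include <stdio.h>

#define DBG(fmt, args...) \
	fprintf(stderr, "%s:%-5d: " fmt, __func__, __LINE__, ## args)

int main(void)
{
	DBG("enter\n");			/* no extra args: ## eats the comma */
	DBG("value=%d\n", 42);
	return 0;
}
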
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 43fde99b24ac..eb1da6f0b086 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
263 return; 263 return;
264 udelay(10); 264 udelay(10);
265 } 265 }
266 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 266 printk(KERN_ERR "%s function time out \n", __func__);
267} 267}
268 268
269static int mii_speed(struct mii_if_info *mii) 269static int mii_speed(struct mii_if_info *mii)
@@ -1059,7 +1059,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
1059 return; 1059 return;
1060 udelay(10); 1060 udelay(10);
1061 } 1061 }
1062 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1062 printk(KERN_ERR "%s function time out \n", __func__);
1063} 1063}
1064 1064
1065static void tsi108_reset_ether(struct tsi108_prv_data * data) 1065static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1244,7 +1244,7 @@ static void tsi108_init_phy(struct net_device *dev)
1244 udelay(10); 1244 udelay(10);
1245 } 1245 }
1246 if (i == 0) 1246 if (i == 0)
1247 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1247 printk(KERN_ERR "%s function time out \n", __func__);
1248 1248
1249 if (data->phy_type == TSI108_PHY_BCM54XX) { 1249 if (data->phy_type == TSI108_PHY_BCM54XX) {
1250 tsi108_write_mii(data, 0x09, 0x0300); 1250 tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 9281d06d5aaa..f54c45049d50 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1418,7 +1418,6 @@ static int de_close (struct net_device *dev)
1418 1418
1419 de_free_rings(de); 1419 de_free_rings(de);
1420 de_adapter_sleep(de); 1420 de_adapter_sleep(de);
1421 pci_disable_device(de->pdev);
1422 return 0; 1421 return 0;
1423} 1422}
1424 1423
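
One plausible reading of dropping pci_disable_device() from de_close() is that it restores the usual PCI driver balance: enable once in probe, disable once in remove, so taking the interface down no longer disables a device the driver still owns. A hedged skeleton of that balanced pattern, not the de2104x source:

#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);	/* enabled once, at probe */
	if (rc)
		return rc;
	/* ... request regions, map BARs, register the netdev ... */
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	/* ... unregister the netdev, unmap BARs, release regions ... */
	pci_disable_device(pdev);		/* disabled once, at remove */
}

/* No .id_table here, so this skeleton never binds; the netdev
 * open()/stop() paths only start and stop the NIC and leave the
 * PCI enable state alone. */
static struct pci_driver demo_driver = {
	.name   = "demo",
	.probe  = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)  { return pci_register_driver(&demo_driver); }
static void __exit demo_exit(void) { pci_unregister_driver(&demo_driver); }
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
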
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8f944e57fd55..c87747bb24c5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -400,7 +400,7 @@ static struct enet_addr_container *get_enet_addr_container(void)
400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
401 if (!enet_addr_cont) { 401 if (!enet_addr_cont) {
402 ugeth_err("%s: No memory for enet_addr_container object.", 402 ugeth_err("%s: No memory for enet_addr_container object.",
403 __FUNCTION__); 403 __func__);
404 return NULL; 404 return NULL;
405 } 405 }
406 406
@@ -427,7 +427,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
428 428
429 if (!(paddr_num < NUM_OF_PADDRS)) { 429 if (!(paddr_num < NUM_OF_PADDRS)) {
430 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 430 ugeth_warn("%s: Illegal paddr_num.", __func__);
431 return -EINVAL; 431 return -EINVAL;
432 } 432 }
433 433
@@ -447,7 +447,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
448 448
449 if (!(paddr_num < NUM_OF_PADDRS)) { 449 if (!(paddr_num < NUM_OF_PADDRS)) {
450 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 450 ugeth_warn("%s: Illegal paddr_num.", __func__);
451 return -EINVAL; 451 return -EINVAL;
452 } 452 }
453 453
@@ -1441,7 +1441,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1441 u32 upsmr, maccfg2, tbiBaseAddress; 1441 u32 upsmr, maccfg2, tbiBaseAddress;
1442 u16 value; 1442 u16 value;
1443 1443
1444 ugeth_vdbg("%s: IN", __FUNCTION__); 1444 ugeth_vdbg("%s: IN", __func__);
1445 1445
1446 ug_info = ugeth->ug_info; 1446 ug_info = ugeth->ug_info;
1447 ug_regs = ugeth->ug_regs; 1447 ug_regs = ugeth->ug_regs;
@@ -1504,7 +1504,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1504 if (ret_val != 0) { 1504 if (ret_val != 0) {
1505 if (netif_msg_probe(ugeth)) 1505 if (netif_msg_probe(ugeth))
1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
1507 __FUNCTION__); 1507 __func__);
1508 return ret_val; 1508 return ret_val;
1509 } 1509 }
1510 1510
@@ -1744,7 +1744,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1744 /* check if the UCC number is in range. */ 1744 /* check if the UCC number is in range. */
1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1746 if (netif_msg_probe(ugeth)) 1746 if (netif_msg_probe(ugeth))
1747 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1747 ugeth_err("%s: ucc_num out of range.", __func__);
1748 return -EINVAL; 1748 return -EINVAL;
1749 } 1749 }
1750 1750
@@ -1773,7 +1773,7 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1773 /* check if the UCC number is in range. */ 1773 /* check if the UCC number is in range. */
1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1775 if (netif_msg_probe(ugeth)) 1775 if (netif_msg_probe(ugeth))
1776 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1776 ugeth_err("%s: ucc_num out of range.", __func__);
1777 return -EINVAL; 1777 return -EINVAL;
1778 } 1778 }
1779 1779
@@ -2062,7 +2062,7 @@ static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth
2062 ugeth_warn 2062 ugeth_warn
2063 ("%s: multicast address added to paddr will have no " 2063 ("%s: multicast address added to paddr will have no "
2064 "effect - is this what you wanted?", 2064 "effect - is this what you wanted?",
2065 __FUNCTION__); 2065 __func__);
2066 2066
2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2068 /* store address in our database */ 2068 /* store address in our database */
@@ -2278,7 +2278,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2278 struct phy_device *phydev = ugeth->phydev; 2278 struct phy_device *phydev = ugeth->phydev;
2279 u32 tempval; 2279 u32 tempval;
2280 2280
2281 ugeth_vdbg("%s: IN", __FUNCTION__); 2281 ugeth_vdbg("%s: IN", __func__);
2282 2282
2283 /* Disable the controller */ 2283 /* Disable the controller */
2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -2315,7 +2315,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2316 if (netif_msg_probe(ugeth)) 2316 if (netif_msg_probe(ugeth))
2317 ugeth_err("%s: Bad memory partition value.", 2317 ugeth_err("%s: Bad memory partition value.",
2318 __FUNCTION__); 2318 __func__);
2319 return -EINVAL; 2319 return -EINVAL;
2320 } 2320 }
2321 2321
@@ -2327,7 +2327,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2327 if (netif_msg_probe(ugeth)) 2327 if (netif_msg_probe(ugeth))
2328 ugeth_err 2328 ugeth_err
2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2330 __FUNCTION__); 2330 __func__);
2331 return -EINVAL; 2331 return -EINVAL;
2332 } 2332 }
2333 } 2333 }
@@ -2338,7 +2338,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2338 if (netif_msg_probe(ugeth)) 2338 if (netif_msg_probe(ugeth))
2339 ugeth_err 2339 ugeth_err
2340 ("%s: Tx BD ring length must be no smaller than 2.", 2340 ("%s: Tx BD ring length must be no smaller than 2.",
2341 __FUNCTION__); 2341 __func__);
2342 return -EINVAL; 2342 return -EINVAL;
2343 } 2343 }
2344 } 2344 }
@@ -2349,21 +2349,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2349 if (netif_msg_probe(ugeth)) 2349 if (netif_msg_probe(ugeth))
2350 ugeth_err 2350 ugeth_err
2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2352 __FUNCTION__); 2352 __func__);
2353 return -EINVAL; 2353 return -EINVAL;
2354 } 2354 }
2355 2355
2356 /* num Tx queues */ 2356 /* num Tx queues */
2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2358 if (netif_msg_probe(ugeth)) 2358 if (netif_msg_probe(ugeth))
2359 ugeth_err("%s: number of tx queues too large.", __FUNCTION__); 2359 ugeth_err("%s: number of tx queues too large.", __func__);
2360 return -EINVAL; 2360 return -EINVAL;
2361 } 2361 }
2362 2362
2363 /* num Rx queues */ 2363 /* num Rx queues */
2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2365 if (netif_msg_probe(ugeth)) 2365 if (netif_msg_probe(ugeth))
2366 ugeth_err("%s: number of rx queues too large.", __FUNCTION__); 2366 ugeth_err("%s: number of rx queues too large.", __func__);
2367 return -EINVAL; 2367 return -EINVAL;
2368 } 2368 }
2369 2369
@@ -2374,7 +2374,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2374 ugeth_err 2374 ugeth_err
2375 ("%s: VLAN priority table entry must not be" 2375 ("%s: VLAN priority table entry must not be"
2376 " larger than number of Rx queues.", 2376 " larger than number of Rx queues.",
2377 __FUNCTION__); 2377 __func__);
2378 return -EINVAL; 2378 return -EINVAL;
2379 } 2379 }
2380 } 2380 }
@@ -2386,7 +2386,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2386 ugeth_err 2386 ugeth_err
2387 ("%s: IP priority table entry must not be" 2387 ("%s: IP priority table entry must not be"
2388 " larger than number of Rx queues.", 2388 " larger than number of Rx queues.",
2389 __FUNCTION__); 2389 __func__);
2390 return -EINVAL; 2390 return -EINVAL;
2391 } 2391 }
2392 } 2392 }
@@ -2394,7 +2394,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	if (ug_info->cam && !ug_info->ecamptr) {
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
-				  __FUNCTION__);
+				  __func__);
 		return -EINVAL;
 	}
 
@@ -2404,7 +2404,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		if (netif_msg_probe(ugeth))
 			ugeth_err("%s: Number of station addresses greater than 1 "
 				  "not allowed in extended parsing mode.",
-				  __FUNCTION__);
+				  __func__);
 		return -EINVAL;
 	}
 
@@ -2418,7 +2418,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	/* Initialize the general fast UCC block. */
 	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
 		if (netif_msg_probe(ugeth))
-			ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+			ugeth_err("%s: Failed to init uccf.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2448,7 +2448,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	u8 __iomem *endOfRing;
 	u8 numThreadsRxNumerical, numThreadsTxNumerical;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 	uccf = ugeth->uccf;
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
@@ -2474,7 +2474,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	default:
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Bad number of Rx threads value.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2499,7 +2499,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	default:
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Bad number of Tx threads value.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2553,7 +2553,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	if (ret_val != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: IPGIFG initialization parameter too large.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -2571,7 +2571,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	if (ret_val != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Half Duplex initialization parameter too large.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -2626,7 +2626,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				    ("%s: Can not allocate memory for Tx bd rings.",
-				     __FUNCTION__);
+				     __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2662,7 +2662,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				    ("%s: Can not allocate memory for Rx bd rings.",
-				     __FUNCTION__);
+				     __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2678,7 +2678,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (ugeth->tx_skbuff[j] == NULL) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Could not allocate tx_skbuff",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2710,7 +2710,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (ugeth->rx_skbuff[j] == NULL) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Could not allocate rx_skbuff",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2744,7 +2744,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2767,7 +2767,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2797,7 +2797,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2841,7 +2841,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_scheduler.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2892,7 +2892,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for"
 			     " p_tx_fw_statistics_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2932,7 +2932,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2954,7 +2954,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2978,7 +2978,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for"
-			     " p_rx_fw_statistics_pram.", __FUNCTION__);
+			     " p_rx_fw_statistics_pram.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3001,7 +3001,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for"
-			     " p_rx_irq_coalescing_tbl.", __FUNCTION__);
+			     " p_rx_irq_coalescing_tbl.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3070,7 +3070,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3147,7 +3147,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (!ug_info->extendedFilteringChainPointer) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Null Extended Filtering Chain Pointer.",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return -EINVAL;
 		}
@@ -3161,7 +3161,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			if (netif_msg_ifup(ugeth))
 				ugeth_err
 				    ("%s: Can not allocate DPRAM memory for"
-				     " p_exf_glbl_param.", __FUNCTION__);
+				     " p_exf_glbl_param.", __func__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -3209,7 +3209,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate memory for"
-			     " p_UccInitEnetParamShadows.", __FUNCTION__);
+			     " p_UccInitEnetParamShadows.", __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3244,7 +3244,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Invalid largest External Lookup Key Size.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 	}
@@ -3271,7 +3271,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		    ug_info->riscRx, 1)) != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -3287,7 +3287,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		    ug_info->riscTx, 0)) != 0) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
-				  __FUNCTION__);
+				  __func__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -3297,7 +3297,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
 			if (netif_msg_ifup(ugeth))
 				ugeth_err("%s: Can not fill Rx bds with buffers.",
-					  __FUNCTION__);
+					  __func__);
 			ucc_geth_memclean(ugeth);
 			return ret_val;
 		}
@@ -3309,7 +3309,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		if (netif_msg_ifup(ugeth))
 			ugeth_err
 			    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
-			     __FUNCTION__);
+			     __func__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -3360,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	dev->stats.tx_errors++;
 
@@ -3386,7 +3386,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 bd_status;
 	u8 txQ = 0;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	spin_lock_irq(&ugeth->lock);
 
@@ -3459,7 +3459,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 	u8 *bdBuffer;
 	struct net_device *dev;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	dev = ugeth->dev;
 
@@ -3481,7 +3481,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 		    (bd_status & R_ERRORS_FATAL)) {
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
-					  __FUNCTION__, __LINE__, (u32) skb);
+					  __func__, __LINE__, (u32) skb);
 			if (skb)
 				dev_kfree_skb_any(skb);
 
@@ -3507,7 +3507,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 		skb = get_new_skb(ugeth, bd);
 		if (!skb) {
 			if (netif_msg_rx_err(ugeth))
-				ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
+				ugeth_warn("%s: No Rx Data Buffer", __func__);
 			dev->stats.rx_dropped++;
 			break;
 		}
@@ -3613,7 +3613,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
 	register u32 tx_mask;
 	u8 i;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	uccf = ugeth->uccf;
 	ug_info = ugeth->ug_info;
@@ -3683,13 +3683,13 @@ static int ucc_geth_open(struct net_device *dev)
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 	int err;
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	/* Test station address */
 	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
 		if (netif_msg_ifup(ugeth))
 			ugeth_err("%s: Multicast address used for station address"
-				  " - is this what you wanted?", __FUNCTION__);
+				  " - is this what you wanted?", __func__);
 		return -EINVAL;
 	}
 
@@ -3772,7 +3772,7 @@ static int ucc_geth_close(struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	napi_disable(&ugeth->napi);
 
@@ -3840,7 +3840,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
 	};
 
-	ugeth_vdbg("%s: IN", __FUNCTION__);
+	ugeth_vdbg("%s: IN", __func__);
 
 	prop = of_get_property(np, "cell-index", NULL);
 	if (!prop) {
@@ -3857,7 +3857,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 	if (ug_info == NULL) {
 		if (netif_msg_probe(&debug))
 			ugeth_err("%s: [%d] Missing additional data!",
-				  __FUNCTION__, ucc_num);
+				  __func__, ucc_num);
 		return -ENODEV;
 	}
 
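Every hunk in the ucc_geth.c diff above makes the same mechanical substitution: the GCC-specific __FUNCTION__ is replaced by __func__, the predefined identifier that C99 guarantees inside every function body, so the log output is identical while the code no longer relies on a compiler extension. A minimal standalone illustration (not taken from the driver):

    #include <stdio.h>

    static void probe(void)
    {
            /* __func__ is standard C99; __FUNCTION__ is an older GCC
             * alias for the same thing, which is why the substitution
             * is safe and purely mechanical. */
            printf("%s: IN\n", __func__);
    }

    int main(void)
    {
            probe();        /* prints "probe: IN" */
            return 0;
    }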
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6e42b5a8c22b..1164c52e2c0a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -92,9 +92,6 @@
 
 #define HSO_NET_TX_TIMEOUT		(HZ*10)
 
-/* Serial port defines and structs. */
-#define HSO_SERIAL_FLAG_RX_SENT		0
-
 #define HSO_SERIAL_MAGIC		0x48534f31
 
 /* Number of ttys to handle */
@@ -179,6 +176,12 @@ struct hso_net {
 	unsigned long flags;
 };
 
+enum rx_ctrl_state{
+	RX_IDLE,
+	RX_SENT,
+	RX_PENDING
+};
+
 struct hso_serial {
 	struct hso_device *parent;
 	int magic;
@@ -205,7 +208,7 @@ struct hso_serial {
 	struct usb_endpoint_descriptor *in_endp;
 	struct usb_endpoint_descriptor *out_endp;
 
-	unsigned long flags;
+	enum rx_ctrl_state rx_state;
 	u8 rts_state;
 	u8 dtr_state;
 	unsigned tx_urb_used:1;
@@ -216,6 +219,15 @@ struct hso_serial {
 	spinlock_t serial_lock;
 
 	int (*write_data) (struct hso_serial *serial);
+	/* Hacks required to get flow control
+	 * working on the serial receive buffers
+	 * so as not to drop characters on the floor.
+	 */
+	int curr_rx_urb_idx;
+	u16 curr_rx_urb_offset;
+	u8 rx_urb_filled[MAX_RX_URBS];
+	struct tasklet_struct unthrottle_tasklet;
+	struct work_struct retry_unthrottle_workqueue;
 };
 
 struct hso_device {
@@ -271,7 +283,7 @@ struct hso_device {
 static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
 			       unsigned int set, unsigned int clear);
 static void ctrl_callback(struct urb *urb);
-static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
+static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
 static void hso_kick_transmit(struct hso_serial *serial);
 /* Helper functions */
 static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
@@ -287,6 +299,8 @@ static int hso_start_net_device(struct hso_device *hso_dev);
 static void hso_free_shared_int(struct hso_shared_int *shared_int);
 static int hso_stop_net_device(struct hso_device *hso_dev);
 static void hso_serial_ref_free(struct kref *ref);
+static void hso_std_serial_read_bulk_callback(struct urb *urb);
+static int hso_mux_serial_read(struct hso_serial *serial);
 static void async_get_intf(struct work_struct *data);
 static void async_put_intf(struct work_struct *data);
 static int hso_put_activity(struct hso_device *hso_dev);
@@ -458,6 +472,17 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
 }
 static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
 
+static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
+{
+	int idx;
+
+	for (idx = 0; idx < serial->num_rx_urbs; idx++)
+		if (serial->rx_urb[idx] == urb)
+			return idx;
+	dev_err(serial->parent->dev, "hso_urb_to_index failed\n");
+	return -1;
+}
+
 /* converts mux value to a port spec value */
 static u32 hso_mux_to_port(int mux)
 {
@@ -1039,6 +1064,158 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
 	return;
 }
 
+static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
+{
+	int result;
+#ifdef CONFIG_HSO_AUTOPM
+	usb_mark_last_busy(urb->dev);
+#endif
+	/* We are done with this URB, resubmit it. Prep the USB to wait for
+	 * another frame */
+	usb_fill_bulk_urb(urb, serial->parent->usb,
+			  usb_rcvbulkpipe(serial->parent->usb,
+					  serial->in_endp->
+					  bEndpointAddress & 0x7F),
+			  urb->transfer_buffer, serial->rx_data_length,
+			  hso_std_serial_read_bulk_callback, serial);
+	/* Give this to the USB subsystem so it can tell us when more data
+	 * arrives. */
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result) {
+		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
+			__func__, result);
+	}
+}
+
+
+
+
+static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
+{
+	int count;
+	struct urb *curr_urb;
+
+	while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
+		curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
+		count = put_rxbuf_data(curr_urb, serial);
+		if (count == -1)
+			return;
+		if (count == 0) {
+			serial->curr_rx_urb_idx++;
+			if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
+				serial->curr_rx_urb_idx = 0;
+			hso_resubmit_rx_bulk_urb(serial, curr_urb);
+		}
+	}
+}
+
+static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
+{
+	int count = 0;
+	struct urb *urb;
+
+	urb = serial->rx_urb[0];
+	if (serial->open_count > 0) {
+		count = put_rxbuf_data(urb, serial);
+		if (count == -1)
+			return;
+	}
+	/* Re issue a read as long as we receive data. */
+
+	if (count == 0 && ((urb->actual_length != 0) ||
+			   (serial->rx_state == RX_PENDING))) {
+		serial->rx_state = RX_SENT;
+		hso_mux_serial_read(serial);
+	} else
+		serial->rx_state = RX_IDLE;
+}
+
+
+/* read callback for Diag and CS port */
+static void hso_std_serial_read_bulk_callback(struct urb *urb)
+{
+	struct hso_serial *serial = urb->context;
+	int status = urb->status;
+
+	/* sanity check */
+	if (!serial) {
+		D1("serial == NULL");
+		return;
+	} else if (status) {
+		log_usb_status(status, __func__);
+		return;
+	}
+
+	D4("\n--- Got serial_read_bulk callback %02x ---", status);
+	D1("Actual length = %d\n", urb->actual_length);
+	DUMP1(urb->transfer_buffer, urb->actual_length);
+
+	/* Anyone listening? */
+	if (serial->open_count == 0)
+		return;
+
+	if (status == 0) {
+		if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
+			u32 rest;
+			u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+			rest =
+			    urb->actual_length %
+			    serial->in_endp->wMaxPacketSize;
+			if (((rest == 5) || (rest == 6))
+			    && !memcmp(((u8 *) urb->transfer_buffer) +
+				       urb->actual_length - 4, crc_check, 4)) {
+				urb->actual_length -= 4;
+			}
+		}
+		/* Valid data, handle RX data */
+		spin_lock(&serial->serial_lock);
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+		put_rxbuf_data_and_resubmit_bulk_urb(serial);
+		spin_unlock(&serial->serial_lock);
+	} else if (status == -ENOENT || status == -ECONNRESET) {
+		/* Unlinked - check for throttled port. */
+		D2("Port %d, successfully unlinked urb", serial->minor);
+		spin_lock(&serial->serial_lock);
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+		hso_resubmit_rx_bulk_urb(serial, urb);
+		spin_unlock(&serial->serial_lock);
+	} else {
+		D2("Port %d, status = %d for read urb", serial->minor, status);
+		return;
+	}
+}
+
+/*
+ * This needs to be a tasklet otherwise we will
+ * end up recursively calling this function.
+ */
+void hso_unthrottle_tasklet(struct hso_serial *serial)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&serial->serial_lock, flags);
+	if ((serial->parent->port_spec & HSO_INTF_MUX))
+		put_rxbuf_data_and_resubmit_ctrl_urb(serial);
+	else
+		put_rxbuf_data_and_resubmit_bulk_urb(serial);
+	spin_unlock_irqrestore(&serial->serial_lock, flags);
+}
+
+static void hso_unthrottle(struct tty_struct *tty)
+{
+	struct hso_serial *serial = get_serial_by_tty(tty);
+
+	tasklet_hi_schedule(&serial->unthrottle_tasklet);
+}
+
+void hso_unthrottle_workfunc(struct work_struct *work)
+{
+	struct hso_serial *serial =
+	    container_of(work, struct hso_serial,
+			 retry_unthrottle_workqueue);
+	hso_unthrottle_tasklet(serial);
+}
+
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1064,13 +1241,18 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 	tty->driver_data = serial;
 	serial->tty = tty;
 
-	/* check for port allready opened, if not set the termios */
+	/* check for port already opened, if not set the termios */
 	serial->open_count++;
 	if (serial->open_count == 1) {
 		tty->low_latency = 1;
-		serial->flags = 0;
+		serial->rx_state = RX_IDLE;
 		/* Force default termio settings */
 		_hso_serial_set_termios(tty, NULL);
+		tasklet_init(&serial->unthrottle_tasklet,
+			     (void (*)(unsigned long))hso_unthrottle_tasklet,
+			     (unsigned long)serial);
+		INIT_WORK(&serial->retry_unthrottle_workqueue,
+			  hso_unthrottle_workfunc);
 		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
 		if (result) {
 			hso_stop_serial_device(serial->parent);
@@ -1117,9 +1299,13 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
 		}
 		if (!usb_gone)
 			hso_stop_serial_device(serial->parent);
+		tasklet_kill(&serial->unthrottle_tasklet);
+		cancel_work_sync(&serial->retry_unthrottle_workqueue);
 	}
+
 	if (!usb_gone)
 		usb_autopm_put_interface(serial->parent->interface);
+
 	mutex_unlock(&serial->parent->mutex);
 }
 
@@ -1422,15 +1608,21 @@ static void intr_callback(struct urb *urb)
 							 (1 << i));
 			if (serial != NULL) {
 				D1("Pending read interrupt on port %d\n", i);
-				if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT,
-						      &serial->flags)) {
+				spin_lock(&serial->serial_lock);
+				if (serial->rx_state == RX_IDLE) {
 					/* Setup and send a ctrl req read on
 					 * port i */
-					hso_mux_serial_read(serial);
+					if (!serial->rx_urb_filled[0]) {
+						serial->rx_state = RX_SENT;
+						hso_mux_serial_read(serial);
+					} else
+						serial->rx_state = RX_PENDING;
+
 				} else {
 					D1("Already pending a read on "
 					   "port %d\n", i);
 				}
+				spin_unlock(&serial->serial_lock);
 			}
 		}
 	}
@@ -1532,16 +1724,10 @@ static void ctrl_callback(struct urb *urb)
 	if (req->bRequestType ==
 	    (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
 		/* response to a read command */
-		if (serial->open_count > 0) {
-			/* handle RX data the normal way */
-			put_rxbuf_data(urb, serial);
-		}
-
-		/* Re issue a read as long as we receive data. */
-		if (urb->actual_length != 0)
-			hso_mux_serial_read(serial);
-		else
-			clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
+		serial->rx_urb_filled[0] = 1;
+		spin_lock(&serial->serial_lock);
+		put_rxbuf_data_and_resubmit_ctrl_urb(serial);
+		spin_unlock(&serial->serial_lock);
 	} else {
 		hso_put_activity(serial->parent);
 		if (serial->tty)
@@ -1552,91 +1738,42 @@ static void ctrl_callback(struct urb *urb)
 }
 
 /* handle RX data for serial port */
-static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
+static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
 	struct tty_struct *tty = serial->tty;
-
+	int write_length_remaining = 0;
+	int curr_write_len;
 	/* Sanity check */
 	if (urb == NULL || serial == NULL) {
 		D1("serial = NULL");
-		return;
+		return -2;
 	}
 
 	/* Push data to tty */
-	if (tty && urb->actual_length) {
+	if (tty) {
+		write_length_remaining = urb->actual_length -
+			serial->curr_rx_urb_offset;
 		D1("data to push to tty");
-		tty_insert_flip_string(tty, urb->transfer_buffer,
-				       urb->actual_length);
-		tty_flip_buffer_push(tty);
-	}
-}
-
-/* read callback for Diag and CS port */
-static void hso_std_serial_read_bulk_callback(struct urb *urb)
-{
-	struct hso_serial *serial = urb->context;
-	int result;
-	int status = urb->status;
-
-	/* sanity check */
-	if (!serial) {
-		D1("serial == NULL");
-		return;
-	} else if (status) {
-		log_usb_status(status, __func__);
-		return;
-	}
-
-	D4("\n--- Got serial_read_bulk callback %02x ---", status);
-	D1("Actual length = %d\n", urb->actual_length);
-	DUMP1(urb->transfer_buffer, urb->actual_length);
-
-	/* Anyone listening? */
-	if (serial->open_count == 0)
-		return;
-
-	if (status == 0) {
-		if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
-			u32 rest;
-			u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
-			rest =
-			    urb->actual_length %
-			    serial->in_endp->wMaxPacketSize;
-			if (((rest == 5) || (rest == 6))
-			    && !memcmp(((u8 *) urb->transfer_buffer) +
-				       urb->actual_length - 4, crc_check, 4)) {
-				urb->actual_length -= 4;
-			}
+		while (write_length_remaining) {
+			if (test_bit(TTY_THROTTLED, &tty->flags))
+				return -1;
+			curr_write_len = tty_insert_flip_string
+				(tty, urb->transfer_buffer +
+				 serial->curr_rx_urb_offset,
+				 write_length_remaining);
+			serial->curr_rx_urb_offset += curr_write_len;
+			write_length_remaining -= curr_write_len;
+			tty_flip_buffer_push(tty);
 		}
-		/* Valid data, handle RX data */
-		put_rxbuf_data(urb, serial);
-	} else if (status == -ENOENT || status == -ECONNRESET) {
-		/* Unlinked - check for throttled port. */
-		D2("Port %d, successfully unlinked urb", serial->minor);
-	} else {
-		D2("Port %d, status = %d for read urb", serial->minor, status);
-		return;
 	}
-
-	usb_mark_last_busy(urb->dev);
-
-	/* We are done with this URB, resubmit it. Prep the USB to wait for
-	 * another frame */
-	usb_fill_bulk_urb(urb, serial->parent->usb,
-			  usb_rcvbulkpipe(serial->parent->usb,
-					  serial->in_endp->
-					  bEndpointAddress & 0x7F),
-			  urb->transfer_buffer, serial->rx_data_length,
-			  hso_std_serial_read_bulk_callback, serial);
-	/* Give this to the USB subsystem so it can tell us when more data
-	 * arrives. */
-	result = usb_submit_urb(urb, GFP_ATOMIC);
-	if (result) {
-		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d",
-			__func__, result);
-	}
+	if (write_length_remaining == 0) {
+		serial->curr_rx_urb_offset = 0;
+		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+	}
+	return write_length_remaining;
 }
 
+
 /* Base driver functions */
 
 static void hso_log_port(struct hso_device *hso_dev)
@@ -1794,9 +1931,13 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 		return -ENODEV;
 
 	for (i = 0; i < serial->num_rx_urbs; i++) {
-		if (serial->rx_urb[i])
+		if (serial->rx_urb[i]) {
 			usb_kill_urb(serial->rx_urb[i]);
+			serial->rx_urb_filled[i] = 0;
+		}
 	}
+	serial->curr_rx_urb_idx = 0;
+	serial->curr_rx_urb_offset = 0;
 
 	if (serial->tx_urb)
 		usb_kill_urb(serial->tx_urb);
@@ -2211,14 +2352,14 @@ static struct hso_device *hso_create_bulk_serial_device(
 					     USB_DIR_IN);
 	if (!serial->in_endp) {
 		dev_err(&interface->dev, "Failed to find BULK IN ep\n");
-		goto exit;
+		goto exit2;
 	}
 
 	if (!
 	    (serial->out_endp =
 	     hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
 		dev_err(&interface->dev, "Failed to find BULK IN ep\n");
-		goto exit;
+		goto exit2;
 	}
 
 	serial->write_data = hso_std_serial_write_data;
@@ -2231,9 +2372,10 @@ static struct hso_device *hso_create_bulk_serial_device(
 
 	/* done, return it */
 	return hso_dev;
+
+exit2:
+	hso_serial_common_free(serial);
 exit:
-	if (hso_dev && serial)
-		hso_serial_common_free(serial);
 	kfree(serial);
 	hso_free_device(hso_dev);
 	return NULL;
@@ -2740,6 +2882,7 @@ static const struct tty_operations hso_serial_ops = {
 	.chars_in_buffer = hso_serial_chars_in_buffer,
 	.tiocmget = hso_serial_tiocmget,
 	.tiocmset = hso_serial_tiocmset,
+	.unthrottle = hso_unthrottle
 };
 
 static struct usb_driver hso_driver = {
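The hso.c changes above replace the single HSO_SERIAL_FLAG_RX_SENT bit with a small receive state machine (rx_state plus the rx_urb_filled[] markers), so data destined for a throttled tty is parked in the URB buffers and replayed from the unthrottle tasklet instead of being dropped. A condensed model of those transitions, with the USB and tty machinery stubbed out (the port struct and the on_pending_read/on_buffer_drained helpers are illustrative names, not part of the driver):

    #include <stdio.h>

    /* Condensed model of the mux-port receive flow control added in
     * this patch; names here are illustrative, not the driver's own. */
    enum rx_ctrl_state { RX_IDLE, RX_SENT, RX_PENDING };

    struct port {
            enum rx_ctrl_state rx_state;
            int buf_filled;         /* models rx_urb_filled[0] */
    };

    /* Interrupt reports pending data (cf. intr_callback()). */
    static void on_pending_read(struct port *p)
    {
            if (p->rx_state != RX_IDLE)
                    return;                 /* a read is already in flight */
            if (!p->buf_filled)
                    p->rx_state = RX_SENT;  /* issue the ctrl read now */
            else
                    p->rx_state = RX_PENDING; /* buffer still queued for tty */
    }

    /* Buffer fully drained into the tty (cf. the unthrottle path). */
    static void on_buffer_drained(struct port *p, int more_data)
    {
            p->buf_filled = 0;
            if (more_data || p->rx_state == RX_PENDING)
                    p->rx_state = RX_SENT;  /* chain the next read at once */
            else
                    p->rx_state = RX_IDLE;
    }

    int main(void)
    {
            struct port p = { RX_IDLE, 1 };

            on_pending_read(&p);            /* buffer busy -> RX_PENDING */
            on_buffer_drained(&p, 0);       /* pending request -> RX_SENT */
            printf("state = %d\n", p.rx_state); /* prints 1 (RX_SENT) */
            return 0;
    }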
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index ca9d00c1194e..b5143509e8be 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -118,7 +118,7 @@ static void mcs7830_async_cmd_callback(struct urb *urb)
 
 	if (urb->status < 0)
 		printk(KERN_DEBUG "%s() failed with %d\n",
-		       __FUNCTION__, urb->status);
+		       __func__, urb->status);
 
 	kfree(req);
 	usb_free_urb(urb);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8c19307e5040..38b90e7a7ed3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -119,7 +119,7 @@ static void ctrl_callback(struct urb *urb)
 	default:
 		if (netif_msg_drv(pegasus) && printk_ratelimit())
 			dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
-				__FUNCTION__, urb->status);
+				__func__, urb->status);
 	}
 	pegasus->flags &= ~ETH_REGS_CHANGED;
 	wake_up(&pegasus->ctrl_wait);
@@ -136,7 +136,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
 	if (!buffer) {
 		if (netif_msg_drv(pegasus))
 			dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
-				 __FUNCTION__);
+				 __func__);
 		return -ENOMEM;
 	}
 	add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -224,7 +224,7 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
 		netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus))
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-				__FUNCTION__, ret);
+				__func__, ret);
 		goto out;
 	}
 
@@ -246,7 +246,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
 	if (!tmp) {
 		if (netif_msg_drv(pegasus))
 			dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
-				 __FUNCTION__);
+				 __func__);
 		return -ENOMEM;
 	}
 	memcpy(tmp, &data, 1);
@@ -277,7 +277,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
 		netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus) && printk_ratelimit())
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-				__FUNCTION__, ret);
+				__func__, ret);
 		goto out;
 	}
 
@@ -310,7 +310,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
 		netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus))
 			dev_err(&pegasus->intf->dev, "%s, status %d\n",
-				__FUNCTION__, ret);
+				__func__, ret);
 	}
 
 	return ret;
@@ -341,7 +341,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
 	}
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 
 	return ret;
 }
@@ -378,7 +378,7 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
 
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 
@@ -415,7 +415,7 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
 
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 
@@ -463,7 +463,7 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
 	return ret;
 fail:
 	if (netif_msg_drv(pegasus))
-		dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__);
+		dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
 	return -ETIMEDOUT;
 }
 #endif				/* PEGASUS_WRITE_EEPROM */
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 1b95b04c9257..29a33090d3d4 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1381,7 +1381,7 @@ enum velocity_msg_level {
 #define ASSERT(x) { \
 	if (!(x)) { \
 		printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
-			__FUNCTION__, __LINE__);\
+			__func__, __LINE__);\
 		BUG(); \
 	}\
 }
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index d14e6678deed..a5ddc6c8963e 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -407,7 +407,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
 	if (cfm->version != CFM_VERSION) {
 		printk(KERN_ERR "%s:%s: firmware format %u rejected! "
 				"Expecting %u.\n",
-				modname, __FUNCTION__, cfm->version, CFM_VERSION);
+				modname, __func__, cfm->version, CFM_VERSION);
 		return -EINVAL;
 	}
 
@@ -420,7 +420,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
 */
 	if (cksum != cfm->checksum) {
 		printk(KERN_ERR "%s:%s: firmware corrupted!\n",
-				modname, __FUNCTION__);
+				modname, __func__);
 		printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
 				len - (int)sizeof(struct cycx_firmware) - 1,
 				cfm->info.codesize);
@@ -432,7 +432,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
 	/* If everything is ok, set reset, data and code pointers */
 	img_hdr = (struct cycx_fw_header *)&cfm->image;
 #ifdef FIRMWARE_DEBUG
-	printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname);
+	printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname);
 	printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
 	printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
 	printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index d3b28b01b9f9..5a7303dc0965 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -874,7 +874,7 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
 	nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
 
 	dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
-			  __FUNCTION__, lcn, loc, rem);
+			  __func__, lcn, loc, rem);
 
 	dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
 	if (!dev) {
@@ -902,7 +902,7 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
 	cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
 	dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
-			  card->devname, __FUNCTION__, lcn, key);
+			  card->devname, __func__, lcn, key);
 
 	dev = cycx_x25_get_dev_by_lcn(wandev, -key);
 	if (!dev) {
@@ -929,7 +929,7 @@ static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
 
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
 	dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
-			  card->devname, __FUNCTION__, lcn);
+			  card->devname, __func__, lcn);
 	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
 	if (!dev) {
 		/* Invalid channel, discard packet */
@@ -950,7 +950,7 @@ static void cycx_x25_irq_disconnect(struct cycx_device *card,
 	u8 lcn;
 
 	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
-	dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn);
+	dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
 
 	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
 	if (dev) {
@@ -1381,7 +1381,7 @@ static void cycx_x25_chan_timer(unsigned long d)
 		cycx_x25_chan_disconnect(dev);
 	else
 		printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
-				chan->card->devname, __FUNCTION__, dev->name);
+				chan->card->devname, __func__, dev->name);
 }
 
 /* Set logical channel state. */
@@ -1485,7 +1485,7 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
 	unsigned char *ptr;
 
 	if ((skb = dev_alloc_skb(1)) == NULL) {
-		printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
+		printk(KERN_ERR "%s: out of memory\n", __func__);
 		return;
 	}
 
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index f5d55ad02267..5f1ccb2b08b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -647,7 +647,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
 
 	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
 	if (!skb) {
-		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
+		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
 		goto refill;
 	}
 	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 8b7e5d2e2ac9..cbcbf6f0414c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -163,15 +163,17 @@ static void x25_close(struct net_device *dev)
 
 static int x25_rx(struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
+
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
-		skb->dev->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return NET_RX_DROP;
 	}
 
-	if (lapb_data_received(skb->dev, skb) == LAPB_OK)
+	if (lapb_data_received(dev, skb) == LAPB_OK)
 		return NET_RX_SUCCESS;
 
-	skb->dev->stats.rx_errors++;
+	dev->stats.rx_errors++;
 	dev_kfree_skb_any(skb);
 	return NET_RX_DROP;
 }
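The hdlc_x25.c hunk fixes a use-after-free: skb_share_check() may consume the skb it is handed (returning a clone, or NULL on allocation failure), so the old error path's skb->dev->stats.rx_dropped++ could dereference an already-freed buffer. Caching dev up front keeps the error accounting safe. Reduced to its essentials (buffer, device and shared_copy_or_free() below are hypothetical stand-ins for sk_buff, net_device and skb_share_check()):

    #include <stdlib.h>

    struct device { unsigned long rx_dropped; };
    struct buffer { struct device *dev; int shared; };

    /* Stand-in for skb_share_check(): may free its argument and fail. */
    static struct buffer *shared_copy_or_free(struct buffer *buf)
    {
            if (buf->shared) {
                    free(buf);      /* original buffer is gone */
                    return NULL;
            }
            return buf;
    }

    static int rx(struct buffer *buf)
    {
            struct device *dev = buf->dev;  /* saved while buf is valid */

            buf = shared_copy_or_free(buf);
            if (buf == NULL) {
                    dev->rx_dropped++;      /* safe: freed buf not touched */
                    return -1;
            }
            /* ... hand buf on to the protocol layer ... */
            free(buf);
            return 0;
    }

    int main(void)
    {
            struct device d = { 0 };
            struct buffer *b = malloc(sizeof(*b));

            b->dev = &d;
            b->shared = 1;
            return rx(b);   /* error path; d.rx_dropped becomes 1 */
    }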
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4518d0aa2480..4917a94943bd 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -548,7 +548,7 @@ static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
 {
 	st_cpc_tty_area *cpc_tty;
 
-	CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear);
+	CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear);
 
 	if (!tty || !tty->driver_data ) {
 		CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6f4276d461c0..a65b082a888a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2247,6 +2247,16 @@
 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2	0x0007
 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V	0x0009
 
+#define PCI_VENDOR_ID_NETXEN		0x4040
+#define PCI_DEVICE_ID_NX2031_10GXSR	0x0001
+#define PCI_DEVICE_ID_NX2031_10GCX4	0x0002
+#define PCI_DEVICE_ID_NX2031_4GCU	0x0003
+#define PCI_DEVICE_ID_NX2031_IMEZ	0x0004
+#define PCI_DEVICE_ID_NX2031_HMEZ	0x0005
+#define PCI_DEVICE_ID_NX2031_XG_MGMT	0x0024
+#define PCI_DEVICE_ID_NX2031_XG_MGMT2	0x0025
+#define PCI_DEVICE_ID_NX3031		0x0100
+
 #define PCI_VENDOR_ID_AKS		0x416c
 #define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
 
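The pci_ids.h hunk only reserves the NetXen vendor and device IDs in the shared header. For context, macros of this kind are normally consumed in a driver's PCI match table; the sketch below is illustrative only and is not the netxen driver's actual table (which lives in netxen_nic_main.c):

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Illustrative match table using the new IDs; not the driver's own. */
    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_NETXEN, PCI_DEVICE_ID_NX2031_10GXSR) },
            { PCI_DEVICE(PCI_VENDOR_ID_NETXEN, PCI_DEVICE_ID_NX3031) },
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);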