author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-30 19:38:05 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-30 19:38:05 -0400
commit		cbdd3deb52ac9b013a63e4a60530717f75ce3177 (patch)
tree		50fb6c194616cbbea2e8f3392b90223780c0714d
parent		4dcf39c6cc5f9f01c46aa71fe95cae9927edeeab (diff)
parent		c196d80f994ef4ffefd5a7c62e3f42bd75d538bc (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
  Fix a potential NULL pointer dereference in mace_interrupt() in drivers/net/pcmcia/nmclan_cs.c
  [PATCH kernel 2.6.22] PCMCIA-NETDEV : modify smc91c92_cs.c to become SMP safe
  S2io: Increment received packet count correctly
  S2io: Fix crash when resetting adapter
  S2io: Mask spurious interrupts
  S2IO: Implementing review comments from old patches
  S2IO: Checking for the return value of pci map function
  S2IO: Removing MSI support from driver
  S2IO: Removing 3 buffer mode support from the driver
  netxen: drop redudant spinlock
  netxen: Fix interrupt handling for multiport adapters
  netxen: re-init station address after h/w init
  tulip: Remove tulip maintainer
  forcedeth: mac address correct
  gfar: Fix modpost warning
  lib8390: comment on locking by Alan Cox
  Fix a potential NULL pointer dereference in write_bulk_callback() in drivers/net/usb/pegasus.c
-rw-r--r--  MAINTAINERS                            4
-rw-r--r--  drivers/net/forcedeth.c               36
-rw-r--r--  drivers/net/gianfar_mii.c              2
-rw-r--r--  drivers/net/gianfar_mii.h              2
-rw-r--r--  drivers/net/lib8390.c                 46
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c  44
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c         4
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c      23
-rw-r--r--  drivers/net/s2io-regs.h                5
-rw-r--r--  drivers/net/s2io.c                   542
-rw-r--r--  drivers/net/s2io.h                    11
-rw-r--r--  drivers/net/usb/pegasus.c              4
12 files changed, 279 insertions, 444 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index babd00b0c65c..5c784be00b54 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3674,11 +3674,9 @@ W: http://www.auk.cx/tms380tr/
 S:	Maintained
 
 TULIP NETWORK DRIVER
-P:	Valerie Henson
-M:	val@nmt.edu
 L:	tulip-users@lists.sourceforge.net
 W:	http://sourceforge.net/projects/tulip/
-S:	Maintained
+S:	Orphan
 
 TUN/TAP driver
 P:	Maxim Krasnyansky
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 661c747389e4..51e1cb472738 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -178,6 +178,7 @@
 #define DEV_HAS_STATISTICS_V2      0x0800  /* device supports hw statistics version 2 */
 #define DEV_HAS_TEST_EXTENDED      0x1000  /* device supports extended diagnostic test */
 #define DEV_HAS_MGMT_UNIT          0x2000  /* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR    0x4000  /* device supports correct mac address order */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -5172,7 +5173,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	/* check the workaround bit for correct mac address order */
 	txreg = readl(base + NvRegTransmitPoll);
-	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+	if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
+	    (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
 		/* mac address is already in correct order */
 		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
 		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
@@ -5500,67 +5502,67 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
 	{0,},
 };
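For readers skimming the forcedeth change: the new DEV_HAS_CORRECT_MACADDR flag simply short-circuits the per-board workaround test, so boards known to store the MAC in the right order skip the byte reversal. A minimal user-space sketch of the same decision logic follows; the flag value and the 6-byte unpacking mirror the diff, while the register constant and the harness are invented stand-ins for illustration (build with -std=c99):

	#include <stdio.h>
	#include <stdint.h>

	#define DEV_HAS_CORRECT_MACADDR 0x4000      /* flag bit from the diff */
	#define TXPOLL_MAC_ADDR_REV     0x00008000  /* stand-in for NVREG_TRANSMITPOLL_MAC_ADDR_REV */

	/* Unpack a MAC address from the two 32-bit EEPROM words, choosing
	 * byte order the same way nv_probe() does after this patch. */
	static void unpack_mac(uint32_t orig[2], uint32_t txreg,
	                       uint32_t driver_data, uint8_t mac[6])
	{
		if ((txreg & TXPOLL_MAC_ADDR_REV) ||
		    (driver_data & DEV_HAS_CORRECT_MACADDR)) {
			/* address already stored least-significant byte first */
			for (int i = 0; i < 4; i++)
				mac[i] = (orig[0] >> (8 * i)) & 0xff;
			mac[4] = (orig[1] >> 0) & 0xff;
			mac[5] = (orig[1] >> 8) & 0xff;
		} else {
			/* legacy boards store it reversed */
			mac[0] = (orig[1] >> 8) & 0xff;
			mac[1] = (orig[1] >> 0) & 0xff;
			for (int i = 0; i < 4; i++)
				mac[2 + i] = (orig[0] >> (8 * (3 - i))) & 0xff;
		}
	}

	int main(void)
	{
		uint32_t orig[2] = { 0x44332211, 0x00006655 };
		uint8_t mac[6];

		unpack_mac(orig, 0, DEV_HAS_CORRECT_MACADDR, mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}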
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index ac3596f45dd8..100bf410bf5f 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -245,7 +245,7 @@ int __init gfar_mdio_init(void)
 	return driver_register(&gianfar_mdio_driver);
 }
 
-void __exit gfar_mdio_exit(void)
+void gfar_mdio_exit(void)
 {
 	driver_unregister(&gianfar_mdio_driver);
 }
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 5d3400469514..b373091c7031 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -42,5 +42,5 @@ struct gfar_mii {
 int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
 int __init gfar_mdio_init(void);
-void __exit gfar_mdio_exit(void);
+void gfar_mdio_exit(void);
 #endif /* GIANFAR_PHY_H */
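Why dropping __exit silences the modpost warning: an __exit function lands in .exit.text, a section the linker may discard entirely (for built-in code with no module unload), so referencing it from init-time error handling is a section mismatch. A hedged sketch of the call pattern the change legalizes; the second registration step here is hypothetical, not the real gianfar code:

	static int __init sketch_gfar_init(void)
	{
		int err;

		err = gfar_mdio_init();
		if (err)
			return err;

		err = gfar_driver_register();   /* hypothetical follow-up step */
		if (err)
			gfar_mdio_exit();       /* legal now that it is not __exit:
			                           the symbol is guaranteed present */
		return err;
	}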
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 721ee38d2241..c429a5002dd6 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -143,6 +143,52 @@ static void __NS8390_init(struct net_device *dev, int startp);
  * annoying the transmit function is called bh atomic. That places
  * restrictions on the user context callers as disable_irq won't save
  * them.
+ *
+ * Additional explanation of problems with locking by Alan Cox:
+ *
+ * "The author (me) didn't use spin_lock_irqsave because the slowness of the
+ * card means that approach caused horrible problems like losing serial data
+ * at 38400 baud on some chips. Rememeber many 8390 nics on PCI were ISA
+ * chips with FPGA front ends.
+ *
+ * Ok the logic behind the 8390 is very simple:
+ *
+ * Things to know
+ * - IRQ delivery is asynchronous to the PCI bus
+ * - Blocking the local CPU IRQ via spin locks was too slow
+ * - The chip has register windows needing locking work
+ *
+ * So the path was once (I say once as people appear to have changed it
+ * in the mean time and it now looks rather bogus if the changes to use
+ * disable_irq_nosync_irqsave are disabling the local IRQ)
+ *
+ *
+ * Take the page lock
+ * Mask the IRQ on chip
+ * Disable the IRQ (but not mask locally- someone seems to have
+ * broken this with the lock validator stuff)
+ * [This must be _nosync as the page lock may otherwise
+ * deadlock us]
+ * Drop the page lock and turn IRQs back on
+ *
+ * At this point an existing IRQ may still be running but we can't
+ * get a new one
+ *
+ * Take the lock (so we know the IRQ has terminated) but don't mask
+ * the IRQs on the processor
+ * Set irqlock [for debug]
+ *
+ * Transmit (slow as ****)
+ *
+ * re-enable the IRQ
+ *
+ *
+ * We have to use disable_irq because otherwise you will get delayed
+ * interrupts on the APIC bus deadlocking the transmit path.
+ *
+ * Quite hairy but the chip simply wasn't designed for SMP and you can't
+ * even ACK an interrupt without risking corrupting other parallel
+ * activities on the chip." [lkml, 25 Jul 2007]
  */
 
 
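Alan Cox's comment describes a two-stage exclusion scheme rather than a single spin_lock_irqsave(). A condensed sketch of that transmit path follows, using the 8390 driver's real field names (struct ei_device, page_lock, irqlock, EN0_IMR) but heavily simplified; it is an illustration of the ordering, not the literal lib8390 code:

	static int sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct ei_device *ei_local = netdev_priv(dev);
		unsigned long flags;

		/* Stage 1: mask the chip's IRQ under the page lock, then let
		 * any ISR already in flight finish. */
		spin_lock_irqsave(&ei_local->page_lock, flags);
		outb_p(0x00, dev->base_addr + EN0_IMR);   /* mask on-chip IRQ */
		spin_unlock_irqrestore(&ei_local->page_lock, flags);

		disable_irq_nosync(dev->irq);  /* _nosync: avoid page_lock deadlock */

		/* Stage 2: retake the lock; no new interrupt can arrive, so the
		 * slow register-window transmit cannot be corrupted. */
		spin_lock(&ei_local->page_lock);
		ei_local->irqlock = 1;          /* debug marker from the comment */
		/* ... program transmit page registers, copy the frame ... */
		ei_local->irqlock = 0;
		spin_unlock(&ei_local->page_lock);

		enable_irq(dev->irq);
		return 0;
	}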
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 19e2fa940ac0..08a62acde8bf 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -335,7 +335,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->ahw.pdev = pdev;
 	adapter->ahw.pci_func  = pci_func_id;
 	spin_lock_init(&adapter->tx_lock);
-	spin_lock_init(&adapter->lock);
 
 	/* remap phys address */
 	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
@@ -895,8 +894,6 @@ static int netxen_nic_open(struct net_device *netdev)
 
 	/* Done here again so that even if phantom sw overwrote it,
 	 * we set it */
-	if (adapter->macaddr_set)
-		adapter->macaddr_set(adapter, netdev->dev_addr);
 	if (adapter->init_port
 	    && adapter->init_port(adapter, adapter->portnum) != 0) {
 		del_timer_sync(&adapter->watchdog_timer);
@@ -904,6 +901,8 @@ static int netxen_nic_open(struct net_device *netdev)
 			netxen_nic_driver_name, adapter->portnum);
 		return -EIO;
 	}
+	if (adapter->macaddr_set)
+		adapter->macaddr_set(adapter, netdev->dev_addr);
 
 	netxen_nic_set_link_parameters(adapter);
 
@@ -930,6 +929,8 @@ static int netxen_nic_close(struct net_device *netdev)
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
 
+	netxen_nic_disable_int(adapter);
+
 	cmd_buff = adapter->cmd_buf_arr;
 	for (i = 0; i < adapter->max_tx_desc_count; i++) {
 		buffrag = cmd_buff->frag_array;
@@ -1226,15 +1227,12 @@ static void netxen_tx_timeout_task(struct work_struct *work)
 {
 	struct netxen_adapter *adapter =
 		container_of(work, struct netxen_adapter, tx_timeout_task);
-	unsigned long flags;
 
 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, adapter->netdev->name);
 
-	spin_lock_irqsave(&adapter->lock, flags);
 	netxen_nic_close(adapter->netdev);
 	netxen_nic_open(adapter->netdev);
-	spin_unlock_irqrestore(&adapter->lock, flags);
 	adapter->netdev->trans_start = jiffies;
 	netif_wake_queue(adapter->netdev);
 }
@@ -1243,28 +1241,12 @@ static int
 netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
 {
 	u32 ret = 0;
-	u32 our_int = 0;
 
 	DPRINTK(INFO, "Entered handle ISR\n");
 	adapter->stats.ints++;
 
-	if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
-		our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
-		/* not our interrupt */
-		if ((our_int & (0x80 << adapter->portnum)) == 0)
-			return ret;
-	}
-
 	netxen_nic_disable_int(adapter);
 
-	if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
-		/* claim interrupt */
-		if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
-			writel(our_int & ~((u32)(0x80 << adapter->portnum)),
-			       NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
-		}
-	}
-
 	if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
 		if (netif_rx_schedule_prep(netdev)) {
 			/*
@@ -1298,6 +1280,7 @@ irqreturn_t netxen_intr(int irq, void *data)
1298{ 1280{
1299 struct netxen_adapter *adapter; 1281 struct netxen_adapter *adapter;
1300 struct net_device *netdev; 1282 struct net_device *netdev;
1283 u32 our_int = 0;
1301 1284
1302 if (unlikely(!irq)) { 1285 if (unlikely(!irq)) {
1303 return IRQ_NONE; /* Not our interrupt */ 1286 return IRQ_NONE; /* Not our interrupt */
@@ -1305,7 +1288,22 @@ irqreturn_t netxen_intr(int irq, void *data)
 
 	adapter = (struct netxen_adapter *)data;
 	netdev  = adapter->netdev;
-	/* process our status queue (for all 4 ports) */
+
+	if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+		our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
+		/* not our interrupt */
+		if ((our_int & (0x80 << adapter->portnum)) == 0)
+			return IRQ_NONE;
+	}
+
+	if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
+		/* claim interrupt */
+		if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+			writel(our_int & ~((u32)(0x80 << adapter->portnum)),
+			       NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
+		}
+	}
+
 	if (netif_running(netdev))
 		netxen_handle_int(adapter, netdev);
 
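The point of moving the "is this ours?" test out of netxen_handle_int() and into netxen_intr() is that on a shared interrupt line only the top-level handler can report IRQ_NONE to the kernel, which is what lets other handlers on the line run (and lets the spurious-IRQ detector work). A generic sketch of that shape, with a hypothetical adapter structure standing in for the netxen types:

	/* All names here are illustrative, not the real netxen structures. */
	struct sample_adapter {
		void __iomem *int_vector_reg;   /* per-port interrupt vector window */
		int portnum;
		struct net_device *netdev;
	};

	static void sample_handle_int(struct sample_adapter *adapter); /* real work */

	static irqreturn_t sample_intr(int irq, void *data)
	{
		struct sample_adapter *adapter = data;
		u32 status;

		/* Read the vector first; on a shared line another device
		 * may have raised this interrupt. */
		status = readl(adapter->int_vector_reg);
		if (!(status & (0x80u << adapter->portnum)))
			return IRQ_NONE;               /* not ours: let others run */

		/* Claim (ack) our bit before doing any work. */
		writel(status & ~(0x80u << adapter->portnum),
		       adapter->int_vector_reg);

		if (netif_running(adapter->netdev))
			sample_handle_int(adapter);

		return IRQ_HANDLED;
	}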
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 73da611fd536..997c2d0c83bb 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -996,7 +996,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	mace_private *lp = netdev_priv(dev);
-	kio_addr_t ioaddr = dev->base_addr;
+	kio_addr_t ioaddr;
 	int status;
 	int IntrCnt = MACE_MAX_IR_ITERATIONS;
 
@@ -1006,6 +1006,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
+	ioaddr = dev->base_addr;
+
 	if (lp->tx_irq_disabled) {
 		printk(
 			(lp->tx_irq_disabled?
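The nmclan change is the classic "don't touch the pointer before the guard" fix: dev->base_addr was read in the variable's initializer, before the handler's NULL check on dev_id could run. A minimal sketch of the corrected ordering (the mace-specific types and the chip servicing are elided):

	static irqreturn_t sketch_interrupt(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		unsigned long ioaddr;          /* no initializer: nothing is
		                                  dereferenced yet */

		if (dev == NULL) {
			printk(KERN_DEBUG "spurious interrupt %d for NULL device\n",
			       irq);
			return IRQ_NONE;
		}

		ioaddr = dev->base_addr;       /* safe: dev was checked above */
		/* ... service the chip at ioaddr ... */
		return IRQ_HANDLED;
	}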
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 7912dbd14251..af6728cb49c3 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1368,6 +1368,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	kio_addr_t ioaddr = dev->base_addr;
 	u_short num_pages;
 	short time_out, ir;
+	unsigned long flags;
 
 	netif_stop_queue(dev);
 
@@ -1395,6 +1396,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* A packet is now waiting. */
 	smc->packets_waiting++;
 
+	spin_lock_irqsave(&smc->lock, flags);
 	SMC_SELECT_BANK(2);	/* Paranoia, we should always be in window 2 */
 
 	/* need MC_RESET to keep the memory consistent. errata? */
@@ -1411,6 +1413,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	    /* Acknowledge the interrupt, send the packet. */
 	    outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT);
 	    smc_hardware_send_packet(dev);	/* Send the packet now.. */
+	    spin_unlock_irqrestore(&smc->lock, flags);
 	    return 0;
 	}
     }
@@ -1418,6 +1421,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
     /* Otherwise defer until the Tx-space-allocated interrupt. */
     DEBUG(2, "%s: memory allocation deferred.\n", dev->name);
     outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
+    spin_unlock_irqrestore(&smc->lock, flags);
 
     return 0;
 }
@@ -1523,6 +1527,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
     DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
 	  irq, ioaddr);
 
+    spin_lock(&smc->lock);
     smc->watchdog = 0;
     saved_bank = inw(ioaddr + BANK_SELECT);
     if ((saved_bank & 0xff00) != 0x3300) {
@@ -1620,6 +1625,7 @@ irq_done:
 	readb(smc->base+MEGAHERTZ_ISR);
     }
 #endif
+    spin_unlock(&smc->lock);
     return IRQ_RETVAL(handled);
 }
 
@@ -1902,6 +1908,9 @@ static void media_check(u_long arg)
     kio_addr_t ioaddr = dev->base_addr;
     u_short i, media, saved_bank;
     u_short link;
+    unsigned long flags;
+
+    spin_lock_irqsave(&smc->lock, flags);
 
     saved_bank = inw(ioaddr + BANK_SELECT);
 
@@ -1934,6 +1943,7 @@ static void media_check(u_long arg)
 	smc->media.expires = jiffies + HZ/100;
 	add_timer(&smc->media);
 	SMC_SELECT_BANK(saved_bank);
+	spin_unlock_irqrestore(&smc->lock, flags);
 	return;
     }
 
@@ -2007,6 +2017,7 @@ reschedule:
     smc->media.expires = jiffies + HZ;
     add_timer(&smc->media);
     SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irqrestore(&smc->lock, flags);
 }
 
 static int smc_link_ok(struct net_device *dev)
@@ -2094,14 +2105,14 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
     u16 saved_bank = inw(ioaddr + BANK_SELECT);
     int ret;
 
-    SMC_SELECT_BANK(3);
     spin_lock_irq(&smc->lock);
+    SMC_SELECT_BANK(3);
     if (smc->cfg & CFG_MII_SELECT)
 	ret = mii_ethtool_gset(&smc->mii_if, ecmd);
     else
 	ret = smc_netdev_get_ecmd(dev, ecmd);
-    spin_unlock_irq(&smc->lock);
     SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irq(&smc->lock);
     return ret;
 }
 
@@ -2112,14 +2123,14 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
     u16 saved_bank = inw(ioaddr + BANK_SELECT);
     int ret;
 
-    SMC_SELECT_BANK(3);
     spin_lock_irq(&smc->lock);
+    SMC_SELECT_BANK(3);
     if (smc->cfg & CFG_MII_SELECT)
 	ret = mii_ethtool_sset(&smc->mii_if, ecmd);
     else
 	ret = smc_netdev_set_ecmd(dev, ecmd);
-    spin_unlock_irq(&smc->lock);
     SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irq(&smc->lock);
     return ret;
 }
 
@@ -2130,11 +2141,11 @@ static u32 smc_get_link(struct net_device *dev)
     u16 saved_bank = inw(ioaddr + BANK_SELECT);
     u32 ret;
 
-    SMC_SELECT_BANK(3);
     spin_lock_irq(&smc->lock);
+    SMC_SELECT_BANK(3);
     ret = smc_link_ok(dev);
-    spin_unlock_irq(&smc->lock);
     SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irq(&smc->lock);
     return ret;
 }
 
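All of the smc91c92 hunks enforce one rule: any code that touches the chip's banked register window must hold smc->lock, and the bank switch and restore must both happen inside the locked region, otherwise a concurrent interrupt on another CPU can land in the wrong bank. A compact sketch of the ethtool-path ordering after the patch (field names as in the driver, body simplified):

	static u32 sketch_get_link(struct net_device *dev)
	{
		struct smc_private *smc = netdev_priv(dev);
		kio_addr_t ioaddr = dev->base_addr;
		u16 saved_bank = inw(ioaddr + BANK_SELECT);
		u32 ret;

		spin_lock_irq(&smc->lock);      /* take the lock first ...      */
		SMC_SELECT_BANK(3);             /* ... then switch the window   */
		ret = smc_link_ok(dev);
		SMC_SELECT_BANK(saved_bank);    /* restore while still locked   */
		spin_unlock_irq(&smc->lock);    /* only now may others race in  */
		return ret;
	}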
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 4cb710bbe729..cfa267914476 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -747,10 +747,9 @@ struct XENA_dev_config {
 #define MC_ERR_REG_MIRI_CRI_ERR_1	BIT(23)
 #define MC_ERR_REG_SM_ERR		BIT(31)
 #define MC_ERR_REG_ECC_ALL_SNG		(BIT(2) | BIT(3) | BIT(4) | BIT(5) |\
-					BIT(6) | BIT(7) | BIT(17) | BIT(19))
+					BIT(17) | BIT(19))
 #define MC_ERR_REG_ECC_ALL_DBL		(BIT(10) | BIT(11) | BIT(12) |\
-					BIT(13) | BIT(14) | BIT(15) |\
-					BIT(18) | BIT(20))
+					BIT(13) | BIT(18) | BIT(20))
 	u64 mc_err_mask;
 	u64 mc_err_alarm;
 
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index afef6c0c59fe..2be0a0f1b48f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -32,12 +32,12 @@
  * rx_ring_sz: This defines the number of receive blocks each ring can have.
  *     This is also an array of size 8.
  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
- *     values are 1, 2 and 3.
+ *     values are 1, 2.
  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  *     Tx descriptors that can be associated with each corresponding FIFO.
  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
- *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
+ *     2(MSI_X). Default value is '0(INTA)'
  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
  *     Possible values '1' for enable '0' for disable. Default is '0'
  * lro_max_pkts: This parameter defines maximum number of packets can be
@@ -84,14 +84,14 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.23.1"
+#define DRV_VERSION "2.0.25.1"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
 static char s2io_driver_version[] = DRV_VERSION;
 
-static int rxd_size[4] = {32,48,48,64};
-static int rxd_count[4] = {127,85,85,63};
+static int rxd_size[2] = {32,48};
+static int rxd_count[2] = {127,85};
 
 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 {
@@ -282,6 +282,7 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
282 ("lro_flush_due_to_max_pkts"), 282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"), 283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"), 284 ("mem_alloc_fail_cnt"),
285 ("pci_map_fail_cnt"),
285 ("watchdog_timer_cnt"), 286 ("watchdog_timer_cnt"),
286 ("mem_allocated"), 287 ("mem_allocated"),
287 ("mem_freed"), 288 ("mem_freed"),
@@ -426,7 +427,7 @@ S2IO_PARM_INT(bimodal, 0);
 S2IO_PARM_INT(l3l4hdr_size, 128);
 /* Frequency of Rx desc syncs expressed as power of 2 */
 S2IO_PARM_INT(rxsync_frequency, 3);
-/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
+/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
 S2IO_PARM_INT(intr_type, 0);
 /* Large receive offload feature */
 S2IO_PARM_INT(lro, 0);
@@ -701,7 +702,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 				(u64) tmp_p_addr_next;
 		}
 	}
-	if (nic->rxd_mode >= RXD_MODE_3A) {
+	if (nic->rxd_mode == RXD_MODE_3B) {
 		/*
 		 * Allocation of Storages for buffer addresses in 2BUFF mode
 		 * and the buffers as well.
@@ -870,7 +871,7 @@ static void free_shared_mem(struct s2io_nic *nic)
 		}
 	}
 
-	if (nic->rxd_mode >= RXD_MODE_3A) {
+	if (nic->rxd_mode == RXD_MODE_3B) {
 		/* Freeing buffer storage addresses in 2BUFF mode. */
 		for (i = 0; i < config->rx_ring_num; i++) {
 			blk_cnt = config->rx_cfg[i].num_rxd /
@@ -2233,44 +2234,6 @@ static void stop_nic(struct s2io_nic *nic)
 	writeq(val64, &bar0->adapter_control);
 }
 
-static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
-			sk_buff *skb)
-{
-	struct net_device *dev = nic->dev;
-	struct sk_buff *frag_list;
-	void *tmp;
-
-	/* Buffer-1 receives L3/L4 headers */
-	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
-			(nic->pdev, skb->data, l3l4hdr_size + 4,
-			PCI_DMA_FROMDEVICE);
-
-	/* skb_shinfo(skb)->frag_list will have L4 data payload */
-	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
-	if (skb_shinfo(skb)->frag_list == NULL) {
-		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
-		DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
-		return -ENOMEM ;
-	}
-	frag_list = skb_shinfo(skb)->frag_list;
-	skb->truesize += frag_list->truesize;
-	nic->mac_control.stats_info->sw_stat.mem_allocated
-		+= frag_list->truesize;
-	frag_list->next = NULL;
-	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
-	frag_list->data = tmp;
-	skb_reset_tail_pointer(frag_list);
-
-	/* Buffer-2 receives L4 data payload */
-	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
-			frag_list->data, dev->mtu,
-			PCI_DMA_FROMDEVICE);
-	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
-	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
-
-	return SUCCESS;
-}
-
 /**
  * fill_rx_buffers - Allocates the Rx side skbs
  * @nic: device private variable
@@ -2307,6 +2270,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	unsigned long flags;
 	struct RxD_t *first_rxdp = NULL;
 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
+	struct RxD1 *rxdp1;
+	struct RxD3 *rxdp3;
+	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
@@ -2359,7 +2325,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
 		}
 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
-		    ((nic->rxd_mode >= RXD_MODE_3A) &&
+		    ((nic->rxd_mode == RXD_MODE_3B) &&
 		     (rxdp->Control_2 & BIT(0)))) {
 			mac_control->rings[ring_no].rx_curr_put_info.
 			offset = off;
@@ -2370,10 +2336,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
 		if (nic->rxd_mode == RXD_MODE_1)
 			size += NET_IP_ALIGN;
-		else if (nic->rxd_mode == RXD_MODE_3B)
-			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 		else
-			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
 		/* allocate skb */
 		skb = dev_alloc_skb(size);
@@ -2392,33 +2356,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 				+= skb->truesize;
 		if (nic->rxd_mode == RXD_MODE_1) {
 			/* 1 buffer mode - normal operation mode */
+			rxdp1 = (struct RxD1*)rxdp;
 			memset(rxdp, 0, sizeof(struct RxD1));
 			skb_reserve(skb, NET_IP_ALIGN);
-			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
+			rxdp1->Buffer0_ptr = pci_map_single
 			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
 				PCI_DMA_FROMDEVICE);
+			if( (rxdp1->Buffer0_ptr == 0) ||
+				(rxdp1->Buffer0_ptr ==
+				DMA_ERROR_CODE))
+				goto pci_map_failed;
+
 			rxdp->Control_2 =
 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
 
-		} else if (nic->rxd_mode >= RXD_MODE_3A) {
+		} else if (nic->rxd_mode == RXD_MODE_3B) {
 			/*
-			 * 2 or 3 buffer mode -
-			 * Both 2 buffer mode and 3 buffer mode provides 128
+			 * 2 buffer mode -
+			 * 2 buffer mode provides 128
 			 * byte aligned receive buffers.
-			 *
-			 * 3 buffer mode provides header separation where in
-			 * skb->data will have L3/L4 headers where as
-			 * skb_shinfo(skb)->frag_list will have the L4 data
-			 * payload
 			 */
 
+			rxdp3 = (struct RxD3*)rxdp;
 			/* save buffer pointers to avoid frequent dma mapping */
-			Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
-			Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
+			Buffer0_ptr = rxdp3->Buffer0_ptr;
+			Buffer1_ptr = rxdp3->Buffer1_ptr;
 			memset(rxdp, 0, sizeof(struct RxD3));
 			/* restore the buffer pointers for dma sync*/
-			((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
-			((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
+			rxdp3->Buffer0_ptr = Buffer0_ptr;
+			rxdp3->Buffer1_ptr = Buffer1_ptr;
 
 			ba = &mac_control->rings[ring_no].ba[block_no][off];
 			skb_reserve(skb, BUF0_LEN);
@@ -2428,14 +2394,18 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			skb->data = (void *) (unsigned long)tmp;
 			skb_reset_tail_pointer(skb);
 
-			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
-				((struct RxD3*)rxdp)->Buffer0_ptr =
+			if (!(rxdp3->Buffer0_ptr))
+				rxdp3->Buffer0_ptr =
 				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
 					   PCI_DMA_FROMDEVICE);
 			else
 				pci_dma_sync_single_for_device(nic->pdev,
-				(dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
+				    (dma_addr_t) rxdp3->Buffer0_ptr,
 				    BUF0_LEN, PCI_DMA_FROMDEVICE);
+			if( (rxdp3->Buffer0_ptr == 0) ||
+				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
+				goto pci_map_failed;
+
 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
 			if (nic->rxd_mode == RXD_MODE_3B) {
 				/* Two buffer mode */
@@ -2444,33 +2414,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 				 * Buffer2 will have L3/L4 header plus
 				 * L4 payload
 				 */
-				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
+				rxdp3->Buffer2_ptr = pci_map_single
 				(nic->pdev, skb->data, dev->mtu + 4,
 						PCI_DMA_FROMDEVICE);
 
-				/* Buffer-1 will be dummy buffer. Not used */
-				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
-					((struct RxD3*)rxdp)->Buffer1_ptr =
+				if( (rxdp3->Buffer2_ptr == 0) ||
+					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
+					goto pci_map_failed;
+
+				rxdp3->Buffer1_ptr =
 						pci_map_single(nic->pdev,
 						ba->ba_1, BUF1_LEN,
 						PCI_DMA_FROMDEVICE);
+				if( (rxdp3->Buffer1_ptr == 0) ||
+					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
+					pci_unmap_single
+						(nic->pdev,
+						(dma_addr_t)skb->data,
+						dev->mtu + 4,
+						PCI_DMA_FROMDEVICE);
+					goto pci_map_failed;
 				}
 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
 				(dev->mtu + 4);
-			} else {
-				/* 3 buffer mode */
-				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
-					nic->mac_control.stats_info->sw_stat.\
-					mem_freed += skb->truesize;
-					dev_kfree_skb_irq(skb);
-					if (first_rxdp) {
-						wmb();
-						first_rxdp->Control_1 |=
-							RXD_OWN_XENA;
-					}
-					return -ENOMEM ;
-				}
 			}
 			rxdp->Control_2 |= BIT(0);
 		}
@@ -2505,6 +2472,11 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	}
 
 	return SUCCESS;
+pci_map_failed:
+	stats->pci_map_fail_cnt++;
+	stats->mem_freed += skb->truesize;
+	dev_kfree_skb_irq(skb);
+	return -ENOMEM;
 }
 
 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
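The new pci_map_failed label exists because pci_map_single() can fail, and a descriptor armed with a bad DMA address would crash or corrupt memory much later. A hedged sketch of the check-and-unwind idiom this patch introduces, using the driver's own names (DMA_ERROR_CODE is the architecture's sentinel as used by this driver; modern kernels would use dma_mapping_error() instead):

	static int sketch_fill_one_rxd(struct s2io_nic *nic, struct RxD3 *rxdp3,
				       struct sk_buff *skb, int len)
	{
		struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
		dma_addr_t addr;

		addr = pci_map_single(nic->pdev, skb->data, len,
				      PCI_DMA_FROMDEVICE);
		if (addr == 0 || addr == DMA_ERROR_CODE)  /* mapping failed */
			goto pci_map_failed;

		rxdp3->Buffer2_ptr = addr;
		/* ... fill size fields, hand ownership to the NIC ... */
		return SUCCESS;

	pci_map_failed:
		stats->pci_map_fail_cnt++;           /* counter added by this patch */
		stats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);              /* irq-safe free */
		return -ENOMEM;
	}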
@@ -2515,6 +2487,8 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
 	struct RxD_t *rxdp;
 	struct mac_info *mac_control;
 	struct buffAdd *ba;
+	struct RxD1 *rxdp1;
+	struct RxD3 *rxdp3;
 
 	mac_control = &sp->mac_control;
 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2526,40 +2500,30 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
 			continue;
 		}
 		if (sp->rxd_mode == RXD_MODE_1) {
+			rxdp1 = (struct RxD1*)rxdp;
 			pci_unmap_single(sp->pdev, (dma_addr_t)
-				 ((struct RxD1*)rxdp)->Buffer0_ptr,
+				rxdp1->Buffer0_ptr,
 				 dev->mtu +
 				 HEADER_ETHERNET_II_802_3_SIZE
 				 + HEADER_802_2_SIZE +
 				 HEADER_SNAP_SIZE,
 				 PCI_DMA_FROMDEVICE);
 			memset(rxdp, 0, sizeof(struct RxD1));
 		} else if(sp->rxd_mode == RXD_MODE_3B) {
+			rxdp3 = (struct RxD3*)rxdp;
 			ba = &mac_control->rings[ring_no].
 				ba[blk][j];
 			pci_unmap_single(sp->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer0_ptr,
+				rxdp3->Buffer0_ptr,
 				 BUF0_LEN,
-				 PCI_DMA_FROMDEVICE);
-			pci_unmap_single(sp->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer1_ptr,
-				 BUF1_LEN,
-				 PCI_DMA_FROMDEVICE);
-			pci_unmap_single(sp->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer2_ptr,
-				 dev->mtu + 4,
-				 PCI_DMA_FROMDEVICE);
-			memset(rxdp, 0, sizeof(struct RxD3));
-		} else {
-			pci_unmap_single(sp->pdev, (dma_addr_t)
-				((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
 				 PCI_DMA_FROMDEVICE);
 			pci_unmap_single(sp->pdev, (dma_addr_t)
-				((struct RxD3*)rxdp)->Buffer1_ptr,
-				l3l4hdr_size + 4,
+				rxdp3->Buffer1_ptr,
+				BUF1_LEN,
 				PCI_DMA_FROMDEVICE);
 			pci_unmap_single(sp->pdev, (dma_addr_t)
-				((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
+				rxdp3->Buffer2_ptr,
+				dev->mtu + 4,
 				 PCI_DMA_FROMDEVICE);
 			memset(rxdp, 0, sizeof(struct RxD3));
 		}
@@ -2756,6 +2720,8 @@ static void rx_intr_handler(struct ring_info *ring_data)
 	struct sk_buff *skb;
 	int pkt_cnt = 0;
 	int i;
+	struct RxD1* rxdp1;
+	struct RxD3* rxdp3;
 
 	spin_lock(&nic->rx_lock);
 	if (atomic_read(&nic->card_state) == CARD_DOWN) {
@@ -2796,32 +2762,23 @@ static void rx_intr_handler(struct ring_info *ring_data)
 			return;
 		}
 		if (nic->rxd_mode == RXD_MODE_1) {
+			rxdp1 = (struct RxD1*)rxdp;
 			pci_unmap_single(nic->pdev, (dma_addr_t)
-				 ((struct RxD1*)rxdp)->Buffer0_ptr,
+				 rxdp1->Buffer0_ptr,
 				 dev->mtu +
 				 HEADER_ETHERNET_II_802_3_SIZE +
 				 HEADER_802_2_SIZE +
 				 HEADER_SNAP_SIZE,
 				 PCI_DMA_FROMDEVICE);
 		} else if (nic->rxd_mode == RXD_MODE_3B) {
+			rxdp3 = (struct RxD3*)rxdp;
 			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer0_ptr,
+				 rxdp3->Buffer0_ptr,
 				 BUF0_LEN, PCI_DMA_FROMDEVICE);
 			pci_unmap_single(nic->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer2_ptr,
-				 dev->mtu + 4,
-				 PCI_DMA_FROMDEVICE);
-		} else {
-			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
-				 PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer1_ptr,
-				 l3l4hdr_size + 4,
-				 PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-				 ((struct RxD3*)rxdp)->Buffer2_ptr,
-				 dev->mtu, PCI_DMA_FROMDEVICE);
+				 rxdp3->Buffer2_ptr,
+				 dev->mtu + 4,
+				 PCI_DMA_FROMDEVICE);
 		}
 		prefetch(skb->data);
 		rx_osm_handler(ring_data, rxdp);
@@ -3425,23 +3382,8 @@ static void s2io_reset(struct s2io_nic * sp)
 	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
 
-	if (sp->device_type == XFRAME_II_DEVICE) {
-		int ret;
-		ret = pci_set_power_state(sp->pdev, 3);
-		if (!ret)
-			ret = pci_set_power_state(sp->pdev, 0);
-		else {
-			DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
-					__FUNCTION__);
-			goto old_way;
-		}
-		msleep(20);
-		goto new_way;
-	}
-old_way:
 	val64 = SW_RESET_ALL;
 	writeq(val64, &bar0->sw_reset);
-new_way:
 	if (strstr(sp->product_name, "CX4")) {
 		msleep(750);
 	}
@@ -3731,56 +3673,6 @@ static void store_xmsi_data(struct s2io_nic *nic)
 	}
 }
 
-int s2io_enable_msi(struct s2io_nic *nic)
-{
-	struct XENA_dev_config __iomem *bar0 = nic->bar0;
-	u16 msi_ctrl, msg_val;
-	struct config_param *config = &nic->config;
-	struct net_device *dev = nic->dev;
-	u64 val64, tx_mat, rx_mat;
-	int i, err;
-
-	val64 = readq(&bar0->pic_control);
-	val64 &= ~BIT(1);
-	writeq(val64, &bar0->pic_control);
-
-	err = pci_enable_msi(nic->pdev);
-	if (err) {
-		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
-			  nic->dev->name);
-		return err;
-	}
-
-	/*
-	 * Enable MSI and use MSI-1 in stead of the standard MSI-0
-	 * for interrupt handling.
-	 */
-	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
-	msg_val ^= 0x1;
-	pci_write_config_word(nic->pdev, 0x4c, msg_val);
-	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
-
-	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
-	msi_ctrl |= 0x10;
-	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
-
-	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
-	tx_mat = readq(&bar0->tx_mat0_n[0]);
-	for (i=0; i<config->tx_fifo_num; i++) {
-		tx_mat |= TX_MAT_SET(i, 1);
-	}
-	writeq(tx_mat, &bar0->tx_mat0_n[0]);
-
-	rx_mat = readq(&bar0->rx_mat);
-	for (i=0; i<config->rx_ring_num; i++) {
-		rx_mat |= RX_MAT_SET(i, 1);
-	}
-	writeq(rx_mat, &bar0->rx_mat);
-
-	dev->irq = nic->pdev->irq;
-	return 0;
-}
-
 static int s2io_enable_msi_x(struct s2io_nic *nic)
 {
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
@@ -4001,6 +3893,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mac_info *mac_control;
 	struct config_param *config;
 	int offload_type;
+	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
 
 	mac_control = &sp->mac_control;
 	config = &sp->config;
@@ -4085,11 +3978,18 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
 					sp->ufo_in_band_v,
 					sizeof(u64), PCI_DMA_TODEVICE);
+		if((txdp->Buffer_Pointer == 0) ||
+			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
+			goto pci_map_failed;
 		txdp++;
 	}
 
 	txdp->Buffer_Pointer = pci_map_single
 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
+	if((txdp->Buffer_Pointer == 0) ||
+		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
+		goto pci_map_failed;
+
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
 	if (offload_type == SKB_GSO_UDP)
@@ -4146,6 +4046,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_unlock_irqrestore(&sp->tx_lock, flags);
 
 	return 0;
+pci_map_failed:
+	stats->pci_map_fail_cnt++;
+	netif_stop_queue(dev);
+	stats->mem_freed += skb->truesize;
+	dev_kfree_skb(skb);
+	spin_unlock_irqrestore(&sp->tx_lock, flags);
+	return 0;
 }
 
 static void
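One subtlety in the transmit-path unwind above: the handler frees the skb and still returns 0. Under the 2.6.22-era hard_start_xmit contract, a nonzero return tells the core to requeue the very skb the driver holds, so a packet that has been consumed and dropped must report success. A sketch of that contract; the mapping_failed flag is a hypothetical stand-in for the DMA_ERROR_CODE tests in the diff:

	static int sketch_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		bool mapping_failed = false;   /* set by the real mapping code */

		/* ... map skb->data and fragments, set mapping_failed on error ... */

		if (mapping_failed) {
			netif_stop_queue(dev);  /* back-pressure the stack        */
			dev_kfree_skb(skb);     /* the skb is consumed here ...   */
			return 0;               /* ... so do not ask for a retry  */
		}
		/* ... hand the descriptor chain to the hardware ... */
		return 0;
	}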
@@ -4186,39 +4093,6 @@ static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
     return 0;
 }
 
-static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
-{
-    struct net_device *dev = (struct net_device *) dev_id;
-    struct s2io_nic *sp = dev->priv;
-    int i;
-    struct mac_info *mac_control;
-    struct config_param *config;
-
-    atomic_inc(&sp->isr_cnt);
-    mac_control = &sp->mac_control;
-    config = &sp->config;
-    DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
-
-    /* If Intr is because of Rx Traffic */
-    for (i = 0; i < config->rx_ring_num; i++)
-        rx_intr_handler(&mac_control->rings[i]);
-
-    /* If Intr is because of Tx Traffic */
-    for (i = 0; i < config->tx_fifo_num; i++)
-        tx_intr_handler(&mac_control->fifos[i]);
-
-    /*
-     * If the Rx buffer count is below the panic threshold then
-     * reallocate the buffers from the interrupt handler itself,
-     * else schedule a tasklet to reallocate the buffers.
-     */
-    for (i = 0; i < config->rx_ring_num; i++)
-        s2io_chk_rx_buffers(sp, i);
-
-    atomic_dec(&sp->isr_cnt);
-    return IRQ_HANDLED;
-}
-
 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
 {
     struct ring_info *ring = (struct ring_info *)dev_id;
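The deleted s2io_msi_handle() was a near-duplicate of the INTA handler: a single vector meant every ring and FIFO had to be scanned on each interrupt. The MSI-X handlers that survive are per-object, roughly as follows (handler name hypothetical, helpers as in the surrounding code):

    /* sketch: one MSI-X vector per ring means no scan over all rings */
    static irqreturn_t my_msix_rx_handler(int irq, void *dev_id)
    {
        struct ring_info *ring = dev_id;

        rx_intr_handler(ring);  /* service only the ring that fired */
        return IRQ_HANDLED;
    }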
@@ -4927,19 +4801,17 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
         ering->rx_max_pending = MAX_RX_DESC_1;
     else if (sp->rxd_mode == RXD_MODE_3B)
         ering->rx_max_pending = MAX_RX_DESC_2;
-    else if (sp->rxd_mode == RXD_MODE_3A)
-        ering->rx_max_pending = MAX_RX_DESC_3;
 
     ering->tx_max_pending = MAX_TX_DESC;
-    for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
+    for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
         tx_desc_count += sp->config.tx_cfg[i].fifo_len;
-    }
+
     DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
     ering->tx_pending = tx_desc_count;
     rx_desc_count = 0;
-    for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
+    for (i = 0 ; i < sp->config.rx_ring_num ; i++)
         rx_desc_count += sp->config.rx_cfg[i].num_rxd;
-    }
+
     ering->rx_pending = rx_desc_count;
 
     ering->rx_mini_max_pending = 0;
@@ -5923,6 +5795,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
     else
         tmp_stats[i++] = 0;
     tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
+    tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
     tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
     tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
     tmp_stats[i++] = stat_info->sw_stat.mem_freed;
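A caution on this table-driven export: each tmp_stats[i++] slot is matched purely by position to a name in the driver's ethtool strings array, so the new pci_map_fail_cnt entry reads correctly only because the corresponding string was inserted at the same offset (that half of the change is not shown in this hunk).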
@@ -6266,9 +6139,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                    u64 *temp2, int size)
 {
     struct net_device *dev = sp->dev;
-    struct sk_buff *frag_list;
+    struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
 
     if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+        struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
         /* allocate skb */
         if (*skb) {
             DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
@@ -6277,7 +6151,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
              * using same mapped address for the Rxd
              * buffer pointer
              */
-            ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
+            rxdp1->Buffer0_ptr = *temp0;
         } else {
             *skb = dev_alloc_skb(size);
             if (!(*skb)) {
@@ -6294,18 +6168,23 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
              * such it will be used for next rxd whose
              * Host Control is NULL
              */
-            ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
+            rxdp1->Buffer0_ptr = *temp0 =
                 pci_map_single(sp->pdev, (*skb)->data,
                     size - NET_IP_ALIGN,
                     PCI_DMA_FROMDEVICE);
+            if ((rxdp1->Buffer0_ptr == 0) ||
+                (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
+                goto memalloc_failed;
+            }
             rxdp->Host_Control = (unsigned long) (*skb);
         }
     } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+        struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
         /* Two buffer Mode */
         if (*skb) {
-            ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
-            ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
-            ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
+            rxdp3->Buffer2_ptr = *temp2;
+            rxdp3->Buffer0_ptr = *temp0;
+            rxdp3->Buffer1_ptr = *temp1;
         } else {
             *skb = dev_alloc_skb(size);
             if (!(*skb)) {
@@ -6318,73 +6197,47 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
             }
             sp->mac_control.stats_info->sw_stat.mem_allocated
                 += (*skb)->truesize;
-            ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
+            rxdp3->Buffer2_ptr = *temp2 =
                 pci_map_single(sp->pdev, (*skb)->data,
                     dev->mtu + 4,
                     PCI_DMA_FROMDEVICE);
-            ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
-                pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
-                    PCI_DMA_FROMDEVICE);
+            if ((rxdp3->Buffer2_ptr == 0) ||
+                (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
+                goto memalloc_failed;
+            }
+            rxdp3->Buffer0_ptr = *temp0 =
+                pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+                    PCI_DMA_FROMDEVICE);
+            if ((rxdp3->Buffer0_ptr == 0) ||
+                (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
+                pci_unmap_single(sp->pdev,
+                    (dma_addr_t)(*skb)->data,
+                    dev->mtu + 4, PCI_DMA_FROMDEVICE);
+                goto memalloc_failed;
+            }
             rxdp->Host_Control = (unsigned long) (*skb);
 
             /* Buffer-1 will be dummy buffer not used */
-            ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
-                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
-                    PCI_DMA_FROMDEVICE);
-        }
-    } else if ((rxdp->Host_Control == 0)) {
-        /* Three buffer mode */
-        if (*skb) {
-            ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
-            ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
-            ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
-        } else {
-            *skb = dev_alloc_skb(size);
-            if (!(*skb)) {
-                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
-                DBG_PRINT(INFO_DBG, "memory to allocate ");
-                DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
-                sp->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
-                return -ENOMEM;
-            }
-            sp->mac_control.stats_info->sw_stat.mem_allocated
-                += (*skb)->truesize;
-            ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
-                pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
-                    PCI_DMA_FROMDEVICE);
-            /* Buffer-1 receives L3/L4 headers */
-            ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
-                pci_map_single(sp->pdev, (*skb)->data,
-                    l3l4hdr_size + 4,
-                    PCI_DMA_FROMDEVICE);
-            /*
-             * skb_shinfo(skb)->frag_list will have L4
-             * data payload
-             */
-            skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
-                                ALIGN_SIZE);
-            if (skb_shinfo(*skb)->frag_list == NULL) {
-                DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ",
-                    dev->name);
-                sp->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
-                return -ENOMEM;
-            }
-            frag_list = skb_shinfo(*skb)->frag_list;
-            frag_list->next = NULL;
-            sp->mac_control.stats_info->sw_stat.mem_allocated
-                += frag_list->truesize;
-            /*
-             * Buffer-2 receives L4 data payload
-             */
-            ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
-                pci_map_single(sp->pdev, frag_list->data,
-                    dev->mtu, PCI_DMA_FROMDEVICE);
+            rxdp3->Buffer1_ptr = *temp1 =
+                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+                    PCI_DMA_FROMDEVICE);
+            if ((rxdp3->Buffer1_ptr == 0) ||
+                (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
+                pci_unmap_single(sp->pdev,
+                    (dma_addr_t)(*skb)->data,
+                    dev->mtu + 4, PCI_DMA_FROMDEVICE);
+                goto memalloc_failed;
+            }
         }
     }
     return 0;
+memalloc_failed:
+    stats->pci_map_fail_cnt++;
+    stats->mem_freed += (*skb)->truesize;
+    dev_kfree_skb(*skb);
+    return -ENOMEM;
 }
+
 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
                 int size)
 {
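One wrinkle worth flagging in the new unwind paths above: the rollbacks pass (dma_addr_t)(*skb)->data to pci_unmap_single(), i.e. a kernel virtual address cast to a DMA handle. What should be unmapped is the handle pci_map_single() returned, which the code has already stored in the descriptor. A corrected sketch of the Buffer0 failure branch, under that reading:

    if ((rxdp3->Buffer0_ptr == 0) ||
        (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
        /* undo the Buffer2 mapping via its stored DMA handle */
        pci_unmap_single(sp->pdev, rxdp3->Buffer2_ptr,
                 dev->mtu + 4, PCI_DMA_FROMDEVICE);
        goto memalloc_failed;
    }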
@@ -6395,10 +6248,6 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
         rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
         rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
-    } else {
-        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
-        rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
-        rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
     }
 }
 
@@ -6420,8 +6269,6 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
         size += NET_IP_ALIGN;
     else if (sp->rxd_mode == RXD_MODE_3B)
         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-    else
-        size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
 
     for (i = 0; i < config->rx_ring_num; i++) {
         blk_cnt = config->rx_cfg[i].num_rxd /
@@ -6431,7 +6278,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
             for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
                 rxdp = mac_control->rings[i].
                     rx_blocks[j].rxds[k].virt_addr;
-                if(sp->rxd_mode >= RXD_MODE_3A)
+                if(sp->rxd_mode == RXD_MODE_3B)
                     ba = &mac_control->rings[i].ba[j][k];
                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
                         &skb,(u64 *)&temp0_64,
@@ -6458,9 +6305,7 @@ static int s2io_add_isr(struct s2io_nic * sp)
     struct net_device *dev = sp->dev;
     int err = 0;
 
-    if (sp->intr_type == MSI)
-        ret = s2io_enable_msi(sp);
-    else if (sp->intr_type == MSI_X)
+    if (sp->intr_type == MSI_X)
         ret = s2io_enable_msi_x(sp);
     if (ret) {
         DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
@@ -6471,16 +6316,6 @@ static int s2io_add_isr(struct s2io_nic * sp)
     store_xmsi_data(sp);
 
     /* After proper initialization of H/W, register ISR */
-    if (sp->intr_type == MSI) {
-        err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
-              IRQF_SHARED, sp->name, dev);
-        if (err) {
-            pci_disable_msi(sp->pdev);
-            DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
-              dev->name);
-            return -1;
-        }
-    }
     if (sp->intr_type == MSI_X) {
         int i, msix_tx_cnt=0,msix_rx_cnt=0;
 
@@ -6567,14 +6402,6 @@ static void s2io_rem_isr(struct s2io_nic * sp)
         pci_disable_msix(sp->pdev);
     } else {
         free_irq(sp->pdev->irq, dev);
-        if (sp->intr_type == MSI) {
-            u16 val;
-
-            pci_disable_msi(sp->pdev);
-            pci_read_config_word(sp->pdev, 0x4c, &val);
-            val ^= 0x1;
-            pci_write_config_word(sp->pdev, 0x4c, val);
-        }
     }
     /* Waiting till all Interrupt handlers are complete */
     cnt = 0;
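The deleted branch undid the driver's own config-space tweak from the removed s2io_enable_msi() (the 0x4c writes visible at the top of this diff). With that function gone there is nothing left to undo, and pci_disable_msi() already restores INTx delivery on its own.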
@@ -6907,6 +6734,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
     }
 
     /* Updating statistics */
+    sp->stats.rx_packets++;
     rxdp->Host_Control = 0;
     if (sp->rxd_mode == RXD_MODE_1) {
         int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
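Bumping sp->stats.rx_packets at this single point, before the mode-specific length handling, counts every completed descriptor exactly once whichever buffer mode is active, rather than leaving each receive branch to maintain the counter itself.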
@@ -6914,7 +6742,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
         sp->stats.rx_bytes += len;
         skb_put(skb, len);
 
-    } else if (sp->rxd_mode >= RXD_MODE_3A) {
+    } else if (sp->rxd_mode == RXD_MODE_3B) {
         int get_block = ring_data->rx_curr_get_info.block_index;
         int get_off = ring_data->rx_curr_get_info.offset;
         int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
@@ -6924,18 +6752,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
         struct buffAdd *ba = &ring_data->ba[get_block][get_off];
         sp->stats.rx_bytes += buf0_len + buf2_len;
         memcpy(buff, ba->ba_0, buf0_len);
-
-        if (sp->rxd_mode == RXD_MODE_3A) {
-            int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
-
-            skb_put(skb, buf1_len);
-            skb->len += buf2_len;
-            skb->data_len += buf2_len;
-            skb_put(skb_shinfo(skb)->frag_list, buf2_len);
-            sp->stats.rx_bytes += buf1_len;
-
-        } else
-            skb_put(skb, buf2_len);
+        skb_put(skb, buf2_len);
     }
 
     if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
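In the surviving two-buffer mode the NIC splits each frame: Buffer0 receives the header area, Buffer2 the payload, and Buffer1 is the dummy noted earlier. The completion path stitches the skb together as, in sketch form (buff is assumed to point at headroom made by an skb_push() just above this hunk):

    memcpy(buff, ba->ba_0, buf0_len);   /* copy headers into the skb head */
    skb_put(skb, buf2_len);             /* payload was DMAed in place */

The removed 3A branch needed the frag_list gymnastics because its payload lived in a second skb; with one payload buffer a single skb_put() suffices.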
@@ -7131,7 +6948,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
         *dev_intr_type = INTA;
     }
 #else
-    if (*dev_intr_type > MSI_X) {
+    if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
         DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
               "Defaulting to INTA\n");
         *dev_intr_type = INTA;
@@ -7145,10 +6962,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
         *dev_intr_type = INTA;
     }
 
-    if (rx_ring_mode > 3) {
+    if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
         DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
-        DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
-        rx_ring_mode = 3;
+        DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
+        rx_ring_mode = 1;
     }
     return SUCCESS;
 }
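With three-buffer mode gone, rx_ring_mode accepts only 1 (single buffer) and 2 (two buffer), and the fallback becomes 1 rather than 3. Assuming the usual module-parameter spelling, loading with e.g. modprobe s2io rx_ring_mode=2 intr_type=2 selects two-buffer receive with MSI-X (intr_type 0 being INTA, per the defines in s2io.h below).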
@@ -7240,28 +7057,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         pci_disable_device(pdev);
         return -ENOMEM;
     }
-    if (dev_intr_type != MSI_X) {
-        if (pci_request_regions(pdev, s2io_driver_name)) {
-            DBG_PRINT(ERR_DBG, "Request Regions failed\n");
-            pci_disable_device(pdev);
-            return -ENODEV;
-        }
-    }
-    else {
-        if (!(request_mem_region(pci_resource_start(pdev, 0),
-             pci_resource_len(pdev, 0), s2io_driver_name))) {
-            DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
-            pci_disable_device(pdev);
-            return -ENODEV;
-        }
-        if (!(request_mem_region(pci_resource_start(pdev, 2),
-             pci_resource_len(pdev, 2), s2io_driver_name))) {
-            DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
-            release_mem_region(pci_resource_start(pdev, 0),
-                       pci_resource_len(pdev, 0));
-            pci_disable_device(pdev);
-            return -ENODEV;
-        }
-    }
+    if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
+        DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
+        pci_disable_device(pdev);
+        return -ENODEV;
+    }
 
     dev = alloc_etherdev(sizeof(struct s2io_nic));
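Claiming every BAR with one pci_request_regions() call also removes the asymmetry that made cleanup depend on the interrupt mode; the matching pci_release_regions() in the error and removal paths further down becomes unconditional for the same reason.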
@@ -7288,8 +7087,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     sp->rxd_mode = RXD_MODE_1;
     if (rx_ring_mode == 2)
         sp->rxd_mode = RXD_MODE_3B;
-    if (rx_ring_mode == 3)
-        sp->rxd_mode = RXD_MODE_3A;
 
     sp->intr_type = dev_intr_type;
 
@@ -7565,10 +7362,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
               dev->name);
         break;
-    case RXD_MODE_3A:
-        DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
-              dev->name);
-        break;
     }
 
     if (napi)
@@ -7577,9 +7370,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     case INTA:
         DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
         break;
-    case MSI:
-        DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
-        break;
     case MSI_X:
         DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
         break;
@@ -7619,14 +7409,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 mem_alloc_failed:
     free_shared_mem(sp);
     pci_disable_device(pdev);
-    if (dev_intr_type != MSI_X)
-        pci_release_regions(pdev);
-    else {
-        release_mem_region(pci_resource_start(pdev, 0),
-                   pci_resource_len(pdev, 0));
-        release_mem_region(pci_resource_start(pdev, 2),
-                   pci_resource_len(pdev, 2));
-    }
+    pci_release_regions(pdev);
     pci_set_drvdata(pdev, NULL);
     free_netdev(dev);
 
@@ -7661,14 +7444,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
     free_shared_mem(sp);
     iounmap(sp->bar0);
     iounmap(sp->bar1);
-    if (sp->intr_type != MSI_X)
-        pci_release_regions(pdev);
-    else {
-        release_mem_region(pci_resource_start(pdev, 0),
-                   pci_resource_len(pdev, 0));
-        release_mem_region(pci_resource_start(pdev, 2),
-                   pci_resource_len(pdev, 2));
-    }
+    pci_release_regions(pdev);
     pci_set_drvdata(pdev, NULL);
     free_netdev(dev);
     pci_disable_device(pdev);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3887fe63a908..92983ee7df8c 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -74,6 +74,10 @@ static int debug_level = ERR_DBG;
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, args...)  if(!(debug_level<dbg_level)) printk(args)
 
+#ifndef DMA_ERROR_CODE
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+#endif
+
 /* Protocol assist features of the NIC */
 #define L3_CKSUM_OK 0xFFFF
 #define L4_CKSUM_OK 0xFFFF
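The #ifndef guard matters here: some architectures already define DMA_ERROR_CODE as their DMA-mapping failure sentinel, and the driver must compare against the platform's value there. The ~(dma_addr_t)0 fallback only covers platforms without a sentinel, where a streaming mapping effectively never fails.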
@@ -97,6 +101,7 @@ struct swStat {
     unsigned long long num_aggregations;
     /* Other statistics */
     unsigned long long mem_alloc_fail_cnt;
+    unsigned long long pci_map_fail_cnt;
     unsigned long long watchdog_timer_cnt;
     unsigned long long mem_allocated;
     unsigned long long mem_freed;
@@ -575,8 +580,7 @@ struct RxD_block {
 #define SIZE_OF_BLOCK 4096
 
 #define RXD_MODE_1 0 /* One Buffer mode */
-#define RXD_MODE_3A 1 /* Three Buffer mode */
-#define RXD_MODE_3B 2 /* Two Buffer mode */
+#define RXD_MODE_3B 1 /* Two Buffer mode */
 
 /* Structure to hold virtual addresses of Buf0 and Buf1 in
  * 2buf mode. */
@@ -876,7 +880,6 @@ struct s2io_nic {
     u16 lro_max_aggr_per_sess;
 
 #define INTA 0
-#define MSI 1
 #define MSI_X 2
     u8 intr_type;
 
@@ -1020,8 +1023,6 @@ static int s2io_poll(struct net_device *dev, int *budget);
 static void s2io_init_pci(struct s2io_nic * sp);
 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
 static void s2io_alarm_handle(unsigned long data);
-static int s2io_enable_msi(struct s2io_nic *nic);
-static irqreturn_t s2io_msi_handle(int irq, void *dev_id);
 static irqreturn_t
 s2io_msix_ring_handle(int irq, void *dev_id);
 static irqreturn_t
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index a05fd97e5bc2..04cba6bf3d54 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -768,11 +768,13 @@ done:
 static void write_bulk_callback(struct urb *urb)
 {
     pegasus_t *pegasus = urb->context;
-    struct net_device *net = pegasus->net;
+    struct net_device *net;
 
     if (!pegasus)
         return;
 
+    net = pegasus->net;
+
     if (!netif_device_present(net) || !netif_running(net))
         return;
 
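The pegasus fix is the classic dereference-before-check: the initializer reads pegasus->net before the !pegasus guard can protect anything. In miniature (types hypothetical):

    struct foo *f = ptr->field;  /* dereference already happened here... */
    if (!ptr)                    /* ...so this check comes too late */
        return;

Deferring the load until after the guard, as above, is the entire fix.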