aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c503.c16
-rw-r--r--drivers/net/3c527.h50
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig37
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/ac3200.c16
-rw-r--r--drivers/net/arm/Kconfig13
-rw-r--r--drivers/net/arm/Makefile1
-rw-r--r--drivers/net/arm/am79c961a.c12
-rw-r--r--drivers/net/arm/ether00.c1017
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/etherh.c1
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bonding.h12
-rw-r--r--drivers/net/cassini.c40
-rw-r--r--drivers/net/cs89x0.c171
-rw-r--r--drivers/net/cs89x0.h19
-rw-r--r--drivers/net/e100.c192
-rw-r--r--drivers/net/e1000/e1000.h51
-rw-r--r--drivers/net/e1000/e1000_ethtool.c474
-rw-r--r--drivers/net/e1000/e1000_hw.c71
-rw-r--r--drivers/net/e1000/e1000_hw.h45
-rw-r--r--drivers/net/e1000/e1000_main.c1612
-rw-r--r--drivers/net/e1000/e1000_osdep.h2
-rw-r--r--drivers/net/e1000/e1000_param.c68
-rw-r--r--drivers/net/e2100.c14
-rw-r--r--drivers/net/es3210.c14
-rw-r--r--drivers/net/forcedeth.c164
-rw-r--r--drivers/net/gianfar.c6
-rw-r--r--drivers/net/gianfar.h4
-rw-r--r--drivers/net/gianfar_mii.c5
-rw-r--r--drivers/net/gianfar_sysfs.c2
-rw-r--r--drivers/net/hamradio/6pack.c7
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hp-plus.c12
-rw-r--r--drivers/net/hp.c12
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/ibm_emac/ibm_emac.h3
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ifb.c294
-rw-r--r--drivers/net/irda/Kconfig3
-rw-r--r--drivers/net/irda/Makefile2
-rw-r--r--drivers/net/irda/irport.c15
-rw-r--r--drivers/net/irda/irtty-sir.c18
-rw-r--r--drivers/net/irda/sir-dev.h2
-rw-r--r--drivers/net/irda/sir_core.c56
-rw-r--r--drivers/net/irda/sir_dev.c10
-rw-r--r--drivers/net/irda/sir_dongle.c2
-rw-r--r--drivers/net/irda/sir_kthread.c11
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/iseries_veth.c4
-rw-r--r--drivers/net/lance.c22
-rw-r--r--drivers/net/lne390.c14
-rw-r--r--drivers/net/mac8390.c31
-rw-r--r--drivers/net/mv643xx_eth.c682
-rw-r--r--drivers/net/ne.c18
-rw-r--r--drivers/net/ne2.c16
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/ppp_async.c9
-rw-r--r--drivers/net/ppp_synctty.c9
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/sb1000.c4
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sk98lin/skdim.c2
-rw-r--r--drivers/net/sk98lin/skge.c131
-rw-r--r--drivers/net/sk98lin/skgepnmi.c8
-rw-r--r--drivers/net/skge.c20
-rw-r--r--drivers/net/sky2.c219
-rw-r--r--drivers/net/slip.c11
-rw-r--r--drivers/net/smc-ultra.c24
-rw-r--r--drivers/net/smc91x.c5
-rw-r--r--drivers/net/smc91x.h18
-rw-r--r--drivers/net/spider_net.c512
-rw-r--r--drivers/net/spider_net.h75
-rw-r--r--drivers/net/spider_net_ethtool.c19
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/tg3.c82
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/tulip/uli526x.c6
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c6
-rw-r--r--drivers/net/wan/x25_asy.c7
-rw-r--r--drivers/net/wd.c14
-rw-r--r--drivers/net/wireless/Kconfig10
-rw-r--r--drivers/net/wireless/airo.c21
-rw-r--r--drivers/net/wireless/atmel.c231
-rw-r--r--drivers/net/wireless/hostap/Kconfig22
-rw-r--r--drivers/net/wireless/hostap/Makefile3
-rw-r--r--drivers/net/wireless/hostap/hostap.h37
-rw-r--r--drivers/net/wireless/hostap/hostap_80211.h3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c21
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c15
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c36
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_common.h3
-rw-r--r--drivers/net/wireless/hostap/hostap_config.h13
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c12
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c60
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c7
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h4
-rw-r--r--drivers/net/wireless/ipw2100.c465
-rw-r--r--drivers/net/wireless/ipw2200.c56
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/strip.c10
-rw-r--r--drivers/net/wireless/wavelan.c38
-rw-r--r--drivers/net/wireless/wavelan_cs.c2
118 files changed, 3899 insertions, 3784 deletions
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5c5eebdb6914..dcc98afa65d7 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -148,14 +148,6 @@ el2_pio_probe(struct net_device *dev)
148 return -ENODEV; 148 return -ENODEV;
149} 149}
150 150
151static void cleanup_card(struct net_device *dev)
152{
153 /* NB: el2_close() handles free_irq */
154 release_region(dev->base_addr, EL2_IO_EXTENT);
155 if (ei_status.mem)
156 iounmap(ei_status.mem);
157}
158
159#ifndef MODULE 151#ifndef MODULE
160struct net_device * __init el2_probe(int unit) 152struct net_device * __init el2_probe(int unit)
161{ 153{
@@ -726,6 +718,14 @@ init_module(void)
726 return -ENXIO; 718 return -ENXIO;
727} 719}
728 720
721static void cleanup_card(struct net_device *dev)
722{
723 /* NB: el2_close() handles free_irq */
724 release_region(dev->base_addr, EL2_IO_EXTENT);
725 if (ei_status.mem)
726 iounmap(ei_status.mem);
727}
728
729void 729void
730cleanup_module(void) 730cleanup_module(void)
731{ 731{
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index c10f009ce9b6..53b5b071df08 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -32,43 +32,43 @@
32 32
33struct mc32_mailbox 33struct mc32_mailbox
34{ 34{
35 u16 mbox __attribute((packed)); 35 u16 mbox;
36 u16 data[1] __attribute((packed)); 36 u16 data[1];
37}; 37} __attribute((packed));
38 38
39struct skb_header 39struct skb_header
40{ 40{
41 u8 status __attribute((packed)); 41 u8 status;
42 u8 control __attribute((packed)); 42 u8 control;
43 u16 next __attribute((packed)); /* Do not change! */ 43 u16 next; /* Do not change! */
44 u16 length __attribute((packed)); 44 u16 length;
45 u32 data __attribute((packed)); 45 u32 data;
46}; 46} __attribute((packed));
47 47
48struct mc32_stats 48struct mc32_stats
49{ 49{
50 /* RX Errors */ 50 /* RX Errors */
51 u32 rx_crc_errors __attribute((packed)); 51 u32 rx_crc_errors;
52 u32 rx_alignment_errors __attribute((packed)); 52 u32 rx_alignment_errors;
53 u32 rx_overrun_errors __attribute((packed)); 53 u32 rx_overrun_errors;
54 u32 rx_tooshort_errors __attribute((packed)); 54 u32 rx_tooshort_errors;
55 u32 rx_toolong_errors __attribute((packed)); 55 u32 rx_toolong_errors;
56 u32 rx_outofresource_errors __attribute((packed)); 56 u32 rx_outofresource_errors;
57 57
58 u32 rx_discarded __attribute((packed)); /* via card pattern match filter */ 58 u32 rx_discarded; /* via card pattern match filter */
59 59
60 /* TX Errors */ 60 /* TX Errors */
61 u32 tx_max_collisions __attribute((packed)); 61 u32 tx_max_collisions;
62 u32 tx_carrier_errors __attribute((packed)); 62 u32 tx_carrier_errors;
63 u32 tx_underrun_errors __attribute((packed)); 63 u32 tx_underrun_errors;
64 u32 tx_cts_errors __attribute((packed)); 64 u32 tx_cts_errors;
65 u32 tx_timeout_errors __attribute((packed)) ; 65 u32 tx_timeout_errors;
66 66
67 /* various cruft */ 67 /* various cruft */
68 u32 dataA[6] __attribute((packed)); 68 u32 dataA[6];
69 u16 dataB[5] __attribute((packed)); 69 u16 dataB[5];
70 u32 dataC[14] __attribute((packed)); 70 u32 dataC[14];
71}; 71} __attribute((packed));
72 72
73#define STATUS_MASK 0x0F 73#define STATUS_MASK 0x0F
74#define COMPLETED (1<<7) 74#define COMPLETED (1<<7)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d2102a27d307..adfba44dac5a 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -505,7 +505,7 @@ enum chip_flags {
505#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) 505#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
506 506
507/* directly indexed by chip_t, above */ 507/* directly indexed by chip_t, above */
508const static struct { 508static const struct {
509 const char *name; 509 const char *name;
510 u32 version; /* from RTL8139C/RTL8139D docs */ 510 u32 version; /* from RTL8139C/RTL8139D docs */
511 u32 flags; 511 u32 flags;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e2fa29b612cd..626508afe1b1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
7 7
8config NETDEVICES 8config NETDEVICES
9 depends on NET 9 depends on NET
10 default y if UML
10 bool "Network device support" 11 bool "Network device support"
11 ---help--- 12 ---help---
12 You can say N here if you don't intend to connect your Linux box to 13 You can say N here if you don't intend to connect your Linux box to
@@ -27,6 +28,19 @@ config NETDEVICES
27# that for each of the symbols. 28# that for each of the symbols.
28if NETDEVICES 29if NETDEVICES
29 30
31config IFB
32 tristate "Intermediate Functional Block support"
33 depends on NET_CLS_ACT
34 ---help---
35 This is an intermidiate driver that allows sharing of
36 resources.
37 To compile this driver as a module, choose M here: the module
38 will be called ifb. If you want to use more than one ifb
39 device at a time, you need to compile this driver as a module.
40 Instead of 'ifb', the devices will then be called 'ifb0',
41 'ifb1' etc.
42 Look at the iproute2 documentation directory for usage etc
43
30config DUMMY 44config DUMMY
31 tristate "Dummy net driver support" 45 tristate "Dummy net driver support"
32 ---help--- 46 ---help---
@@ -129,7 +143,7 @@ config NET_SB1000
129 143
130 If you don't have this card, of course say N. 144 If you don't have this card, of course say N.
131 145
132 source "drivers/net/arcnet/Kconfig" 146source "drivers/net/arcnet/Kconfig"
133 147
134source "drivers/net/phy/Kconfig" 148source "drivers/net/phy/Kconfig"
135 149
@@ -844,7 +858,7 @@ config SMC9194
844 858
845config DM9000 859config DM9000
846 tristate "DM9000 support" 860 tristate "DM9000 support"
847 depends on ARM && NET_ETHERNET 861 depends on (ARM || MIPS) && NET_ETHERNET
848 select CRC32 862 select CRC32
849 select MII 863 select MII
850 ---help--- 864 ---help---
@@ -1374,7 +1388,7 @@ config FORCEDETH
1374 1388
1375config CS89x0 1389config CS89x0
1376 tristate "CS89x0 support" 1390 tristate "CS89x0 support"
1377 depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105 1391 depends on NET_PCI && (ISA || MACH_IXDP2351 || ARCH_IXDP2X01 || ARCH_PNX010X)
1378 ---help--- 1392 ---help---
1379 Support for CS89x0 chipset based Ethernet cards. If you have a 1393 Support for CS89x0 chipset based Ethernet cards. If you have a
1380 network (Ethernet) card of this type, say Y and read the 1394 network (Ethernet) card of this type, say Y and read the
@@ -1384,7 +1398,7 @@ config CS89x0
1384 1398
1385 To compile this driver as a module, choose M here and read 1399 To compile this driver as a module, choose M here and read
1386 <file:Documentation/networking/net-modules.txt>. The module will be 1400 <file:Documentation/networking/net-modules.txt>. The module will be
1387 called cs89x. 1401 called cs89x0.
1388 1402
1389config TC35815 1403config TC35815
1390 tristate "TOSHIBA TC35815 Ethernet support" 1404 tristate "TOSHIBA TC35815 Ethernet support"
@@ -1791,7 +1805,7 @@ config 68360_ENET
1791 1805
1792config FEC 1806config FEC
1793 bool "FEC ethernet controller (of ColdFire CPUs)" 1807 bool "FEC ethernet controller (of ColdFire CPUs)"
1794 depends on M523x || M527x || M5272 || M528x 1808 depends on M523x || M527x || M5272 || M528x || M520x
1795 help 1809 help
1796 Say Y here if you want to use the built-in 10/100 Fast ethernet 1810 Say Y here if you want to use the built-in 10/100 Fast ethernet
1797 controller on some Motorola ColdFire processors. 1811 controller on some Motorola ColdFire processors.
@@ -1901,6 +1915,15 @@ config E1000_NAPI
1901 1915
1902 If in doubt, say N. 1916 If in doubt, say N.
1903 1917
1918config E1000_DISABLE_PACKET_SPLIT
1919 bool "Disable Packet Split for PCI express adapters"
1920 depends on E1000
1921 help
1922 Say Y here if you want to use the legacy receive path for PCI express
1923 hadware.
1924
1925 If in doubt, say N.
1926
1904source "drivers/net/ixp2000/Kconfig" 1927source "drivers/net/ixp2000/Kconfig"
1905 1928
1906config MYRI_SBUS 1929config MYRI_SBUS
@@ -2663,10 +2686,6 @@ config SHAPER
2663 Class-Based Queueing (CBQ) scheduling support which you get if you 2686 Class-Based Queueing (CBQ) scheduling support which you get if you
2664 say Y to "QoS and/or fair queueing" above. 2687 say Y to "QoS and/or fair queueing" above.
2665 2688
2666 To set up and configure shaper devices, you need the shapecfg
2667 program, available from <ftp://shadow.cabi.net/pub/Linux/> in the
2668 shaper package.
2669
2670 To compile this driver as a module, choose M here: the module 2689 To compile this driver as a module, choose M here: the module
2671 will be called shaper. If unsure, say N. 2690 will be called shaper. If unsure, say N.
2672 2691
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b74a7cb5bae6..00e72b12fb92 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -125,6 +125,7 @@ ifeq ($(CONFIG_SLIP_COMPRESSED),y)
125endif 125endif
126 126
127obj-$(CONFIG_DUMMY) += dummy.o 127obj-$(CONFIG_DUMMY) += dummy.o
128obj-$(CONFIG_IFB) += ifb.o
128obj-$(CONFIG_DE600) += de600.o 129obj-$(CONFIG_DE600) += de600.o
129obj-$(CONFIG_DE620) += de620.o 130obj-$(CONFIG_DE620) += de620.o
130obj-$(CONFIG_LANCE) += lance.o 131obj-$(CONFIG_LANCE) += lance.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 8a0af5453e21..7952dc6d77e3 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -123,14 +123,6 @@ static int __init do_ac3200_probe(struct net_device *dev)
123 return -ENODEV; 123 return -ENODEV;
124} 124}
125 125
126static void cleanup_card(struct net_device *dev)
127{
128 /* Someday free_irq may be in ac_close_card() */
129 free_irq(dev->irq, dev);
130 release_region(dev->base_addr, AC_IO_EXTENT);
131 iounmap(ei_status.mem);
132}
133
134#ifndef MODULE 126#ifndef MODULE
135struct net_device * __init ac3200_probe(int unit) 127struct net_device * __init ac3200_probe(int unit)
136{ 128{
@@ -406,6 +398,14 @@ init_module(void)
406 return -ENXIO; 398 return -ENXIO;
407} 399}
408 400
401static void cleanup_card(struct net_device *dev)
402{
403 /* Someday free_irq may be in ac_close_card() */
404 free_irq(dev->irq, dev);
405 release_region(dev->base_addr, AC_IO_EXTENT);
406 iounmap(ei_status.mem);
407}
408
409void 409void
410cleanup_module(void) 410cleanup_module(void)
411{ 411{
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 470364deded0..625184b65e38 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -31,16 +31,3 @@ config ARM_ETHERH
31 help 31 help
32 If you have an Acorn system with one of these network cards, you 32 If you have an Acorn system with one of these network cards, you
33 should say Y to this option if you wish to use it with Linux. 33 should say Y to this option if you wish to use it with Linux.
34
35config ARM_ETHER00
36 tristate "Altera Ether00 support"
37 depends on NET_ETHERNET && ARM && ARCH_CAMELOT
38 help
39 This is the driver for Altera's ether00 ethernet mac IP core. Say
40 Y here if you want to build support for this into the kernel. It
41 is also available as a module (say M here) that can be inserted/
42 removed from the kernel at the same time as the PLD is configured.
43 If this driver is running on an epxa10 development board then it
44 will generate a suitable hw address based on the board serial
45 number (MTD support is required for this). Otherwise you will
46 need to set a suitable hw address using ifconfig.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index b0d706834d89..bc263edf06a7 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -4,7 +4,6 @@
4# 4#
5 5
6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o 6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
7obj-$(CONFIG_ARM_ETHER00) += ether00.o
8obj-$(CONFIG_ARM_ETHERH) += etherh.o 7obj-$(CONFIG_ARM_ETHERH) += etherh.o
9obj-$(CONFIG_ARM_ETHER3) += ether3.o 8obj-$(CONFIG_ARM_ETHER3) += ether3.o
10obj-$(CONFIG_ARM_ETHER1) += ether1.o 9obj-$(CONFIG_ARM_ETHER1) += ether1.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 877891a29aaa..53e3afc1b7b7 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -668,9 +668,8 @@ static void __init am79c961_banner(void)
668 printk(KERN_INFO "%s", version); 668 printk(KERN_INFO "%s", version);
669} 669}
670 670
671static int __init am79c961_probe(struct device *_dev) 671static int __init am79c961_probe(struct platform_device *pdev)
672{ 672{
673 struct platform_device *pdev = to_platform_device(_dev);
674 struct resource *res; 673 struct resource *res;
675 struct net_device *dev; 674 struct net_device *dev;
676 struct dev_priv *priv; 675 struct dev_priv *priv;
@@ -758,15 +757,16 @@ out:
758 return ret; 757 return ret;
759} 758}
760 759
761static struct device_driver am79c961_driver = { 760static struct platform_driver am79c961_driver = {
762 .name = "am79c961",
763 .bus = &platform_bus_type,
764 .probe = am79c961_probe, 761 .probe = am79c961_probe,
762 .driver = {
763 .name = "am79c961",
764 },
765}; 765};
766 766
767static int __init am79c961_init(void) 767static int __init am79c961_init(void)
768{ 768{
769 return driver_register(&am79c961_driver); 769 return platform_driver_register(&am79c961_driver);
770} 770}
771 771
772__initcall(am79c961_init); 772__initcall(am79c961_init);
diff --git a/drivers/net/arm/ether00.c b/drivers/net/arm/ether00.c
deleted file mode 100644
index 4f1f4e31bda5..000000000000
--- a/drivers/net/arm/ether00.c
+++ /dev/null
@@ -1,1017 +0,0 @@
1/*
2 * drivers/net/ether00.c
3 *
4 * Copyright (C) 2001 Altera Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* includes */
22#include <linux/config.h>
23#include <linux/pci.h>
24#include <linux/sched.h>
25#include <linux/netdevice.h>
26#include <linux/skbuff.h>
27#include <linux/etherdevice.h>
28#include <linux/module.h>
29#include <linux/tqueue.h>
30#include <linux/mtd/mtd.h>
31#include <linux/pld/pld_hotswap.h>
32#include <asm/arch/excalibur.h>
33#include <asm/arch/hardware.h>
34#include <asm/irq.h>
35#include <asm/io.h>
36#include <asm/sizes.h>
37
38#include <asm/arch/ether00.h>
39#include <asm/arch/tdkphy.h>
40
41
42MODULE_AUTHOR("Clive Davies");
43MODULE_DESCRIPTION("Altera Ether00 IP core driver");
44MODULE_LICENSE("GPL");
45
46#define PKT_BUF_SZ 1540 /* Size of each rx buffer */
47#define ETH_NR 4 /* Number of MACs this driver supports */
48
49#define DEBUG(x)
50
51#define __dma_va(x) (unsigned int)((unsigned int)priv->dma_data+(((unsigned int)(x))&(EXC_SPSRAM_BLOCK0_SIZE-1)))
52#define __dma_pa(x) (unsigned int)(EXC_SPSRAM_BLOCK0_BASE+(((unsigned int)(x))-(unsigned int)priv->dma_data))
53
54#define ETHER00_BASE 0
55#define ETHER00_TYPE
56#define ETHER00_NAME "ether00"
57#define MAC_REG_SIZE 0x400 /* size of MAC register area */
58
59
60
61/* typedefs */
62
63/* The definition of the driver control structure */
64
65#define RX_NUM_BUFF 10
66#define RX_NUM_FDESC 10
67#define TX_NUM_FDESC 10
68
69struct tx_fda_ent{
70 FDA_DESC fd;
71 BUF_DESC bd;
72 BUF_DESC pad;
73};
74struct rx_fda_ent{
75 FDA_DESC fd;
76 BUF_DESC bd;
77 BUF_DESC pad;
78};
79struct rx_blist_ent{
80 FDA_DESC fd;
81 BUF_DESC bd;
82 BUF_DESC pad;
83};
84struct net_priv
85{
86 struct net_device_stats stats;
87 struct sk_buff* skb;
88 void* dma_data;
89 struct rx_blist_ent* rx_blist_vp;
90 struct rx_fda_ent* rx_fda_ptr;
91 struct tx_fda_ent* tx_fdalist_vp;
92 struct tq_struct tq_memupdate;
93 unsigned char memupdate_scheduled;
94 unsigned char rx_disabled;
95 unsigned char queue_stopped;
96 spinlock_t rx_lock;
97};
98
99static const char vendor_id[2]={0x07,0xed};
100
101#ifdef ETHER00_DEBUG
102
103/* Dump (most) registers for debugging puposes */
104
105static void dump_regs(struct net_device *dev){
106 struct net_priv* priv=dev->priv;
107 unsigned int* i;
108
109 printk("\n RX free descriptor area:\n");
110
111 for(i=(unsigned int*)priv->rx_fda_ptr;
112 i<((unsigned int*)(priv->rx_fda_ptr+RX_NUM_FDESC));){
113 printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
114 i+=4;
115 }
116
117 printk("\n RX buffer list:\n");
118
119 for(i=(unsigned int*)priv->rx_blist_vp;
120 i<((unsigned int*)(priv->rx_blist_vp+RX_NUM_BUFF));){
121 printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
122 i+=4;
123 }
124
125 printk("\n TX frame descriptor list:\n");
126
127 for(i=(unsigned int*)priv->tx_fdalist_vp;
128 i<((unsigned int*)(priv->tx_fdalist_vp+TX_NUM_FDESC));){
129 printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
130 i+=4;
131 }
132
133 printk("\ndma ctl=%#x\n",readw(ETHER_DMA_CTL(dev->base_addr)));
134 printk("txfrmptr=%#x\n",readw(ETHER_TXFRMPTR(dev->base_addr)));
135 printk("txthrsh=%#x\n",readw(ETHER_TXTHRSH(dev->base_addr)));
136 printk("txpollctr=%#x\n",readw(ETHER_TXPOLLCTR(dev->base_addr)));
137 printk("blfrmptr=%#x\n",readw(ETHER_BLFRMPTR(dev->base_addr)));
138 printk("rxfragsize=%#x\n",readw(ETHER_RXFRAGSIZE(dev->base_addr)));
139 printk("tx_int_en=%#x\n",readw(ETHER_INT_EN(dev->base_addr)));
140 printk("fda_bas=%#x\n",readw(ETHER_FDA_BAS(dev->base_addr)));
141 printk("fda_lim=%#x\n",readw(ETHER_FDA_LIM(dev->base_addr)));
142 printk("int_src=%#x\n",readw(ETHER_INT_SRC(dev->base_addr)));
143 printk("pausecnt=%#x\n",readw(ETHER_PAUSECNT(dev->base_addr)));
144 printk("rempaucnt=%#x\n",readw(ETHER_REMPAUCNT(dev->base_addr)));
145 printk("txconfrmstat=%#x\n",readw(ETHER_TXCONFRMSTAT(dev->base_addr)));
146 printk("mac_ctl=%#x\n",readw(ETHER_MAC_CTL(dev->base_addr)));
147 printk("arc_ctl=%#x\n",readw(ETHER_ARC_CTL(dev->base_addr)));
148 printk("tx_ctl=%#x\n",readw(ETHER_TX_CTL(dev->base_addr)));
149}
150#endif /* ETHER00_DEBUG */
151
152
153static int ether00_write_phy(struct net_device *dev, short address, short value)
154{
155 volatile int count = 1024;
156 writew(value,ETHER_MD_DATA(dev->base_addr));
157 writew( ETHER_MD_CA_BUSY_MSK |
158 ETHER_MD_CA_WR_MSK |
159 (address & ETHER_MD_CA_ADDR_MSK),
160 ETHER_MD_CA(dev->base_addr));
161
162 /* Wait for the command to complete */
163 while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
164 count--;
165 }
166 if (!count){
167 printk("Write to phy failed, addr=%#x, data=%#x\n",address, value);
168 return -EIO;
169 }
170 return 0;
171}
172
173static int ether00_read_phy(struct net_device *dev, short address)
174{
175 volatile int count = 1024;
176 writew( ETHER_MD_CA_BUSY_MSK |
177 (address & ETHER_MD_CA_ADDR_MSK),
178 ETHER_MD_CA(dev->base_addr));
179
180 /* Wait for the command to complete */
181 while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
182 count--;
183 }
184 if (!count){
185 printk(KERN_WARNING "Read from phy timed out\n");
186 return -EIO;
187 }
188 return readw(ETHER_MD_DATA(dev->base_addr));
189}
190
191static void ether00_phy_int(int irq_num, void* dev_id, struct pt_regs* regs)
192{
193 struct net_device* dev=dev_id;
194 int irq_status;
195
196 irq_status=ether00_read_phy(dev, PHY_IRQ_CONTROL);
197
198 if(irq_status & PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK){
199 /*
200 * Autonegotiation complete on epxa10db. The mac doesn't
201 * twig if we're in full duplex so we need to check the
202 * phy status register and configure the mac accordingly
203 */
204 if(ether00_read_phy(dev, PHY_STATUS)&(PHY_STATUS_10T_F_MSK|PHY_STATUS_100_X_F_MSK)){
205 int tmp;
206 tmp=readl(ETHER_MAC_CTL(dev->base_addr));
207 writel(tmp|ETHER_MAC_CTL_FULLDUP_MSK,ETHER_MAC_CTL(dev->base_addr));
208 }
209 }
210
211 if(irq_status&PHY_IRQ_CONTROL_LS_CHG_INT_MSK){
212
213 if(ether00_read_phy(dev, PHY_STATUS)& PHY_STATUS_LINK_MSK){
214 /* Link is up */
215 netif_carrier_on(dev);
216 //printk("Carrier on\n");
217 }else{
218 netif_carrier_off(dev);
219 //printk("Carrier off\n");
220
221 }
222 }
223
224}
225
226static void setup_blist_entry(struct sk_buff* skb,struct rx_blist_ent* blist_ent_ptr){
227 /* Make the buffer consistent with the cache as the mac is going to write
228 * directly into it*/
229 blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
230 blist_ent_ptr->bd.BuffData=(char*)__pa(skb->data);
231 consistent_sync(skb->data,PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
232 /* align IP on 16 Byte (DMA_CTL set to skip 2 bytes) */
233 skb_reserve(skb,2);
234 blist_ent_ptr->bd.BuffLength=PKT_BUF_SZ-2;
235 blist_ent_ptr->fd.FDLength=1;
236 blist_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
237 blist_ent_ptr->bd.BDCtl=BDCTL_COWNSBD_MSK;
238}
239
240
241static int ether00_mem_init(struct net_device* dev)
242{
243 struct net_priv* priv=dev->priv;
244 struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr;
245 struct rx_blist_ent* blist_ent_ptr;
246 int i;
247
248 /*
249 * Grab a block of on chip SRAM to contain the control stuctures for
250 * the ethernet MAC. This uncached becuase it needs to be accesses by both
251 * bus masters (cpu + mac). However, it shouldn't matter too much in terms
252 * of speed as its on chip memory
253 */
254 priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE );
255 if (!priv->dma_data)
256 return -ENOMEM;
257
258 priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data;
259 /*
260 * Now share it out amongst the Frame descriptors and the buffer list
261 */
262 priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent));
263
264 /*
265 *Initalise the FDA list
266 */
267 /* set ownership to the controller */
268 memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent));
269
270 /*
271 *Initialise the buffer list
272 */
273 blist_ent_ptr=priv->rx_blist_vp;
274 i=0;
275 while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
276 struct sk_buff *skb;
277 blist_ent_ptr->fd.FDLength=1;
278 skb=dev_alloc_skb(PKT_BUF_SZ);
279 if(skb){
280 setup_blist_entry(skb,blist_ent_ptr);
281 blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1);
282 blist_ent_ptr->bd.BDStat=i++;
283 blist_ent_ptr++;
284 }
285 else
286 {
287 printk("Failed to initalise buffer list\n");
288 }
289
290 }
291 blist_ent_ptr--;
292 blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp);
293
294 priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF);
295
296 /* Initialise the buffers to be a circular list. The mac will then go poll
297 * the list until it finds a frame ready to transmit */
298 tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC;
299 for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){
300 tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1));
301 tx_fd_ptr->fd.FDCtl=1;
302 tx_fd_ptr->fd.FDStat=0;
303 tx_fd_ptr->fd.FDLength=1;
304
305 }
306 /* Change the last FDNext pointer to make a circular list */
307 tx_fd_ptr--;
308 tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp);
309
310 /* Point the device at the chain of Rx and Tx Buffers */
311 writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr));
312 writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr));
313 writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr));
314
315 writel((unsigned int)__dma_pa(priv->tx_fdalist_vp),ETHER_TXFRMPTR(dev->base_addr));
316
317 return 0;
318}
319
320
321void ether00_mem_update(void* dev_id)
322{
323 struct net_device* dev=dev_id;
324 struct net_priv* priv=dev->priv;
325 struct sk_buff* skb;
326 struct tx_fda_ent *fda_ptr=priv->tx_fdalist_vp;
327 struct rx_blist_ent* blist_ent_ptr;
328 unsigned long flags;
329
330 priv->tq_memupdate.sync=0;
331 //priv->tq_memupdate.list=
332 priv->memupdate_scheduled=0;
333
334 /* Transmit interrupt */
335 while(fda_ptr<(priv->tx_fdalist_vp+TX_NUM_FDESC)){
336 if(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && (ETHER_TX_STAT_COMP_MSK&fda_ptr->fd.FDStat)){
337 priv->stats.tx_packets++;
338 priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
339 skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
340 //printk("%d:txcln:fda=%#x skb=%#x\n",jiffies,fda_ptr,skb);
341 dev_kfree_skb(skb);
342 fda_ptr->fd.FDSystem=0;
343 fda_ptr->fd.FDStat=0;
344 fda_ptr->fd.FDCtl=0;
345 }
346 fda_ptr++;
347 }
348 /* Fill in any missing buffers from the received queue */
349 spin_lock_irqsave(&priv->rx_lock,flags);
350 blist_ent_ptr=priv->rx_blist_vp;
351 while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
352 /* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
353 if(!blist_ent_ptr->fd.FDSystem){
354 struct sk_buff *skb;
355 skb=dev_alloc_skb(PKT_BUF_SZ);
356 blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
357 if(skb){
358 setup_blist_entry(skb,blist_ent_ptr);
359 }
360 else
361 {
362 break;
363 }
364 }
365 blist_ent_ptr++;
366 }
367 spin_unlock_irqrestore(&priv->rx_lock,flags);
368 if(priv->queue_stopped){
369 //printk("%d:cln:start q\n",jiffies);
370 netif_start_queue(dev);
371 }
372 if(priv->rx_disabled){
373 //printk("%d:enable_irq\n",jiffies);
374 priv->rx_disabled=0;
375 writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
376
377 }
378}
379
380
381static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs)
382{
383 struct net_device* dev=dev_id;
384 struct net_priv* priv=dev->priv;
385
386 unsigned int interruptValue;
387
388 interruptValue=readl(ETHER_INT_SRC(dev->base_addr));
389
390 //printk("INT_SRC=%x\n",interruptValue);
391
392 if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK))
393 {
394 return; /* Interrupt wasn't caused by us!! */
395 }
396
397 if(readl(ETHER_INT_SRC(dev->base_addr))&
398 (ETHER_INT_SRC_INTMACRX_MSK |
399 ETHER_INT_SRC_FDAEX_MSK |
400 ETHER_INT_SRC_BLEX_MSK)) {
401 struct rx_blist_ent* blist_ent_ptr;
402 struct rx_fda_ent* fda_ent_ptr;
403 struct sk_buff* skb;
404
405 fda_ent_ptr=priv->rx_fda_ptr;
406 spin_lock(&priv->rx_lock);
407 while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){
408 int result;
409
410 if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK))
411 {
412 /* This frame is ready for processing */
413 /*find the corresponding buffer in the bufferlist */
414 blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat;
415 skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem;
416
417 /* Pass this skb up the stack */
418 skb->dev=dev;
419 skb_put(skb,fda_ent_ptr->fd.FDLength);
420 skb->protocol=eth_type_trans(skb,dev);
421 skb->ip_summed=CHECKSUM_UNNECESSARY;
422 result=netif_rx(skb);
423 /* Update statistics */
424 priv->stats.rx_packets++;
425 priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength;
426
427 /* Free the FDA entry */
428 fda_ent_ptr->bd.BDStat=0xff;
429 fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
430
431 /* Allocate a new skb and point the bd entry to it */
432 blist_ent_ptr->fd.FDSystem=0;
433 skb=dev_alloc_skb(PKT_BUF_SZ);
434 //printk("allocskb=%#x\n",skb);
435 if(skb){
436 setup_blist_entry(skb,blist_ent_ptr);
437
438 }
439 else if(!priv->memupdate_scheduled){
440 int tmp;
441 /* There are no buffers at the moment, so schedule */
442 /* the background task to sort this out */
443 schedule_task(&priv->tq_memupdate);
444 priv->memupdate_scheduled=1;
445 printk(KERN_DEBUG "%s:No buffers",dev->name);
446 /* If this interrupt was due to a lack of buffers then
447 * we'd better stop the receiver too */
448 if(interruptValue&ETHER_INT_SRC_BLEX_MSK){
449 priv->rx_disabled=1;
450 tmp=readl(ETHER_INT_SRC(dev->base_addr));
451 writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
452 printk(KERN_DEBUG "%s:Halting rx",dev->name);
453 }
454
455 }
456
457 }
458 fda_ent_ptr++;
459 }
460 spin_unlock(&priv->rx_lock);
461
462 /* Clear the interrupts */
463 writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK
464 | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr));
465
466 }
467
468 if(readl(ETHER_INT_SRC(dev->base_addr))&ETHER_INT_SRC_INTMACTX_MSK){
469
470 if(!priv->memupdate_scheduled){
471 schedule_task(&priv->tq_memupdate);
472 priv->memupdate_scheduled=1;
473 }
474 /* Clear the interrupt */
475 writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr));
476 }
477
478 if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK|
479 ETHER_INT_SRC_INTEARNOT_MSK|
480 ETHER_INT_SRC_INTLINK_MSK|
481 ETHER_INT_SRC_INTEXBD_MSK|
482 ETHER_INT_SRC_INTTXCTLCMP_MSK))
483 {
484 /*
485 * Not using any of these so they shouldn't happen
486 *
487 * In the cased of INTEXBD - if you allocate more
488 * than 28 decsriptors you may need to think about this
489 */
490 printk("Not using this interrupt\n");
491 }
492
493 if (readl(ETHER_INT_SRC(dev->base_addr)) &
494 (ETHER_INT_SRC_INTSBUS_MSK |
495 ETHER_INT_SRC_INTNRABT_MSK
496 |ETHER_INT_SRC_DMPARERR_MSK))
497 {
498 /*
499 * Hardware errors, we can either ignore them and hope they go away
500 *or reset the device, I'll try the first for now to see if they happen
501 */
502 printk("Hardware error\n");
503 }
504}
505
506static void ether00_setup_ethernet_address(struct net_device* dev)
507{
508 int tmp;
509
510 dev->addr_len=6;
511 writew(0,ETHER_ARC_ADR(dev->base_addr));
512 writel((dev->dev_addr[0]<<24) |
513 (dev->dev_addr[1]<<16) |
514 (dev->dev_addr[2]<<8) |
515 dev->dev_addr[3],
516 ETHER_ARC_DATA(dev->base_addr));
517
518 writew(4,ETHER_ARC_ADR(dev->base_addr));
519 tmp=readl(ETHER_ARC_DATA(dev->base_addr));
520 tmp&=0xffff;
521 tmp|=(dev->dev_addr[4]<<24) | (dev->dev_addr[5]<<16);
522 writel(tmp, ETHER_ARC_DATA(dev->base_addr));
523 /* Enable this entry in the ARC */
524
525 writel(1,ETHER_ARC_ENA(dev->base_addr));
526
527 return;
528}
529
530
/*
 * Reset the MAC controller into a quiescent, known state.
 *
 * Halts both the transmitter and the receiver, masks DMA interrupts,
 * and programs the transmit threshold, poll counter and DMA burst
 * parameters. Reception is left halted; it is re-enabled later by
 * the receive path. The register write order follows the hardware
 * bring-up sequence and must not be rearranged.
 */
static void ether00_reset(struct net_device *dev)
{
	/* reset the controller */
	writew(ETHER_MAC_CTL_RESET_MSK,ETHER_MAC_CTL(dev->base_addr));

	/*
	 * Make sure we're not going to send anything
	 */

	writew(ETHER_TX_CTL_TXHALT_MSK,ETHER_TX_CTL(dev->base_addr));

	/*
	 * Make sure we're not going to receive anything
	 */
	writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

	/*
	 * Disable Interrupts for now, and set the burst size to 8 bytes
	 */

	writel(ETHER_DMA_CTL_INTMASK_MSK |
	       ((8 << ETHER_DMA_CTL_DMBURST_OFST) & ETHER_DMA_CTL_DMBURST_MSK)
	       |(2<<ETHER_DMA_CTL_RXALIGN_OFST),
	       ETHER_DMA_CTL(dev->base_addr));


	/*
	 * Set TxThrsh - start transmitting a packet after 1514
	 * bytes or when a packet is complete, whichever comes first
	 */
	 writew(1514,ETHER_TXTHRSH(dev->base_addr));

	/*
	 * Set TxPollCtr.  Each cycle is
	 * 61.44 microseconds with a 33 MHz bus
	 */
	 writew(1,ETHER_TXPOLLCTR(dev->base_addr));

	/*
	 * Set Rx_Ctl - Turn off reception and let RxData turn it
	 * on later
	 */
	 writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

}
576
577
578static void ether00_set_multicast(struct net_device* dev)
579{
580 int count=dev->mc_count;
581
582 /* Set promiscuous mode if it's asked for. */
583
584 if (dev->flags&IFF_PROMISC){
585
586 writew( ETHER_ARC_CTL_COMPEN_MSK |
587 ETHER_ARC_CTL_BROADACC_MSK |
588 ETHER_ARC_CTL_GROUPACC_MSK |
589 ETHER_ARC_CTL_STATIONACC_MSK,
590 ETHER_ARC_CTL(dev->base_addr));
591 return;
592 }
593
594 /*
595 * Get all multicast packets if required, or if there are too
596 * many addresses to fit in hardware
597 */
598 if (dev->flags & IFF_ALLMULTI){
599 writew( ETHER_ARC_CTL_COMPEN_MSK |
600 ETHER_ARC_CTL_GROUPACC_MSK |
601 ETHER_ARC_CTL_BROADACC_MSK,
602 ETHER_ARC_CTL(dev->base_addr));
603 return;
604 }
605 if (dev->mc_count > (ETHER_ARC_SIZE - 1)){
606
607 printk(KERN_WARNING "Too many multicast addresses for hardware to filter - receiving all multicast packets\n");
608 writew( ETHER_ARC_CTL_COMPEN_MSK |
609 ETHER_ARC_CTL_GROUPACC_MSK |
610 ETHER_ARC_CTL_BROADACC_MSK,
611 ETHER_ARC_CTL(dev->base_addr));
612 return;
613 }
614
615 if(dev->mc_count){
616 struct dev_mc_list *mc_list_ent=dev->mc_list;
617 unsigned int temp,i;
618 DEBUG(printk("mc_count=%d mc_list=%#x\n",dev-> mc_count, dev->mc_list));
619 DEBUG(printk("mc addr=%02#x%02x%02x%02x%02x%02x\n",
620 mc_list_ent->dmi_addr[5],
621 mc_list_ent->dmi_addr[4],
622 mc_list_ent->dmi_addr[3],
623 mc_list_ent->dmi_addr[2],
624 mc_list_ent->dmi_addr[1],
625 mc_list_ent->dmi_addr[0]);)
626
627 /*
628 * The first 6 bytes are the MAC address, so
629 * don't change them!
630 */
631 writew(4,ETHER_ARC_ADR(dev->base_addr));
632 temp=readl(ETHER_ARC_DATA(dev->base_addr));
633 temp&=0xffff0000;
634
635 /* Disable the current multicast stuff */
636 writel(1,ETHER_ARC_ENA(dev->base_addr));
637
638 for(;;){
639 temp|=mc_list_ent->dmi_addr[1] |
640 mc_list_ent->dmi_addr[0]<<8;
641 writel(temp,ETHER_ARC_DATA(dev->base_addr));
642
643 i=readl(ETHER_ARC_ADR(dev->base_addr));
644 writew(i+4,ETHER_ARC_ADR(dev->base_addr));
645
646 temp=mc_list_ent->dmi_addr[5]|
647 mc_list_ent->dmi_addr[4]<<8 |
648 mc_list_ent->dmi_addr[3]<<16 |
649 mc_list_ent->dmi_addr[2]<<24;
650 writel(temp,ETHER_ARC_DATA(dev->base_addr));
651
652 count--;
653 if(!mc_list_ent->next || !count){
654 break;
655 }
656 DEBUG(printk("mc_list_next=%#x\n",mc_list_ent->next);)
657 mc_list_ent=mc_list_ent->next;
658
659
660 i=readl(ETHER_ARC_ADR(dev->base_addr));
661 writel(i+4,ETHER_ARC_ADR(dev->base_addr));
662
663 temp=mc_list_ent->dmi_addr[3]|
664 mc_list_ent->dmi_addr[2]<<8 |
665 mc_list_ent->dmi_addr[1]<<16 |
666 mc_list_ent->dmi_addr[0]<<24;
667 writel(temp,ETHER_ARC_DATA(dev->base_addr));
668
669 i=readl(ETHER_ARC_ADR(dev->base_addr));
670 writel(i+4,ETHER_ARC_ADR(dev->base_addr));
671
672 temp=mc_list_ent->dmi_addr[4]<<16 |
673 mc_list_ent->dmi_addr[5]<<24;
674
675 writel(temp,ETHER_ARC_DATA(dev->base_addr));
676
677 count--;
678 if(!mc_list_ent->next || !count){
679 break;
680 }
681 mc_list_ent=mc_list_ent->next;
682 }
683
684
685 if(count)
686 printk(KERN_WARNING "Multicast list size error\n");
687
688
689 writew( ETHER_ARC_CTL_BROADACC_MSK|
690 ETHER_ARC_CTL_COMPEN_MSK,
691 ETHER_ARC_CTL(dev->base_addr));
692
693 }
694
695 /* enable the active ARC enties */
696 writew((1<<(count+2))-1,ETHER_ARC_ENA(dev->base_addr));
697}
698
699
700static int ether00_open(struct net_device* dev)
701{
702 int result,tmp;
703 struct net_priv* priv;
704
705 if (!is_valid_ether_addr(dev->dev_addr))
706 return -EINVAL;
707
708 /* Install interrupt handlers */
709 result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
710 if(result)
711 goto open_err1;
712
713 result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
714 if(result)
715 goto open_err2;
716
717 ether00_reset(dev);
718 result=ether00_mem_init(dev);
719 if(result)
720 goto open_err3;
721
722
723 ether00_setup_ethernet_address(dev);
724
725 ether00_set_multicast(dev);
726
727 result=ether00_write_phy(dev,PHY_CONTROL, PHY_CONTROL_ANEGEN_MSK | PHY_CONTROL_RANEG_MSK);
728 if(result)
729 goto open_err4;
730 result=ether00_write_phy(dev,PHY_IRQ_CONTROL, PHY_IRQ_CONTROL_LS_CHG_IE_MSK |
731 PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK);
732 if(result)
733 goto open_err4;
734
735 /* Start the device enable interrupts */
736 writew(ETHER_RX_CTL_RXEN_MSK
737// | ETHER_RX_CTL_STRIPCRC_MSK
738 | ETHER_RX_CTL_ENGOOD_MSK
739 | ETHER_RX_CTL_ENRXPAR_MSK| ETHER_RX_CTL_ENLONGERR_MSK
740 | ETHER_RX_CTL_ENOVER_MSK| ETHER_RX_CTL_ENCRCERR_MSK,
741 ETHER_RX_CTL(dev->base_addr));
742
743 writew(ETHER_TX_CTL_TXEN_MSK|
744 ETHER_TX_CTL_ENEXDEFER_MSK|
745 ETHER_TX_CTL_ENLCARR_MSK|
746 ETHER_TX_CTL_ENEXCOLL_MSK|
747 ETHER_TX_CTL_ENLATECOLL_MSK|
748 ETHER_TX_CTL_ENTXPAR_MSK|
749 ETHER_TX_CTL_ENCOMP_MSK,
750 ETHER_TX_CTL(dev->base_addr));
751
752 tmp=readl(ETHER_DMA_CTL(dev->base_addr));
753 writel(tmp&~ETHER_DMA_CTL_INTMASK_MSK,ETHER_DMA_CTL(dev->base_addr));
754
755 return 0;
756
757 open_err4:
758 ether00_reset(dev);
759 open_err3:
760 free_irq(2,dev);
761 open_err2:
762 free_irq(dev->irq,dev);
763 open_err1:
764 return result;
765
766}
767
768
/*
 * hard_start_xmit: queue an skb on the transmit frame descriptor ring
 * and hand ownership of it to the controller.
 *
 * NOTE(review): if the search loop exhausts TX_NUM_FDESC entries
 * without finding a free descriptor, the frame is written over a
 * descriptor the controller may still own. This appears to rely on
 * the queue being stopped (below) before the ring can fill - confirm
 * against the controller documentation.
 */
static int ether00_tx(struct sk_buff* skb, struct net_device* dev)
{
	struct net_priv *priv=dev->priv;
	struct tx_fda_ent *fda_ptr;
	int i;


	/*
	 * Find an empty slot in which to stick the frame
	 */
	fda_ptr=(struct tx_fda_ent*)__dma_va(readl(ETHER_TXFRMPTR(dev->base_addr)));
	i=0;
	while(i<TX_NUM_FDESC){
		if (fda_ptr->fd.FDStat||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
			/* Still owned by the controller - follow the link */
			fda_ptr =(struct tx_fda_ent*) __dma_va((struct tx_fda_ent*)fda_ptr->fd.FDNext);
		}
		else {
			break;
		}
		i++;
	}

	/* Write the skb data from the cache*/
	consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE);
	fda_ptr->bd.BuffData=(char*)__pa(skb->data);
	fda_ptr->bd.BuffLength=(unsigned short)skb->len;
	/* Save the pointer to the skb for freeing later */
	fda_ptr->fd.FDSystem=(unsigned int)skb;
	fda_ptr->fd.FDStat=0;
	/* Pass ownership of the buffers to the controller */
	fda_ptr->fd.FDCtl=1;
	fda_ptr->fd.FDCtl|=FDCTL_COWNSFD_MSK;

	/* If the next buffer in the list is full, stop the queue */
	fda_ptr=(struct tx_fda_ent*)__dma_va(fda_ptr->fd.FDNext);
	if ((fda_ptr->fd.FDStat)||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
		netif_stop_queue(dev);
		priv->queue_stopped=1;
	}

	return 0;
}
811
812static struct net_device_stats *ether00_stats(struct net_device* dev)
813{
814 struct net_priv *priv=dev->priv;
815 return &priv->stats;
816}
817
818
/*
 * Bring the interface down: halt reception and transmission, then
 * release the interrupt lines and the DMA mapping.
 *
 * NOTE(review): RX_CTL is accessed with readw/writew but TX_CTL with
 * readl/writel here, while ether00_reset() uses writew for both -
 * confirm the correct access width against the register map.
 */
static int ether00_stop(struct net_device* dev)
{
	struct net_priv *priv=dev->priv;
	int tmp;

	/* Stop/disable the device. */
	tmp=readw(ETHER_RX_CTL(dev->base_addr));
	tmp&=~(ETHER_RX_CTL_RXEN_MSK | ETHER_RX_CTL_ENGOOD_MSK);
	tmp|=ETHER_RX_CTL_RXHALT_MSK;
	writew(tmp,ETHER_RX_CTL(dev->base_addr));

	tmp=readl(ETHER_TX_CTL(dev->base_addr));
	tmp&=~ETHER_TX_CTL_TXEN_MSK;
	tmp|=ETHER_TX_CTL_TXHALT_MSK;
	writel(tmp,ETHER_TX_CTL(dev->base_addr));

	/* Free up system resources */
	free_irq(dev->irq,dev);
	free_irq(2,dev);
	iounmap(priv->dma_data);

	return 0;
}
842
843
/*
 * Derive the interface MAC address from the board serial number.
 *
 * On the EPXA10 dev board (camelot) the last two address bytes are
 * read from the OTP area of the flash via MTD; the rest is the
 * Altera vendor prefix. Without CONFIG_ARCH_CAMELOT + CONFIG_MTD
 * (or if the read fails) the address is left as-is and the user must
 * set one with ifconfig.
 */
static void ether00_get_ethernet_address(struct net_device* dev)
{
	struct mtd_info *mymtd=NULL;
	int i;
	size_t retlen;

	/*
	 * For the Epxa10 dev board (camelot), the ethernet MAC
	 * address is of the form 00:aa:aa:00:xx:xx where
	 * 00:aa:aa is the Altera vendor ID and xx:xx is the
	 * last 2 bytes of the board serial number, as programmed
	 * into the OTP area of the flash device on EBI1. If this
	 * isn't an expa10 dev board, or there's no mtd support to
	 * read the serial number from flash then we'll force the
	 * use to set their own mac address using ifconfig.
	 */

#ifdef CONFIG_ARCH_CAMELOT
#ifdef CONFIG_MTD
	/* Search for the device named "EPXA10DB flash"; the loop stops
	 * at the first NULL slot or at a name match.
	 * NOTE(review): if all MAX_MTD_DEVICES slots are occupied and
	 * none matches, mymtd is left pointing at the last (wrong)
	 * device - verify this cannot happen on target hardware. */
	for(i=0;i<MAX_MTD_DEVICES;i++){
		mymtd=get_mtd_device(NULL,i);
		if(!mymtd||!strcmp(mymtd->name,"EPXA10DB flash"))
			break;
	}

	if(!mymtd || !mymtd->read_user_prot_reg){
		printk(KERN_WARNING "%s: Failed to read MAC address from flash\n",dev->name);
	}else{
		mymtd->read_user_prot_reg(mymtd,2,1,&retlen,&dev->dev_addr[5]);
		mymtd->read_user_prot_reg(mymtd,3,1,&retlen,&dev->dev_addr[4]);
		dev->dev_addr[3]=0;
		dev->dev_addr[2]=vendor_id[1];
		dev->dev_addr[1]=vendor_id[0];
		dev->dev_addr[0]=0;
	}
#else
	printk(KERN_WARNING "%s: MTD support required to read MAC address from EPXA10 dev board\n", dev->name);
#endif
#endif

	if (!is_valid_ether_addr(dev->dev_addr))
		printk("%s: Invalid ethernet MAC address. Please set using "
			"ifconfig\n", dev->name);

}
890
/*
 * Table of registered ports, indexed by discovery order; used to find
 * and tear down devices on removal. dev_list[i]==NULL marks an unused
 * entry.
 */
static struct net_device* dev_list[ETH_NR];
898
899static int ether00_add_device(struct pldhs_dev_info* dev_info,void* dev_ps_data)
900{
901 struct net_device *dev;
902 struct net_priv *priv;
903 void *map_addr;
904 int result;
905 int i;
906
907 i=0;
908 while(dev_list[i] && i < ETH_NR)
909 i++;
910
911 if(i==ETH_NR){
912 printk(KERN_WARNING "ether00: Maximum number of ports reached\n");
913 return 0;
914 }
915
916
917 if (!request_mem_region(dev_info->base_addr, MAC_REG_SIZE, "ether00"))
918 return -EBUSY;
919
920 dev = alloc_etherdev(sizeof(struct net_priv));
921 if(!dev) {
922 result = -ENOMEM;
923 goto out_release;
924 }
925 priv = dev->priv;
926
927 priv->tq_memupdate.routine=ether00_mem_update;
928 priv->tq_memupdate.data=(void*) dev;
929
930 spin_lock_init(&priv->rx_lock);
931
932 map_addr=ioremap_nocache(dev_info->base_addr,SZ_4K);
933 if(!map_addr){
934 result = -ENOMEM;
935 out_kfree;
936 }
937
938 dev->open=ether00_open;
939 dev->stop=ether00_stop;
940 dev->set_multicast_list=ether00_set_multicast;
941 dev->hard_start_xmit=ether00_tx;
942 dev->get_stats=ether00_stats;
943
944 ether00_get_ethernet_address(dev);
945
946 SET_MODULE_OWNER(dev);
947
948 dev->base_addr=(unsigned int)map_addr;
949 dev->irq=dev_info->irq;
950 dev->features=NETIF_F_DYNALLOC | NETIF_F_HW_CSUM;
951
952 result=register_netdev(dev);
953 if(result){
954 printk("Ether00: Error %i registering driver\n",result);
955 goto out_unmap;
956 }
957 printk("registered ether00 device at %#x\n",dev_info->base_addr);
958
959 dev_list[i]=dev;
960
961 return result;
962
963 out_unmap:
964 iounmap(map_addr);
965 out_kfree:
966 free_netdev(dev);
967 out_release:
968 release_mem_region(dev_info->base_addr, MAC_REG_SIZE);
969 return result;
970}
971
972
973static int ether00_remove_devices(void)
974{
975 int i;
976
977 for(i=0;i<ETH_NR;i++){
978 if(dev_list[i]){
979 netif_device_detach(dev_list[i]);
980 unregister_netdev(dev_list[i]);
981 iounmap((void*)dev_list[i]->base_addr);
982 release_mem_region(dev_list[i]->base_addr, MAC_REG_SIZE);
983 free_netdev(dev_list[i]);
984 dev_list[i]=0;
985 }
986 }
987 return 0;
988}
989
/* Hotswap callbacks registered with the PLD core at module init. */
static struct pld_hotswap_ops ether00_pldhs_ops={
	.name = ETHER00_NAME,
	.add_device = ether00_add_device,
	.remove_devices = ether00_remove_devices,
};
995
996
997static void __exit ether00_cleanup_module(void)
998{
999 int result;
1000 result=ether00_remove_devices();
1001 if(result)
1002 printk(KERN_WARNING "ether00: failed to remove all devices\n");
1003
1004 pldhs_unregister_driver(ETHER00_NAME);
1005}
1006module_exit(ether00_cleanup_module);
1007
1008
1009static int __init ether00_mod_init(void)
1010{
1011 printk("mod init\n");
1012 return pldhs_register_driver(&ether00_pldhs_ops);
1013
1014}
1015
1016module_init(ether00_mod_init);
1017
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1cc53abc3a39..f1d5b1027ff7 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -69,7 +69,6 @@
69#include <asm/system.h> 69#include <asm/system.h>
70#include <asm/ecard.h> 70#include <asm/ecard.h>
71#include <asm/io.h> 71#include <asm/io.h>
72#include <asm/irq.h>
73 72
74static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; 73static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
75 74
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 942a2819576c..6a93b666eb72 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -50,7 +50,6 @@
50#include <asm/system.h> 50#include <asm/system.h>
51#include <asm/ecard.h> 51#include <asm/ecard.h>
52#include <asm/io.h> 52#include <asm/io.h>
53#include <asm/irq.h>
54 53
55#include "../8390.h" 54#include "../8390.h"
56 55
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7aa49b974dc5..df9d6e80c4f2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
2136 2136
2137 /* Setup paramaters for syncing RX/TX DMA descriptors */ 2137 /* Setup paramaters for syncing RX/TX DMA descriptors */
2138 dma_desc_align_mask = ~(dma_desc_align_size - 1); 2138 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2139 dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); 2139 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2140 2140
2141 return pci_module_init(&b44_driver); 2141 return pci_module_init(&b44_driver);
2142} 2142}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 854ddfb90da1..f2a63186ae05 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -169,9 +169,9 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
169 index = next_index; 169 index = next_index;
170 } 170 }
171 171
172 _unlock_tx_hashtbl(bond);
173
174 tlb_init_slave(slave); 172 tlb_init_slave(slave);
173
174 _unlock_tx_hashtbl(bond);
175} 175}
176 176
177/* Must be called before starting the monitor timer */ 177/* Must be called before starting the monitor timer */
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 015c7f1d1bc0..3dd78d048c3e 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
22#include "bond_3ad.h" 22#include "bond_3ad.h"
23#include "bond_alb.h" 23#include "bond_alb.h"
24 24
25#define DRV_VERSION "3.0.0" 25#define DRV_VERSION "3.0.1"
26#define DRV_RELDATE "November 8, 2005" 26#define DRV_RELDATE "January 9, 2006"
27#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 29
@@ -205,7 +205,7 @@ struct bonding {
205 * 205 *
206 * Caller must hold bond lock for read 206 * Caller must hold bond lock for read
207 */ 207 */
208extern inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) 208static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
209{ 209{
210 struct slave *slave = NULL; 210 struct slave *slave = NULL;
211 int i; 211 int i;
@@ -219,7 +219,7 @@ extern inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
219 return slave; 219 return slave;
220} 220}
221 221
222extern inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 222static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
223{ 223{
224 if (!slave || !slave->dev->master) { 224 if (!slave || !slave->dev->master) {
225 return NULL; 225 return NULL;
@@ -228,13 +228,13 @@ extern inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
228 return (struct bonding *)slave->dev->master->priv; 228 return (struct bonding *)slave->dev->master->priv;
229} 229}
230 230
231extern inline void bond_set_slave_inactive_flags(struct slave *slave) 231static inline void bond_set_slave_inactive_flags(struct slave *slave)
232{ 232{
233 slave->state = BOND_STATE_BACKUP; 233 slave->state = BOND_STATE_BACKUP;
234 slave->dev->flags |= IFF_NOARP; 234 slave->dev->flags |= IFF_NOARP;
235} 235}
236 236
237extern inline void bond_set_slave_active_flags(struct slave *slave) 237static inline void bond_set_slave_active_flags(struct slave *slave)
238{ 238{
239 slave->state = BOND_STATE_ACTIVE; 239 slave->state = BOND_STATE_ACTIVE;
240 slave->dev->flags &= ~IFF_NOARP; 240 slave->dev->flags &= ~IFF_NOARP;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 1f7ca453bb4a..6e295fce5c6f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
335 cas_disable_irq(cp, i); 335 cas_disable_irq(cp, i);
336} 336}
337 337
338static inline void cas_buffer_init(cas_page_t *cp)
339{
340 struct page *page = cp->buffer;
341 atomic_set((atomic_t *)&page->lru.next, 1);
342}
343
344static inline int cas_buffer_count(cas_page_t *cp)
345{
346 struct page *page = cp->buffer;
347 return atomic_read((atomic_t *)&page->lru.next);
348}
349
350static inline void cas_buffer_inc(cas_page_t *cp)
351{
352 struct page *page = cp->buffer;
353 atomic_inc((atomic_t *)&page->lru.next);
354}
355
356static inline void cas_buffer_dec(cas_page_t *cp)
357{
358 struct page *page = cp->buffer;
359 atomic_dec((atomic_t *)&page->lru.next);
360}
361
338static void cas_enable_irq(struct cas *cp, const int ring) 362static void cas_enable_irq(struct cas *cp, const int ring)
339{ 363{
340 if (ring == 0) { /* all but TX_DONE */ 364 if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
472{ 496{
473 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 497 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 PCI_DMA_FROMDEVICE); 498 PCI_DMA_FROMDEVICE);
499 cas_buffer_dec(page);
475 __free_pages(page->buffer, cp->page_order); 500 __free_pages(page->buffer, cp->page_order);
476 kfree(page); 501 kfree(page);
477 return 0; 502 return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
501 page->buffer = alloc_pages(flags, cp->page_order); 526 page->buffer = alloc_pages(flags, cp->page_order);
502 if (!page->buffer) 527 if (!page->buffer)
503 goto page_err; 528 goto page_err;
529 cas_buffer_init(page);
504 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, 530 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 cp->page_size, PCI_DMA_FROMDEVICE); 531 cp->page_size, PCI_DMA_FROMDEVICE);
506 return page; 532 return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
579 list_for_each_safe(elem, tmp, &list) { 605 list_for_each_safe(elem, tmp, &list) {
580 cas_page_t *page = list_entry(elem, cas_page_t, list); 606 cas_page_t *page = list_entry(elem, cas_page_t, list);
581 607
582 if (page_count(page->buffer) > 1) 608 if (cas_buffer_count(page) > 1)
583 continue; 609 continue;
584 610
585 list_del(elem); 611 list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347 cas_page_t *page = cp->rx_pages[1][index]; 1373 cas_page_t *page = cp->rx_pages[1][index];
1348 cas_page_t *new; 1374 cas_page_t *new;
1349 1375
1350 if (page_count(page->buffer) == 1) 1376 if (cas_buffer_count(page) == 1)
1351 return page; 1377 return page;
1352 1378
1353 new = cas_page_dequeue(cp); 1379 new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1367 cas_page_t **page1 = cp->rx_pages[1]; 1393 cas_page_t **page1 = cp->rx_pages[1];
1368 1394
1369 /* swap if buffer is in use */ 1395 /* swap if buffer is in use */
1370 if (page_count(page0[index]->buffer) > 1) { 1396 if (cas_buffer_count(page0[index]) > 1) {
1371 cas_page_t *new = cas_page_spare(cp, index); 1397 cas_page_t *new = cas_page_spare(cp, index);
1372 if (new) { 1398 if (new) {
1373 page1[index] = page0[index]; 1399 page1[index] = page0[index];
@@ -1925,8 +1951,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
1925 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1951 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1926#endif 1952#endif
1927 if (netif_msg_intr(cp)) 1953 if (netif_msg_intr(cp))
1928 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n", 1954 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
1929 cp->dev->name, status, compwb); 1955 cp->dev->name, status, (unsigned long long)compwb);
1930 /* process all the rings */ 1956 /* process all the rings */
1931 for (ring = 0; ring < N_TX_RINGS; ring++) { 1957 for (ring = 0; ring < N_TX_RINGS; ring++) {
1932#ifdef USE_TX_COMPWB 1958#ifdef USE_TX_COMPWB
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2039 skb->len += hlen - swivel; 2065 skb->len += hlen - swivel;
2040 2066
2041 get_page(page->buffer); 2067 get_page(page->buffer);
2068 cas_buffer_inc(page);
2042 frag->page = page->buffer; 2069 frag->page = page->buffer;
2043 frag->page_offset = off; 2070 frag->page_offset = off;
2044 frag->size = hlen - swivel; 2071 frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
2063 frag++; 2090 frag++;
2064 2091
2065 get_page(page->buffer); 2092 get_page(page->buffer);
2093 cas_buffer_inc(page);
2066 frag->page = page->buffer; 2094 frag->page = page->buffer;
2067 frag->page_offset = 0; 2095 frag->page_offset = 0;
2068 frag->size = hlen; 2096 frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2225 released = 0; 2253 released = 0;
2226 while (entry != last) { 2254 while (entry != last) {
2227 /* make a new buffer if it's still in use */ 2255 /* make a new buffer if it's still in use */
2228 if (page_count(page[entry]->buffer) > 1) { 2256 if (cas_buffer_count(page[entry]) > 1) {
2229 cas_page_t *new = cas_page_dequeue(cp); 2257 cas_page_t *new = cas_page_dequeue(cp);
2230 if (!new) { 2258 if (!new) {
2231 /* let the timer know that we need to 2259 /* let the timer know that we need to
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index a6078ad9b654..ef54ebeb29b8 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -87,6 +87,15 @@
87 Deepak Saxena : dsaxena@plexity.net 87 Deepak Saxena : dsaxena@plexity.net
88 : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support 88 : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
89 89
90 Dmitry Pervushin : dpervushin@ru.mvista.com
91 : PNX010X platform support
92
93 Deepak Saxena : dsaxena@plexity.net
94 : Intel IXDP2351 platform support
95
96 Dmitry Pervushin : dpervushin@ru.mvista.com
97 : PNX010X platform support
98
90*/ 99*/
91 100
92/* Always include 'config.h' first in case the user wants to turn on 101/* Always include 'config.h' first in case the user wants to turn on
@@ -100,7 +109,7 @@
100 * Note that even if DMA is turned off we still support the 'dma' and 'use_dma' 109 * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
101 * module options so we don't break any startup scripts. 110 * module options so we don't break any startup scripts.
102 */ 111 */
103#ifndef CONFIG_ARCH_IXDP2X01 112#ifndef CONFIG_ISA_DMA_API
104#define ALLOW_DMA 0 113#define ALLOW_DMA 0
105#else 114#else
106#define ALLOW_DMA 1 115#define ALLOW_DMA 1
@@ -171,11 +180,15 @@ static unsigned int cs8900_irq_map[] = {12,0,0,0};
171static unsigned int netcard_portlist[] __initdata = 180static unsigned int netcard_portlist[] __initdata =
172 { 0x0300, 0}; 181 { 0x0300, 0};
173static unsigned int cs8900_irq_map[] = {1,0,0,0}; 182static unsigned int cs8900_irq_map[] = {1,0,0,0};
183#elif defined(CONFIG_MACH_IXDP2351)
184static unsigned int netcard_portlist[] __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
185static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
186#include <asm/irq.h>
174#elif defined(CONFIG_ARCH_IXDP2X01) 187#elif defined(CONFIG_ARCH_IXDP2X01)
175#include <asm/irq.h> 188#include <asm/irq.h>
176static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 189static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
177static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 190static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
178#elif defined(CONFIG_ARCH_PNX0105) 191#elif defined(CONFIG_ARCH_PNX010X)
179#include <asm/irq.h> 192#include <asm/irq.h>
180#include <asm/arch/gpio.h> 193#include <asm/arch/gpio.h>
181#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ 194#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
@@ -338,30 +351,96 @@ out:
338} 351}
339#endif 352#endif
340 353
341static int 354#if defined(CONFIG_MACH_IXDP2351)
342readreg(struct net_device *dev, int portno) 355static u16
356readword(unsigned long base_addr, int portno)
343{ 357{
344 outw(portno, dev->base_addr + ADD_PORT); 358 return __raw_readw(base_addr + (portno << 1));
345 return inw(dev->base_addr + DATA_PORT);
346} 359}
347 360
348static void 361static void
349writereg(struct net_device *dev, int portno, int value) 362writeword(unsigned long base_addr, int portno, u16 value)
350{ 363{
351 outw(portno, dev->base_addr + ADD_PORT); 364 __raw_writew(value, base_addr + (portno << 1));
352 outw(value, dev->base_addr + DATA_PORT); 365}
366#elif defined(CONFIG_ARCH_IXDP2X01)
367static u16
368readword(unsigned long base_addr, int portno)
369{
370 return __raw_readl(base_addr + (portno << 1));
353} 371}
354 372
355static int 373static void
356readword(struct net_device *dev, int portno) 374writeword(unsigned long base_addr, int portno, u16 value)
375{
376 __raw_writel(value, base_addr + (portno << 1));
377}
378#elif defined(CONFIG_ARCH_PNX010X)
379static u16
380readword(unsigned long base_addr, int portno)
381{
382 return inw(base_addr + (portno << 1));
383}
384
385static void
386writeword(unsigned long base_addr, int portno, u16 value)
387{
388 outw(value, base_addr + (portno << 1));
389}
390#else
391static u16
392readword(unsigned long base_addr, int portno)
393{
394 return inw(base_addr + portno);
395}
396
397static void
398writeword(unsigned long base_addr, int portno, u16 value)
399{
400 outw(value, base_addr + portno);
401}
402#endif
403
404static void
405readwords(unsigned long base_addr, int portno, void *buf, int length)
406{
407 u8 *buf8 = (u8 *)buf;
408
409 do {
410 u16 tmp16;
411
412 tmp16 = readword(base_addr, portno);
413 *buf8++ = (u8)tmp16;
414 *buf8++ = (u8)(tmp16 >> 8);
415 } while (--length);
416}
417
418static void
419writewords(unsigned long base_addr, int portno, void *buf, int length)
420{
421 u8 *buf8 = (u8 *)buf;
422
423 do {
424 u16 tmp16;
425
426 tmp16 = *buf8++;
427 tmp16 |= (*buf8++) << 8;
428 writeword(base_addr, portno, tmp16);
429 } while (--length);
430}
431
432static u16
433readreg(struct net_device *dev, u16 regno)
357{ 434{
358 return inw(dev->base_addr + portno); 435 writeword(dev->base_addr, ADD_PORT, regno);
436 return readword(dev->base_addr, DATA_PORT);
359} 437}
360 438
361static void 439static void
362writeword(struct net_device *dev, int portno, int value) 440writereg(struct net_device *dev, u16 regno, u16 value)
363{ 441{
364 outw(value, dev->base_addr + portno); 442 writeword(dev->base_addr, ADD_PORT, regno);
443 writeword(dev->base_addr, DATA_PORT, value);
365} 444}
366 445
367static int __init 446static int __init
@@ -456,7 +535,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
456#endif 535#endif
457 } 536 }
458 537
459#ifdef CONFIG_ARCH_PNX0105 538#ifdef CONFIG_ARCH_PNX010X
460 initialize_ebi(); 539 initialize_ebi();
461 540
462 /* Map GPIO registers for the pins connected to the CS8900a. */ 541 /* Map GPIO registers for the pins connected to the CS8900a. */
@@ -491,8 +570,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
491 570
492#ifdef CONFIG_SH_HICOSH4 571#ifdef CONFIG_SH_HICOSH4
493 /* truely reset the chip */ 572 /* truely reset the chip */
494 outw(0x0114, ioaddr + ADD_PORT); 573 writeword(ioaddr, ADD_PORT, 0x0114);
495 outw(0x0040, ioaddr + DATA_PORT); 574 writeword(ioaddr, DATA_PORT, 0x0040);
496#endif 575#endif
497 576
498 /* if they give us an odd I/O address, then do ONE write to 577 /* if they give us an odd I/O address, then do ONE write to
@@ -503,24 +582,24 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
503 if (net_debug > 1) 582 if (net_debug > 1)
504 printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr); 583 printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
505 if ((ioaddr & 2) != 2) 584 if ((ioaddr & 2) != 2)
506 if ((inw((ioaddr & ~3)+ ADD_PORT) & ADD_MASK) != ADD_SIG) { 585 if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
507 printk(KERN_ERR "%s: bad signature 0x%x\n", 586 printk(KERN_ERR "%s: bad signature 0x%x\n",
508 dev->name, inw((ioaddr & ~3)+ ADD_PORT)); 587 dev->name, readword(ioaddr & ~3, ADD_PORT));
509 retval = -ENODEV; 588 retval = -ENODEV;
510 goto out2; 589 goto out2;
511 } 590 }
512 } 591 }
513 printk(KERN_DEBUG "PP_addr at %x: 0x%x\n", 592 printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
514 ioaddr + ADD_PORT, inw(ioaddr + ADD_PORT)); 593 ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
515 594
516 ioaddr &= ~3; 595 ioaddr &= ~3;
517 outw(PP_ChipID, ioaddr + ADD_PORT); 596 writeword(ioaddr, ADD_PORT, PP_ChipID);
518 597
519 tmp = inw(ioaddr + DATA_PORT); 598 tmp = readword(ioaddr, DATA_PORT);
520 if (tmp != CHIP_EISA_ID_SIG) { 599 if (tmp != CHIP_EISA_ID_SIG) {
521 printk(KERN_DEBUG "%s: incorrect signature at %x: 0x%x!=" 600 printk(KERN_DEBUG "%s: incorrect signature at %x[%x]: 0x%x!="
522 CHIP_EISA_ID_SIG_STR "\n", 601 CHIP_EISA_ID_SIG_STR "\n",
523 dev->name, ioaddr + DATA_PORT, tmp); 602 dev->name, ioaddr, DATA_PORT, tmp);
524 retval = -ENODEV; 603 retval = -ENODEV;
525 goto out2; 604 goto out2;
526 } 605 }
@@ -724,7 +803,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
724 } else { 803 } else {
725 i = lp->isa_config & INT_NO_MASK; 804 i = lp->isa_config & INT_NO_MASK;
726 if (lp->chip_type == CS8900) { 805 if (lp->chip_type == CS8900) {
727#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105) 806#if defined(CONFIG_MACH_IXDP2351) || defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX010X)
728 i = cs8900_irq_map[0]; 807 i = cs8900_irq_map[0];
729#else 808#else
730 /* Translate the IRQ using the IRQ mapping table. */ 809 /* Translate the IRQ using the IRQ mapping table. */
@@ -790,7 +869,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
790 goto out3; 869 goto out3;
791 return 0; 870 return 0;
792out3: 871out3:
793 outw(PP_ChipID, dev->base_addr + ADD_PORT); 872 writeword(dev->base_addr, ADD_PORT, PP_ChipID);
794out2: 873out2:
795 release_region(ioaddr & ~3, NETCARD_IO_EXTENT); 874 release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
796out1: 875out1:
@@ -956,7 +1035,7 @@ skip_this_frame:
956 1035
957void __init reset_chip(struct net_device *dev) 1036void __init reset_chip(struct net_device *dev)
958{ 1037{
959#ifndef CONFIG_ARCH_IXDP2X01 1038#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
960 struct net_local *lp = netdev_priv(dev); 1039 struct net_local *lp = netdev_priv(dev);
961 int ioaddr = dev->base_addr; 1040 int ioaddr = dev->base_addr;
962#endif 1041#endif
@@ -967,14 +1046,14 @@ void __init reset_chip(struct net_device *dev)
967 /* wait 30 ms */ 1046 /* wait 30 ms */
968 msleep(30); 1047 msleep(30);
969 1048
970#ifndef CONFIG_ARCH_IXDP2X01 1049#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
971 if (lp->chip_type != CS8900) { 1050 if (lp->chip_type != CS8900) {
972 /* Hardware problem requires PNP registers to be reconfigured after a reset */ 1051 /* Hardware problem requires PNP registers to be reconfigured after a reset */
973 outw(PP_CS8920_ISAINT, ioaddr + ADD_PORT); 1052 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
974 outb(dev->irq, ioaddr + DATA_PORT); 1053 outb(dev->irq, ioaddr + DATA_PORT);
975 outb(0, ioaddr + DATA_PORT + 1); 1054 outb(0, ioaddr + DATA_PORT + 1);
976 1055
977 outw(PP_CS8920_ISAMemB, ioaddr + ADD_PORT); 1056 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
978 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT); 1057 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
979 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1); 1058 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
980 } 1059 }
@@ -1104,8 +1183,8 @@ send_test_pkt(struct net_device *dev)
1104 memcpy(test_packet, dev->dev_addr, ETH_ALEN); 1183 memcpy(test_packet, dev->dev_addr, ETH_ALEN);
1105 memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN); 1184 memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
1106 1185
1107 writeword(dev, TX_CMD_PORT, TX_AFTER_ALL); 1186 writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
1108 writeword(dev, TX_LEN_PORT, ETH_ZLEN); 1187 writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
1109 1188
1110 /* Test to see if the chip has allocated memory for the packet */ 1189 /* Test to see if the chip has allocated memory for the packet */
1111 while (jiffies - timenow < 5) 1190 while (jiffies - timenow < 5)
@@ -1115,7 +1194,7 @@ send_test_pkt(struct net_device *dev)
1115 return 0; /* this shouldn't happen */ 1194 return 0; /* this shouldn't happen */
1116 1195
1117 /* Write the contents of the packet */ 1196 /* Write the contents of the packet */
1118 outsw(dev->base_addr + TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1); 1197 writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
1119 1198
1120 if (net_debug > 1) printk("Sending test packet "); 1199 if (net_debug > 1) printk("Sending test packet ");
1121 /* wait a couple of jiffies for packet to be received */ 1200 /* wait a couple of jiffies for packet to be received */
@@ -1200,7 +1279,7 @@ net_open(struct net_device *dev)
1200 int i; 1279 int i;
1201 int ret; 1280 int ret;
1202 1281
1203#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX0105) /* uses irq#1, so this won't work */ 1282#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX010X) /* uses irq#1, so this won't work */
1204 if (dev->irq < 2) { 1283 if (dev->irq < 2) {
1205 /* Allow interrupts to be generated by the chip */ 1284 /* Allow interrupts to be generated by the chip */
1206/* Cirrus' release had this: */ 1285/* Cirrus' release had this: */
@@ -1231,7 +1310,7 @@ net_open(struct net_device *dev)
1231 else 1310 else
1232#endif 1311#endif
1233 { 1312 {
1234#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105) 1313#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX010X)
1235 if (((1 << dev->irq) & lp->irq_map) == 0) { 1314 if (((1 << dev->irq) & lp->irq_map) == 0) {
1236 printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", 1315 printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
1237 dev->name, dev->irq, lp->irq_map); 1316 dev->name, dev->irq, lp->irq_map);
@@ -1316,7 +1395,7 @@ net_open(struct net_device *dev)
1316 case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break; 1395 case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
1317 default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2); 1396 default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
1318 } 1397 }
1319#ifdef CONFIG_ARCH_PNX0105 1398#ifdef CONFIG_ARCH_PNX010X
1320 result = A_CNF_10B_T; 1399 result = A_CNF_10B_T;
1321#endif 1400#endif
1322 if (!result) { 1401 if (!result) {
@@ -1457,8 +1536,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
1457 netif_stop_queue(dev); 1536 netif_stop_queue(dev);
1458 1537
1459 /* initiate a transmit sequence */ 1538 /* initiate a transmit sequence */
1460 writeword(dev, TX_CMD_PORT, lp->send_cmd); 1539 writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
1461 writeword(dev, TX_LEN_PORT, skb->len); 1540 writeword(dev->base_addr, TX_LEN_PORT, skb->len);
1462 1541
1463 /* Test to see if the chip has allocated memory for the packet */ 1542 /* Test to see if the chip has allocated memory for the packet */
1464 if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) { 1543 if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
@@ -1472,7 +1551,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
1472 return 1; 1551 return 1;
1473 } 1552 }
1474 /* Write the contents of the packet */ 1553 /* Write the contents of the packet */
1475 outsw(dev->base_addr + TX_FRAME_PORT,skb->data,(skb->len+1) >>1); 1554 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
1476 spin_unlock_irq(&lp->lock); 1555 spin_unlock_irq(&lp->lock);
1477 lp->stats.tx_bytes += skb->len; 1556 lp->stats.tx_bytes += skb->len;
1478 dev->trans_start = jiffies; 1557 dev->trans_start = jiffies;
@@ -1512,7 +1591,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1512 course, if you're on a slow machine, and packets are arriving 1591 course, if you're on a slow machine, and packets are arriving
1513 faster than you can read them off, you're screwed. Hasta la 1592 faster than you can read them off, you're screwed. Hasta la
1514 vista, baby! */ 1593 vista, baby! */
1515 while ((status = readword(dev, ISQ_PORT))) { 1594 while ((status = readword(dev->base_addr, ISQ_PORT))) {
1516 if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status); 1595 if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
1517 handled = 1; 1596 handled = 1;
1518 switch(status & ISQ_EVENT_MASK) { 1597 switch(status & ISQ_EVENT_MASK) {
@@ -1606,8 +1685,8 @@ net_rx(struct net_device *dev)
1606 int status, length; 1685 int status, length;
1607 1686
1608 int ioaddr = dev->base_addr; 1687 int ioaddr = dev->base_addr;
1609 status = inw(ioaddr + RX_FRAME_PORT); 1688 status = readword(ioaddr, RX_FRAME_PORT);
1610 length = inw(ioaddr + RX_FRAME_PORT); 1689 length = readword(ioaddr, RX_FRAME_PORT);
1611 1690
1612 if ((status & RX_OK) == 0) { 1691 if ((status & RX_OK) == 0) {
1613 count_rx_errors(status, lp); 1692 count_rx_errors(status, lp);
@@ -1626,9 +1705,9 @@ net_rx(struct net_device *dev)
1626 skb_reserve(skb, 2); /* longword align L3 header */ 1705 skb_reserve(skb, 2); /* longword align L3 header */
1627 skb->dev = dev; 1706 skb->dev = dev;
1628 1707
1629 insw(ioaddr + RX_FRAME_PORT, skb_put(skb, length), length >> 1); 1708 readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
1630 if (length & 1) 1709 if (length & 1)
1631 skb->data[length-1] = inw(ioaddr + RX_FRAME_PORT); 1710 skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);
1632 1711
1633 if (net_debug > 3) { 1712 if (net_debug > 3) {
1634 printk( "%s: received %d byte packet of type %x\n", 1713 printk( "%s: received %d byte packet of type %x\n",
@@ -1901,7 +1980,7 @@ void
1901cleanup_module(void) 1980cleanup_module(void)
1902{ 1981{
1903 unregister_netdev(dev_cs89x0); 1982 unregister_netdev(dev_cs89x0);
1904 outw(PP_ChipID, dev_cs89x0->base_addr + ADD_PORT); 1983 writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
1905 release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); 1984 release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
1906 free_netdev(dev_cs89x0); 1985 free_netdev(dev_cs89x0);
1907} 1986}
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
index decea264f121..bd954aaa636f 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/cs89x0.h
@@ -16,13 +16,6 @@
16 16
17#include <linux/config.h> 17#include <linux/config.h>
18 18
19#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105)
20/* IXDP2401/IXDP2801 uses dword-aligned register addressing */
21#define CS89x0_PORT(reg) ((reg) * 2)
22#else
23#define CS89x0_PORT(reg) (reg)
24#endif
25
26#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */ 19#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */
27 /* offset 2h -> Model/Product Number */ 20 /* offset 2h -> Model/Product Number */
28 /* offset 3h -> Chip Revision Number */ 21 /* offset 3h -> Chip Revision Number */
@@ -332,16 +325,16 @@
332#define RAM_SIZE 0x1000 /* The card has 4k bytes or RAM */ 325#define RAM_SIZE 0x1000 /* The card has 4k bytes or RAM */
333#define PKT_START PP_TxFrame /* Start of packet RAM */ 326#define PKT_START PP_TxFrame /* Start of packet RAM */
334 327
335#define RX_FRAME_PORT CS89x0_PORT(0x0000) 328#define RX_FRAME_PORT 0x0000
336#define TX_FRAME_PORT RX_FRAME_PORT 329#define TX_FRAME_PORT RX_FRAME_PORT
337#define TX_CMD_PORT CS89x0_PORT(0x0004) 330#define TX_CMD_PORT 0x0004
338#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */ 331#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */
339#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */ 332#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */
340#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */ 333#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */
341#define TX_LEN_PORT CS89x0_PORT(0x0006) 334#define TX_LEN_PORT 0x0006
342#define ISQ_PORT CS89x0_PORT(0x0008) 335#define ISQ_PORT 0x0008
343#define ADD_PORT CS89x0_PORT(0x000A) 336#define ADD_PORT 0x000A
344#define DATA_PORT CS89x0_PORT(0x000C) 337#define DATA_PORT 0x000C
345 338
346#define EEPROM_WRITE_EN 0x00F0 339#define EEPROM_WRITE_EN 0x00F0
347#define EEPROM_WRITE_DIS 0x0000 340#define EEPROM_WRITE_DIS 0x0000
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 22cd04556707..bf1fd2b98bf8 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,25 +1,25 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option) 8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version. 9 any later version.
10 10
11 This program is distributed in the hope that it will be useful, but WITHOUT 11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details. 14 more details.
15 15
16 You should have received a copy of the GNU General Public License along with 16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59 17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 19
20 The full GNU General Public License is included in this distribution in the 20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE. 21 file called LICENSE.
22 22
23 Contact Information: 23 Contact Information:
24 Linux NICS <linux.nics@intel.com> 24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -132,6 +132,10 @@
132 * TODO: 132 * TODO:
133 * o several entry points race with dev->close 133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q 134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
135 */ 139 */
136 140
137#include <linux/config.h> 141#include <linux/config.h>
@@ -156,7 +160,7 @@
156 160
157#define DRV_NAME "e100" 161#define DRV_NAME "e100"
158#define DRV_EXT "-NAPI" 162#define DRV_EXT "-NAPI"
159#define DRV_VERSION "3.4.14-k4"DRV_EXT 163#define DRV_VERSION "3.5.10-k2"DRV_EXT
160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 164#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 165#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
162#define PFX DRV_NAME ": " 166#define PFX DRV_NAME ": "
@@ -316,7 +320,7 @@ enum cuc_dump {
316 cuc_dump_complete = 0x0000A005, 320 cuc_dump_complete = 0x0000A005,
317 cuc_dump_reset_complete = 0x0000A007, 321 cuc_dump_reset_complete = 0x0000A007,
318}; 322};
319 323
320enum port { 324enum port {
321 software_reset = 0x0000, 325 software_reset = 0x0000,
322 selftest = 0x0001, 326 selftest = 0x0001,
@@ -578,6 +582,7 @@ struct nic {
578 u16 leds; 582 u16 leds;
579 u16 eeprom_wc; 583 u16 eeprom_wc;
580 u16 eeprom[256]; 584 u16 eeprom[256];
585 spinlock_t mdio_lock;
581}; 586};
582 587
583static inline void e100_write_flush(struct nic *nic) 588static inline void e100_write_flush(struct nic *nic)
@@ -587,7 +592,7 @@ static inline void e100_write_flush(struct nic *nic)
587 (void)readb(&nic->csr->scb.status); 592 (void)readb(&nic->csr->scb.status);
588} 593}
589 594
590static inline void e100_enable_irq(struct nic *nic) 595static void e100_enable_irq(struct nic *nic)
591{ 596{
592 unsigned long flags; 597 unsigned long flags;
593 598
@@ -597,7 +602,7 @@ static inline void e100_enable_irq(struct nic *nic)
597 e100_write_flush(nic); 602 e100_write_flush(nic);
598} 603}
599 604
600static inline void e100_disable_irq(struct nic *nic) 605static void e100_disable_irq(struct nic *nic)
601{ 606{
602 unsigned long flags; 607 unsigned long flags;
603 608
@@ -710,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
710 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; 715 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
711 writeb(ctrl, &nic->csr->eeprom_ctrl_lo); 716 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
712 e100_write_flush(nic); udelay(4); 717 e100_write_flush(nic); udelay(4);
713 718
714 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); 719 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
715 e100_write_flush(nic); udelay(4); 720 e100_write_flush(nic); udelay(4);
716 721
717 /* Eeprom drives a dummy zero to EEDO after receiving 722 /* Eeprom drives a dummy zero to EEDO after receiving
718 * complete address. Use this to adjust addr_len. */ 723 * complete address. Use this to adjust addr_len. */
719 ctrl = readb(&nic->csr->eeprom_ctrl_lo); 724 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -721,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
721 *addr_len -= (i - 16); 726 *addr_len -= (i - 16);
722 i = 17; 727 i = 17;
723 } 728 }
724 729
725 data = (data << 1) | (ctrl & eedo ? 1 : 0); 730 data = (data << 1) | (ctrl & eedo ? 1 : 0);
726 } 731 }
727 732
@@ -786,7 +791,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
786 791
787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 792#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
788#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ 793#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
789static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 794static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
790{ 795{
791 unsigned long flags; 796 unsigned long flags;
792 unsigned int i; 797 unsigned int i;
@@ -817,7 +822,7 @@ err_unlock:
817 return err; 822 return err;
818} 823}
819 824
820static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb, 825static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
821 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) 826 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
822{ 827{
823 struct cb *cb; 828 struct cb *cb;
@@ -876,15 +881,35 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
876{ 881{
877 u32 data_out = 0; 882 u32 data_out = 0;
878 unsigned int i; 883 unsigned int i;
884 unsigned long flags;
879 885
886
887 /*
888 * Stratus87247: we shouldn't be writing the MDI control
889 * register until the Ready bit shows True. Also, since
890 * manipulation of the MDI control registers is a multi-step
891 * procedure it should be done under lock.
892 */
893 spin_lock_irqsave(&nic->mdio_lock, flags);
894 for (i = 100; i; --i) {
895 if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
896 break;
897 udelay(20);
898 }
899 if (unlikely(!i)) {
900 printk("e100.mdio_ctrl(%s) won't go Ready\n",
901 nic->netdev->name );
902 spin_unlock_irqrestore(&nic->mdio_lock, flags);
903 return 0; /* No way to indicate timeout error */
904 }
880 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); 905 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
881 906
882 for(i = 0; i < 100; i++) { 907 for (i = 0; i < 100; i++) {
883 udelay(20); 908 udelay(20);
884 if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready) 909 if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
885 break; 910 break;
886 } 911 }
887 912 spin_unlock_irqrestore(&nic->mdio_lock, flags);
888 DPRINTK(HW, DEBUG, 913 DPRINTK(HW, DEBUG,
889 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", 914 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
890 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out); 915 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
@@ -1145,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
11450x00000000, 0x00000000, 0x00000000, 0x00000000, \ 11700x00000000, 0x00000000, 0x00000000, 0x00000000, \
1146} 1171}
1147 1172
1148static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1173static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1149{ 1174{
1150/* *INDENT-OFF* */ 1175/* *INDENT-OFF* */
1151 static struct { 1176 static struct {
@@ -1188,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1188* driver can change the algorithm. 1213* driver can change the algorithm.
1189* 1214*
1190* INTDELAY - This loads the dead-man timer with its inital value. 1215* INTDELAY - This loads the dead-man timer with its inital value.
1191* When this timer expires the interrupt is asserted, and the 1216* When this timer expires the interrupt is asserted, and the
1192* timer is reset each time a new packet is received. (see 1217* timer is reset each time a new packet is received. (see
1193* BUNDLEMAX below to set the limit on number of chained packets) 1218* BUNDLEMAX below to set the limit on number of chained packets)
1194* The current default is 0x600 or 1536. Experiments show that 1219* The current default is 0x600 or 1536. Experiments show that
1195* the value should probably stay within the 0x200 - 0x1000. 1220* the value should probably stay within the 0x200 - 0x1000.
1196* 1221*
1197* BUNDLEMAX - 1222* BUNDLEMAX -
1198* This sets the maximum number of frames that will be bundled. In 1223* This sets the maximum number of frames that will be bundled. In
1199* some situations, such as the TCP windowing algorithm, it may be 1224* some situations, such as the TCP windowing algorithm, it may be
1200* better to limit the growth of the bundle size than let it go as 1225* better to limit the growth of the bundle size than let it go as
@@ -1204,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1204* an interrupt for every frame received. If you do not want to put 1229* an interrupt for every frame received. If you do not want to put
1205* a limit on the bundle size, set this value to xFFFF. 1230* a limit on the bundle size, set this value to xFFFF.
1206* 1231*
1207* BUNDLESMALL - 1232* BUNDLESMALL -
1208* This contains a bit-mask describing the minimum size frame that 1233* This contains a bit-mask describing the minimum size frame that
1209* will be bundled. The default masks the lower 7 bits, which means 1234* will be bundled. The default masks the lower 7 bits, which means
1210* that any frame less than 128 bytes in length will not be bundled, 1235* that any frame less than 128 bytes in length will not be bundled,
@@ -1219,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1219* 1244*
1220* The current default is 0xFF80, which masks out the lower 7 bits. 1245* The current default is 0xFF80, which masks out the lower 7 bits.
1221* This means that any frame which is x7F (127) bytes or smaller 1246* This means that any frame which is x7F (127) bytes or smaller
1222* will cause an immediate interrupt. Because this value must be a 1247* will cause an immediate interrupt. Because this value must be a
1223* bit mask, there are only a few valid values that can be used. To 1248* bit mask, there are only a few valid values that can be used. To
1224* turn this feature off, the driver can write the value xFFFF to the 1249* turn this feature off, the driver can write the value xFFFF to the
1225* lower word of this instruction (in the same way that the other 1250* lower word of this instruction (in the same way that the other
@@ -1228,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1228* standard Ethernet frames are <= 2047 bytes in length. 1253* standard Ethernet frames are <= 2047 bytes in length.
1229*************************************************************************/ 1254*************************************************************************/
1230 1255
1231/* if you wish to disable the ucode functionality, while maintaining the 1256/* if you wish to disable the ucode functionality, while maintaining the
1232 * workarounds it provides, set the following defines to: 1257 * workarounds it provides, set the following defines to:
1233 * BUNDLESMALL 0 1258 * BUNDLESMALL 0
1234 * BUNDLEMAX 1 1259 * BUNDLEMAX 1
@@ -1259,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1259 1284
1260 for (i = 0; i < UCODE_SIZE; i++) 1285 for (i = 0; i < UCODE_SIZE; i++)
1261 cb->u.ucode[i] = cpu_to_le32(ucode[i]); 1286 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1262 cb->command = cpu_to_le16(cb_ucode); 1287 cb->command = cpu_to_le16(cb_ucode | cb_el);
1263 return; 1288 return;
1264 } 1289 }
1265 1290
1266noloaducode: 1291noloaducode:
1267 cb->command = cpu_to_le16(cb_nop); 1292 cb->command = cpu_to_le16(cb_nop | cb_el);
1293}
1294
1295static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1296 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1297{
1298 int err = 0, counter = 50;
1299 struct cb *cb = nic->cb_to_clean;
1300
1301 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1302 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
1303
1304 /* must restart cuc */
1305 nic->cuc_cmd = cuc_start;
1306
1307 /* wait for completion */
1308 e100_write_flush(nic);
1309 udelay(10);
1310
1311 /* wait for possibly (ouch) 500ms */
1312 while (!(cb->status & cpu_to_le16(cb_complete))) {
1313 msleep(10);
1314 if (!--counter) break;
1315 }
1316
1317 /* ack any interupts, something could have been set */
1318 writeb(~0, &nic->csr->scb.stat_ack);
1319
1320 /* if the command failed, or is not OK, notify and return */
1321 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1322 DPRINTK(PROBE,ERR, "ucode load failed\n");
1323 err = -EPERM;
1324 }
1325
1326 return err;
1268} 1327}
1269 1328
1270static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1329static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1332,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
1332 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); 1391 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1333 } 1392 }
1334 1393
1335 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1394 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1336 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) { 1395 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
1337 /* enable/disable MDI/MDI-X auto-switching. 1396 /* enable/disable MDI/MDI-X auto-switching.
1338 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */ 1397 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
1339 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) || 1398 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
1340 (nic->mac == mac_82551_10) || (nic->mii.force_media) || 1399 (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
1341 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) 1400 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
1342 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0); 1401 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
1343 else 1402 else
1344 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH); 1403 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1363,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
1363 return err; 1422 return err;
1364 if((err = e100_exec_cmd(nic, ruc_load_base, 0))) 1423 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1365 return err; 1424 return err;
1366 if((err = e100_exec_cb(nic, NULL, e100_load_ucode))) 1425 if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
1367 return err; 1426 return err;
1368 if((err = e100_exec_cb(nic, NULL, e100_configure))) 1427 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1369 return err; 1428 return err;
@@ -1468,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
1468 } 1527 }
1469 } 1528 }
1470 1529
1471 1530
1472 if(e100_exec_cmd(nic, cuc_dump_reset, 0)) 1531 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1473 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1532 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1474} 1533}
@@ -1517,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
1517 mii_check_link(&nic->mii); 1576 mii_check_link(&nic->mii);
1518 1577
1519 /* Software generated interrupt to recover from (rare) Rx 1578 /* Software generated interrupt to recover from (rare) Rx
1520 * allocation failure. 1579 * allocation failure.
1521 * Unfortunately have to use a spinlock to not re-enable interrupts 1580 * Unfortunately have to use a spinlock to not re-enable interrupts
1522 * accidentally, due to hardware that shares a register between the 1581 * accidentally, due to hardware that shares a register between the
1523 * interrupt mask bit and the SW Interrupt generation bit */ 1582 * interrupt mask bit and the SW Interrupt generation bit */
1524 spin_lock_irq(&nic->cmd_lock); 1583 spin_lock_irq(&nic->cmd_lock);
1525 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); 1584 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1526 spin_unlock_irq(&nic->cmd_lock); 1585 spin_unlock_irq(&nic->cmd_lock);
@@ -1542,7 +1601,7 @@ static void e100_watchdog(unsigned long data)
1542 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); 1601 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
1543} 1602}
1544 1603
1545static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1604static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1546 struct sk_buff *skb) 1605 struct sk_buff *skb)
1547{ 1606{
1548 cb->command = nic->tx_command; 1607 cb->command = nic->tx_command;
@@ -1592,7 +1651,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1592 return 0; 1651 return 0;
1593} 1652}
1594 1653
1595static inline int e100_tx_clean(struct nic *nic) 1654static int e100_tx_clean(struct nic *nic)
1596{ 1655{
1597 struct cb *cb; 1656 struct cb *cb;
1598 int tx_cleaned = 0; 1657 int tx_cleaned = 0;
@@ -1703,7 +1762,7 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1703} 1762}
1704 1763
1705#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1764#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
1706static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1765static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1707{ 1766{
1708 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN))) 1767 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
1709 return -ENOMEM; 1768 return -ENOMEM;
@@ -1737,7 +1796,7 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1737 return 0; 1796 return 0;
1738} 1797}
1739 1798
1740static inline int e100_rx_indicate(struct nic *nic, struct rx *rx, 1799static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1741 unsigned int *work_done, unsigned int work_to_do) 1800 unsigned int *work_done, unsigned int work_to_do)
1742{ 1801{
1743 struct sk_buff *skb = rx->skb; 1802 struct sk_buff *skb = rx->skb;
@@ -1797,7 +1856,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1797 return 0; 1856 return 0;
1798} 1857}
1799 1858
1800static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done, 1859static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1801 unsigned int work_to_do) 1860 unsigned int work_to_do)
1802{ 1861{
1803 struct rx *rx; 1862 struct rx *rx;
@@ -1805,7 +1864,7 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1805 struct rx *rx_to_start = NULL; 1864 struct rx *rx_to_start = NULL;
1806 1865
1807 /* are we already rnr? then pay attention!!! this ensures that 1866 /* are we already rnr? then pay attention!!! this ensures that
1808 * the state machine progression never allows a start with a 1867 * the state machine progression never allows a start with a
1809 * partially cleaned list, avoiding a race between hardware 1868 * partially cleaned list, avoiding a race between hardware
1810 * and rx_to_clean when in NAPI mode */ 1869 * and rx_to_clean when in NAPI mode */
1811 if(RU_SUSPENDED == nic->ru_running) 1870 if(RU_SUSPENDED == nic->ru_running)
@@ -2041,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
2041{ 2100{
2042 struct nic *nic = netdev_priv(netdev); 2101 struct nic *nic = netdev_priv(netdev);
2043 2102
2044 /* Reset outside of interrupt context, to avoid request_irq 2103 /* Reset outside of interrupt context, to avoid request_irq
2045 * in interrupt context */ 2104 * in interrupt context */
2046 schedule_work(&nic->tx_timeout_task); 2105 schedule_work(&nic->tx_timeout_task);
2047} 2106}
@@ -2288,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
2288 struct param_range *rfds = &nic->params.rfds; 2347 struct param_range *rfds = &nic->params.rfds;
2289 struct param_range *cbs = &nic->params.cbs; 2348 struct param_range *cbs = &nic->params.cbs;
2290 2349
2291 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2350 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2292 return -EINVAL; 2351 return -EINVAL;
2293 2352
2294 if(netif_running(netdev)) 2353 if(netif_running(netdev))
@@ -2562,6 +2621,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2562 /* locks must be initialized before calling hw_reset */ 2621 /* locks must be initialized before calling hw_reset */
2563 spin_lock_init(&nic->cb_lock); 2622 spin_lock_init(&nic->cb_lock);
2564 spin_lock_init(&nic->cmd_lock); 2623 spin_lock_init(&nic->cmd_lock);
2624 spin_lock_init(&nic->mdio_lock);
2565 2625
2566 /* Reset the device before pci_set_master() in case device is in some 2626 /* Reset the device before pci_set_master() in case device is in some
2567 * funky state and has an interrupt pending - hint: we don't have the 2627 * funky state and has an interrupt pending - hint: we don't have the
@@ -2605,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2605 nic->flags |= wol_magic; 2665 nic->flags |= wol_magic;
2606 2666
2607 /* ack any pending wake events, disable PME */ 2667 /* ack any pending wake events, disable PME */
2608 pci_enable_wake(pdev, 0, 0); 2668 err = pci_enable_wake(pdev, 0, 0);
2669 if (err)
2670 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
2609 2671
2610 strcpy(netdev->name, "eth%d"); 2672 strcpy(netdev->name, "eth%d");
2611 if((err = register_netdev(netdev))) { 2673 if((err = register_netdev(netdev))) {
@@ -2656,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2656{ 2718{
2657 struct net_device *netdev = pci_get_drvdata(pdev); 2719 struct net_device *netdev = pci_get_drvdata(pdev);
2658 struct nic *nic = netdev_priv(netdev); 2720 struct nic *nic = netdev_priv(netdev);
2721 int retval;
2659 2722
2660 if(netif_running(netdev)) 2723 if(netif_running(netdev))
2661 e100_down(nic); 2724 e100_down(nic);
@@ -2663,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2663 netif_device_detach(netdev); 2726 netif_device_detach(netdev);
2664 2727
2665 pci_save_state(pdev); 2728 pci_save_state(pdev);
2666 pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic))); 2729 retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
2730 nic->flags & (wol_magic | e100_asf(nic)));
2731 if (retval)
2732 DPRINTK(PROBE,ERR, "Error enabling wake\n");
2667 pci_disable_device(pdev); 2733 pci_disable_device(pdev);
2668 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2734 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
2735 if (retval)
2736 DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
2669 2737
2670 return 0; 2738 return 0;
2671} 2739}
@@ -2674,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
2674{ 2742{
2675 struct net_device *netdev = pci_get_drvdata(pdev); 2743 struct net_device *netdev = pci_get_drvdata(pdev);
2676 struct nic *nic = netdev_priv(netdev); 2744 struct nic *nic = netdev_priv(netdev);
2745 int retval;
2677 2746
2678 pci_set_power_state(pdev, PCI_D0); 2747 retval = pci_set_power_state(pdev, PCI_D0);
2748 if (retval)
2749 DPRINTK(PROBE,ERR, "Error waking adapter\n");
2679 pci_restore_state(pdev); 2750 pci_restore_state(pdev);
2680 /* ack any pending wake events, disable PME */ 2751 /* ack any pending wake events, disable PME */
2681 pci_enable_wake(pdev, 0, 0); 2752 retval = pci_enable_wake(pdev, 0, 0);
2753 if (retval)
2754 DPRINTK(PROBE,ERR, "Error clearing wake events\n");
2682 if(e100_hw_init(nic)) 2755 if(e100_hw_init(nic))
2683 DPRINTK(HW, ERR, "e100_hw_init failed\n"); 2756 DPRINTK(HW, ERR, "e100_hw_init failed\n");
2684 2757
@@ -2695,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
2695{ 2768{
2696 struct net_device *netdev = pci_get_drvdata(pdev); 2769 struct net_device *netdev = pci_get_drvdata(pdev);
2697 struct nic *nic = netdev_priv(netdev); 2770 struct nic *nic = netdev_priv(netdev);
2771 int retval;
2698 2772
2699#ifdef CONFIG_PM 2773#ifdef CONFIG_PM
2700 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2774 retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
2701#else 2775#else
2702 pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); 2776 retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
2703#endif 2777#endif
2778 if (retval)
2779 DPRINTK(PROBE,ERR, "Error enabling wake\n");
2704} 2780}
2705 2781
2706 2782
@@ -2713,7 +2789,7 @@ static struct pci_driver e100_driver = {
2713 .suspend = e100_suspend, 2789 .suspend = e100_suspend,
2714 .resume = e100_resume, 2790 .resume = e100_resume,
2715#endif 2791#endif
2716 .shutdown = e100_shutdown, 2792 .shutdown = e100_shutdown,
2717}; 2793};
2718 2794
2719static int __init e100_init_module(void) 2795static int __init e100_init_module(void)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b..27c77306193b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,10 +72,6 @@
72#include <linux/mii.h> 72#include <linux/mii.h>
73#include <linux/ethtool.h> 73#include <linux/ethtool.h>
74#include <linux/if_vlan.h> 74#include <linux/if_vlan.h>
75#ifdef CONFIG_E1000_MQ
76#include <linux/cpu.h>
77#include <linux/smp.h>
78#endif
79 75
80#define BAR_0 0 76#define BAR_0 0
81#define BAR_1 1 77#define BAR_1 1
@@ -87,6 +83,10 @@
87struct e1000_adapter; 83struct e1000_adapter;
88 84
89#include "e1000_hw.h" 85#include "e1000_hw.h"
86#ifdef CONFIG_E1000_MQ
87#include <linux/cpu.h>
88#include <linux/smp.h>
89#endif
90 90
91#ifdef DBG 91#ifdef DBG
92#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) 92#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
169 uint16_t next_to_watch; 169 uint16_t next_to_watch;
170}; 170};
171 171
172#ifdef CONFIG_E1000_MQ
173struct e1000_queue_stats {
174 uint64_t packets;
175 uint64_t bytes;
176};
177#endif
178
172struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; 179struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
173struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; 180struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
174 181
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
191 spinlock_t tx_lock; 198 spinlock_t tx_lock;
192 uint16_t tdh; 199 uint16_t tdh;
193 uint16_t tdt; 200 uint16_t tdt;
194 uint64_t pkt;
195 201
196 boolean_t last_tx_tso; 202 boolean_t last_tx_tso;
197 203
204#ifdef CONFIG_E1000_MQ
205 struct e1000_queue_stats tx_stats;
206#endif
198}; 207};
199 208
200struct e1000_rx_ring { 209struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
216 struct e1000_ps_page *ps_page; 225 struct e1000_ps_page *ps_page;
217 struct e1000_ps_page_dma *ps_page_dma; 226 struct e1000_ps_page_dma *ps_page_dma;
218 227
228 struct sk_buff *rx_skb_top;
229 struct sk_buff *rx_skb_prev;
230
231 /* cpu for rx queue */
232 int cpu;
233
219 uint16_t rdh; 234 uint16_t rdh;
220 uint16_t rdt; 235 uint16_t rdt;
221 uint64_t pkt; 236#ifdef CONFIG_E1000_MQ
237 struct e1000_queue_stats rx_stats;
238#endif
222}; 239};
223 240
224#define E1000_DESC_UNUSED(R) \ 241#define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
251 uint16_t link_speed; 268 uint16_t link_speed;
252 uint16_t link_duplex; 269 uint16_t link_duplex;
253 spinlock_t stats_lock; 270 spinlock_t stats_lock;
271#ifdef CONFIG_E1000_NAPI
272 spinlock_t tx_queue_lock;
273#endif
254 atomic_t irq_sem; 274 atomic_t irq_sem;
255 struct work_struct tx_timeout_task; 275 struct work_struct tx_timeout_task;
256 struct work_struct watchdog_task; 276 struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
264#ifdef CONFIG_E1000_MQ 284#ifdef CONFIG_E1000_MQ
265 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ 285 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
266#endif 286#endif
287 unsigned long tx_queue_len;
267 uint32_t txd_cmd; 288 uint32_t txd_cmd;
268 uint32_t tx_int_delay; 289 uint32_t tx_int_delay;
269 uint32_t tx_abs_int_delay; 290 uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
271 uint64_t gotcl_old; 292 uint64_t gotcl_old;
272 uint64_t tpt_old; 293 uint64_t tpt_old;
273 uint64_t colc_old; 294 uint64_t colc_old;
295 uint32_t tx_timeout_count;
274 uint32_t tx_fifo_head; 296 uint32_t tx_fifo_head;
275 uint32_t tx_head_addr; 297 uint32_t tx_head_addr;
276 uint32_t tx_fifo_size; 298 uint32_t tx_fifo_size;
299 uint8_t tx_timeout_factor;
277 atomic_t tx_fifo_stall; 300 atomic_t tx_fifo_stall;
278 boolean_t pcix_82544; 301 boolean_t pcix_82544;
279 boolean_t detect_tx_hung; 302 boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
281 /* RX */ 304 /* RX */
282#ifdef CONFIG_E1000_NAPI 305#ifdef CONFIG_E1000_NAPI
283 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 306 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
284 struct e1000_rx_ring *rx_ring, 307 struct e1000_rx_ring *rx_ring,
285 int *work_done, int work_to_do); 308 int *work_done, int work_to_do);
286#else 309#else
287 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 310 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
288 struct e1000_rx_ring *rx_ring); 311 struct e1000_rx_ring *rx_ring);
289#endif 312#endif
290 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 313 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
291 struct e1000_rx_ring *rx_ring); 314 struct e1000_rx_ring *rx_ring,
315 int cleaned_count);
292 struct e1000_rx_ring *rx_ring; /* One per active queue */ 316 struct e1000_rx_ring *rx_ring; /* One per active queue */
293#ifdef CONFIG_E1000_NAPI 317#ifdef CONFIG_E1000_NAPI
294 struct net_device *polling_netdev; /* One per active queue */ 318 struct net_device *polling_netdev; /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
296#ifdef CONFIG_E1000_MQ 320#ifdef CONFIG_E1000_MQ
297 struct net_device **cpu_netdev; /* per-cpu */ 321 struct net_device **cpu_netdev; /* per-cpu */
298 struct call_async_data_struct rx_sched_call_data; 322 struct call_async_data_struct rx_sched_call_data;
299 int cpu_for_queue[4]; 323 cpumask_t cpumask;
300#endif 324#endif
301 int num_queues; 325 int num_tx_queues;
326 int num_rx_queues;
302 327
303 uint64_t hw_csum_err; 328 uint64_t hw_csum_err;
304 uint64_t hw_csum_good; 329 uint64_t hw_csum_good;
305 uint64_t rx_hdr_split; 330 uint64_t rx_hdr_split;
331 uint32_t alloc_rx_buff_failed;
306 uint32_t rx_int_delay; 332 uint32_t rx_int_delay;
307 uint32_t rx_abs_int_delay; 333 uint32_t rx_abs_int_delay;
308 boolean_t rx_csum; 334 boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
330 struct e1000_rx_ring test_rx_ring; 356 struct e1000_rx_ring test_rx_ring;
331 357
332 358
359 u32 *config_space;
333 int msg_enable; 360 int msg_enable;
334#ifdef CONFIG_PCI_MSI 361#ifdef CONFIG_PCI_MSI
335 boolean_t have_msi; 362 boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1d..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
80 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 80 { "tx_deferred_ok", E1000_STAT(stats.dc) },
81 { "tx_single_coll_ok", E1000_STAT(stats.scc) }, 81 { "tx_single_coll_ok", E1000_STAT(stats.scc) },
82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, 82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
83 { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
83 { "rx_long_length_errors", E1000_STAT(stats.roc) }, 84 { "rx_long_length_errors", E1000_STAT(stats.roc) },
84 { "rx_short_length_errors", E1000_STAT(stats.ruc) }, 85 { "rx_short_length_errors", E1000_STAT(stats.ruc) },
85 { "rx_align_errors", E1000_STAT(stats.algnerrc) }, 86 { "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
93 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 94 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
94 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 95 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
95 { "rx_header_split", E1000_STAT(rx_hdr_split) }, 96 { "rx_header_split", E1000_STAT(rx_hdr_split) },
97 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
96}; 98};
97#define E1000_STATS_LEN \ 99
100#ifdef CONFIG_E1000_MQ
101#define E1000_QUEUE_STATS_LEN \
102 (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
103 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
104 * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
105#else
106#define E1000_QUEUE_STATS_LEN 0
107#endif
108#define E1000_GLOBAL_STATS_LEN \
98 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) 109 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
110#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
99static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { 111static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
100 "Register test (offline)", "Eeprom test (offline)", 112 "Register test (offline)", "Eeprom test (offline)",
101 "Interrupt test (offline)", "Loopback test (offline)", 113 "Interrupt test (offline)", "Loopback test (offline)",
@@ -109,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
109 struct e1000_adapter *adapter = netdev_priv(netdev); 121 struct e1000_adapter *adapter = netdev_priv(netdev);
110 struct e1000_hw *hw = &adapter->hw; 122 struct e1000_hw *hw = &adapter->hw;
111 123
112 if(hw->media_type == e1000_media_type_copper) { 124 if (hw->media_type == e1000_media_type_copper) {
113 125
114 ecmd->supported = (SUPPORTED_10baseT_Half | 126 ecmd->supported = (SUPPORTED_10baseT_Half |
115 SUPPORTED_10baseT_Full | 127 SUPPORTED_10baseT_Full |
@@ -121,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
121 133
122 ecmd->advertising = ADVERTISED_TP; 134 ecmd->advertising = ADVERTISED_TP;
123 135
124 if(hw->autoneg == 1) { 136 if (hw->autoneg == 1) {
125 ecmd->advertising |= ADVERTISED_Autoneg; 137 ecmd->advertising |= ADVERTISED_Autoneg;
126 138
127 /* the e1000 autoneg seems to match ethtool nicely */ 139 /* the e1000 autoneg seems to match ethtool nicely */
@@ -132,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
132 ecmd->port = PORT_TP; 144 ecmd->port = PORT_TP;
133 ecmd->phy_address = hw->phy_addr; 145 ecmd->phy_address = hw->phy_addr;
134 146
135 if(hw->mac_type == e1000_82543) 147 if (hw->mac_type == e1000_82543)
136 ecmd->transceiver = XCVR_EXTERNAL; 148 ecmd->transceiver = XCVR_EXTERNAL;
137 else 149 else
138 ecmd->transceiver = XCVR_INTERNAL; 150 ecmd->transceiver = XCVR_INTERNAL;
@@ -148,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
148 160
149 ecmd->port = PORT_FIBRE; 161 ecmd->port = PORT_FIBRE;
150 162
151 if(hw->mac_type >= e1000_82545) 163 if (hw->mac_type >= e1000_82545)
152 ecmd->transceiver = XCVR_INTERNAL; 164 ecmd->transceiver = XCVR_INTERNAL;
153 else 165 else
154 ecmd->transceiver = XCVR_EXTERNAL; 166 ecmd->transceiver = XCVR_EXTERNAL;
155 } 167 }
156 168
157 if(netif_carrier_ok(adapter->netdev)) { 169 if (netif_carrier_ok(adapter->netdev)) {
158 170
159 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 171 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
160 &adapter->link_duplex); 172 &adapter->link_duplex);
@@ -163,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
163 /* unfortunatly FULL_DUPLEX != DUPLEX_FULL 175 /* unfortunatly FULL_DUPLEX != DUPLEX_FULL
164 * and HALF_DUPLEX != DUPLEX_HALF */ 176 * and HALF_DUPLEX != DUPLEX_HALF */
165 177
166 if(adapter->link_duplex == FULL_DUPLEX) 178 if (adapter->link_duplex == FULL_DUPLEX)
167 ecmd->duplex = DUPLEX_FULL; 179 ecmd->duplex = DUPLEX_FULL;
168 else 180 else
169 ecmd->duplex = DUPLEX_HALF; 181 ecmd->duplex = DUPLEX_HALF;
@@ -183,13 +195,21 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
183 struct e1000_adapter *adapter = netdev_priv(netdev); 195 struct e1000_adapter *adapter = netdev_priv(netdev);
184 struct e1000_hw *hw = &adapter->hw; 196 struct e1000_hw *hw = &adapter->hw;
185 197
186 if(ecmd->autoneg == AUTONEG_ENABLE) { 198 /* When SoL/IDER sessions are active, autoneg/speed/duplex
199 * cannot be changed */
200 if (e1000_check_phy_reset_block(hw)) {
201 DPRINTK(DRV, ERR, "Cannot change link characteristics "
202 "when SoL/IDER is active.\n");
203 return -EINVAL;
204 }
205
206 if (ecmd->autoneg == AUTONEG_ENABLE) {
187 hw->autoneg = 1; 207 hw->autoneg = 1;
188 if(hw->media_type == e1000_media_type_fiber) 208 if (hw->media_type == e1000_media_type_fiber)
189 hw->autoneg_advertised = ADVERTISED_1000baseT_Full | 209 hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
190 ADVERTISED_FIBRE | 210 ADVERTISED_FIBRE |
191 ADVERTISED_Autoneg; 211 ADVERTISED_Autoneg;
192 else 212 else
193 hw->autoneg_advertised = ADVERTISED_10baseT_Half | 213 hw->autoneg_advertised = ADVERTISED_10baseT_Half |
194 ADVERTISED_10baseT_Full | 214 ADVERTISED_10baseT_Full |
195 ADVERTISED_100baseT_Half | 215 ADVERTISED_100baseT_Half |
@@ -199,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
199 ADVERTISED_TP; 219 ADVERTISED_TP;
200 ecmd->advertising = hw->autoneg_advertised; 220 ecmd->advertising = hw->autoneg_advertised;
201 } else 221 } else
202 if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) 222 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
203 return -EINVAL; 223 return -EINVAL;
204 224
205 /* reset the link */ 225 /* reset the link */
206 226
207 if(netif_running(adapter->netdev)) { 227 if (netif_running(adapter->netdev)) {
208 e1000_down(adapter); 228 e1000_down(adapter);
209 e1000_reset(adapter); 229 e1000_reset(adapter);
210 e1000_up(adapter); 230 e1000_up(adapter);
@@ -221,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
221 struct e1000_adapter *adapter = netdev_priv(netdev); 241 struct e1000_adapter *adapter = netdev_priv(netdev);
222 struct e1000_hw *hw = &adapter->hw; 242 struct e1000_hw *hw = &adapter->hw;
223 243
224 pause->autoneg = 244 pause->autoneg =
225 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 245 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
226 246
227 if(hw->fc == e1000_fc_rx_pause) 247 if (hw->fc == e1000_fc_rx_pause)
228 pause->rx_pause = 1; 248 pause->rx_pause = 1;
229 else if(hw->fc == e1000_fc_tx_pause) 249 else if (hw->fc == e1000_fc_tx_pause)
230 pause->tx_pause = 1; 250 pause->tx_pause = 1;
231 else if(hw->fc == e1000_fc_full) { 251 else if (hw->fc == e1000_fc_full) {
232 pause->rx_pause = 1; 252 pause->rx_pause = 1;
233 pause->tx_pause = 1; 253 pause->tx_pause = 1;
234 } 254 }
@@ -240,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
240{ 260{
241 struct e1000_adapter *adapter = netdev_priv(netdev); 261 struct e1000_adapter *adapter = netdev_priv(netdev);
242 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
243 263
244 adapter->fc_autoneg = pause->autoneg; 264 adapter->fc_autoneg = pause->autoneg;
245 265
246 if(pause->rx_pause && pause->tx_pause) 266 if (pause->rx_pause && pause->tx_pause)
247 hw->fc = e1000_fc_full; 267 hw->fc = e1000_fc_full;
248 else if(pause->rx_pause && !pause->tx_pause) 268 else if (pause->rx_pause && !pause->tx_pause)
249 hw->fc = e1000_fc_rx_pause; 269 hw->fc = e1000_fc_rx_pause;
250 else if(!pause->rx_pause && pause->tx_pause) 270 else if (!pause->rx_pause && pause->tx_pause)
251 hw->fc = e1000_fc_tx_pause; 271 hw->fc = e1000_fc_tx_pause;
252 else if(!pause->rx_pause && !pause->tx_pause) 272 else if (!pause->rx_pause && !pause->tx_pause)
253 hw->fc = e1000_fc_none; 273 hw->fc = e1000_fc_none;
254 274
255 hw->original_fc = hw->fc; 275 hw->original_fc = hw->fc;
256 276
257 if(adapter->fc_autoneg == AUTONEG_ENABLE) { 277 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
258 if(netif_running(adapter->netdev)) { 278 if (netif_running(adapter->netdev)) {
259 e1000_down(adapter); 279 e1000_down(adapter);
260 e1000_up(adapter); 280 e1000_up(adapter);
261 } else 281 } else
262 e1000_reset(adapter); 282 e1000_reset(adapter);
263 } 283 } else
264 else
265 return ((hw->media_type == e1000_media_type_fiber) ? 284 return ((hw->media_type == e1000_media_type_fiber) ?
266 e1000_setup_link(hw) : e1000_force_mac_fc(hw)); 285 e1000_setup_link(hw) : e1000_force_mac_fc(hw));
267 286
268 return 0; 287 return 0;
269} 288}
270 289
@@ -281,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
281 struct e1000_adapter *adapter = netdev_priv(netdev); 300 struct e1000_adapter *adapter = netdev_priv(netdev);
282 adapter->rx_csum = data; 301 adapter->rx_csum = data;
283 302
284 if(netif_running(netdev)) { 303 if (netif_running(netdev)) {
285 e1000_down(adapter); 304 e1000_down(adapter);
286 e1000_up(adapter); 305 e1000_up(adapter);
287 } else 306 } else
288 e1000_reset(adapter); 307 e1000_reset(adapter);
289 return 0; 308 return 0;
290} 309}
291 310
292static uint32_t 311static uint32_t
293e1000_get_tx_csum(struct net_device *netdev) 312e1000_get_tx_csum(struct net_device *netdev)
294{ 313{
@@ -300,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
300{ 319{
301 struct e1000_adapter *adapter = netdev_priv(netdev); 320 struct e1000_adapter *adapter = netdev_priv(netdev);
302 321
303 if(adapter->hw.mac_type < e1000_82543) { 322 if (adapter->hw.mac_type < e1000_82543) {
304 if (!data) 323 if (!data)
305 return -EINVAL; 324 return -EINVAL;
306 return 0; 325 return 0;
@@ -319,8 +338,8 @@ static int
319e1000_set_tso(struct net_device *netdev, uint32_t data) 338e1000_set_tso(struct net_device *netdev, uint32_t data)
320{ 339{
321 struct e1000_adapter *adapter = netdev_priv(netdev); 340 struct e1000_adapter *adapter = netdev_priv(netdev);
322 if((adapter->hw.mac_type < e1000_82544) || 341 if ((adapter->hw.mac_type < e1000_82544) ||
323 (adapter->hw.mac_type == e1000_82547)) 342 (adapter->hw.mac_type == e1000_82547))
324 return data ? -EINVAL : 0; 343 return data ? -EINVAL : 0;
325 344
326 if (data) 345 if (data)
@@ -328,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
328 else 347 else
329 netdev->features &= ~NETIF_F_TSO; 348 netdev->features &= ~NETIF_F_TSO;
330 return 0; 349 return 0;
331} 350}
332#endif /* NETIF_F_TSO */ 351#endif /* NETIF_F_TSO */
333 352
334static uint32_t 353static uint32_t
@@ -345,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
345 adapter->msg_enable = data; 364 adapter->msg_enable = data;
346} 365}
347 366
348static int 367static int
349e1000_get_regs_len(struct net_device *netdev) 368e1000_get_regs_len(struct net_device *netdev)
350{ 369{
351#define E1000_REGS_LEN 32 370#define E1000_REGS_LEN 32
@@ -381,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
381 regs_buff[11] = E1000_READ_REG(hw, TIDV); 400 regs_buff[11] = E1000_READ_REG(hw, TIDV);
382 401
383 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ 402 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
384 if(hw->phy_type == e1000_phy_igp) { 403 if (hw->phy_type == e1000_phy_igp) {
385 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 404 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
386 IGP01E1000_PHY_AGC_A); 405 IGP01E1000_PHY_AGC_A);
387 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & 406 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -435,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
435 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 454 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
436 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ 455 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
437 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 456 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
438 if(hw->mac_type >= e1000_82540 && 457 if (hw->mac_type >= e1000_82540 &&
439 hw->media_type == e1000_media_type_copper) { 458 hw->media_type == e1000_media_type_copper) {
440 regs_buff[26] = E1000_READ_REG(hw, MANC); 459 regs_buff[26] = E1000_READ_REG(hw, MANC);
441 } 460 }
@@ -459,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
459 int ret_val = 0; 478 int ret_val = 0;
460 uint16_t i; 479 uint16_t i;
461 480
462 if(eeprom->len == 0) 481 if (eeprom->len == 0)
463 return -EINVAL; 482 return -EINVAL;
464 483
465 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 484 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -469,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
469 488
470 eeprom_buff = kmalloc(sizeof(uint16_t) * 489 eeprom_buff = kmalloc(sizeof(uint16_t) *
471 (last_word - first_word + 1), GFP_KERNEL); 490 (last_word - first_word + 1), GFP_KERNEL);
472 if(!eeprom_buff) 491 if (!eeprom_buff)
473 return -ENOMEM; 492 return -ENOMEM;
474 493
475 if(hw->eeprom.type == e1000_eeprom_spi) 494 if (hw->eeprom.type == e1000_eeprom_spi)
476 ret_val = e1000_read_eeprom(hw, first_word, 495 ret_val = e1000_read_eeprom(hw, first_word,
477 last_word - first_word + 1, 496 last_word - first_word + 1,
478 eeprom_buff); 497 eeprom_buff);
479 else { 498 else {
480 for (i = 0; i < last_word - first_word + 1; i++) 499 for (i = 0; i < last_word - first_word + 1; i++)
481 if((ret_val = e1000_read_eeprom(hw, first_word + i, 1, 500 if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
482 &eeprom_buff[i]))) 501 &eeprom_buff[i])))
483 break; 502 break;
484 } 503 }
@@ -505,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
505 int max_len, first_word, last_word, ret_val = 0; 524 int max_len, first_word, last_word, ret_val = 0;
506 uint16_t i; 525 uint16_t i;
507 526
508 if(eeprom->len == 0) 527 if (eeprom->len == 0)
509 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
510 529
511 if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 530 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
512 return -EFAULT; 531 return -EFAULT;
513 532
514 max_len = hw->eeprom.word_size * 2; 533 max_len = hw->eeprom.word_size * 2;
@@ -516,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
516 first_word = eeprom->offset >> 1; 535 first_word = eeprom->offset >> 1;
517 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 536 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
518 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 537 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
519 if(!eeprom_buff) 538 if (!eeprom_buff)
520 return -ENOMEM; 539 return -ENOMEM;
521 540
522 ptr = (void *)eeprom_buff; 541 ptr = (void *)eeprom_buff;
523 542
524 if(eeprom->offset & 1) { 543 if (eeprom->offset & 1) {
525 /* need read/modify/write of first changed EEPROM word */ 544 /* need read/modify/write of first changed EEPROM word */
526 /* only the second byte of the word is being modified */ 545 /* only the second byte of the word is being modified */
527 ret_val = e1000_read_eeprom(hw, first_word, 1, 546 ret_val = e1000_read_eeprom(hw, first_word, 1,
528 &eeprom_buff[0]); 547 &eeprom_buff[0]);
529 ptr++; 548 ptr++;
530 } 549 }
531 if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 550 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
532 /* need read/modify/write of last changed EEPROM word */ 551 /* need read/modify/write of last changed EEPROM word */
533 /* only the first byte of the word is being modified */ 552 /* only the first byte of the word is being modified */
534 ret_val = e1000_read_eeprom(hw, last_word, 1, 553 ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -547,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
547 ret_val = e1000_write_eeprom(hw, first_word, 566 ret_val = e1000_write_eeprom(hw, first_word,
548 last_word - first_word + 1, eeprom_buff); 567 last_word - first_word + 1, eeprom_buff);
549 568
550 /* Update the checksum over the first part of the EEPROM if needed 569 /* Update the checksum over the first part of the EEPROM if needed
551 * and flush shadow RAM for 82573 conrollers */ 570 * and flush shadow RAM for 82573 conrollers */
552 if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 571 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
553 (hw->mac_type == e1000_82573))) 572 (hw->mac_type == e1000_82573)))
554 e1000_update_eeprom_checksum(hw); 573 e1000_update_eeprom_checksum(hw);
555 574
@@ -567,21 +586,21 @@ e1000_get_drvinfo(struct net_device *netdev,
567 586
568 strncpy(drvinfo->driver, e1000_driver_name, 32); 587 strncpy(drvinfo->driver, e1000_driver_name, 32);
569 strncpy(drvinfo->version, e1000_driver_version, 32); 588 strncpy(drvinfo->version, e1000_driver_version, 32);
570 589
571 /* EEPROM image version # is reported as firware version # for 590 /* EEPROM image version # is reported as firmware version # for
572 * 8257{1|2|3} controllers */ 591 * 8257{1|2|3} controllers */
573 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data); 592 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
574 switch (adapter->hw.mac_type) { 593 switch (adapter->hw.mac_type) {
575 case e1000_82571: 594 case e1000_82571:
576 case e1000_82572: 595 case e1000_82572:
577 case e1000_82573: 596 case e1000_82573:
578 sprintf(firmware_version, "%d.%d-%d", 597 sprintf(firmware_version, "%d.%d-%d",
579 (eeprom_data & 0xF000) >> 12, 598 (eeprom_data & 0xF000) >> 12,
580 (eeprom_data & 0x0FF0) >> 4, 599 (eeprom_data & 0x0FF0) >> 4,
581 eeprom_data & 0x000F); 600 eeprom_data & 0x000F);
582 break; 601 break;
583 default: 602 default:
584 sprintf(firmware_version, "n/a"); 603 sprintf(firmware_version, "N/A");
585 } 604 }
586 605
587 strncpy(drvinfo->fw_version, firmware_version, 32); 606 strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -613,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
613 ring->rx_jumbo_pending = 0; 632 ring->rx_jumbo_pending = 0;
614} 633}
615 634
616static int 635static int
617e1000_set_ringparam(struct net_device *netdev, 636e1000_set_ringparam(struct net_device *netdev,
618 struct ethtool_ringparam *ring) 637 struct ethtool_ringparam *ring)
619{ 638{
@@ -623,8 +642,8 @@ e1000_set_ringparam(struct net_device *netdev,
623 struct e1000_rx_ring *rxdr, *rx_old, *rx_new; 642 struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
624 int i, err, tx_ring_size, rx_ring_size; 643 int i, err, tx_ring_size, rx_ring_size;
625 644
626 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues; 645 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
627 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues; 646 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
628 647
629 if (netif_running(adapter->netdev)) 648 if (netif_running(adapter->netdev))
630 e1000_down(adapter); 649 e1000_down(adapter);
@@ -650,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
650 txdr = adapter->tx_ring; 669 txdr = adapter->tx_ring;
651 rxdr = adapter->rx_ring; 670 rxdr = adapter->rx_ring;
652 671
653 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 672 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
654 return -EINVAL; 673 return -EINVAL;
655 674
656 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 675 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
657 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 676 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
658 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 677 E1000_MAX_RXD : E1000_MAX_82544_RXD));
659 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 678 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
660 679
661 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); 680 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
662 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? 681 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
663 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 682 E1000_MAX_TXD : E1000_MAX_82544_TXD));
664 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 683 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
665 684
666 for (i = 0; i < adapter->num_queues; i++) { 685 for (i = 0; i < adapter->num_tx_queues; i++)
667 txdr[i].count = txdr->count; 686 txdr[i].count = txdr->count;
687 for (i = 0; i < adapter->num_rx_queues; i++)
668 rxdr[i].count = rxdr->count; 688 rxdr[i].count = rxdr->count;
669 }
670 689
671 if(netif_running(adapter->netdev)) { 690 if (netif_running(adapter->netdev)) {
672 /* Try to get new resources before deleting old */ 691 /* Try to get new resources before deleting old */
673 if ((err = e1000_setup_all_rx_resources(adapter))) 692 if ((err = e1000_setup_all_rx_resources(adapter)))
674 goto err_setup_rx; 693 goto err_setup_rx;
@@ -688,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
688 kfree(rx_old); 707 kfree(rx_old);
689 adapter->rx_ring = rx_new; 708 adapter->rx_ring = rx_new;
690 adapter->tx_ring = tx_new; 709 adapter->tx_ring = tx_new;
691 if((err = e1000_up(adapter))) 710 if ((err = e1000_up(adapter)))
692 return err; 711 return err;
693 } 712 }
694 713
@@ -707,10 +726,10 @@ err_setup_rx:
707 uint32_t pat, value; \ 726 uint32_t pat, value; \
708 uint32_t test[] = \ 727 uint32_t test[] = \
709 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 728 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
710 for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ 729 for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
711 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ 730 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
712 value = E1000_READ_REG(&adapter->hw, R); \ 731 value = E1000_READ_REG(&adapter->hw, R); \
713 if(value != (test[pat] & W & M)) { \ 732 if (value != (test[pat] & W & M)) { \
714 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ 733 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
715 "0x%08X expected 0x%08X\n", \ 734 "0x%08X expected 0x%08X\n", \
716 E1000_##R, value, (test[pat] & W & M)); \ 735 E1000_##R, value, (test[pat] & W & M)); \
@@ -726,7 +745,7 @@ err_setup_rx:
726 uint32_t value; \ 745 uint32_t value; \
727 E1000_WRITE_REG(&adapter->hw, R, W & M); \ 746 E1000_WRITE_REG(&adapter->hw, R, W & M); \
728 value = E1000_READ_REG(&adapter->hw, R); \ 747 value = E1000_READ_REG(&adapter->hw, R); \
729 if((W & M) != (value & M)) { \ 748 if ((W & M) != (value & M)) { \
730 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 749 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
731 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ 750 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
732 *data = (adapter->hw.mac_type < e1000_82543) ? \ 751 *data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -762,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
762 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); 781 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
763 E1000_WRITE_REG(&adapter->hw, STATUS, toggle); 782 E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
764 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; 783 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
765 if(value != after) { 784 if (value != after) {
766 DPRINTK(DRV, ERR, "failed STATUS register test got: " 785 DPRINTK(DRV, ERR, "failed STATUS register test got: "
767 "0x%08X expected: 0x%08X\n", after, value); 786 "0x%08X expected: 0x%08X\n", after, value);
768 *data = 1; 787 *data = 1;
@@ -790,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
790 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); 809 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
791 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 810 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
792 811
793 if(adapter->hw.mac_type >= e1000_82543) { 812 if (adapter->hw.mac_type >= e1000_82543) {
794 813
795 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); 814 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
796 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 815 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -798,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
798 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 817 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
799 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
800 819
801 for(i = 0; i < E1000_RAR_ENTRIES; i++) { 820 for (i = 0; i < E1000_RAR_ENTRIES; i++) {
802 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, 821 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
803 0xFFFFFFFF); 822 0xFFFFFFFF);
804 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 823 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -814,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
814 833
815 } 834 }
816 835
817 for(i = 0; i < E1000_MC_TBL_SIZE; i++) 836 for (i = 0; i < E1000_MC_TBL_SIZE; i++)
818 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 837 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
819 838
820 *data = 0; 839 *data = 0;
@@ -830,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
830 849
831 *data = 0; 850 *data = 0;
832 /* Read and add up the contents of the EEPROM */ 851 /* Read and add up the contents of the EEPROM */
833 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 852 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
834 if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { 853 if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
835 *data = 1; 854 *data = 1;
836 break; 855 break;
837 } 856 }
@@ -839,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
839 } 858 }
840 859
841 /* If Checksum is not Correct return error else test passed */ 860 /* If Checksum is not Correct return error else test passed */
842 if((checksum != (uint16_t) EEPROM_SUM) && !(*data)) 861 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
843 *data = 2; 862 *data = 2;
844 863
845 return *data; 864 return *data;
@@ -868,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
868 *data = 0; 887 *data = 0;
869 888
870 /* Hook up test interrupt handler just for this test */ 889 /* Hook up test interrupt handler just for this test */
871 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 890 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
872 shared_int = FALSE; 891 shared_int = FALSE;
873 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, 892 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
874 netdev->name, netdev)){ 893 netdev->name, netdev)){
875 *data = 1; 894 *data = 1;
876 return -1; 895 return -1;
@@ -881,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
881 msec_delay(10); 900 msec_delay(10);
882 901
883 /* Test each interrupt */ 902 /* Test each interrupt */
884 for(; i < 10; i++) { 903 for (; i < 10; i++) {
885 904
886 /* Interrupt to test */ 905 /* Interrupt to test */
887 mask = 1 << i; 906 mask = 1 << i;
888 907
889 if(!shared_int) { 908 if (!shared_int) {
890 /* Disable the interrupt to be reported in 909 /* Disable the interrupt to be reported in
891 * the cause register and then force the same 910 * the cause register and then force the same
892 * interrupt and see if one gets posted. If 911 * interrupt and see if one gets posted. If
@@ -897,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
897 E1000_WRITE_REG(&adapter->hw, IMC, mask); 916 E1000_WRITE_REG(&adapter->hw, IMC, mask);
898 E1000_WRITE_REG(&adapter->hw, ICS, mask); 917 E1000_WRITE_REG(&adapter->hw, ICS, mask);
899 msec_delay(10); 918 msec_delay(10);
900 919
901 if(adapter->test_icr & mask) { 920 if (adapter->test_icr & mask) {
902 *data = 3; 921 *data = 3;
903 break; 922 break;
904 } 923 }
@@ -915,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
915 E1000_WRITE_REG(&adapter->hw, ICS, mask); 934 E1000_WRITE_REG(&adapter->hw, ICS, mask);
916 msec_delay(10); 935 msec_delay(10);
917 936
918 if(!(adapter->test_icr & mask)) { 937 if (!(adapter->test_icr & mask)) {
919 *data = 4; 938 *data = 4;
920 break; 939 break;
921 } 940 }
922 941
923 if(!shared_int) { 942 if (!shared_int) {
924 /* Disable the other interrupts to be reported in 943 /* Disable the other interrupts to be reported in
925 * the cause register and then force the other 944 * the cause register and then force the other
926 * interrupts and see if any get posted. If 945 * interrupts and see if any get posted. If
@@ -932,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
932 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); 951 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
933 msec_delay(10); 952 msec_delay(10);
934 953
935 if(adapter->test_icr) { 954 if (adapter->test_icr) {
936 *data = 5; 955 *data = 5;
937 break; 956 break;
938 } 957 }
@@ -957,40 +976,39 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
957 struct pci_dev *pdev = adapter->pdev; 976 struct pci_dev *pdev = adapter->pdev;
958 int i; 977 int i;
959 978
960 if(txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
961 for(i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
962 if(txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
963 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 pci_unmap_single(pdev, txdr->buffer_info[i].dma,
964 txdr->buffer_info[i].length, 983 txdr->buffer_info[i].length,
965 PCI_DMA_TODEVICE); 984 PCI_DMA_TODEVICE);
966 if(txdr->buffer_info[i].skb) 985 if (txdr->buffer_info[i].skb)
967 dev_kfree_skb(txdr->buffer_info[i].skb); 986 dev_kfree_skb(txdr->buffer_info[i].skb);
968 } 987 }
969 } 988 }
970 989
971 if(rxdr->desc && rxdr->buffer_info) { 990 if (rxdr->desc && rxdr->buffer_info) {
972 for(i = 0; i < rxdr->count; i++) { 991 for (i = 0; i < rxdr->count; i++) {
973 if(rxdr->buffer_info[i].dma) 992 if (rxdr->buffer_info[i].dma)
974 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 993 pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
975 rxdr->buffer_info[i].length, 994 rxdr->buffer_info[i].length,
976 PCI_DMA_FROMDEVICE); 995 PCI_DMA_FROMDEVICE);
977 if(rxdr->buffer_info[i].skb) 996 if (rxdr->buffer_info[i].skb)
978 dev_kfree_skb(rxdr->buffer_info[i].skb); 997 dev_kfree_skb(rxdr->buffer_info[i].skb);
979 } 998 }
980 } 999 }
981 1000
982 if(txdr->desc) { 1001 if (txdr->desc) {
983 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); 1002 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
984 txdr->desc = NULL; 1003 txdr->desc = NULL;
985 } 1004 }
986 if(rxdr->desc) { 1005 if (rxdr->desc) {
987 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 1006 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
988 rxdr->desc = NULL; 1007 rxdr->desc = NULL;
989 } 1008 }
990 1009
991 kfree(txdr->buffer_info); 1010 kfree(txdr->buffer_info);
992 txdr->buffer_info = NULL; 1011 txdr->buffer_info = NULL;
993
994 kfree(rxdr->buffer_info); 1012 kfree(rxdr->buffer_info);
995 rxdr->buffer_info = NULL; 1013 rxdr->buffer_info = NULL;
996 1014
@@ -1008,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1008 1026
1009 /* Setup Tx descriptor ring and Tx buffers */ 1027 /* Setup Tx descriptor ring and Tx buffers */
1010 1028
1011 if(!txdr->count) 1029 if (!txdr->count)
1012 txdr->count = E1000_DEFAULT_TXD; 1030 txdr->count = E1000_DEFAULT_TXD;
1013 1031
1014 size = txdr->count * sizeof(struct e1000_buffer); 1032 size = txdr->count * sizeof(struct e1000_buffer);
1015 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1033 if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1016 ret_val = 1; 1034 ret_val = 1;
1017 goto err_nomem; 1035 goto err_nomem;
1018 } 1036 }
@@ -1020,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1020 1038
1021 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1039 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1022 E1000_ROUNDUP(txdr->size, 4096); 1040 E1000_ROUNDUP(txdr->size, 4096);
1023 if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { 1041 if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
1024 ret_val = 2; 1042 ret_val = 2;
1025 goto err_nomem; 1043 goto err_nomem;
1026 } 1044 }
@@ -1039,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1057 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1040 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1058 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1041 1059
1042 for(i = 0; i < txdr->count; i++) { 1060 for (i = 0; i < txdr->count; i++) {
1043 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); 1061 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
1044 struct sk_buff *skb; 1062 struct sk_buff *skb;
1045 unsigned int size = 1024; 1063 unsigned int size = 1024;
1046 1064
1047 if(!(skb = alloc_skb(size, GFP_KERNEL))) { 1065 if (!(skb = alloc_skb(size, GFP_KERNEL))) {
1048 ret_val = 3; 1066 ret_val = 3;
1049 goto err_nomem; 1067 goto err_nomem;
1050 } 1068 }
@@ -1064,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1064 1082
1065 /* Setup Rx descriptor ring and Rx buffers */ 1083 /* Setup Rx descriptor ring and Rx buffers */
1066 1084
1067 if(!rxdr->count) 1085 if (!rxdr->count)
1068 rxdr->count = E1000_DEFAULT_RXD; 1086 rxdr->count = E1000_DEFAULT_RXD;
1069 1087
1070 size = rxdr->count * sizeof(struct e1000_buffer); 1088 size = rxdr->count * sizeof(struct e1000_buffer);
1071 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 1089 if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1072 ret_val = 4; 1090 ret_val = 4;
1073 goto err_nomem; 1091 goto err_nomem;
1074 } 1092 }
1075 memset(rxdr->buffer_info, 0, size); 1093 memset(rxdr->buffer_info, 0, size);
1076 1094
1077 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1078 if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { 1096 if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
1079 ret_val = 5; 1097 ret_val = 5;
1080 goto err_nomem; 1098 goto err_nomem;
1081 } 1099 }
@@ -1095,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1095 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1113 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1096 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1114 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1097 1115
1098 for(i = 0; i < rxdr->count; i++) { 1116 for (i = 0; i < rxdr->count; i++) {
1099 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); 1117 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1100 struct sk_buff *skb; 1118 struct sk_buff *skb;
1101 1119
1102 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 1120 if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1103 GFP_KERNEL))) { 1121 GFP_KERNEL))) {
1104 ret_val = 6; 1122 ret_val = 6;
1105 goto err_nomem; 1123 goto err_nomem;
@@ -1208,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1208 1226
1209 /* Check Phy Configuration */ 1227 /* Check Phy Configuration */
1210 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); 1228 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1211 if(phy_reg != 0x4100) 1229 if (phy_reg != 0x4100)
1212 return 9; 1230 return 9;
1213 1231
1214 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); 1232 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1215 if(phy_reg != 0x0070) 1233 if (phy_reg != 0x0070)
1216 return 10; 1234 return 10;
1217 1235
1218 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); 1236 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
1219 if(phy_reg != 0x001A) 1237 if (phy_reg != 0x001A)
1220 return 11; 1238 return 11;
1221 1239
1222 return 0; 1240 return 0;
@@ -1230,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1230 1248
1231 adapter->hw.autoneg = FALSE; 1249 adapter->hw.autoneg = FALSE;
1232 1250
1233 if(adapter->hw.phy_type == e1000_phy_m88) { 1251 if (adapter->hw.phy_type == e1000_phy_m88) {
1234 /* Auto-MDI/MDIX Off */ 1252 /* Auto-MDI/MDIX Off */
1235 e1000_write_phy_reg(&adapter->hw, 1253 e1000_write_phy_reg(&adapter->hw,
1236 M88E1000_PHY_SPEC_CTRL, 0x0808); 1254 M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1250,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1250 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1268 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1251 E1000_CTRL_FD); /* Force Duplex to FULL */ 1269 E1000_CTRL_FD); /* Force Duplex to FULL */
1252 1270
1253 if(adapter->hw.media_type == e1000_media_type_copper && 1271 if (adapter->hw.media_type == e1000_media_type_copper &&
1254 adapter->hw.phy_type == e1000_phy_m88) { 1272 adapter->hw.phy_type == e1000_phy_m88) {
1255 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1273 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1256 } else { 1274 } else {
1257 /* Set the ILOS bit on the fiber Nic is half 1275 /* Set the ILOS bit on the fiber Nic is half
1258 * duplex link is detected. */ 1276 * duplex link is detected. */
1259 stat_reg = E1000_READ_REG(&adapter->hw, STATUS); 1277 stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
1260 if((stat_reg & E1000_STATUS_FD) == 0) 1278 if ((stat_reg & E1000_STATUS_FD) == 0)
1261 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1279 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1262 } 1280 }
1263 1281
@@ -1266,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1266 /* Disable the receiver on the PHY so when a cable is plugged in, the 1284 /* Disable the receiver on the PHY so when a cable is plugged in, the
1267 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1285 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1268 */ 1286 */
1269 if(adapter->hw.phy_type == e1000_phy_m88) 1287 if (adapter->hw.phy_type == e1000_phy_m88)
1270 e1000_phy_disable_receiver(adapter); 1288 e1000_phy_disable_receiver(adapter);
1271 1289
1272 udelay(500); 1290 udelay(500);
@@ -1282,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1282 1300
1283 switch (adapter->hw.mac_type) { 1301 switch (adapter->hw.mac_type) {
1284 case e1000_82543: 1302 case e1000_82543:
1285 if(adapter->hw.media_type == e1000_media_type_copper) { 1303 if (adapter->hw.media_type == e1000_media_type_copper) {
1286 /* Attempt to setup Loopback mode on Non-integrated PHY. 1304 /* Attempt to setup Loopback mode on Non-integrated PHY.
1287 * Some PHY registers get corrupted at random, so 1305 * Some PHY registers get corrupted at random, so
1288 * attempt this 10 times. 1306 * attempt this 10 times.
1289 */ 1307 */
1290 while(e1000_nonintegrated_phy_loopback(adapter) && 1308 while (e1000_nonintegrated_phy_loopback(adapter) &&
1291 count++ < 10); 1309 count++ < 10);
1292 if(count < 11) 1310 if (count < 11)
1293 return 0; 1311 return 0;
1294 } 1312 }
1295 break; 1313 break;
@@ -1327,11 +1345,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1327static int 1345static int
1328e1000_setup_loopback_test(struct e1000_adapter *adapter) 1346e1000_setup_loopback_test(struct e1000_adapter *adapter)
1329{ 1347{
1330 uint32_t rctl;
1331 struct e1000_hw *hw = &adapter->hw; 1348 struct e1000_hw *hw = &adapter->hw;
1349 uint32_t rctl;
1332 1350
1333 if (hw->media_type == e1000_media_type_fiber || 1351 if (hw->media_type == e1000_media_type_fiber ||
1334 hw->media_type == e1000_media_type_internal_serdes) { 1352 hw->media_type == e1000_media_type_internal_serdes) {
1335 switch (hw->mac_type) { 1353 switch (hw->mac_type) {
1336 case e1000_82545: 1354 case e1000_82545:
1337 case e1000_82546: 1355 case e1000_82546:
@@ -1362,25 +1380,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
1362static void 1380static void
1363e1000_loopback_cleanup(struct e1000_adapter *adapter) 1381e1000_loopback_cleanup(struct e1000_adapter *adapter)
1364{ 1382{
1383 struct e1000_hw *hw = &adapter->hw;
1365 uint32_t rctl; 1384 uint32_t rctl;
1366 uint16_t phy_reg; 1385 uint16_t phy_reg;
1367 struct e1000_hw *hw = &adapter->hw;
1368 1386
1369 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1387 rctl = E1000_READ_REG(hw, RCTL);
1370 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1388 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1371 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1389 E1000_WRITE_REG(hw, RCTL, rctl);
1372 1390
1373 switch (hw->mac_type) { 1391 switch (hw->mac_type) {
1374 case e1000_82571: 1392 case e1000_82571:
1375 case e1000_82572: 1393 case e1000_82572:
1376 if (hw->media_type == e1000_media_type_fiber || 1394 if (hw->media_type == e1000_media_type_fiber ||
1377 hw->media_type == e1000_media_type_internal_serdes){ 1395 hw->media_type == e1000_media_type_internal_serdes) {
1378#define E1000_SERDES_LB_OFF 0x400 1396#define E1000_SERDES_LB_OFF 0x400
1379 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); 1397 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
1380 msec_delay(10); 1398 msec_delay(10);
1381 break; 1399 break;
1382 } 1400 }
1383 /* fall thru for Cu adapters */ 1401 /* Fall Through */
1384 case e1000_82545: 1402 case e1000_82545:
1385 case e1000_82546: 1403 case e1000_82546:
1386 case e1000_82545_rev_3: 1404 case e1000_82545_rev_3:
@@ -1401,7 +1419,7 @@ static void
1401e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1419e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1402{ 1420{
1403 memset(skb->data, 0xFF, frame_size); 1421 memset(skb->data, 0xFF, frame_size);
1404 frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; 1422 frame_size &= ~1;
1405 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1423 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1406 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1424 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1407 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1425 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,9 +1428,9 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1410static int 1428static int
1411e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1429e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1412{ 1430{
1413 frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; 1431 frame_size &= ~1;
1414 if(*(skb->data + 3) == 0xFF) { 1432 if (*(skb->data + 3) == 0xFF) {
1415 if((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1433 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1416 (*(skb->data + frame_size / 2 + 12) == 0xAF)) { 1434 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1417 return 0; 1435 return 0;
1418 } 1436 }
@@ -1431,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1431 1449
1432 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); 1450 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1433 1451
1434 /* Calculate the loop count based on the largest descriptor ring 1452 /* Calculate the loop count based on the largest descriptor ring
1435 * The idea is to wrap the largest ring a number of times using 64 1453 * The idea is to wrap the largest ring a number of times using 64
1436 * send/receive pairs during each loop 1454 * send/receive pairs during each loop
1437 */ 1455 */
1438 1456
1439 if(rxdr->count <= txdr->count) 1457 if (rxdr->count <= txdr->count)
1440 lc = ((txdr->count / 64) * 2) + 1; 1458 lc = ((txdr->count / 64) * 2) + 1;
1441 else 1459 else
1442 lc = ((rxdr->count / 64) * 2) + 1; 1460 lc = ((rxdr->count / 64) * 2) + 1;
1443 1461
1444 k = l = 0; 1462 k = l = 0;
1445 for(j = 0; j <= lc; j++) { /* loop count loop */ 1463 for (j = 0; j <= lc; j++) { /* loop count loop */
1446 for(i = 0; i < 64; i++) { /* send the packets */ 1464 for (i = 0; i < 64; i++) { /* send the packets */
1447 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1465 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1448 1024); 1466 1024);
1449 pci_dma_sync_single_for_device(pdev, 1467 pci_dma_sync_single_for_device(pdev,
1450 txdr->buffer_info[k].dma, 1468 txdr->buffer_info[k].dma,
1451 txdr->buffer_info[k].length, 1469 txdr->buffer_info[k].length,
1452 PCI_DMA_TODEVICE); 1470 PCI_DMA_TODEVICE);
1453 if(unlikely(++k == txdr->count)) k = 0; 1471 if (unlikely(++k == txdr->count)) k = 0;
1454 } 1472 }
1455 E1000_WRITE_REG(&adapter->hw, TDT, k); 1473 E1000_WRITE_REG(&adapter->hw, TDT, k);
1456 msec_delay(200); 1474 msec_delay(200);
1457 time = jiffies; /* set the start time for the receive */ 1475 time = jiffies; /* set the start time for the receive */
1458 good_cnt = 0; 1476 good_cnt = 0;
1459 do { /* receive the sent packets */ 1477 do { /* receive the sent packets */
1460 pci_dma_sync_single_for_cpu(pdev, 1478 pci_dma_sync_single_for_cpu(pdev,
1461 rxdr->buffer_info[l].dma, 1479 rxdr->buffer_info[l].dma,
1462 rxdr->buffer_info[l].length, 1480 rxdr->buffer_info[l].length,
1463 PCI_DMA_FROMDEVICE); 1481 PCI_DMA_FROMDEVICE);
1464 1482
1465 ret_val = e1000_check_lbtest_frame( 1483 ret_val = e1000_check_lbtest_frame(
1466 rxdr->buffer_info[l].skb, 1484 rxdr->buffer_info[l].skb,
1467 1024); 1485 1024);
1468 if(!ret_val) 1486 if (!ret_val)
1469 good_cnt++; 1487 good_cnt++;
1470 if(unlikely(++l == rxdr->count)) l = 0; 1488 if (unlikely(++l == rxdr->count)) l = 0;
1471 /* time + 20 msecs (200 msecs on 2.4) is more than 1489 /* time + 20 msecs (200 msecs on 2.4) is more than
1472 * enough time to complete the receives, if it's 1490 * enough time to complete the receives, if it's
1473 * exceeded, break and error off 1491 * exceeded, break and error off
1474 */ 1492 */
1475 } while (good_cnt < 64 && jiffies < (time + 20)); 1493 } while (good_cnt < 64 && jiffies < (time + 20));
1476 if(good_cnt != 64) { 1494 if (good_cnt != 64) {
1477 ret_val = 13; /* ret_val is the same as mis-compare */ 1495 ret_val = 13; /* ret_val is the same as mis-compare */
1478 break; 1496 break;
1479 } 1497 }
1480 if(jiffies >= (time + 2)) { 1498 if (jiffies >= (time + 2)) {
1481 ret_val = 14; /* error code for time out error */ 1499 ret_val = 14; /* error code for time out error */
1482 break; 1500 break;
1483 } 1501 }
@@ -1488,14 +1506,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1488static int 1506static int
1489e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data) 1507e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
1490{ 1508{
1491 if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback; 1509 /* PHY loopback cannot be performed if SoL/IDER
1492 if((*data = e1000_setup_loopback_test(adapter))) 1510 * sessions are active */
1493 goto err_loopback_setup; 1511 if (e1000_check_phy_reset_block(&adapter->hw)) {
1512 DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
1513 "when SoL/IDER is active.\n");
1514 *data = 0;
1515 goto out;
1516 }
1517
1518 if ((*data = e1000_setup_desc_rings(adapter)))
1519 goto out;
1520 if ((*data = e1000_setup_loopback_test(adapter)))
1521 goto err_loopback;
1494 *data = e1000_run_loopback_test(adapter); 1522 *data = e1000_run_loopback_test(adapter);
1495 e1000_loopback_cleanup(adapter); 1523 e1000_loopback_cleanup(adapter);
1496err_loopback_setup: 1524
1497 e1000_free_desc_rings(adapter);
1498err_loopback: 1525err_loopback:
1526 e1000_free_desc_rings(adapter);
1527out:
1499 return *data; 1528 return *data;
1500} 1529}
1501 1530
@@ -1519,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1519 *data = 1; 1548 *data = 1;
1520 } else { 1549 } else {
1521 e1000_check_for_link(&adapter->hw); 1550 e1000_check_for_link(&adapter->hw);
1522 if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ 1551 if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
1523 msec_delay(4000); 1552 msec_delay(4000);
1524 1553
1525 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 1554 if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1526 *data = 1; 1555 *data = 1;
1527 } 1556 }
1528 } 1557 }
1529 return *data; 1558 return *data;
1530} 1559}
1531 1560
1532static int 1561static int
1533e1000_diag_test_count(struct net_device *netdev) 1562e1000_diag_test_count(struct net_device *netdev)
1534{ 1563{
1535 return E1000_TEST_LEN; 1564 return E1000_TEST_LEN;
@@ -1542,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
1542 struct e1000_adapter *adapter = netdev_priv(netdev); 1571 struct e1000_adapter *adapter = netdev_priv(netdev);
1543 boolean_t if_running = netif_running(netdev); 1572 boolean_t if_running = netif_running(netdev);
1544 1573
1545 if(eth_test->flags == ETH_TEST_FL_OFFLINE) { 1574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1546 /* Offline tests */ 1575 /* Offline tests */
1547 1576
1548 /* save speed, duplex, autoneg settings */ 1577 /* save speed, duplex, autoneg settings */
@@ -1552,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
1552 1581
1553 /* Link test performed before hardware reset so autoneg doesn't 1582 /* Link test performed before hardware reset so autoneg doesn't
1554 * interfere with test result */ 1583 * interfere with test result */
1555 if(e1000_link_test(adapter, &data[4])) 1584 if (e1000_link_test(adapter, &data[4]))
1556 eth_test->flags |= ETH_TEST_FL_FAILED; 1585 eth_test->flags |= ETH_TEST_FL_FAILED;
1557 1586
1558 if(if_running) 1587 if (if_running)
1559 e1000_down(adapter); 1588 e1000_down(adapter);
1560 else 1589 else
1561 e1000_reset(adapter); 1590 e1000_reset(adapter);
1562 1591
1563 if(e1000_reg_test(adapter, &data[0])) 1592 if (e1000_reg_test(adapter, &data[0]))
1564 eth_test->flags |= ETH_TEST_FL_FAILED; 1593 eth_test->flags |= ETH_TEST_FL_FAILED;
1565 1594
1566 e1000_reset(adapter); 1595 e1000_reset(adapter);
1567 if(e1000_eeprom_test(adapter, &data[1])) 1596 if (e1000_eeprom_test(adapter, &data[1]))
1568 eth_test->flags |= ETH_TEST_FL_FAILED; 1597 eth_test->flags |= ETH_TEST_FL_FAILED;
1569 1598
1570 e1000_reset(adapter); 1599 e1000_reset(adapter);
1571 if(e1000_intr_test(adapter, &data[2])) 1600 if (e1000_intr_test(adapter, &data[2]))
1572 eth_test->flags |= ETH_TEST_FL_FAILED; 1601 eth_test->flags |= ETH_TEST_FL_FAILED;
1573 1602
1574 e1000_reset(adapter); 1603 e1000_reset(adapter);
1575 if(e1000_loopback_test(adapter, &data[3])) 1604 if (e1000_loopback_test(adapter, &data[3]))
1576 eth_test->flags |= ETH_TEST_FL_FAILED; 1605 eth_test->flags |= ETH_TEST_FL_FAILED;
1577 1606
1578 /* restore speed, duplex, autoneg settings */ 1607 /* restore speed, duplex, autoneg settings */
@@ -1581,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
1581 adapter->hw.autoneg = autoneg; 1610 adapter->hw.autoneg = autoneg;
1582 1611
1583 e1000_reset(adapter); 1612 e1000_reset(adapter);
1584 if(if_running) 1613 if (if_running)
1585 e1000_up(adapter); 1614 e1000_up(adapter);
1586 } else { 1615 } else {
1587 /* Online tests */ 1616 /* Online tests */
1588 if(e1000_link_test(adapter, &data[4])) 1617 if (e1000_link_test(adapter, &data[4]))
1589 eth_test->flags |= ETH_TEST_FL_FAILED; 1618 eth_test->flags |= ETH_TEST_FL_FAILED;
1590 1619
1591 /* Offline tests aren't run; pass by default */ 1620 /* Offline tests aren't run; pass by default */
@@ -1603,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1603 struct e1000_adapter *adapter = netdev_priv(netdev); 1632 struct e1000_adapter *adapter = netdev_priv(netdev);
1604 struct e1000_hw *hw = &adapter->hw; 1633 struct e1000_hw *hw = &adapter->hw;
1605 1634
1606 switch(adapter->hw.device_id) { 1635 switch (adapter->hw.device_id) {
1607 case E1000_DEV_ID_82542: 1636 case E1000_DEV_ID_82542:
1608 case E1000_DEV_ID_82543GC_FIBER: 1637 case E1000_DEV_ID_82543GC_FIBER:
1609 case E1000_DEV_ID_82543GC_COPPER: 1638 case E1000_DEV_ID_82543GC_COPPER:
@@ -1617,8 +1646,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1617 1646
1618 case E1000_DEV_ID_82546EB_FIBER: 1647 case E1000_DEV_ID_82546EB_FIBER:
1619 case E1000_DEV_ID_82546GB_FIBER: 1648 case E1000_DEV_ID_82546GB_FIBER:
1649 case E1000_DEV_ID_82571EB_FIBER:
1620 /* Wake events only supported on port A for dual fiber */ 1650 /* Wake events only supported on port A for dual fiber */
1621 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { 1651 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
1622 wol->supported = 0; 1652 wol->supported = 0;
1623 wol->wolopts = 0; 1653 wol->wolopts = 0;
1624 return; 1654 return;
@@ -1630,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1630 WAKE_BCAST | WAKE_MAGIC; 1660 WAKE_BCAST | WAKE_MAGIC;
1631 1661
1632 wol->wolopts = 0; 1662 wol->wolopts = 0;
1633 if(adapter->wol & E1000_WUFC_EX) 1663 if (adapter->wol & E1000_WUFC_EX)
1634 wol->wolopts |= WAKE_UCAST; 1664 wol->wolopts |= WAKE_UCAST;
1635 if(adapter->wol & E1000_WUFC_MC) 1665 if (adapter->wol & E1000_WUFC_MC)
1636 wol->wolopts |= WAKE_MCAST; 1666 wol->wolopts |= WAKE_MCAST;
1637 if(adapter->wol & E1000_WUFC_BC) 1667 if (adapter->wol & E1000_WUFC_BC)
1638 wol->wolopts |= WAKE_BCAST; 1668 wol->wolopts |= WAKE_BCAST;
1639 if(adapter->wol & E1000_WUFC_MAG) 1669 if (adapter->wol & E1000_WUFC_MAG)
1640 wol->wolopts |= WAKE_MAGIC; 1670 wol->wolopts |= WAKE_MAGIC;
1641 return; 1671 return;
1642 } 1672 }
@@ -1648,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1648 struct e1000_adapter *adapter = netdev_priv(netdev); 1678 struct e1000_adapter *adapter = netdev_priv(netdev);
1649 struct e1000_hw *hw = &adapter->hw; 1679 struct e1000_hw *hw = &adapter->hw;
1650 1680
1651 switch(adapter->hw.device_id) { 1681 switch (adapter->hw.device_id) {
1652 case E1000_DEV_ID_82542: 1682 case E1000_DEV_ID_82542:
1653 case E1000_DEV_ID_82543GC_FIBER: 1683 case E1000_DEV_ID_82543GC_FIBER:
1654 case E1000_DEV_ID_82543GC_COPPER: 1684 case E1000_DEV_ID_82543GC_COPPER:
@@ -1660,24 +1690,25 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1660 1690
1661 case E1000_DEV_ID_82546EB_FIBER: 1691 case E1000_DEV_ID_82546EB_FIBER:
1662 case E1000_DEV_ID_82546GB_FIBER: 1692 case E1000_DEV_ID_82546GB_FIBER:
1693 case E1000_DEV_ID_82571EB_FIBER:
1663 /* Wake events only supported on port A for dual fiber */ 1694 /* Wake events only supported on port A for dual fiber */
1664 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 1695 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
1665 return wol->wolopts ? -EOPNOTSUPP : 0; 1696 return wol->wolopts ? -EOPNOTSUPP : 0;
1666 /* Fall Through */ 1697 /* Fall Through */
1667 1698
1668 default: 1699 default:
1669 if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1700 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1670 return -EOPNOTSUPP; 1701 return -EOPNOTSUPP;
1671 1702
1672 adapter->wol = 0; 1703 adapter->wol = 0;
1673 1704
1674 if(wol->wolopts & WAKE_UCAST) 1705 if (wol->wolopts & WAKE_UCAST)
1675 adapter->wol |= E1000_WUFC_EX; 1706 adapter->wol |= E1000_WUFC_EX;
1676 if(wol->wolopts & WAKE_MCAST) 1707 if (wol->wolopts & WAKE_MCAST)
1677 adapter->wol |= E1000_WUFC_MC; 1708 adapter->wol |= E1000_WUFC_MC;
1678 if(wol->wolopts & WAKE_BCAST) 1709 if (wol->wolopts & WAKE_BCAST)
1679 adapter->wol |= E1000_WUFC_BC; 1710 adapter->wol |= E1000_WUFC_BC;
1680 if(wol->wolopts & WAKE_MAGIC) 1711 if (wol->wolopts & WAKE_MAGIC)
1681 adapter->wol |= E1000_WUFC_MAG; 1712 adapter->wol |= E1000_WUFC_MAG;
1682 } 1713 }
1683 1714
@@ -1695,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
1695{ 1726{
1696 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1727 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1697 1728
1698 if(test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1729 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1699 e1000_led_off(&adapter->hw); 1730 e1000_led_off(&adapter->hw);
1700 else 1731 else
1701 e1000_led_on(&adapter->hw); 1732 e1000_led_on(&adapter->hw);
@@ -1708,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1708{ 1739{
1709 struct e1000_adapter *adapter = netdev_priv(netdev); 1740 struct e1000_adapter *adapter = netdev_priv(netdev);
1710 1741
1711 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 1742 if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1712 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 1743 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1713 1744
1714 if(adapter->hw.mac_type < e1000_82571) { 1745 if (adapter->hw.mac_type < e1000_82571) {
1715 if(!adapter->blink_timer.function) { 1746 if (!adapter->blink_timer.function) {
1716 init_timer(&adapter->blink_timer); 1747 init_timer(&adapter->blink_timer);
1717 adapter->blink_timer.function = e1000_led_blink_callback; 1748 adapter->blink_timer.function = e1000_led_blink_callback;
1718 adapter->blink_timer.data = (unsigned long) adapter; 1749 adapter->blink_timer.data = (unsigned long) adapter;
@@ -1721,21 +1752,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1721 mod_timer(&adapter->blink_timer, jiffies); 1752 mod_timer(&adapter->blink_timer, jiffies);
1722 msleep_interruptible(data * 1000); 1753 msleep_interruptible(data * 1000);
1723 del_timer_sync(&adapter->blink_timer); 1754 del_timer_sync(&adapter->blink_timer);
1724 } 1755 } else if (adapter->hw.mac_type < e1000_82573) {
1725 else if(adapter->hw.mac_type < e1000_82573) { 1756 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1726 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | 1757 (E1000_LEDCTL_LED2_BLINK_RATE |
1727 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | 1758 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
1728 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | 1759 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1729 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | 1760 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
1730 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); 1761 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
1731 msleep_interruptible(data * 1000); 1762 msleep_interruptible(data * 1000);
1732 } 1763 } else {
1733 else { 1764 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1734 E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | 1765 (E1000_LEDCTL_LED2_BLINK_RATE |
1735 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | 1766 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
1736 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | 1767 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1737 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | 1768 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
1738 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); 1769 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
1739 msleep_interruptible(data * 1000); 1770 msleep_interruptible(data * 1000);
1740 } 1771 }
1741 1772
@@ -1750,50 +1781,89 @@ static int
1750e1000_nway_reset(struct net_device *netdev) 1781e1000_nway_reset(struct net_device *netdev)
1751{ 1782{
1752 struct e1000_adapter *adapter = netdev_priv(netdev); 1783 struct e1000_adapter *adapter = netdev_priv(netdev);
1753 if(netif_running(netdev)) { 1784 if (netif_running(netdev)) {
1754 e1000_down(adapter); 1785 e1000_down(adapter);
1755 e1000_up(adapter); 1786 e1000_up(adapter);
1756 } 1787 }
1757 return 0; 1788 return 0;
1758} 1789}
1759 1790
1760static int 1791static int
1761e1000_get_stats_count(struct net_device *netdev) 1792e1000_get_stats_count(struct net_device *netdev)
1762{ 1793{
1763 return E1000_STATS_LEN; 1794 return E1000_STATS_LEN;
1764} 1795}
1765 1796
1766static void 1797static void
1767e1000_get_ethtool_stats(struct net_device *netdev, 1798e1000_get_ethtool_stats(struct net_device *netdev,
1768 struct ethtool_stats *stats, uint64_t *data) 1799 struct ethtool_stats *stats, uint64_t *data)
1769{ 1800{
1770 struct e1000_adapter *adapter = netdev_priv(netdev); 1801 struct e1000_adapter *adapter = netdev_priv(netdev);
1802#ifdef CONFIG_E1000_MQ
1803 uint64_t *queue_stat;
1804 int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
1805 int j, k;
1806#endif
1771 int i; 1807 int i;
1772 1808
1773 e1000_update_stats(adapter); 1809 e1000_update_stats(adapter);
1774 for(i = 0; i < E1000_STATS_LEN; i++) { 1810 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1775 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1811 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
1776 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1812 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1777 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 1813 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
1778 } 1814 }
1815#ifdef CONFIG_E1000_MQ
1816 for (j = 0; j < adapter->num_tx_queues; j++) {
1817 queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
1818 for (k = 0; k < stat_count; k++)
1819 data[i + k] = queue_stat[k];
1820 i += k;
1821 }
1822 for (j = 0; j < adapter->num_rx_queues; j++) {
1823 queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
1824 for (k = 0; k < stat_count; k++)
1825 data[i + k] = queue_stat[k];
1826 i += k;
1827 }
1828#endif
1829/* BUG_ON(i != E1000_STATS_LEN); */
1779} 1830}
1780 1831
1781static void 1832static void
1782e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1833e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1783{ 1834{
1835#ifdef CONFIG_E1000_MQ
1836 struct e1000_adapter *adapter = netdev_priv(netdev);
1837#endif
1838 uint8_t *p = data;
1784 int i; 1839 int i;
1785 1840
1786 switch(stringset) { 1841 switch (stringset) {
1787 case ETH_SS_TEST: 1842 case ETH_SS_TEST:
1788 memcpy(data, *e1000_gstrings_test, 1843 memcpy(data, *e1000_gstrings_test,
1789 E1000_TEST_LEN*ETH_GSTRING_LEN); 1844 E1000_TEST_LEN*ETH_GSTRING_LEN);
1790 break; 1845 break;
1791 case ETH_SS_STATS: 1846 case ETH_SS_STATS:
1792 for (i=0; i < E1000_STATS_LEN; i++) { 1847 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1793 memcpy(data + i * ETH_GSTRING_LEN, 1848 memcpy(p, e1000_gstrings_stats[i].stat_string,
1794 e1000_gstrings_stats[i].stat_string, 1849 ETH_GSTRING_LEN);
1795 ETH_GSTRING_LEN); 1850 p += ETH_GSTRING_LEN;
1796 } 1851 }
1852#ifdef CONFIG_E1000_MQ
1853 for (i = 0; i < adapter->num_tx_queues; i++) {
1854 sprintf(p, "tx_queue_%u_packets", i);
1855 p += ETH_GSTRING_LEN;
1856 sprintf(p, "tx_queue_%u_bytes", i);
1857 p += ETH_GSTRING_LEN;
1858 }
1859 for (i = 0; i < adapter->num_rx_queues; i++) {
1860 sprintf(p, "rx_queue_%u_packets", i);
1861 p += ETH_GSTRING_LEN;
1862 sprintf(p, "rx_queue_%u_bytes", i);
1863 p += ETH_GSTRING_LEN;
1864 }
1865#endif
1866/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
1797 break; 1867 break;
1798 } 1868 }
1799} 1869}
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
318 case E1000_DEV_ID_82546GB_FIBER: 318 case E1000_DEV_ID_82546GB_FIBER:
319 case E1000_DEV_ID_82546GB_SERDES: 319 case E1000_DEV_ID_82546GB_SERDES:
320 case E1000_DEV_ID_82546GB_PCIE: 320 case E1000_DEV_ID_82546GB_PCIE:
321 case E1000_DEV_ID_82546GB_QUAD_COPPER:
322 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
321 hw->mac_type = e1000_82546_rev_3; 323 hw->mac_type = e1000_82546_rev_3;
322 break; 324 break;
323 case E1000_DEV_ID_82541EI: 325 case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
639 uint16_t cmd_mmrbc; 641 uint16_t cmd_mmrbc;
640 uint16_t stat_mmrbc; 642 uint16_t stat_mmrbc;
641 uint32_t mta_size; 643 uint32_t mta_size;
644 uint32_t ctrl_ext;
642 645
643 DEBUGFUNC("e1000_init_hw"); 646 DEBUGFUNC("e1000_init_hw");
644 647
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
735 break; 738 break;
736 case e1000_82571: 739 case e1000_82571:
737 case e1000_82572: 740 case e1000_82572:
738 ctrl |= (1 << 22);
739 case e1000_82573: 741 case e1000_82573:
740 ctrl |= E1000_TXDCTL_COUNT_DESC; 742 ctrl |= E1000_TXDCTL_COUNT_DESC;
741 break; 743 break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
775 */ 777 */
776 e1000_clear_hw_cntrs(hw); 778 e1000_clear_hw_cntrs(hw);
777 779
780 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
781 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
782 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
783 /* Relaxed ordering must be disabled to avoid a parity
784 * error crash in a PCI slot. */
785 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
786 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
787 }
788
778 return ret_val; 789 return ret_val;
779} 790}
780 791
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
838 849
839 DEBUGFUNC("e1000_setup_link"); 850 DEBUGFUNC("e1000_setup_link");
840 851
852 /* In the case of the phy reset being blocked, we already have a link.
853 * We do not have to set it up again. */
854 if (e1000_check_phy_reset_block(hw))
855 return E1000_SUCCESS;
856
841 /* Read and store word 0x0F of the EEPROM. This word contains bits 857 /* Read and store word 0x0F of the EEPROM. This word contains bits
842 * that determine the hardware's default PAUSE (flow control) mode, 858 * that determine the hardware's default PAUSE (flow control) mode,
843 * a bit that determines whether the HW defaults to enabling or 859 * a bit that determines whether the HW defaults to enabling or
@@ -1584,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1584 if(ret_val) 1600 if(ret_val)
1585 return ret_val; 1601 return ret_val;
1586 1602
1587 /* Read the MII 1000Base-T Control Register (Address 9). */ 1603 /* Read the MII 1000Base-T Control Register (Address 9). */
1588 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1604 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1589 if(ret_val) 1605 if(ret_val)
1590 return ret_val; 1606 return ret_val;
1591 1607
1592 /* Need to parse both autoneg_advertised and fc and set up 1608 /* Need to parse both autoneg_advertised and fc and set up
1593 * the appropriate PHY registers. First we will parse for 1609 * the appropriate PHY registers. First we will parse for
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1929void 1945void
1930e1000_config_collision_dist(struct e1000_hw *hw) 1946e1000_config_collision_dist(struct e1000_hw *hw)
1931{ 1947{
1932 uint32_t tctl; 1948 uint32_t tctl, coll_dist;
1933 1949
1934 DEBUGFUNC("e1000_config_collision_dist"); 1950 DEBUGFUNC("e1000_config_collision_dist");
1935 1951
1952 if (hw->mac_type < e1000_82543)
1953 coll_dist = E1000_COLLISION_DISTANCE_82542;
1954 else
1955 coll_dist = E1000_COLLISION_DISTANCE;
1956
1936 tctl = E1000_READ_REG(hw, TCTL); 1957 tctl = E1000_READ_REG(hw, TCTL);
1937 1958
1938 tctl &= ~E1000_TCTL_COLD; 1959 tctl &= ~E1000_TCTL_COLD;
1939 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; 1960 tctl |= coll_dist << E1000_COLD_SHIFT;
1940 1961
1941 E1000_WRITE_REG(hw, TCTL, tctl); 1962 E1000_WRITE_REG(hw, TCTL, tctl);
1942 E1000_WRITE_FLUSH(hw); 1963 E1000_WRITE_FLUSH(hw);
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2982 3003
2983 if (hw->mac_type < e1000_82571) 3004 if (hw->mac_type < e1000_82571)
2984 msec_delay(10); 3005 msec_delay(10);
3006 else
3007 udelay(100);
2985 3008
2986 E1000_WRITE_REG(hw, CTRL, ctrl); 3009 E1000_WRITE_REG(hw, CTRL, ctrl);
2987 E1000_WRITE_FLUSH(hw); 3010 E1000_WRITE_FLUSH(hw);
@@ -3881,17 +3904,19 @@ e1000_read_eeprom(struct e1000_hw *hw,
3881 return -E1000_ERR_EEPROM; 3904 return -E1000_ERR_EEPROM;
3882 } 3905 }
3883 3906
3884 /* FLASH reads without acquiring the semaphore are safe in 82573-based 3907 /* FLASH reads without acquiring the semaphore are safe */
3885 * controllers. 3908 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
3886 */ 3909 hw->eeprom.use_eerd == FALSE) {
3887 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3910 switch (hw->mac_type) {
3888 (hw->mac_type != e1000_82573)) { 3911 default:
3889 /* Prepare the EEPROM for reading */ 3912 /* Prepare the EEPROM for reading */
3890 if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3913 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3891 return -E1000_ERR_EEPROM; 3914 return -E1000_ERR_EEPROM;
3915 break;
3916 }
3892 } 3917 }
3893 3918
3894 if(eeprom->use_eerd == TRUE) { 3919 if (eeprom->use_eerd == TRUE) {
3895 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3920 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
3896 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3921 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3897 (hw->mac_type != e1000_82573)) 3922 (hw->mac_type != e1000_82573))
@@ -4398,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4398 return -E1000_ERR_EEPROM; 4423 return -E1000_ERR_EEPROM;
4399 } 4424 }
4400 4425
4401 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4426 /* If STM opcode located in bits 15:8 of flop, reset firmware */
4402 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4427 if ((flop & 0xFF00) == E1000_STM_OPCODE) {
4403 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 4428 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
4404 } 4429 }
@@ -4406,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4406 /* Perform the flash update */ 4431 /* Perform the flash update */
4407 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 4432 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
4408 4433
4409 for (i=0; i < attempts; i++) { 4434 for (i=0; i < attempts; i++) {
4410 eecd = E1000_READ_REG(hw, EECD); 4435 eecd = E1000_READ_REG(hw, EECD);
4411 if ((eecd & E1000_EECD_FLUPD) == 0) { 4436 if ((eecd & E1000_EECD_FLUPD) == 0) {
4412 break; 4437 break;
@@ -4479,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4479 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 4504 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
4480 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 4505 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
4481 } 4506 }
4507
4482 switch (hw->mac_type) { 4508 switch (hw->mac_type) {
4483 default: 4509 default:
4484 break; 4510 break;
@@ -6720,6 +6746,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
6720 break; 6746 break;
6721 } 6747 }
6722 6748
6749 /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
6750 * Need to wait for PHY configuration completion before accessing NVM
6751 * and PHY. */
6752 if (hw->mac_type == e1000_82573)
6753 msec_delay(25);
6754
6723 return E1000_SUCCESS; 6755 return E1000_SUCCESS;
6724} 6756}
6725 6757
@@ -6809,7 +6841,8 @@ int32_t
6809e1000_check_phy_reset_block(struct e1000_hw *hw) 6841e1000_check_phy_reset_block(struct e1000_hw *hw)
6810{ 6842{
6811 uint32_t manc = 0; 6843 uint32_t manc = 0;
6812 if(hw->mac_type > e1000_82547_rev_2) 6844
6845 if (hw->mac_type > e1000_82547_rev_2)
6813 manc = E1000_READ_REG(hw, MANC); 6846 manc = E1000_READ_REG(hw, MANC);
6814 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 6847 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
6815 E1000_BLK_PHY_RESET : E1000_SUCCESS; 6848 E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748cea..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 377void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
378 378
379/* Filters (multicast, vlan, receive) */ 379/* Filters (multicast, vlan, receive) */
380void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
380uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 381uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
381void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 382void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
382void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 383void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
401void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 402void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
402/* Port I/O is only supported on 82544 and newer */ 403/* Port I/O is only supported on 82544 and newer */
403uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); 404uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
405uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
404void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 406void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
407void e1000_enable_pciex_master(struct e1000_hw *hw);
405int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 408int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
406int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 409int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
407void e1000_release_software_semaphore(struct e1000_hw *hw); 410void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -439,6 +442,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
439#define E1000_DEV_ID_82546GB_FIBER 0x107A 442#define E1000_DEV_ID_82546GB_FIBER 0x107A
440#define E1000_DEV_ID_82546GB_SERDES 0x107B 443#define E1000_DEV_ID_82546GB_SERDES 0x107B
441#define E1000_DEV_ID_82546GB_PCIE 0x108A 444#define E1000_DEV_ID_82546GB_PCIE 0x108A
445#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
442#define E1000_DEV_ID_82547EI 0x1019 446#define E1000_DEV_ID_82547EI 0x1019
443#define E1000_DEV_ID_82571EB_COPPER 0x105E 447#define E1000_DEV_ID_82571EB_COPPER 0x105E
444#define E1000_DEV_ID_82571EB_FIBER 0x105F 448#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -449,6 +453,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
449#define E1000_DEV_ID_82573E 0x108B 453#define E1000_DEV_ID_82573E 0x108B
450#define E1000_DEV_ID_82573E_IAMT 0x108C 454#define E1000_DEV_ID_82573E_IAMT 0x108C
451#define E1000_DEV_ID_82573L 0x109A 455#define E1000_DEV_ID_82573L 0x109A
456#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
452 457
453 458
454#define NODE_ADDRESS_SIZE 6 459#define NODE_ADDRESS_SIZE 6
@@ -897,14 +902,14 @@ struct e1000_ffvt_entry {
897#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 902#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
898#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 903#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
899#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 904#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
900#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 905#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
901#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 906#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
902#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 907#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
903#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 908#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
904#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 909#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
905#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 910#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
906#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 911#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
907#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 912#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
908#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 913#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
909#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 914#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
910#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 915#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1497,6 +1502,7 @@ struct e1000_hw {
1497#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 1502#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
1498#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ 1503#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
1499#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 1504#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
1505#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
1500#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1506#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1501#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1507#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1502#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1508#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1758,7 +1764,6 @@ struct e1000_hw {
1758#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1764#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1759#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1765#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1760 still to be processed. */ 1766 still to be processed. */
1761
1762/* Transmit Configuration Word */ 1767/* Transmit Configuration Word */
1763#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1768#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1764#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1769#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
@@ -1954,6 +1959,23 @@ struct e1000_host_command_info {
1954 1959
1955#define E1000_MDALIGN 4096 1960#define E1000_MDALIGN 4096
1956 1961
1962/* PCI-Ex registers */
1963
1964/* PCI-Ex Control Register */
1965#define E1000_GCR_RXD_NO_SNOOP 0x00000001
1966#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
1967#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
1968#define E1000_GCR_TXD_NO_SNOOP 0x00000008
1969#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
1970#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
1971
1972#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
1973 E1000_GCR_RXDSCW_NO_SNOOP | \
1974 E1000_GCR_RXDSCR_NO_SNOOP | \
1975 E1000_GCR TXD_NO_SNOOP | \
1976 E1000_GCR_TXDSCW_NO_SNOOP | \
1977 E1000_GCR_TXDSCR_NO_SNOOP)
1978
1957#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 1979#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1958/* Function Active and Power State to MNG */ 1980/* Function Active and Power State to MNG */
1959#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1981#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
@@ -2077,7 +2099,10 @@ struct e1000_host_command_info {
2077/* Collision related configuration parameters */ 2099/* Collision related configuration parameters */
2078#define E1000_COLLISION_THRESHOLD 15 2100#define E1000_COLLISION_THRESHOLD 15
2079#define E1000_CT_SHIFT 4 2101#define E1000_CT_SHIFT 4
2080#define E1000_COLLISION_DISTANCE 64 2102/* Collision distance is a 0-based value that applies to
2103 * half-duplex-capable hardware only. */
2104#define E1000_COLLISION_DISTANCE 63
2105#define E1000_COLLISION_DISTANCE_82542 64
2081#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2106#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2082#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE 2107#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
2083#define E1000_COLD_SHIFT 12 2108#define E1000_COLD_SHIFT 12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..31e332935e5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 6.0.58 4/20/05 32 * 6.3.9 12/16/2005
33 * o Accepted ethtool cleanup patch from Stephen Hemminger 33 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.0.44+ 2/15/05 34 * 6.3.7 11/18/2005
35 * o applied Anton's patch to resolve tx hang in hardware 35 * o Honor eeprom setting for enabling/disabling Wake On Lan
36 * o Applied Andrew Mortons patch - e1000 stops working after resume 36 * 6.3.5 11/17/2005
37 * o Fix memory leak in rx ring handling for PCI Express adapters
38 * 6.3.4 11/8/05
39 * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
40 * 6.3.2 9/20/05
41 * o Render logic that sets/resets DRV_LOAD as inline functions to
42 * avoid code replication. If f/w is AMT then set DRV_LOAD only when
43 * network interface is open.
44 * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
45 * o Adjust PBA partitioning for Jumbo frames using MTU size and not
46 * rx_buffer_len
47 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
49 (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
56 * accesses the FWSM that is not supported in other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
60 * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
63 * o Fix error in updating RDT in e1000_alloc_rx_buffers[_ps] -- one off.
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
67 (Padraig Brady)
68 * 6.2.9 8/30/05
69 * o Remove call to update statistics from the controller in e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
90 * o used fixed size descriptors for all MTU sizes, reduces memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
95 * calls, one from mii_ioctl and other from within update_stats while
96 * processing MIIREG ioctl.
37 */ 97 */
38 98
39char e1000_driver_name[] = "e1000"; 99char e1000_driver_name[] = "e1000";
@@ -43,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
43#else 103#else
44#define DRIVERNAPI "-NAPI" 104#define DRIVERNAPI "-NAPI"
45#endif 105#endif
46#define DRV_VERSION "6.1.16-k2"DRIVERNAPI 106#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
47char e1000_driver_version[] = DRV_VERSION; 107char e1000_driver_version[] = DRV_VERSION;
48static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 108static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
49 109
@@ -97,7 +157,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
97 INTEL_E1000_ETHERNET_DEVICE(0x108A), 157 INTEL_E1000_ETHERNET_DEVICE(0x108A),
98 INTEL_E1000_ETHERNET_DEVICE(0x108B), 158 INTEL_E1000_ETHERNET_DEVICE(0x108B),
99 INTEL_E1000_ETHERNET_DEVICE(0x108C), 159 INTEL_E1000_ETHERNET_DEVICE(0x108C),
160 INTEL_E1000_ETHERNET_DEVICE(0x1099),
100 INTEL_E1000_ETHERNET_DEVICE(0x109A), 161 INTEL_E1000_ETHERNET_DEVICE(0x109A),
162 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
101 /* required last entry */ 163 /* required last entry */
102 {0,} 164 {0,}
103}; 165};
@@ -171,9 +233,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
171 struct e1000_rx_ring *rx_ring); 233 struct e1000_rx_ring *rx_ring);
172#endif 234#endif
173static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 235static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
174 struct e1000_rx_ring *rx_ring); 236 struct e1000_rx_ring *rx_ring,
237 int cleaned_count);
175static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 238static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring); 239 struct e1000_rx_ring *rx_ring,
240 int cleaned_count);
177static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 241static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
178static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 242static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
179 int cmd); 243 int cmd);
@@ -291,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
291static inline void 355static inline void
292e1000_irq_enable(struct e1000_adapter *adapter) 356e1000_irq_enable(struct e1000_adapter *adapter)
293{ 357{
294 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
295 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
296 E1000_WRITE_FLUSH(&adapter->hw); 360 E1000_WRITE_FLUSH(&adapter->hw);
297 } 361 }
@@ -303,23 +367,91 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
303 struct net_device *netdev = adapter->netdev; 367 struct net_device *netdev = adapter->netdev;
304 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
305 uint16_t old_vid = adapter->mng_vlan_id; 369 uint16_t old_vid = adapter->mng_vlan_id;
306 if(adapter->vlgrp) { 370 if (adapter->vlgrp) {
307 if(!adapter->vlgrp->vlan_devices[vid]) { 371 if (!adapter->vlgrp->vlan_devices[vid]) {
308 if(adapter->hw.mng_cookie.status & 372 if (adapter->hw.mng_cookie.status &
309 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
310 e1000_vlan_rx_add_vid(netdev, vid); 374 e1000_vlan_rx_add_vid(netdev, vid);
311 adapter->mng_vlan_id = vid; 375 adapter->mng_vlan_id = vid;
312 } else 376 } else
313 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
314 378
315 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
316 (vid != old_vid) && 380 (vid != old_vid) &&
317 !adapter->vlgrp->vlan_devices[old_vid]) 381 !adapter->vlgrp->vlan_devices[old_vid])
318 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 e1000_vlan_rx_kill_vid(netdev, old_vid);
319 } 383 }
320 } 384 }
321} 385}
322 386
387/**
388 * e1000_release_hw_control - release control of the h/w to f/w
389 * @adapter: address of board private structure
390 *
391 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
392 * For ASF and Pass Through versions of f/w this means that the
393 * driver is no longer loaded. For AMT version (only with 82573)
394 * of the f/w this means that the network i/f is closed.
395 *
396 **/
397
398static inline void
399e1000_release_hw_control(struct e1000_adapter *adapter)
400{
401 uint32_t ctrl_ext;
402 uint32_t swsm;
403
404 /* Let firmware take over control of h/w */
405 switch (adapter->hw.mac_type) {
406 case e1000_82571:
407 case e1000_82572:
408 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
409 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
410 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
411 break;
412 case e1000_82573:
413 swsm = E1000_READ_REG(&adapter->hw, SWSM);
414 E1000_WRITE_REG(&adapter->hw, SWSM,
415 swsm & ~E1000_SWSM_DRV_LOAD);
416 default:
417 break;
418 }
419}
420
421/**
422 * e1000_get_hw_control - get control of the h/w from f/w
423 * @adapter: address of board private structure
424 *
425 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
426 * For ASF and Pass Through versions of f/w this means that
427 * the driver is loaded. For AMT version (only with 82573)
428 * of the f/w this means that the network i/f is open.
429 *
430 **/
431
432static inline void
433e1000_get_hw_control(struct e1000_adapter *adapter)
434{
435 uint32_t ctrl_ext;
436 uint32_t swsm;
437 /* Let firmware know the driver has taken over */
438 switch (adapter->hw.mac_type) {
439 case e1000_82571:
440 case e1000_82572:
441 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
442 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
443 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
444 break;
445 case e1000_82573:
446 swsm = E1000_READ_REG(&adapter->hw, SWSM);
447 E1000_WRITE_REG(&adapter->hw, SWSM,
448 swsm | E1000_SWSM_DRV_LOAD);
449 break;
450 default:
451 break;
452 }
453}
454
323int 455int
324e1000_up(struct e1000_adapter *adapter) 456e1000_up(struct e1000_adapter *adapter)
325{ 457{
@@ -329,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
329 /* hardware has been reset, we need to reload some things */ 461 /* hardware has been reset, we need to reload some things */
330 462
331 /* Reset the PHY if it was previously powered down */ 463 /* Reset the PHY if it was previously powered down */
332 if(adapter->hw.media_type == e1000_media_type_copper) { 464 if (adapter->hw.media_type == e1000_media_type_copper) {
333 uint16_t mii_reg; 465 uint16_t mii_reg;
334 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 466 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
335 if(mii_reg & MII_CR_POWER_DOWN) 467 if (mii_reg & MII_CR_POWER_DOWN)
336 e1000_phy_reset(&adapter->hw); 468 e1000_phy_reset(&adapter->hw);
337 } 469 }
338 470
@@ -343,20 +475,26 @@ e1000_up(struct e1000_adapter *adapter)
343 e1000_configure_tx(adapter); 475 e1000_configure_tx(adapter);
344 e1000_setup_rctl(adapter); 476 e1000_setup_rctl(adapter);
345 e1000_configure_rx(adapter); 477 e1000_configure_rx(adapter);
346 for (i = 0; i < adapter->num_queues; i++) 478 /* call E1000_DESC_UNUSED which always leaves
347 adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); 479 * at least 1 descriptor unused to make sure
480 * next_to_use != next_to_clean */
481 for (i = 0; i < adapter->num_rx_queues; i++) {
482 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
483 adapter->alloc_rx_buf(adapter, ring,
484 E1000_DESC_UNUSED(ring));
485 }
348 486
349#ifdef CONFIG_PCI_MSI 487#ifdef CONFIG_PCI_MSI
350 if(adapter->hw.mac_type > e1000_82547_rev_2) { 488 if (adapter->hw.mac_type > e1000_82547_rev_2) {
351 adapter->have_msi = TRUE; 489 adapter->have_msi = TRUE;
352 if((err = pci_enable_msi(adapter->pdev))) { 490 if ((err = pci_enable_msi(adapter->pdev))) {
353 DPRINTK(PROBE, ERR, 491 DPRINTK(PROBE, ERR,
354 "Unable to allocate MSI interrupt Error: %d\n", err); 492 "Unable to allocate MSI interrupt Error: %d\n", err);
355 adapter->have_msi = FALSE; 493 adapter->have_msi = FALSE;
356 } 494 }
357 } 495 }
358#endif 496#endif
359 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 497 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
360 SA_SHIRQ | SA_SAMPLE_RANDOM, 498 SA_SHIRQ | SA_SAMPLE_RANDOM,
361 netdev->name, netdev))) { 499 netdev->name, netdev))) {
362 DPRINTK(PROBE, ERR, 500 DPRINTK(PROBE, ERR,
@@ -364,6 +502,12 @@ e1000_up(struct e1000_adapter *adapter)
364 return err; 502 return err;
365 } 503 }
366 504
505#ifdef CONFIG_E1000_MQ
506 e1000_setup_queue_mapping(adapter);
507#endif
508
509 adapter->tx_queue_len = netdev->tx_queue_len;
510
367 mod_timer(&adapter->watchdog_timer, jiffies); 511 mod_timer(&adapter->watchdog_timer, jiffies);
368 512
369#ifdef CONFIG_E1000_NAPI 513#ifdef CONFIG_E1000_NAPI
@@ -378,6 +522,8 @@ void
378e1000_down(struct e1000_adapter *adapter) 522e1000_down(struct e1000_adapter *adapter)
379{ 523{
380 struct net_device *netdev = adapter->netdev; 524 struct net_device *netdev = adapter->netdev;
525 boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
526 e1000_check_mng_mode(&adapter->hw);
381 527
382 e1000_irq_disable(adapter); 528 e1000_irq_disable(adapter);
383#ifdef CONFIG_E1000_MQ 529#ifdef CONFIG_E1000_MQ
@@ -385,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
385#endif 531#endif
386 free_irq(adapter->pdev->irq, netdev); 532 free_irq(adapter->pdev->irq, netdev);
387#ifdef CONFIG_PCI_MSI 533#ifdef CONFIG_PCI_MSI
388 if(adapter->hw.mac_type > e1000_82547_rev_2 && 534 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
389 adapter->have_msi == TRUE) 535 adapter->have_msi == TRUE)
390 pci_disable_msi(adapter->pdev); 536 pci_disable_msi(adapter->pdev);
391#endif 537#endif
@@ -396,6 +542,7 @@ e1000_down(struct e1000_adapter *adapter)
396#ifdef CONFIG_E1000_NAPI 542#ifdef CONFIG_E1000_NAPI
397 netif_poll_disable(netdev); 543 netif_poll_disable(netdev);
398#endif 544#endif
545 netdev->tx_queue_len = adapter->tx_queue_len;
399 adapter->link_speed = 0; 546 adapter->link_speed = 0;
400 adapter->link_duplex = 0; 547 adapter->link_duplex = 0;
401 netif_carrier_off(netdev); 548 netif_carrier_off(netdev);
@@ -405,12 +552,16 @@ e1000_down(struct e1000_adapter *adapter)
405 e1000_clean_all_tx_rings(adapter); 552 e1000_clean_all_tx_rings(adapter);
406 e1000_clean_all_rx_rings(adapter); 553 e1000_clean_all_rx_rings(adapter);
407 554
408 /* If WoL is not enabled and management mode is not IAMT 555 /* Power down the PHY so no link is implied when interface is down *
409 * Power down the PHY so no link is implied when interface is down */ 556 * The PHY cannot be powered down if any of the following is TRUE *
410 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 557 * (a) WoL is enabled
558 * (b) AMT is active
559 * (c) SoL/IDER session is active */
560 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
411 adapter->hw.media_type == e1000_media_type_copper && 561 adapter->hw.media_type == e1000_media_type_copper &&
412 !e1000_check_mng_mode(&adapter->hw) && 562 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
413 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { 563 !mng_mode_enabled &&
564 !e1000_check_phy_reset_block(&adapter->hw)) {
414 uint16_t mii_reg; 565 uint16_t mii_reg;
415 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 566 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
416 mii_reg |= MII_CR_POWER_DOWN; 567 mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +573,8 @@ e1000_down(struct e1000_adapter *adapter)
422void 573void
423e1000_reset(struct e1000_adapter *adapter) 574e1000_reset(struct e1000_adapter *adapter)
424{ 575{
425 struct net_device *netdev = adapter->netdev;
426 uint32_t pba, manc; 576 uint32_t pba, manc;
427 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 577 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
428 uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
429 578
430 /* Repartition Pba for greater than 9k mtu 579 /* Repartition Pba for greater than 9k mtu
431 * To take effect CTRL.RST is required. 580 * To take effect CTRL.RST is required.
@@ -448,19 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
448 break; 597 break;
449 } 598 }
450 599
451 if((adapter->hw.mac_type != e1000_82573) && 600 if ((adapter->hw.mac_type != e1000_82573) &&
452 (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { 601 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
453 pba -= 8; /* allocate more FIFO for Tx */ 602 pba -= 8; /* allocate more FIFO for Tx */
454 /* send an XOFF when there is enough space in the
455 * Rx FIFO to hold one extra full size Rx packet
456 */
457 fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
458 ETHERNET_FCS_SIZE + 1;
459 fc_low_water_mark = fc_high_water_mark + 8;
460 }
461 603
462 604
463 if(adapter->hw.mac_type == e1000_82547) { 605 if (adapter->hw.mac_type == e1000_82547) {
464 adapter->tx_fifo_head = 0; 606 adapter->tx_fifo_head = 0;
465 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 607 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
466 adapter->tx_fifo_size = 608 adapter->tx_fifo_size =
@@ -471,19 +613,21 @@ e1000_reset(struct e1000_adapter *adapter)
471 E1000_WRITE_REG(&adapter->hw, PBA, pba); 613 E1000_WRITE_REG(&adapter->hw, PBA, pba);
472 614
473 /* flow control settings */ 615 /* flow control settings */
474 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 616 /* Set the FC high water mark to 90% of the FIFO size.
475 fc_high_water_mark; 617 * Required to clear last 3 LSB */
476 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 618 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
477 fc_low_water_mark; 619
620 adapter->hw.fc_high_water = fc_high_water_mark;
621 adapter->hw.fc_low_water = fc_high_water_mark - 8;
478 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 622 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
479 adapter->hw.fc_send_xon = 1; 623 adapter->hw.fc_send_xon = 1;
480 adapter->hw.fc = adapter->hw.original_fc; 624 adapter->hw.fc = adapter->hw.original_fc;
481 625
482 /* Allow time for pending master requests to run */ 626 /* Allow time for pending master requests to run */
483 e1000_reset_hw(&adapter->hw); 627 e1000_reset_hw(&adapter->hw);
484 if(adapter->hw.mac_type >= e1000_82544) 628 if (adapter->hw.mac_type >= e1000_82544)
485 E1000_WRITE_REG(&adapter->hw, WUC, 0); 629 E1000_WRITE_REG(&adapter->hw, WUC, 0);
486 if(e1000_init_hw(&adapter->hw)) 630 if (e1000_init_hw(&adapter->hw))
487 DPRINTK(PROBE, ERR, "Hardware Error\n"); 631 DPRINTK(PROBE, ERR, "Hardware Error\n");
488 e1000_update_mng_vlan(adapter); 632 e1000_update_mng_vlan(adapter);
489 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 633 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -517,33 +661,31 @@ e1000_probe(struct pci_dev *pdev,
517 struct net_device *netdev; 661 struct net_device *netdev;
518 struct e1000_adapter *adapter; 662 struct e1000_adapter *adapter;
519 unsigned long mmio_start, mmio_len; 663 unsigned long mmio_start, mmio_len;
520 uint32_t ctrl_ext;
521 uint32_t swsm;
522 664
523 static int cards_found = 0; 665 static int cards_found = 0;
524 int i, err, pci_using_dac; 666 int i, err, pci_using_dac;
525 uint16_t eeprom_data; 667 uint16_t eeprom_data;
526 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
527 if((err = pci_enable_device(pdev))) 669 if ((err = pci_enable_device(pdev)))
528 return err; 670 return err;
529 671
530 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 672 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
531 pci_using_dac = 1; 673 pci_using_dac = 1;
532 } else { 674 } else {
533 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 675 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
534 E1000_ERR("No usable DMA configuration, aborting\n"); 676 E1000_ERR("No usable DMA configuration, aborting\n");
535 return err; 677 return err;
536 } 678 }
537 pci_using_dac = 0; 679 pci_using_dac = 0;
538 } 680 }
539 681
540 if((err = pci_request_regions(pdev, e1000_driver_name))) 682 if ((err = pci_request_regions(pdev, e1000_driver_name)))
541 return err; 683 return err;
542 684
543 pci_set_master(pdev); 685 pci_set_master(pdev);
544 686
545 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 687 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
546 if(!netdev) { 688 if (!netdev) {
547 err = -ENOMEM; 689 err = -ENOMEM;
548 goto err_alloc_etherdev; 690 goto err_alloc_etherdev;
549 } 691 }
@@ -562,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
562 mmio_len = pci_resource_len(pdev, BAR_0); 704 mmio_len = pci_resource_len(pdev, BAR_0);
563 705
564 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 706 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
565 if(!adapter->hw.hw_addr) { 707 if (!adapter->hw.hw_addr) {
566 err = -EIO; 708 err = -EIO;
567 goto err_ioremap; 709 goto err_ioremap;
568 } 710 }
569 711
570 for(i = BAR_1; i <= BAR_5; i++) { 712 for (i = BAR_1; i <= BAR_5; i++) {
571 if(pci_resource_len(pdev, i) == 0) 713 if (pci_resource_len(pdev, i) == 0)
572 continue; 714 continue;
573 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 715 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
574 adapter->hw.io_base = pci_resource_start(pdev, i); 716 adapter->hw.io_base = pci_resource_start(pdev, i);
575 break; 717 break;
576 } 718 }
@@ -607,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
607 749
608 /* setup the private structure */ 750 /* setup the private structure */
609 751
610 if((err = e1000_sw_init(adapter))) 752 if ((err = e1000_sw_init(adapter)))
611 goto err_sw_init; 753 goto err_sw_init;
612 754
613 if((err = e1000_check_phy_reset_block(&adapter->hw))) 755 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
614 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
615 757
616 if(adapter->hw.mac_type >= e1000_82543) { 758 if (adapter->hw.mac_type >= e1000_82543) {
617 netdev->features = NETIF_F_SG | 759 netdev->features = NETIF_F_SG |
618 NETIF_F_HW_CSUM | 760 NETIF_F_HW_CSUM |
619 NETIF_F_HW_VLAN_TX | 761 NETIF_F_HW_VLAN_TX |
@@ -622,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
622 } 764 }
623 765
624#ifdef NETIF_F_TSO 766#ifdef NETIF_F_TSO
625 if((adapter->hw.mac_type >= e1000_82544) && 767 if ((adapter->hw.mac_type >= e1000_82544) &&
626 (adapter->hw.mac_type != e1000_82547)) 768 (adapter->hw.mac_type != e1000_82547))
627 netdev->features |= NETIF_F_TSO; 769 netdev->features |= NETIF_F_TSO;
628 770
629#ifdef NETIF_F_TSO_IPV6 771#ifdef NETIF_F_TSO_IPV6
630 if(adapter->hw.mac_type > e1000_82547_rev_2) 772 if (adapter->hw.mac_type > e1000_82547_rev_2)
631 netdev->features |= NETIF_F_TSO_IPV6; 773 netdev->features |= NETIF_F_TSO_IPV6;
632#endif 774#endif
633#endif 775#endif
634 if(pci_using_dac) 776 if (pci_using_dac)
635 netdev->features |= NETIF_F_HIGHDMA; 777 netdev->features |= NETIF_F_HIGHDMA;
636 778
637 /* hard_start_xmit is safe against parallel locking */ 779 /* hard_start_xmit is safe against parallel locking */
@@ -639,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
639 781
640 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 782 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
641 783
642 /* before reading the EEPROM, reset the controller to 784 /* before reading the EEPROM, reset the controller to
643 * put the device in a known good starting state */ 785 * put the device in a known good starting state */
644 786
645 e1000_reset_hw(&adapter->hw); 787 e1000_reset_hw(&adapter->hw);
646 788
647 /* make sure the EEPROM is good */ 789 /* make sure the EEPROM is good */
648 790
649 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 791 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
650 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 792 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
651 err = -EIO; 793 err = -EIO;
652 goto err_eeprom; 794 goto err_eeprom;
@@ -654,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
654 796
655 /* copy the MAC address out of the EEPROM */ 797 /* copy the MAC address out of the EEPROM */
656 798
657 if(e1000_read_mac_addr(&adapter->hw)) 799 if (e1000_read_mac_addr(&adapter->hw))
658 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 800 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
659 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 801 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
660 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 802 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
661 803
662 if(!is_valid_ether_addr(netdev->perm_addr)) { 804 if (!is_valid_ether_addr(netdev->perm_addr)) {
663 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 805 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
664 err = -EIO; 806 err = -EIO;
665 goto err_eeprom; 807 goto err_eeprom;
@@ -699,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
699 * enable the ACPI Magic Packet filter 841 * enable the ACPI Magic Packet filter
700 */ 842 */
701 843
702 switch(adapter->hw.mac_type) { 844 switch (adapter->hw.mac_type) {
703 case e1000_82542_rev2_0: 845 case e1000_82542_rev2_0:
704 case e1000_82542_rev2_1: 846 case e1000_82542_rev2_1:
705 case e1000_82543: 847 case e1000_82543:
@@ -712,8 +854,7 @@ e1000_probe(struct pci_dev *pdev,
712 case e1000_82546: 854 case e1000_82546:
713 case e1000_82546_rev_3: 855 case e1000_82546_rev_3:
714 case e1000_82571: 856 case e1000_82571:
715 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) 857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
716 && (adapter->hw.media_type == e1000_media_type_copper)) {
717 e1000_read_eeprom(&adapter->hw, 858 e1000_read_eeprom(&adapter->hw,
718 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
719 break; 860 break;
@@ -724,31 +865,42 @@ e1000_probe(struct pci_dev *pdev,
724 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 865 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
725 break; 866 break;
726 } 867 }
727 if(eeprom_data & eeprom_apme_mask) 868 if (eeprom_data & eeprom_apme_mask)
728 adapter->wol |= E1000_WUFC_MAG; 869 adapter->wol |= E1000_WUFC_MAG;
729 870
871 /* print bus type/speed/width info */
872 {
873 struct e1000_hw *hw = &adapter->hw;
874 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
875 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
876 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
877 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
878 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
879 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
880 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
881 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
882 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
883 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
884 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
885 "32-bit"));
886 }
887
888 for (i = 0; i < 6; i++)
889 printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
890
730 /* reset the hardware with the new settings */ 891 /* reset the hardware with the new settings */
731 e1000_reset(adapter); 892 e1000_reset(adapter);
732 893
733 /* Let firmware know the driver has taken over */ 894 /* If the controller is 82573 and f/w is AMT, do not set
734 switch(adapter->hw.mac_type) { 895 * DRV_LOAD until the interface is up. For all other cases,
735 case e1000_82571: 896 * let the f/w know that the h/w is now under the control
736 case e1000_82572: 897 * of the driver. */
737 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 898 if (adapter->hw.mac_type != e1000_82573 ||
738 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 899 !e1000_check_mng_mode(&adapter->hw))
739 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 900 e1000_get_hw_control(adapter);
740 break;
741 case e1000_82573:
742 swsm = E1000_READ_REG(&adapter->hw, SWSM);
743 E1000_WRITE_REG(&adapter->hw, SWSM,
744 swsm | E1000_SWSM_DRV_LOAD);
745 break;
746 default:
747 break;
748 }
749 901
750 strcpy(netdev->name, "eth%d"); 902 strcpy(netdev->name, "eth%d");
751 if((err = register_netdev(netdev))) 903 if ((err = register_netdev(netdev)))
752 goto err_register; 904 goto err_register;
753 905
754 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 906 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -782,47 +934,33 @@ e1000_remove(struct pci_dev *pdev)
782{ 934{
783 struct net_device *netdev = pci_get_drvdata(pdev); 935 struct net_device *netdev = pci_get_drvdata(pdev);
784 struct e1000_adapter *adapter = netdev_priv(netdev); 936 struct e1000_adapter *adapter = netdev_priv(netdev);
785 uint32_t ctrl_ext; 937 uint32_t manc;
786 uint32_t manc, swsm;
787#ifdef CONFIG_E1000_NAPI 938#ifdef CONFIG_E1000_NAPI
788 int i; 939 int i;
789#endif 940#endif
790 941
791 flush_scheduled_work(); 942 flush_scheduled_work();
792 943
793 if(adapter->hw.mac_type >= e1000_82540 && 944 if (adapter->hw.mac_type >= e1000_82540 &&
794 adapter->hw.media_type == e1000_media_type_copper) { 945 adapter->hw.media_type == e1000_media_type_copper) {
795 manc = E1000_READ_REG(&adapter->hw, MANC); 946 manc = E1000_READ_REG(&adapter->hw, MANC);
796 if(manc & E1000_MANC_SMBUS_EN) { 947 if (manc & E1000_MANC_SMBUS_EN) {
797 manc |= E1000_MANC_ARP_EN; 948 manc |= E1000_MANC_ARP_EN;
798 E1000_WRITE_REG(&adapter->hw, MANC, manc); 949 E1000_WRITE_REG(&adapter->hw, MANC, manc);
799 } 950 }
800 } 951 }
801 952
802 switch(adapter->hw.mac_type) { 953 /* Release control of h/w to f/w. If f/w is AMT enabled, this
803 case e1000_82571: 954 * would have already happened in close and is redundant. */
804 case e1000_82572: 955 e1000_release_hw_control(adapter);
805 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
806 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
807 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
808 break;
809 case e1000_82573:
810 swsm = E1000_READ_REG(&adapter->hw, SWSM);
811 E1000_WRITE_REG(&adapter->hw, SWSM,
812 swsm & ~E1000_SWSM_DRV_LOAD);
813 break;
814
815 default:
816 break;
817 }
818 956
819 unregister_netdev(netdev); 957 unregister_netdev(netdev);
820#ifdef CONFIG_E1000_NAPI 958#ifdef CONFIG_E1000_NAPI
821 for (i = 0; i < adapter->num_queues; i++) 959 for (i = 0; i < adapter->num_rx_queues; i++)
822 __dev_put(&adapter->polling_netdev[i]); 960 __dev_put(&adapter->polling_netdev[i]);
823#endif 961#endif
824 962
825 if(!e1000_check_phy_reset_block(&adapter->hw)) 963 if (!e1000_check_phy_reset_block(&adapter->hw))
826 e1000_phy_hw_reset(&adapter->hw); 964 e1000_phy_hw_reset(&adapter->hw);
827 965
828 kfree(adapter->tx_ring); 966 kfree(adapter->tx_ring);
@@ -881,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
881 1019
882 /* identify the MAC */ 1020 /* identify the MAC */
883 1021
884 if(e1000_set_mac_type(hw)) { 1022 if (e1000_set_mac_type(hw)) {
885 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1023 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
886 return -EIO; 1024 return -EIO;
887 } 1025 }
888 1026
889 /* initialize eeprom parameters */ 1027 /* initialize eeprom parameters */
890 1028
891 if(e1000_init_eeprom_params(hw)) { 1029 if (e1000_init_eeprom_params(hw)) {
892 E1000_ERR("EEPROM initialization failed\n"); 1030 E1000_ERR("EEPROM initialization failed\n");
893 return -EIO; 1031 return -EIO;
894 } 1032 }
895 1033
896 switch(hw->mac_type) { 1034 switch (hw->mac_type) {
897 default: 1035 default:
898 break; 1036 break;
899 case e1000_82541: 1037 case e1000_82541:
@@ -912,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
912 1050
913 /* Copper options */ 1051 /* Copper options */
914 1052
915 if(hw->media_type == e1000_media_type_copper) { 1053 if (hw->media_type == e1000_media_type_copper) {
916 hw->mdix = AUTO_ALL_MODES; 1054 hw->mdix = AUTO_ALL_MODES;
917 hw->disable_polarity_correction = FALSE; 1055 hw->disable_polarity_correction = FALSE;
918 hw->master_slave = E1000_MASTER_SLAVE; 1056 hw->master_slave = E1000_MASTER_SLAVE;
@@ -923,15 +1061,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
923 switch (hw->mac_type) { 1061 switch (hw->mac_type) {
924 case e1000_82571: 1062 case e1000_82571:
925 case e1000_82572: 1063 case e1000_82572:
926 adapter->num_queues = 2; 1064 /* These controllers support 2 tx queues, but with a single
1065 * qdisc implementation, multiple tx queues aren't quite as
1066 * interesting. If we can find a logical way of mapping
1067 * flows to a queue, then perhaps we can up the num_tx_queue
1068 * count back to its default. Until then, we run the risk of
1069 * terrible performance due to SACK overload. */
1070 adapter->num_tx_queues = 1;
1071 adapter->num_rx_queues = 2;
927 break; 1072 break;
928 default: 1073 default:
929 adapter->num_queues = 1; 1074 adapter->num_tx_queues = 1;
1075 adapter->num_rx_queues = 1;
930 break; 1076 break;
931 } 1077 }
932 adapter->num_queues = min(adapter->num_queues, num_online_cpus()); 1078 adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
1079 adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
1080 DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
1081 adapter->num_rx_queues,
1082 ((adapter->num_rx_queues == 1)
1083 ? ((num_online_cpus() > 1)
1084 ? "(due to unsupported feature in current adapter)"
1085 : "(due to unsupported system configuration)")
1086 : ""));
1087 DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
1088 adapter->num_tx_queues);
933#else 1089#else
934 adapter->num_queues = 1; 1090 adapter->num_tx_queues = 1;
1091 adapter->num_rx_queues = 1;
935#endif 1092#endif
936 1093
937 if (e1000_alloc_queues(adapter)) { 1094 if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1097,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
940 } 1097 }
941 1098
942#ifdef CONFIG_E1000_NAPI 1099#ifdef CONFIG_E1000_NAPI
943 for (i = 0; i < adapter->num_queues; i++) { 1100 for (i = 0; i < adapter->num_rx_queues; i++) {
944 adapter->polling_netdev[i].priv = adapter; 1101 adapter->polling_netdev[i].priv = adapter;
945 adapter->polling_netdev[i].poll = &e1000_clean; 1102 adapter->polling_netdev[i].poll = &e1000_clean;
946 adapter->polling_netdev[i].weight = 64; 1103 adapter->polling_netdev[i].weight = 64;
947 dev_hold(&adapter->polling_netdev[i]); 1104 dev_hold(&adapter->polling_netdev[i]);
948 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); 1105 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
949 } 1106 }
950#endif 1107 spin_lock_init(&adapter->tx_queue_lock);
951
952#ifdef CONFIG_E1000_MQ
953 e1000_setup_queue_mapping(adapter);
954#endif 1108#endif
955 1109
956 atomic_set(&adapter->irq_sem, 1); 1110 atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1127,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
973{ 1127{
974 int size; 1128 int size;
975 1129
976 size = sizeof(struct e1000_tx_ring) * adapter->num_queues; 1130 size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
977 adapter->tx_ring = kmalloc(size, GFP_KERNEL); 1131 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
978 if (!adapter->tx_ring) 1132 if (!adapter->tx_ring)
979 return -ENOMEM; 1133 return -ENOMEM;
980 memset(adapter->tx_ring, 0, size); 1134 memset(adapter->tx_ring, 0, size);
981 1135
982 size = sizeof(struct e1000_rx_ring) * adapter->num_queues; 1136 size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
983 adapter->rx_ring = kmalloc(size, GFP_KERNEL); 1137 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
984 if (!adapter->rx_ring) { 1138 if (!adapter->rx_ring) {
985 kfree(adapter->tx_ring); 1139 kfree(adapter->tx_ring);
@@ -988,7 +1142,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
988 memset(adapter->rx_ring, 0, size); 1142 memset(adapter->rx_ring, 0, size);
989 1143
990#ifdef CONFIG_E1000_NAPI 1144#ifdef CONFIG_E1000_NAPI
991 size = sizeof(struct net_device) * adapter->num_queues; 1145 size = sizeof(struct net_device) * adapter->num_rx_queues;
992 adapter->polling_netdev = kmalloc(size, GFP_KERNEL); 1146 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
993 if (!adapter->polling_netdev) { 1147 if (!adapter->polling_netdev) {
994 kfree(adapter->tx_ring); 1148 kfree(adapter->tx_ring);
@@ -998,6 +1152,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
998 memset(adapter->polling_netdev, 0, size); 1152 memset(adapter->polling_netdev, 0, size);
999#endif 1153#endif
1000 1154
1155#ifdef CONFIG_E1000_MQ
1156 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1157 adapter->rx_sched_call_data.info = adapter->netdev;
1158
1159 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1160 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1161#endif
1162
1001 return E1000_SUCCESS; 1163 return E1000_SUCCESS;
1002} 1164}
1003 1165
@@ -1017,14 +1179,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1017 lock_cpu_hotplug(); 1179 lock_cpu_hotplug();
1018 i = 0; 1180 i = 0;
1019 for_each_online_cpu(cpu) { 1181 for_each_online_cpu(cpu) {
1020 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; 1182 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
1021 /* This is incomplete because we'd like to assign separate 1183 /* This is incomplete because we'd like to assign separate
1022 * physical cpus to these netdev polling structures and 1184 * physical cpus to these netdev polling structures and
1023 * avoid saturating a subset of cpus. 1185 * avoid saturating a subset of cpus.
1024 */ 1186 */
1025 if (i < adapter->num_queues) { 1187 if (i < adapter->num_rx_queues) {
1026 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; 1188 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1027 adapter->cpu_for_queue[i] = cpu; 1189 adapter->rx_ring[i].cpu = cpu;
1190 cpu_set(cpu, adapter->cpumask);
1028 } else 1191 } else
1029 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; 1192 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1030 1193
@@ -1063,14 +1226,20 @@ e1000_open(struct net_device *netdev)
1063 if ((err = e1000_setup_all_rx_resources(adapter))) 1226 if ((err = e1000_setup_all_rx_resources(adapter)))
1064 goto err_setup_rx; 1227 goto err_setup_rx;
1065 1228
1066 if((err = e1000_up(adapter))) 1229 if ((err = e1000_up(adapter)))
1067 goto err_up; 1230 goto err_up;
1068 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1231 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1069 if((adapter->hw.mng_cookie.status & 1232 if ((adapter->hw.mng_cookie.status &
1070 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1233 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1071 e1000_update_mng_vlan(adapter); 1234 e1000_update_mng_vlan(adapter);
1072 } 1235 }
1073 1236
1237 /* If AMT is enabled, let the firmware know that the network
1238 * interface is now open */
1239 if (adapter->hw.mac_type == e1000_82573 &&
1240 e1000_check_mng_mode(&adapter->hw))
1241 e1000_get_hw_control(adapter);
1242
1074 return E1000_SUCCESS; 1243 return E1000_SUCCESS;
1075 1244
1076err_up: 1245err_up:
@@ -1105,10 +1274,17 @@ e1000_close(struct net_device *netdev)
1105 e1000_free_all_tx_resources(adapter); 1274 e1000_free_all_tx_resources(adapter);
1106 e1000_free_all_rx_resources(adapter); 1275 e1000_free_all_rx_resources(adapter);
1107 1276
1108 if((adapter->hw.mng_cookie.status & 1277 if ((adapter->hw.mng_cookie.status &
1109 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1278 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1110 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1279 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1111 } 1280 }
1281
1282 /* If AMT is enabled, let the firmware know that the network
1283 * interface is now closed */
1284 if (adapter->hw.mac_type == e1000_82573 &&
1285 e1000_check_mng_mode(&adapter->hw))
1286 e1000_release_hw_control(adapter);
1287
1112 return 0; 1288 return 0;
1113} 1289}
1114 1290
@@ -1153,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1153 size = sizeof(struct e1000_buffer) * txdr->count; 1329 size = sizeof(struct e1000_buffer) * txdr->count;
1154 1330
1155 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1331 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1156 if(!txdr->buffer_info) { 1332 if (!txdr->buffer_info) {
1157 DPRINTK(PROBE, ERR, 1333 DPRINTK(PROBE, ERR,
1158 "Unable to allocate memory for the transmit descriptor ring\n"); 1334 "Unable to allocate memory for the transmit descriptor ring\n");
1159 return -ENOMEM; 1335 return -ENOMEM;
@@ -1166,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1166 E1000_ROUNDUP(txdr->size, 4096); 1342 E1000_ROUNDUP(txdr->size, 4096);
1167 1343
1168 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1344 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1169 if(!txdr->desc) { 1345 if (!txdr->desc) {
1170setup_tx_desc_die: 1346setup_tx_desc_die:
1171 vfree(txdr->buffer_info); 1347 vfree(txdr->buffer_info);
1172 DPRINTK(PROBE, ERR, 1348 DPRINTK(PROBE, ERR,
@@ -1182,8 +1358,8 @@ setup_tx_desc_die:
1182 "at %p\n", txdr->size, txdr->desc); 1358 "at %p\n", txdr->size, txdr->desc);
1183 /* Try again, without freeing the previous */ 1359 /* Try again, without freeing the previous */
1184 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1360 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1185 if(!txdr->desc) {
1186 /* Failed allocation, critical failure */ 1361 /* Failed allocation, critical failure */
1362 if (!txdr->desc) {
1187 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1363 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1188 goto setup_tx_desc_die; 1364 goto setup_tx_desc_die;
1189 } 1365 }
@@ -1229,7 +1405,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1229{ 1405{
1230 int i, err = 0; 1406 int i, err = 0;
1231 1407
1232 for (i = 0; i < adapter->num_queues; i++) { 1408 for (i = 0; i < adapter->num_tx_queues; i++) {
1233 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1409 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1234 if (err) { 1410 if (err) {
1235 DPRINTK(PROBE, ERR, 1411 DPRINTK(PROBE, ERR,
@@ -1254,10 +1430,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1254 uint64_t tdba; 1430 uint64_t tdba;
1255 struct e1000_hw *hw = &adapter->hw; 1431 struct e1000_hw *hw = &adapter->hw;
1256 uint32_t tdlen, tctl, tipg, tarc; 1432 uint32_t tdlen, tctl, tipg, tarc;
1433 uint32_t ipgr1, ipgr2;
1257 1434
1258 /* Setup the HW Tx Head and Tail descriptor pointers */ 1435 /* Setup the HW Tx Head and Tail descriptor pointers */
1259 1436
1260 switch (adapter->num_queues) { 1437 switch (adapter->num_tx_queues) {
1261 case 2: 1438 case 2:
1262 tdba = adapter->tx_ring[1].dma; 1439 tdba = adapter->tx_ring[1].dma;
1263 tdlen = adapter->tx_ring[1].count * 1440 tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1464,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1287 1464
1288 /* Set the default values for the Tx Inter Packet Gap timer */ 1465 /* Set the default values for the Tx Inter Packet Gap timer */
1289 1466
1467 if (hw->media_type == e1000_media_type_fiber ||
1468 hw->media_type == e1000_media_type_internal_serdes)
1469 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1470 else
1471 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1472
1290 switch (hw->mac_type) { 1473 switch (hw->mac_type) {
1291 case e1000_82542_rev2_0: 1474 case e1000_82542_rev2_0:
1292 case e1000_82542_rev2_1: 1475 case e1000_82542_rev2_1:
1293 tipg = DEFAULT_82542_TIPG_IPGT; 1476 tipg = DEFAULT_82542_TIPG_IPGT;
1294 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1477 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1295 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1478 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1296 break; 1479 break;
1297 default: 1480 default:
1298 if (hw->media_type == e1000_media_type_fiber || 1481 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1299 hw->media_type == e1000_media_type_internal_serdes) 1482 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1300 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1483 break;
1301 else
1302 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1303 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1304 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1305 } 1484 }
1485 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1486 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1306 E1000_WRITE_REG(hw, TIPG, tipg); 1487 E1000_WRITE_REG(hw, TIPG, tipg);
1307 1488
1308 /* Set the Tx Interrupt Delay register */ 1489 /* Set the Tx Interrupt Delay register */
@@ -1378,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1378 1559
1379 size = sizeof(struct e1000_ps_page) * rxdr->count; 1560 size = sizeof(struct e1000_ps_page) * rxdr->count;
1380 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1561 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1381 if(!rxdr->ps_page) { 1562 if (!rxdr->ps_page) {
1382 vfree(rxdr->buffer_info); 1563 vfree(rxdr->buffer_info);
1383 DPRINTK(PROBE, ERR, 1564 DPRINTK(PROBE, ERR,
1384 "Unable to allocate memory for the receive descriptor ring\n"); 1565 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1388,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1388 1569
1389 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1570 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1390 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1571 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1391 if(!rxdr->ps_page_dma) { 1572 if (!rxdr->ps_page_dma) {
1392 vfree(rxdr->buffer_info); 1573 vfree(rxdr->buffer_info);
1393 kfree(rxdr->ps_page); 1574 kfree(rxdr->ps_page);
1394 DPRINTK(PROBE, ERR, 1575 DPRINTK(PROBE, ERR,
@@ -1397,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1397 } 1578 }
1398 memset(rxdr->ps_page_dma, 0, size); 1579 memset(rxdr->ps_page_dma, 0, size);
1399 1580
1400 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1581 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1401 desc_len = sizeof(struct e1000_rx_desc); 1582 desc_len = sizeof(struct e1000_rx_desc);
1402 else 1583 else
1403 desc_len = sizeof(union e1000_rx_desc_packet_split); 1584 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1454,6 +1635,8 @@ setup_rx_desc_die:
1454 1635
1455 rxdr->next_to_clean = 0; 1636 rxdr->next_to_clean = 0;
1456 rxdr->next_to_use = 0; 1637 rxdr->next_to_use = 0;
1638 rxdr->rx_skb_top = NULL;
1639 rxdr->rx_skb_prev = NULL;
1457 1640
1458 return 0; 1641 return 0;
1459} 1642}
@@ -1475,7 +1658,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1475{ 1658{
1476 int i, err = 0; 1659 int i, err = 0;
1477 1660
1478 for (i = 0; i < adapter->num_queues; i++) { 1661 for (i = 0; i < adapter->num_rx_queues; i++) {
1479 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1662 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1480 if (err) { 1663 if (err) {
1481 DPRINTK(PROBE, ERR, 1664 DPRINTK(PROBE, ERR,
@@ -1498,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1498{ 1681{
1499 uint32_t rctl, rfctl; 1682 uint32_t rctl, rfctl;
1500 uint32_t psrctl = 0; 1683 uint32_t psrctl = 0;
1501#ifdef CONFIG_E1000_PACKET_SPLIT 1684#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1502 uint32_t pages = 0; 1685 uint32_t pages = 0;
1503#endif 1686#endif
1504 1687
@@ -1510,7 +1693,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1510 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1693 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1511 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1694 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1512 1695
1513 if(adapter->hw.tbi_compatibility_on == 1) 1696 if (adapter->hw.mac_type > e1000_82543)
1697 rctl |= E1000_RCTL_SECRC;
1698
1699 if (adapter->hw.tbi_compatibility_on == 1)
1514 rctl |= E1000_RCTL_SBP; 1700 rctl |= E1000_RCTL_SBP;
1515 else 1701 else
1516 rctl &= ~E1000_RCTL_SBP; 1702 rctl &= ~E1000_RCTL_SBP;
@@ -1521,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1521 rctl |= E1000_RCTL_LPE; 1707 rctl |= E1000_RCTL_LPE;
1522 1708
1523 /* Setup buffer sizes */ 1709 /* Setup buffer sizes */
1524 if(adapter->hw.mac_type >= e1000_82571) { 1710 if (adapter->hw.mac_type >= e1000_82571) {
1525 /* We can now specify buffers in 1K increments. 1711 /* We can now specify buffers in 1K increments.
1526 * BSIZE and BSEX are ignored in this case. */ 1712 * BSIZE and BSEX are ignored in this case. */
1527 rctl |= adapter->rx_buffer_len << 0x11; 1713 rctl |= adapter->rx_buffer_len << 0x11;
1528 } else { 1714 } else {
1529 rctl &= ~E1000_RCTL_SZ_4096; 1715 rctl &= ~E1000_RCTL_SZ_4096;
1530 rctl |= E1000_RCTL_BSEX; 1716 rctl &= ~E1000_RCTL_BSEX;
1531 switch (adapter->rx_buffer_len) { 1717 rctl |= E1000_RCTL_SZ_2048;
1532 case E1000_RXBUFFER_2048:
1533 default:
1534 rctl |= E1000_RCTL_SZ_2048;
1535 rctl &= ~E1000_RCTL_BSEX;
1536 break;
1537 case E1000_RXBUFFER_4096:
1538 rctl |= E1000_RCTL_SZ_4096;
1539 break;
1540 case E1000_RXBUFFER_8192:
1541 rctl |= E1000_RCTL_SZ_8192;
1542 break;
1543 case E1000_RXBUFFER_16384:
1544 rctl |= E1000_RCTL_SZ_16384;
1545 break;
1546 }
1547 } 1718 }
1548 1719
1549#ifdef CONFIG_E1000_PACKET_SPLIT 1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1550 /* 82571 and greater support packet-split where the protocol 1721 /* 82571 and greater support packet-split where the protocol
1551 * header is placed in skb->data and the packet data is 1722 * header is placed in skb->data and the packet data is
1552 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1723 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1570,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1570 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1741 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1571 1742
1572 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1743 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1573 1744
1574 psrctl |= adapter->rx_ps_bsize0 >> 1745 psrctl |= adapter->rx_ps_bsize0 >>
1575 E1000_PSRCTL_BSIZE0_SHIFT; 1746 E1000_PSRCTL_BSIZE0_SHIFT;
1576 1747
@@ -1632,22 +1803,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1632 1803
1633 if (hw->mac_type >= e1000_82540) { 1804 if (hw->mac_type >= e1000_82540) {
1634 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1805 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1635 if(adapter->itr > 1) 1806 if (adapter->itr > 1)
1636 E1000_WRITE_REG(hw, ITR, 1807 E1000_WRITE_REG(hw, ITR,
1637 1000000000 / (adapter->itr * 256)); 1808 1000000000 / (adapter->itr * 256));
1638 } 1809 }
1639 1810
1640 if (hw->mac_type >= e1000_82571) { 1811 if (hw->mac_type >= e1000_82571) {
1641 /* Reset delay timers after every interrupt */
1642 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 1812 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1813 /* Reset delay timers after every interrupt */
1643 ctrl_ext |= E1000_CTRL_EXT_CANC; 1814 ctrl_ext |= E1000_CTRL_EXT_CANC;
1815#ifdef CONFIG_E1000_NAPI
1816 /* Auto-Mask interrupts upon ICR read. */
1817 ctrl_ext |= E1000_CTRL_EXT_IAME;
1818#endif
1644 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 1819 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1820 E1000_WRITE_REG(hw, IAM, ~0);
1645 E1000_WRITE_FLUSH(hw); 1821 E1000_WRITE_FLUSH(hw);
1646 } 1822 }
1647 1823
1648 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1824 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1649 * the Base and Length of the Rx Descriptor Ring */ 1825 * the Base and Length of the Rx Descriptor Ring */
1650 switch (adapter->num_queues) { 1826 switch (adapter->num_rx_queues) {
1651#ifdef CONFIG_E1000_MQ 1827#ifdef CONFIG_E1000_MQ
1652 case 2: 1828 case 2:
1653 rdba = adapter->rx_ring[1].dma; 1829 rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1850,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1674 } 1850 }
1675 1851
1676#ifdef CONFIG_E1000_MQ 1852#ifdef CONFIG_E1000_MQ
1677 if (adapter->num_queues > 1) { 1853 if (adapter->num_rx_queues > 1) {
1678 uint32_t random[10]; 1854 uint32_t random[10];
1679 1855
1680 get_random_bytes(&random[0], 40); 1856 get_random_bytes(&random[0], 40);
@@ -1684,7 +1860,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1684 E1000_WRITE_REG(hw, RSSIM, 0); 1860 E1000_WRITE_REG(hw, RSSIM, 0);
1685 } 1861 }
1686 1862
1687 switch (adapter->num_queues) { 1863 switch (adapter->num_rx_queues) {
1688 case 2: 1864 case 2:
1689 default: 1865 default:
1690 reta = 0x00800080; 1866 reta = 0x00800080;
@@ -1716,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1716 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1892 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1717 if (hw->mac_type >= e1000_82543) { 1893 if (hw->mac_type >= e1000_82543) {
1718 rxcsum = E1000_READ_REG(hw, RXCSUM); 1894 rxcsum = E1000_READ_REG(hw, RXCSUM);
1719 if(adapter->rx_csum == TRUE) { 1895 if (adapter->rx_csum == TRUE) {
1720 rxcsum |= E1000_RXCSUM_TUOFL; 1896 rxcsum |= E1000_RXCSUM_TUOFL;
1721 1897
1722 /* Enable 82571 IPv4 payload checksum for UDP fragments 1898 /* Enable 82571 IPv4 payload checksum for UDP fragments
1723 * Must be used in conjunction with packet-split. */ 1899 * Must be used in conjunction with packet-split. */
1724 if ((hw->mac_type >= e1000_82571) && 1900 if ((hw->mac_type >= e1000_82571) &&
1725 (adapter->rx_ps_pages)) { 1901 (adapter->rx_ps_pages)) {
1726 rxcsum |= E1000_RXCSUM_IPPCSE; 1902 rxcsum |= E1000_RXCSUM_IPPCSE;
1727 } 1903 }
1728 } else { 1904 } else {
@@ -1776,7 +1952,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1776{ 1952{
1777 int i; 1953 int i;
1778 1954
1779 for (i = 0; i < adapter->num_queues; i++) 1955 for (i = 0; i < adapter->num_tx_queues; i++)
1780 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1956 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1781} 1957}
1782 1958
@@ -1784,17 +1960,15 @@ static inline void
1784e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1785 struct e1000_buffer *buffer_info) 1961 struct e1000_buffer *buffer_info)
1786{ 1962{
1787 if(buffer_info->dma) { 1963 if (buffer_info->dma) {
1788 pci_unmap_page(adapter->pdev, 1964 pci_unmap_page(adapter->pdev,
1789 buffer_info->dma, 1965 buffer_info->dma,
1790 buffer_info->length, 1966 buffer_info->length,
1791 PCI_DMA_TODEVICE); 1967 PCI_DMA_TODEVICE);
1792 buffer_info->dma = 0;
1793 } 1968 }
1794 if(buffer_info->skb) { 1969 if (buffer_info->skb)
1795 dev_kfree_skb_any(buffer_info->skb); 1970 dev_kfree_skb_any(buffer_info->skb);
1796 buffer_info->skb = NULL; 1971 memset(buffer_info, 0, sizeof(struct e1000_buffer));
1797 }
1798} 1972}
1799 1973
1800/** 1974/**
@@ -1813,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1813 1987
1814 /* Free all the Tx ring sk_buffs */ 1988 /* Free all the Tx ring sk_buffs */
1815 1989
1816 for(i = 0; i < tx_ring->count; i++) { 1990 for (i = 0; i < tx_ring->count; i++) {
1817 buffer_info = &tx_ring->buffer_info[i]; 1991 buffer_info = &tx_ring->buffer_info[i];
1818 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1992 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1819 } 1993 }
@@ -1843,7 +2017,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1843{ 2017{
1844 int i; 2018 int i;
1845 2019
1846 for (i = 0; i < adapter->num_queues; i++) 2020 for (i = 0; i < adapter->num_tx_queues; i++)
1847 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2021 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1848} 2022}
1849 2023
@@ -1887,7 +2061,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
1887{ 2061{
1888 int i; 2062 int i;
1889 2063
1890 for (i = 0; i < adapter->num_queues; i++) 2064 for (i = 0; i < adapter->num_rx_queues; i++)
1891 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2065 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
1892} 2066}
1893 2067
@@ -1909,12 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1909 unsigned int i, j; 2083 unsigned int i, j;
1910 2084
1911 /* Free all the Rx ring sk_buffs */ 2085 /* Free all the Rx ring sk_buffs */
1912 2086 for (i = 0; i < rx_ring->count; i++) {
1913 for(i = 0; i < rx_ring->count; i++) {
1914 buffer_info = &rx_ring->buffer_info[i]; 2087 buffer_info = &rx_ring->buffer_info[i];
1915 if(buffer_info->skb) { 2088 if (buffer_info->skb) {
1916 ps_page = &rx_ring->ps_page[i];
1917 ps_page_dma = &rx_ring->ps_page_dma[i];
1918 pci_unmap_single(pdev, 2089 pci_unmap_single(pdev,
1919 buffer_info->dma, 2090 buffer_info->dma,
1920 buffer_info->length, 2091 buffer_info->length,
@@ -1922,19 +2093,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1922 2093
1923 dev_kfree_skb(buffer_info->skb); 2094 dev_kfree_skb(buffer_info->skb);
1924 buffer_info->skb = NULL; 2095 buffer_info->skb = NULL;
1925
1926 for(j = 0; j < adapter->rx_ps_pages; j++) {
1927 if(!ps_page->ps_page[j]) break;
1928 pci_unmap_single(pdev,
1929 ps_page_dma->ps_page_dma[j],
1930 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1931 ps_page_dma->ps_page_dma[j] = 0;
1932 put_page(ps_page->ps_page[j]);
1933 ps_page->ps_page[j] = NULL;
1934 }
1935 } 2096 }
2097 ps_page = &rx_ring->ps_page[i];
2098 ps_page_dma = &rx_ring->ps_page_dma[i];
2099 for (j = 0; j < adapter->rx_ps_pages; j++) {
2100 if (!ps_page->ps_page[j]) break;
2101 pci_unmap_page(pdev,
2102 ps_page_dma->ps_page_dma[j],
2103 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2104 ps_page_dma->ps_page_dma[j] = 0;
2105 put_page(ps_page->ps_page[j]);
2106 ps_page->ps_page[j] = NULL;
2107 }
2108 }
2109
2110 /* there also may be some cached data in our adapter */
2111 if (rx_ring->rx_skb_top) {
2112 dev_kfree_skb(rx_ring->rx_skb_top);
2113
2114 /* rx_skb_prev will be wiped out by rx_skb_top */
2115 rx_ring->rx_skb_top = NULL;
2116 rx_ring->rx_skb_prev = NULL;
1936 } 2117 }
1937 2118
2119
1938 size = sizeof(struct e1000_buffer) * rx_ring->count; 2120 size = sizeof(struct e1000_buffer) * rx_ring->count;
1939 memset(rx_ring->buffer_info, 0, size); 2121 memset(rx_ring->buffer_info, 0, size);
1940 size = sizeof(struct e1000_ps_page) * rx_ring->count; 2122 size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2145,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
1963{ 2145{
1964 int i; 2146 int i;
1965 2147
1966 for (i = 0; i < adapter->num_queues; i++) 2148 for (i = 0; i < adapter->num_rx_queues; i++)
1967 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2149 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1968} 2150}
1969 2151
@@ -1984,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
1984 E1000_WRITE_FLUSH(&adapter->hw); 2166 E1000_WRITE_FLUSH(&adapter->hw);
1985 mdelay(5); 2167 mdelay(5);
1986 2168
1987 if(netif_running(netdev)) 2169 if (netif_running(netdev))
1988 e1000_clean_all_rx_rings(adapter); 2170 e1000_clean_all_rx_rings(adapter);
1989} 2171}
1990 2172
@@ -2000,12 +2182,14 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2000 E1000_WRITE_FLUSH(&adapter->hw); 2182 E1000_WRITE_FLUSH(&adapter->hw);
2001 mdelay(5); 2183 mdelay(5);
2002 2184
2003 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2185 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2004 e1000_pci_set_mwi(&adapter->hw); 2186 e1000_pci_set_mwi(&adapter->hw);
2005 2187
2006 if(netif_running(netdev)) { 2188 if (netif_running(netdev)) {
2189 /* No need to loop, because 82542 supports only 1 queue */
2190 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2007 e1000_configure_rx(adapter); 2191 e1000_configure_rx(adapter);
2008 e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); 2192 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2009 } 2193 }
2010} 2194}
2011 2195
@@ -2023,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2023 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_adapter *adapter = netdev_priv(netdev);
2024 struct sockaddr *addr = p; 2208 struct sockaddr *addr = p;
2025 2209
2026 if(!is_valid_ether_addr(addr->sa_data)) 2210 if (!is_valid_ether_addr(addr->sa_data))
2027 return -EADDRNOTAVAIL; 2211 return -EADDRNOTAVAIL;
2028 2212
2029 /* 82542 2.0 needs to be in reset to write receive address registers */ 2213 /* 82542 2.0 needs to be in reset to write receive address registers */
2030 2214
2031 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2215 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2032 e1000_enter_82542_rst(adapter); 2216 e1000_enter_82542_rst(adapter);
2033 2217
2034 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2042,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2042 /* activate the work around */ 2226 /* activate the work around */
2043 adapter->hw.laa_is_present = 1; 2227 adapter->hw.laa_is_present = 1;
2044 2228
2045 /* Hold a copy of the LAA in RAR[14] This is done so that 2229 /* Hold a copy of the LAA in RAR[14] This is done so that
2046 * between the time RAR[0] gets clobbered and the time it 2230 * between the time RAR[0] gets clobbered and the time it
2047 * gets fixed (in e1000_watchdog), the actual LAA is in one 2231 * gets fixed (in e1000_watchdog), the actual LAA is in one
2048 * of the RARs and no incoming packets directed to this port 2232 * of the RARs and no incoming packets directed to this port
2049 * are dropped. Eventaully the LAA will be in RAR[0] and 2233 * are dropped. Eventaully the LAA will be in RAR[0] and
2050 * RAR[14] */ 2234 * RAR[14] */
2051 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2235 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2052 E1000_RAR_ENTRIES - 1); 2236 E1000_RAR_ENTRIES - 1);
2053 } 2237 }
2054 2238
2055 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2239 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2056 e1000_leave_82542_rst(adapter); 2240 e1000_leave_82542_rst(adapter);
2057 2241
2058 return 0; 2242 return 0;
@@ -2086,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
2086 2270
2087 rctl = E1000_READ_REG(hw, RCTL); 2271 rctl = E1000_READ_REG(hw, RCTL);
2088 2272
2089 if(netdev->flags & IFF_PROMISC) { 2273 if (netdev->flags & IFF_PROMISC) {
2090 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2274 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2091 } else if(netdev->flags & IFF_ALLMULTI) { 2275 } else if (netdev->flags & IFF_ALLMULTI) {
2092 rctl |= E1000_RCTL_MPE; 2276 rctl |= E1000_RCTL_MPE;
2093 rctl &= ~E1000_RCTL_UPE; 2277 rctl &= ~E1000_RCTL_UPE;
2094 } else { 2278 } else {
@@ -2099,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
2099 2283
2100 /* 82542 2.0 needs to be in reset to write receive address registers */ 2284 /* 82542 2.0 needs to be in reset to write receive address registers */
2101 2285
2102 if(hw->mac_type == e1000_82542_rev2_0) 2286 if (hw->mac_type == e1000_82542_rev2_0)
2103 e1000_enter_82542_rst(adapter); 2287 e1000_enter_82542_rst(adapter);
2104 2288
2105 /* load the first 14 multicast address into the exact filters 1-14 2289 /* load the first 14 multicast address into the exact filters 1-14
@@ -2109,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
2109 */ 2293 */
2110 mc_ptr = netdev->mc_list; 2294 mc_ptr = netdev->mc_list;
2111 2295
2112 for(i = 1; i < rar_entries; i++) { 2296 for (i = 1; i < rar_entries; i++) {
2113 if (mc_ptr) { 2297 if (mc_ptr) {
2114 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2298 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2115 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
@@ -2121,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
2121 2305
2122 /* clear the old settings from the multicast hash table */ 2306 /* clear the old settings from the multicast hash table */
2123 2307
2124 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2308 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2125 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2309 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2126 2310
2127 /* load any remaining addresses into the hash table */ 2311 /* load any remaining addresses into the hash table */
2128 2312
2129 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2313 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2130 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2314 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2131 e1000_mta_set(hw, hash_value); 2315 e1000_mta_set(hw, hash_value);
2132 } 2316 }
2133 2317
2134 if(hw->mac_type == e1000_82542_rev2_0) 2318 if (hw->mac_type == e1000_82542_rev2_0)
2135 e1000_leave_82542_rst(adapter); 2319 e1000_leave_82542_rst(adapter);
2136} 2320}
2137 2321
@@ -2157,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2157 struct net_device *netdev = adapter->netdev; 2341 struct net_device *netdev = adapter->netdev;
2158 uint32_t tctl; 2342 uint32_t tctl;
2159 2343
2160 if(atomic_read(&adapter->tx_fifo_stall)) { 2344 if (atomic_read(&adapter->tx_fifo_stall)) {
2161 if((E1000_READ_REG(&adapter->hw, TDT) == 2345 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2162 E1000_READ_REG(&adapter->hw, TDH)) && 2346 E1000_READ_REG(&adapter->hw, TDH)) &&
2163 (E1000_READ_REG(&adapter->hw, TDFT) == 2347 (E1000_READ_REG(&adapter->hw, TDFT) ==
2164 E1000_READ_REG(&adapter->hw, TDFH)) && 2348 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2204,24 +2388,24 @@ static void
2204e1000_watchdog_task(struct e1000_adapter *adapter) 2388e1000_watchdog_task(struct e1000_adapter *adapter)
2205{ 2389{
2206 struct net_device *netdev = adapter->netdev; 2390 struct net_device *netdev = adapter->netdev;
2207 struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; 2391 struct e1000_tx_ring *txdr = adapter->tx_ring;
2208 uint32_t link; 2392 uint32_t link;
2209 2393
2210 e1000_check_for_link(&adapter->hw); 2394 e1000_check_for_link(&adapter->hw);
2211 if (adapter->hw.mac_type == e1000_82573) { 2395 if (adapter->hw.mac_type == e1000_82573) {
2212 e1000_enable_tx_pkt_filtering(&adapter->hw); 2396 e1000_enable_tx_pkt_filtering(&adapter->hw);
2213 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2397 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2214 e1000_update_mng_vlan(adapter); 2398 e1000_update_mng_vlan(adapter);
2215 } 2399 }
2216 2400
2217 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2401 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2218 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2402 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2219 link = !adapter->hw.serdes_link_down; 2403 link = !adapter->hw.serdes_link_down;
2220 else 2404 else
2221 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2405 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2222 2406
2223 if(link) { 2407 if (link) {
2224 if(!netif_carrier_ok(netdev)) { 2408 if (!netif_carrier_ok(netdev)) {
2225 e1000_get_speed_and_duplex(&adapter->hw, 2409 e1000_get_speed_and_duplex(&adapter->hw,
2226 &adapter->link_speed, 2410 &adapter->link_speed,
2227 &adapter->link_duplex); 2411 &adapter->link_duplex);
@@ -2231,13 +2415,28 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2231 adapter->link_duplex == FULL_DUPLEX ? 2415 adapter->link_duplex == FULL_DUPLEX ?
2232 "Full Duplex" : "Half Duplex"); 2416 "Full Duplex" : "Half Duplex");
2233 2417
2418 /* tweak tx_queue_len according to speed/duplex */
2419 netdev->tx_queue_len = adapter->tx_queue_len;
2420 adapter->tx_timeout_factor = 1;
2421 if (adapter->link_duplex == HALF_DUPLEX) {
2422 switch (adapter->link_speed) {
2423 case SPEED_10:
2424 netdev->tx_queue_len = 10;
2425 adapter->tx_timeout_factor = 8;
2426 break;
2427 case SPEED_100:
2428 netdev->tx_queue_len = 100;
2429 break;
2430 }
2431 }
2432
2234 netif_carrier_on(netdev); 2433 netif_carrier_on(netdev);
2235 netif_wake_queue(netdev); 2434 netif_wake_queue(netdev);
2236 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); 2435 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2237 adapter->smartspeed = 0; 2436 adapter->smartspeed = 0;
2238 } 2437 }
2239 } else { 2438 } else {
2240 if(netif_carrier_ok(netdev)) { 2439 if (netif_carrier_ok(netdev)) {
2241 adapter->link_speed = 0; 2440 adapter->link_speed = 0;
2242 adapter->link_duplex = 0; 2441 adapter->link_duplex = 0;
2243 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2442 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2263,7 +2462,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2263 2462
2264 e1000_update_adaptive(&adapter->hw); 2463 e1000_update_adaptive(&adapter->hw);
2265 2464
2266 if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { 2465#ifdef CONFIG_E1000_MQ
2466 txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2467#endif
2468 if (!netif_carrier_ok(netdev)) {
2267 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2469 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2268 /* We've lost link, so the controller stops DMA, 2470 /* We've lost link, so the controller stops DMA,
2269 * but we've got queued Tx work that's never going 2471 * but we've got queued Tx work that's never going
@@ -2274,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2274 } 2476 }
2275 2477
2276 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2277 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2479 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2278 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total 2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2279 * asymmetrical Tx or Rx gets ITR=8000; everyone 2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2280 * else is between 2000-8000. */ 2482 * else is between 2000-8000. */
2281 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2483 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2282 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2484 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2283 adapter->gotcl - adapter->gorcl : 2485 adapter->gotcl - adapter->gorcl :
2284 adapter->gorcl - adapter->gotcl) / 10000; 2486 adapter->gorcl - adapter->gotcl) / 10000;
2285 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2487 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
@@ -2292,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2292 /* Force detection of hung controller every watchdog period */ 2494 /* Force detection of hung controller every watchdog period */
2293 adapter->detect_tx_hung = TRUE; 2495 adapter->detect_tx_hung = TRUE;
2294 2496
2295 /* With 82571 controllers, LAA may be overwritten due to controller 2497 /* With 82571 controllers, LAA may be overwritten due to controller
2296 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2297 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2499 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2298 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2500 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2314,13 +2516,14 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2314{ 2516{
2315#ifdef NETIF_F_TSO 2517#ifdef NETIF_F_TSO
2316 struct e1000_context_desc *context_desc; 2518 struct e1000_context_desc *context_desc;
2519 struct e1000_buffer *buffer_info;
2317 unsigned int i; 2520 unsigned int i;
2318 uint32_t cmd_length = 0; 2521 uint32_t cmd_length = 0;
2319 uint16_t ipcse = 0, tucse, mss; 2522 uint16_t ipcse = 0, tucse, mss;
2320 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2523 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2321 int err; 2524 int err;
2322 2525
2323 if(skb_shinfo(skb)->tso_size) { 2526 if (skb_shinfo(skb)->tso_size) {
2324 if (skb_header_cloned(skb)) { 2527 if (skb_header_cloned(skb)) {
2325 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2528 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2326 if (err) 2529 if (err)
@@ -2329,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2329 2532
2330 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2533 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2331 mss = skb_shinfo(skb)->tso_size; 2534 mss = skb_shinfo(skb)->tso_size;
2332 if(skb->protocol == ntohs(ETH_P_IP)) { 2535 if (skb->protocol == ntohs(ETH_P_IP)) {
2333 skb->nh.iph->tot_len = 0; 2536 skb->nh.iph->tot_len = 0;
2334 skb->nh.iph->check = 0; 2537 skb->nh.iph->check = 0;
2335 skb->h.th->check = 2538 skb->h.th->check =
@@ -2341,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2341 cmd_length = E1000_TXD_CMD_IP; 2544 cmd_length = E1000_TXD_CMD_IP;
2342 ipcse = skb->h.raw - skb->data - 1; 2545 ipcse = skb->h.raw - skb->data - 1;
2343#ifdef NETIF_F_TSO_IPV6 2546#ifdef NETIF_F_TSO_IPV6
2344 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2547 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2345 skb->nh.ipv6h->payload_len = 0; 2548 skb->nh.ipv6h->payload_len = 0;
2346 skb->h.th->check = 2549 skb->h.th->check =
2347 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2550 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2363,6 +2566,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2363 2566
2364 i = tx_ring->next_to_use; 2567 i = tx_ring->next_to_use;
2365 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2568 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2569 buffer_info = &tx_ring->buffer_info[i];
2366 2570
2367 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2571 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2368 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2572 context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2578,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2374 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2578 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2375 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2579 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2376 2580
2581 buffer_info->time_stamp = jiffies;
2582
2377 if (++i == tx_ring->count) i = 0; 2583 if (++i == tx_ring->count) i = 0;
2378 tx_ring->next_to_use = i; 2584 tx_ring->next_to_use = i;
2379 2585
2380 return 1; 2586 return TRUE;
2381 } 2587 }
2382#endif 2588#endif
2383 2589
2384 return 0; 2590 return FALSE;
2385} 2591}
2386 2592
2387static inline boolean_t 2593static inline boolean_t
@@ -2389,13 +2595,15 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2389 struct sk_buff *skb) 2595 struct sk_buff *skb)
2390{ 2596{
2391 struct e1000_context_desc *context_desc; 2597 struct e1000_context_desc *context_desc;
2598 struct e1000_buffer *buffer_info;
2392 unsigned int i; 2599 unsigned int i;
2393 uint8_t css; 2600 uint8_t css;
2394 2601
2395 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2602 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2396 css = skb->h.raw - skb->data; 2603 css = skb->h.raw - skb->data;
2397 2604
2398 i = tx_ring->next_to_use; 2605 i = tx_ring->next_to_use;
2606 buffer_info = &tx_ring->buffer_info[i];
2399 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2607 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2400 2608
2401 context_desc->upper_setup.tcp_fields.tucss = css; 2609 context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2612,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2404 context_desc->tcp_seg_setup.data = 0; 2612 context_desc->tcp_seg_setup.data = 0;
2405 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2613 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2406 2614
2615 buffer_info->time_stamp = jiffies;
2616
2407 if (unlikely(++i == tx_ring->count)) i = 0; 2617 if (unlikely(++i == tx_ring->count)) i = 0;
2408 tx_ring->next_to_use = i; 2618 tx_ring->next_to_use = i;
2409 2619
@@ -2429,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2429 2639
2430 i = tx_ring->next_to_use; 2640 i = tx_ring->next_to_use;
2431 2641
2432 while(len) { 2642 while (len) {
2433 buffer_info = &tx_ring->buffer_info[i]; 2643 buffer_info = &tx_ring->buffer_info[i];
2434 size = min(len, max_per_txd); 2644 size = min(len, max_per_txd);
2435#ifdef NETIF_F_TSO 2645#ifdef NETIF_F_TSO
@@ -2445,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2445 2655
2446 /* Workaround for premature desc write-backs 2656 /* Workaround for premature desc write-backs
2447 * in TSO mode. Append 4-byte sentinel desc */ 2657 * in TSO mode. Append 4-byte sentinel desc */
2448 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2658 if (unlikely(mss && !nr_frags && size == len && size > 8))
2449 size -= 4; 2659 size -= 4;
2450#endif 2660#endif
2451 /* work-around for errata 10 and it applies 2661 /* work-around for errata 10 and it applies
@@ -2453,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2453 * The fix is to make sure that the first descriptor of a 2663 * The fix is to make sure that the first descriptor of a
2454 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2455 */ 2665 */
2456 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2666 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2457 (size > 2015) && count == 0)) 2667 (size > 2015) && count == 0))
2458 size = 2015; 2668 size = 2015;
2459 2669
2460 /* Workaround for potential 82544 hang in PCI-X. Avoid 2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2461 * terminating buffers within evenly-aligned dwords. */ 2671 * terminating buffers within evenly-aligned dwords. */
2462 if(unlikely(adapter->pcix_82544 && 2672 if (unlikely(adapter->pcix_82544 &&
2463 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2673 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2464 size > 4)) 2674 size > 4))
2465 size -= 4; 2675 size -= 4;
@@ -2475,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2475 len -= size; 2685 len -= size;
2476 offset += size; 2686 offset += size;
2477 count++; 2687 count++;
2478 if(unlikely(++i == tx_ring->count)) i = 0; 2688 if (unlikely(++i == tx_ring->count)) i = 0;
2479 } 2689 }
2480 2690
2481 for(f = 0; f < nr_frags; f++) { 2691 for (f = 0; f < nr_frags; f++) {
2482 struct skb_frag_struct *frag; 2692 struct skb_frag_struct *frag;
2483 2693
2484 frag = &skb_shinfo(skb)->frags[f]; 2694 frag = &skb_shinfo(skb)->frags[f];
2485 len = frag->size; 2695 len = frag->size;
2486 offset = frag->page_offset; 2696 offset = frag->page_offset;
2487 2697
2488 while(len) { 2698 while (len) {
2489 buffer_info = &tx_ring->buffer_info[i]; 2699 buffer_info = &tx_ring->buffer_info[i];
2490 size = min(len, max_per_txd); 2700 size = min(len, max_per_txd);
2491#ifdef NETIF_F_TSO 2701#ifdef NETIF_F_TSO
2492 /* Workaround for premature desc write-backs 2702 /* Workaround for premature desc write-backs
2493 * in TSO mode. Append 4-byte sentinel desc */ 2703 * in TSO mode. Append 4-byte sentinel desc */
2494 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2704 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2495 size -= 4; 2705 size -= 4;
2496#endif 2706#endif
2497 /* Workaround for potential 82544 hang in PCI-X. 2707 /* Workaround for potential 82544 hang in PCI-X.
2498 * Avoid terminating buffers within evenly-aligned 2708 * Avoid terminating buffers within evenly-aligned
2499 * dwords. */ 2709 * dwords. */
2500 if(unlikely(adapter->pcix_82544 && 2710 if (unlikely(adapter->pcix_82544 &&
2501 !((unsigned long)(frag->page+offset+size-1) & 4) && 2711 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2502 size > 4)) 2712 size > 4))
2503 size -= 4; 2713 size -= 4;
@@ -2514,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2514 len -= size; 2724 len -= size;
2515 offset += size; 2725 offset += size;
2516 count++; 2726 count++;
2517 if(unlikely(++i == tx_ring->count)) i = 0; 2727 if (unlikely(++i == tx_ring->count)) i = 0;
2518 } 2728 }
2519 } 2729 }
2520 2730
@@ -2534,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2534 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2744 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2535 unsigned int i; 2745 unsigned int i;
2536 2746
2537 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2747 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2538 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2748 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2539 E1000_TXD_CMD_TSE; 2749 E1000_TXD_CMD_TSE;
2540 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2750 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2541 2751
2542 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2752 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2543 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2753 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2544 } 2754 }
2545 2755
2546 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2756 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2547 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2757 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2548 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2758 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2549 } 2759 }
2550 2760
2551 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2761 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2552 txd_lower |= E1000_TXD_CMD_VLE; 2762 txd_lower |= E1000_TXD_CMD_VLE;
2553 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2763 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2554 } 2764 }
2555 2765
2556 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2557 2767
2558 while(count--) { 2768 while (count--) {
2559 buffer_info = &tx_ring->buffer_info[i]; 2769 buffer_info = &tx_ring->buffer_info[i];
2560 tx_desc = E1000_TX_DESC(*tx_ring, i); 2770 tx_desc = E1000_TX_DESC(*tx_ring, i);
2561 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2771 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2562 tx_desc->lower.data = 2772 tx_desc->lower.data =
2563 cpu_to_le32(txd_lower | buffer_info->length); 2773 cpu_to_le32(txd_lower | buffer_info->length);
2564 tx_desc->upper.data = cpu_to_le32(txd_upper); 2774 tx_desc->upper.data = cpu_to_le32(txd_upper);
2565 if(unlikely(++i == tx_ring->count)) i = 0; 2775 if (unlikely(++i == tx_ring->count)) i = 0;
2566 } 2776 }
2567 2777
2568 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2778 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2597,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2597 2807
2598 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2808 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2599 2809
2600 if(adapter->link_duplex != HALF_DUPLEX) 2810 if (adapter->link_duplex != HALF_DUPLEX)
2601 goto no_fifo_stall_required; 2811 goto no_fifo_stall_required;
2602 2812
2603 if(atomic_read(&adapter->tx_fifo_stall)) 2813 if (atomic_read(&adapter->tx_fifo_stall))
2604 return 1; 2814 return 1;
2605 2815
2606 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2816 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2607 atomic_set(&adapter->tx_fifo_stall, 1); 2817 atomic_set(&adapter->tx_fifo_stall, 1);
2608 return 1; 2818 return 1;
2609 } 2819 }
2610 2820
2611no_fifo_stall_required: 2821no_fifo_stall_required:
2612 adapter->tx_fifo_head += skb_fifo_len; 2822 adapter->tx_fifo_head += skb_fifo_len;
2613 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2823 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2614 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2824 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2615 return 0; 2825 return 0;
2616} 2826}
@@ -2621,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2621{ 2831{
2622 struct e1000_hw *hw = &adapter->hw; 2832 struct e1000_hw *hw = &adapter->hw;
2623 uint16_t length, offset; 2833 uint16_t length, offset;
2624 if(vlan_tx_tag_present(skb)) { 2834 if (vlan_tx_tag_present(skb)) {
2625 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2835 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2626 ( adapter->hw.mng_cookie.status & 2836 ( adapter->hw.mng_cookie.status &
2627 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2628 return 0; 2838 return 0;
2629 } 2839 }
2630 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2840 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2631 struct ethhdr *eth = (struct ethhdr *) skb->data; 2841 struct ethhdr *eth = (struct ethhdr *) skb->data;
2632 if((htons(ETH_P_IP) == eth->h_proto)) { 2842 if ((htons(ETH_P_IP) == eth->h_proto)) {
2633 const struct iphdr *ip = 2843 const struct iphdr *ip =
2634 (struct iphdr *)((uint8_t *)skb->data+14); 2844 (struct iphdr *)((uint8_t *)skb->data+14);
2635 if(IPPROTO_UDP == ip->protocol) { 2845 if (IPPROTO_UDP == ip->protocol) {
2636 struct udphdr *udp = 2846 struct udphdr *udp =
2637 (struct udphdr *)((uint8_t *)ip + 2847 (struct udphdr *)((uint8_t *)ip +
2638 (ip->ihl << 2)); 2848 (ip->ihl << 2));
2639 if(ntohs(udp->dest) == 67) { 2849 if (ntohs(udp->dest) == 67) {
2640 offset = (uint8_t *)udp + 8 - skb->data; 2850 offset = (uint8_t *)udp + 8 - skb->data;
2641 length = skb->len - offset; 2851 length = skb->len - offset;
2642 2852
2643 return e1000_mng_write_dhcp_info(hw, 2853 return e1000_mng_write_dhcp_info(hw,
2644 (uint8_t *)udp + 8, 2854 (uint8_t *)udp + 8,
2645 length); 2855 length);
2646 } 2856 }
2647 } 2857 }
@@ -2664,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2664 unsigned int nr_frags = 0; 2874 unsigned int nr_frags = 0;
2665 unsigned int mss = 0; 2875 unsigned int mss = 0;
2666 int count = 0; 2876 int count = 0;
2667 int tso; 2877 int tso;
2668 unsigned int f; 2878 unsigned int f;
2669 len -= skb->data_len; 2879 len -= skb->data_len;
2670 2880
@@ -2687,16 +2897,35 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2687 * 4 = ceil(buffer len/mss). To make sure we don't 2897 * 4 = ceil(buffer len/mss). To make sure we don't
2688 * overrun the FIFO, adjust the max buffer len if mss 2898 * overrun the FIFO, adjust the max buffer len if mss
2689 * drops. */ 2899 * drops. */
2690 if(mss) { 2900 if (mss) {
2901 uint8_t hdr_len;
2691 max_per_txd = min(mss << 2, max_per_txd); 2902 max_per_txd = min(mss << 2, max_per_txd);
2692 max_txd_pwr = fls(max_per_txd) - 1; 2903 max_txd_pwr = fls(max_per_txd) - 1;
2904
2905 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2906 * points to just header, pull a few bytes of payload from
2907 * frags into skb->data */
2908 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2909 if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
2910 (adapter->hw.mac_type == e1000_82571 ||
2911 adapter->hw.mac_type == e1000_82572)) {
2912 unsigned int pull_size;
2913 pull_size = min((unsigned int)4, skb->data_len);
2914 if (!__pskb_pull_tail(skb, pull_size)) {
2915 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2916 dev_kfree_skb_any(skb);
2917 return -EFAULT;
2918 }
2919 len = skb->len - skb->data_len;
2920 }
2693 } 2921 }
2694 2922
2695 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2923 /* reserve a descriptor for the offload context */
2924 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2696 count++; 2925 count++;
2697 count++; 2926 count++;
2698#else 2927#else
2699 if(skb->ip_summed == CHECKSUM_HW) 2928 if (skb->ip_summed == CHECKSUM_HW)
2700 count++; 2929 count++;
2701#endif 2930#endif
2702 2931
@@ -2709,45 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2709 2938
2710 count += TXD_USE_COUNT(len, max_txd_pwr); 2939 count += TXD_USE_COUNT(len, max_txd_pwr);
2711 2940
2712 if(adapter->pcix_82544) 2941 if (adapter->pcix_82544)
2713 count++; 2942 count++;
2714 2943
2715 /* work-around for errata 10 and it applies to all controllers 2944 /* work-around for errata 10 and it applies to all controllers
2716 * in PCI-X mode, so add one more descriptor to the count 2945 * in PCI-X mode, so add one more descriptor to the count
2717 */ 2946 */
2718 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2947 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2719 (len > 2015))) 2948 (len > 2015)))
2720 count++; 2949 count++;
2721 2950
2722 nr_frags = skb_shinfo(skb)->nr_frags; 2951 nr_frags = skb_shinfo(skb)->nr_frags;
2723 for(f = 0; f < nr_frags; f++) 2952 for (f = 0; f < nr_frags; f++)
2724 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2953 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2725 max_txd_pwr); 2954 max_txd_pwr);
2726 if(adapter->pcix_82544) 2955 if (adapter->pcix_82544)
2727 count += nr_frags; 2956 count += nr_frags;
2728 2957
2729#ifdef NETIF_F_TSO 2958 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2730 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2731 * points to just header, pull a few bytes of payload from
2732 * frags into skb->data */
2733 if (skb_shinfo(skb)->tso_size) {
2734 uint8_t hdr_len;
2735 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2736 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
2737 (adapter->hw.mac_type == e1000_82571 ||
2738 adapter->hw.mac_type == e1000_82572)) {
2739 unsigned int pull_size;
2740 pull_size = min((unsigned int)4, skb->data_len);
2741 if (!__pskb_pull_tail(skb, pull_size)) {
2742 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2743 dev_kfree_skb_any(skb);
2744 return -EFAULT;
2745 }
2746 }
2747 }
2748#endif
2749
2750 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2751 e1000_transfer_dhcp_info(adapter, skb); 2959 e1000_transfer_dhcp_info(adapter, skb);
2752 2960
2753 local_irq_save(flags); 2961 local_irq_save(flags);
@@ -2765,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2765 return NETDEV_TX_BUSY; 2973 return NETDEV_TX_BUSY;
2766 } 2974 }
2767 2975
2768 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2976 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2769 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2977 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2770 netif_stop_queue(netdev); 2978 netif_stop_queue(netdev);
2771 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2979 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2772 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2980 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2774,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2774 } 2982 }
2775 } 2983 }
2776 2984
2777 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2985 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2778 tx_flags |= E1000_TX_FLAGS_VLAN; 2986 tx_flags |= E1000_TX_FLAGS_VLAN;
2779 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2987 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2780 } 2988 }
2781 2989
2782 first = tx_ring->next_to_use; 2990 first = tx_ring->next_to_use;
2783 2991
2784 tso = e1000_tso(adapter, tx_ring, skb); 2992 tso = e1000_tso(adapter, tx_ring, skb);
2785 if (tso < 0) { 2993 if (tso < 0) {
2786 dev_kfree_skb_any(skb); 2994 dev_kfree_skb_any(skb);
@@ -2833,6 +3041,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
2833{ 3041{
2834 struct e1000_adapter *adapter = netdev_priv(netdev); 3042 struct e1000_adapter *adapter = netdev_priv(netdev);
2835 3043
3044 adapter->tx_timeout_count++;
2836 e1000_down(adapter); 3045 e1000_down(adapter);
2837 e1000_up(adapter); 3046 e1000_up(adapter);
2838} 3047}
@@ -2850,7 +3059,7 @@ e1000_get_stats(struct net_device *netdev)
2850{ 3059{
2851 struct e1000_adapter *adapter = netdev_priv(netdev); 3060 struct e1000_adapter *adapter = netdev_priv(netdev);
2852 3061
2853 e1000_update_stats(adapter); 3062 /* only return the current stats */
2854 return &adapter->net_stats; 3063 return &adapter->net_stats;
2855} 3064}
2856 3065
@@ -2868,56 +3077,57 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2868 struct e1000_adapter *adapter = netdev_priv(netdev); 3077 struct e1000_adapter *adapter = netdev_priv(netdev);
2869 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3078 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2870 3079
2871 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3080 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2872 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3081 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2873 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3082 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2874 return -EINVAL;
2875 }
2876
2877#define MAX_STD_JUMBO_FRAME_SIZE 9234
2878 /* might want this to be bigger enum check... */
2879 /* 82571 controllers limit jumbo frame size to 10500 bytes */
2880 if ((adapter->hw.mac_type == e1000_82571 ||
2881 adapter->hw.mac_type == e1000_82572) &&
2882 max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2883 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2884 "on 82571 and 82572 controllers.\n");
2885 return -EINVAL;
2886 }
2887
2888 if(adapter->hw.mac_type == e1000_82573 &&
2889 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2890 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2891 "on 82573\n");
2892 return -EINVAL; 3083 return -EINVAL;
2893 } 3084 }
2894 3085
2895 if(adapter->hw.mac_type > e1000_82547_rev_2) { 3086 /* Adapter-specific max frame size limits. */
2896 adapter->rx_buffer_len = max_frame; 3087 switch (adapter->hw.mac_type) {
2897 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 3088 case e1000_82542_rev2_0:
2898 } else { 3089 case e1000_82542_rev2_1:
2899 if(unlikely((adapter->hw.mac_type < e1000_82543) && 3090 case e1000_82573:
2900 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 3091 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2901 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3092 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2902 "on 82542\n"); 3093 return -EINVAL;
3094 }
3095 break;
3096 case e1000_82571:
3097 case e1000_82572:
3098#define MAX_STD_JUMBO_FRAME_SIZE 9234
3099 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3100 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
2903 return -EINVAL; 3101 return -EINVAL;
2904
2905 } else {
2906 if(max_frame <= E1000_RXBUFFER_2048) {
2907 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2908 } else if(max_frame <= E1000_RXBUFFER_4096) {
2909 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2910 } else if(max_frame <= E1000_RXBUFFER_8192) {
2911 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2912 } else if(max_frame <= E1000_RXBUFFER_16384) {
2913 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2914 }
2915 } 3102 }
3103 break;
3104 default:
3105 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3106 break;
2916 } 3107 }
2917 3108
3109 /* since the driver code now supports splitting a packet across
3110 * multiple descriptors, most of the fifo related limitations on
3111 * jumbo frame traffic have gone away.
3112 * simply use 2k descriptors for everything.
3113 *
3114 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3115 * means we reserve 2 more, this pushes us to allocate from the next
3116 * larger slab size
3117 * i.e. RXBUFFER_2048 --> size-4096 slab */
3118
3119 /* recent hardware supports 1KB granularity */
3120 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3121 adapter->rx_buffer_len =
3122 ((max_frame < E1000_RXBUFFER_2048) ?
3123 max_frame : E1000_RXBUFFER_2048);
3124 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3125 } else
3126 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3127
2918 netdev->mtu = new_mtu; 3128 netdev->mtu = new_mtu;
2919 3129
2920 if(netif_running(netdev)) { 3130 if (netif_running(netdev)) {
2921 e1000_down(adapter); 3131 e1000_down(adapter);
2922 e1000_up(adapter); 3132 e1000_up(adapter);
2923 } 3133 }
@@ -3004,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3004 hw->collision_delta = E1000_READ_REG(hw, COLC); 3214 hw->collision_delta = E1000_READ_REG(hw, COLC);
3005 adapter->stats.colc += hw->collision_delta; 3215 adapter->stats.colc += hw->collision_delta;
3006 3216
3007 if(hw->mac_type >= e1000_82543) { 3217 if (hw->mac_type >= e1000_82543) {
3008 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3218 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3009 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3219 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3010 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3220 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3012,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3012 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 3222 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3013 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 3223 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3014 } 3224 }
3015 if(hw->mac_type > e1000_82547_rev_2) { 3225 if (hw->mac_type > e1000_82547_rev_2) {
3016 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3226 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3017 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3227 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3018 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3228 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3037,12 +3247,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
3037 3247
3038 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3248 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3039 adapter->stats.crcerrs + adapter->stats.algnerrc + 3249 adapter->stats.crcerrs + adapter->stats.algnerrc +
3040 adapter->stats.rlec + adapter->stats.mpc + 3250 adapter->stats.rlec + adapter->stats.cexterr;
3041 adapter->stats.cexterr; 3251 adapter->net_stats.rx_dropped = 0;
3042 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3252 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3043 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3253 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3044 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3254 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3045 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
3046 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3255 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3047 3256
3048 /* Tx Errors */ 3257 /* Tx Errors */
@@ -3057,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
3057 3266
3058 /* Phy Stats */ 3267 /* Phy Stats */
3059 3268
3060 if(hw->media_type == e1000_media_type_copper) { 3269 if (hw->media_type == e1000_media_type_copper) {
3061 if((adapter->link_speed == SPEED_1000) && 3270 if ((adapter->link_speed == SPEED_1000) &&
3062 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3271 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3063 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3272 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3064 adapter->phy_stats.idle_errors += phy_tmp; 3273 adapter->phy_stats.idle_errors += phy_tmp;
3065 } 3274 }
3066 3275
3067 if((hw->mac_type <= e1000_82546) && 3276 if ((hw->mac_type <= e1000_82546) &&
3068 (hw->phy_type == e1000_phy_m88) && 3277 (hw->phy_type == e1000_phy_m88) &&
3069 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3278 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3070 adapter->phy_stats.receive_errors += phy_tmp; 3279 adapter->phy_stats.receive_errors += phy_tmp;
@@ -3110,32 +3319,44 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3110 struct e1000_adapter *adapter = netdev_priv(netdev); 3319 struct e1000_adapter *adapter = netdev_priv(netdev);
3111 struct e1000_hw *hw = &adapter->hw; 3320 struct e1000_hw *hw = &adapter->hw;
3112 uint32_t icr = E1000_READ_REG(hw, ICR); 3321 uint32_t icr = E1000_READ_REG(hw, ICR);
3113#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) 3322#ifndef CONFIG_E1000_NAPI
3114 int i; 3323 int i;
3324#else
3325 /* Interrupt Auto-Mask...upon reading ICR,
3326 * interrupts are masked. No need for the
3327 * IMC write, but it does mean we should
3328 * account for it ASAP. */
3329 if (likely(hw->mac_type >= e1000_82571))
3330 atomic_inc(&adapter->irq_sem);
3115#endif 3331#endif
3116 3332
3117 if(unlikely(!icr)) 3333 if (unlikely(!icr)) {
3334#ifdef CONFIG_E1000_NAPI
3335 if (hw->mac_type >= e1000_82571)
3336 e1000_irq_enable(adapter);
3337#endif
3118 return IRQ_NONE; /* Not our interrupt */ 3338 return IRQ_NONE; /* Not our interrupt */
3339 }
3119 3340
3120 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3341 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3121 hw->get_link_status = 1; 3342 hw->get_link_status = 1;
3122 mod_timer(&adapter->watchdog_timer, jiffies); 3343 mod_timer(&adapter->watchdog_timer, jiffies);
3123 } 3344 }
3124 3345
3125#ifdef CONFIG_E1000_NAPI 3346#ifdef CONFIG_E1000_NAPI
3126 atomic_inc(&adapter->irq_sem); 3347 if (unlikely(hw->mac_type < e1000_82571)) {
3127 E1000_WRITE_REG(hw, IMC, ~0); 3348 atomic_inc(&adapter->irq_sem);
3128 E1000_WRITE_FLUSH(hw); 3349 E1000_WRITE_REG(hw, IMC, ~0);
3350 E1000_WRITE_FLUSH(hw);
3351 }
3129#ifdef CONFIG_E1000_MQ 3352#ifdef CONFIG_E1000_MQ
3130 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { 3353 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
3131 cpu_set(adapter->cpu_for_queue[0], 3354 /* We must setup the cpumask once count == 0 since
3132 adapter->rx_sched_call_data.cpumask); 3355 * each cpu bit is cleared when the work is done. */
3133 for (i = 1; i < adapter->num_queues; i++) { 3356 adapter->rx_sched_call_data.cpumask = adapter->cpumask;
3134 cpu_set(adapter->cpu_for_queue[i], 3357 atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
3135 adapter->rx_sched_call_data.cpumask); 3358 atomic_set(&adapter->rx_sched_call_data.count,
3136 atomic_inc(&adapter->irq_sem); 3359 adapter->num_rx_queues);
3137 }
3138 atomic_set(&adapter->rx_sched_call_data.count, i);
3139 smp_call_async_mask(&adapter->rx_sched_call_data); 3360 smp_call_async_mask(&adapter->rx_sched_call_data);
3140 } else { 3361 } else {
3141 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); 3362 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3149,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3149 3370
3150#else /* if !CONFIG_E1000_NAPI */ 3371#else /* if !CONFIG_E1000_NAPI */
3151 /* Writing IMC and IMS is needed for 82547. 3372 /* Writing IMC and IMS is needed for 82547.
3152 Due to Hub Link bus being occupied, an interrupt 3373 * Due to Hub Link bus being occupied, an interrupt
3153 de-assertion message is not able to be sent. 3374 * de-assertion message is not able to be sent.
3154 When an interrupt assertion message is generated later, 3375 * When an interrupt assertion message is generated later,
3155 two messages are re-ordered and sent out. 3376 * two messages are re-ordered and sent out.
3156 That causes APIC to think 82547 is in de-assertion 3377 * That causes APIC to think 82547 is in de-assertion
3157 state, while 82547 is in assertion state, resulting 3378 * state, while 82547 is in assertion state, resulting
3158 in dead lock. Writing IMC forces 82547 into 3379 * in dead lock. Writing IMC forces 82547 into
3159 de-assertion state. 3380 * de-assertion state.
3160 */ 3381 */
3161 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 3382 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3162 atomic_inc(&adapter->irq_sem); 3383 atomic_inc(&adapter->irq_sem);
3163 E1000_WRITE_REG(hw, IMC, ~0); 3384 E1000_WRITE_REG(hw, IMC, ~0);
3164 } 3385 }
3165 3386
3166 for(i = 0; i < E1000_MAX_INTR; i++) 3387 for (i = 0; i < E1000_MAX_INTR; i++)
3167 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3388 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3168 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3389 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3169 break; 3390 break;
3170 3391
3171 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3392 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3172 e1000_irq_enable(adapter); 3393 e1000_irq_enable(adapter);
3173 3394
3174#endif /* CONFIG_E1000_NAPI */ 3395#endif /* CONFIG_E1000_NAPI */
@@ -3187,7 +3408,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3187{ 3408{
3188 struct e1000_adapter *adapter; 3409 struct e1000_adapter *adapter;
3189 int work_to_do = min(*budget, poll_dev->quota); 3410 int work_to_do = min(*budget, poll_dev->quota);
3190 int tx_cleaned, i = 0, work_done = 0; 3411 int tx_cleaned = 0, i = 0, work_done = 0;
3191 3412
3192 /* Must NOT use netdev_priv macro here. */ 3413 /* Must NOT use netdev_priv macro here. */
3193 adapter = poll_dev->priv; 3414 adapter = poll_dev->priv;
@@ -3198,19 +3419,31 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3198 3419
3199 while (poll_dev != &adapter->polling_netdev[i]) { 3420 while (poll_dev != &adapter->polling_netdev[i]) {
3200 i++; 3421 i++;
3201 if (unlikely(i == adapter->num_queues)) 3422 if (unlikely(i == adapter->num_rx_queues))
3202 BUG(); 3423 BUG();
3203 } 3424 }
3204 3425
3205 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); 3426 if (likely(adapter->num_tx_queues == 1)) {
3427 /* e1000_clean is called per-cpu. This lock protects
3428 * tx_ring[0] from being cleaned by multiple cpus
3429 * simultaneously. A failure obtaining the lock means
3430 * tx_ring[0] is currently being cleaned anyway. */
3431 if (spin_trylock(&adapter->tx_queue_lock)) {
3432 tx_cleaned = e1000_clean_tx_irq(adapter,
3433 &adapter->tx_ring[0]);
3434 spin_unlock(&adapter->tx_queue_lock);
3435 }
3436 } else
3437 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3438
3206 adapter->clean_rx(adapter, &adapter->rx_ring[i], 3439 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3207 &work_done, work_to_do); 3440 &work_done, work_to_do);
3208 3441
3209 *budget -= work_done; 3442 *budget -= work_done;
3210 poll_dev->quota -= work_done; 3443 poll_dev->quota -= work_done;
3211 3444
3212 /* If no Tx and not enough Rx work done, exit the polling mode */ 3445 /* If no Tx and not enough Rx work done, exit the polling mode */
3213 if((!tx_cleaned && (work_done == 0)) || 3446 if ((!tx_cleaned && (work_done == 0)) ||
3214 !netif_running(adapter->netdev)) { 3447 !netif_running(adapter->netdev)) {
3215quit_polling: 3448quit_polling:
3216 netif_rx_complete(poll_dev); 3449 netif_rx_complete(poll_dev);
@@ -3242,22 +3475,24 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3242 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3475 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3243 3476
3244 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3477 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3245 for(cleaned = FALSE; !cleaned; ) { 3478 for (cleaned = FALSE; !cleaned; ) {
3246 tx_desc = E1000_TX_DESC(*tx_ring, i); 3479 tx_desc = E1000_TX_DESC(*tx_ring, i);
3247 buffer_info = &tx_ring->buffer_info[i]; 3480 buffer_info = &tx_ring->buffer_info[i];
3248 cleaned = (i == eop); 3481 cleaned = (i == eop);
3249 3482
3483#ifdef CONFIG_E1000_MQ
3484 tx_ring->tx_stats.bytes += buffer_info->length;
3485#endif
3250 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3486 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3487 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3251 3488
3252 tx_desc->buffer_addr = 0; 3489 if (unlikely(++i == tx_ring->count)) i = 0;
3253 tx_desc->lower.data = 0;
3254 tx_desc->upper.data = 0;
3255
3256 if(unlikely(++i == tx_ring->count)) i = 0;
3257 } 3490 }
3258 3491
3259 tx_ring->pkt++; 3492#ifdef CONFIG_E1000_MQ
3260 3493 tx_ring->tx_stats.packets++;
3494#endif
3495
3261 eop = tx_ring->buffer_info[i].next_to_watch; 3496 eop = tx_ring->buffer_info[i].next_to_watch;
3262 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3497 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3263 } 3498 }
@@ -3266,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3266 3501
3267 spin_lock(&tx_ring->tx_lock); 3502 spin_lock(&tx_ring->tx_lock);
3268 3503
3269 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3504 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3270 netif_carrier_ok(netdev))) 3505 netif_carrier_ok(netdev)))
3271 netif_wake_queue(netdev); 3506 netif_wake_queue(netdev);
3272 3507
@@ -3276,32 +3511,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3276 /* Detect a transmit hang in hardware, this serializes the 3511 /* Detect a transmit hang in hardware, this serializes the
3277 * check with the clearing of time_stamp and movement of i */ 3512 * check with the clearing of time_stamp and movement of i */
3278 adapter->detect_tx_hung = FALSE; 3513 adapter->detect_tx_hung = FALSE;
3279 if (tx_ring->buffer_info[i].dma && 3514 if (tx_ring->buffer_info[eop].dma &&
3280 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) 3515 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3516 adapter->tx_timeout_factor * HZ)
3281 && !(E1000_READ_REG(&adapter->hw, STATUS) & 3517 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3282 E1000_STATUS_TXOFF)) { 3518 E1000_STATUS_TXOFF)) {
3283 3519
3284 /* detected Tx unit hang */ 3520 /* detected Tx unit hang */
3285 i = tx_ring->next_to_clean;
3286 eop = tx_ring->buffer_info[i].next_to_watch;
3287 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3288 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3521 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3522 " Tx Queue <%lu>\n"
3289 " TDH <%x>\n" 3523 " TDH <%x>\n"
3290 " TDT <%x>\n" 3524 " TDT <%x>\n"
3291 " next_to_use <%x>\n" 3525 " next_to_use <%x>\n"
3292 " next_to_clean <%x>\n" 3526 " next_to_clean <%x>\n"
3293 "buffer_info[next_to_clean]\n" 3527 "buffer_info[next_to_clean]\n"
3294 " dma <%llx>\n"
3295 " time_stamp <%lx>\n" 3528 " time_stamp <%lx>\n"
3296 " next_to_watch <%x>\n" 3529 " next_to_watch <%x>\n"
3297 " jiffies <%lx>\n" 3530 " jiffies <%lx>\n"
3298 " next_to_watch.status <%x>\n", 3531 " next_to_watch.status <%x>\n",
3532 (unsigned long)((tx_ring - adapter->tx_ring) /
3533 sizeof(struct e1000_tx_ring)),
3299 readl(adapter->hw.hw_addr + tx_ring->tdh), 3534 readl(adapter->hw.hw_addr + tx_ring->tdh),
3300 readl(adapter->hw.hw_addr + tx_ring->tdt), 3535 readl(adapter->hw.hw_addr + tx_ring->tdt),
3301 tx_ring->next_to_use, 3536 tx_ring->next_to_use,
3302 i, 3537 tx_ring->next_to_clean,
3303 (unsigned long long)tx_ring->buffer_info[i].dma, 3538 tx_ring->buffer_info[eop].time_stamp,
3304 tx_ring->buffer_info[i].time_stamp,
3305 eop, 3539 eop,
3306 jiffies, 3540 jiffies,
3307 eop_desc->upper.fields.status); 3541 eop_desc->upper.fields.status);
@@ -3329,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3329 skb->ip_summed = CHECKSUM_NONE; 3563 skb->ip_summed = CHECKSUM_NONE;
3330 3564
3331 /* 82543 or newer only */ 3565 /* 82543 or newer only */
3332 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 3566 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3333 /* Ignore Checksum bit is set */ 3567 /* Ignore Checksum bit is set */
3334 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 3568 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3335 /* TCP/UDP checksum error bit is set */ 3569 /* TCP/UDP checksum error bit is set */
3336 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 3570 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3337 /* let the stack verify checksum errors */ 3571 /* let the stack verify checksum errors */
3338 adapter->hw_csum_err++; 3572 adapter->hw_csum_err++;
3339 return; 3573 return;
3340 } 3574 }
3341 /* TCP/UDP Checksum has not been calculated */ 3575 /* TCP/UDP Checksum has not been calculated */
3342 if(adapter->hw.mac_type <= e1000_82547_rev_2) { 3576 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3343 if(!(status & E1000_RXD_STAT_TCPCS)) 3577 if (!(status & E1000_RXD_STAT_TCPCS))
3344 return; 3578 return;
3345 } else { 3579 } else {
3346 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 3580 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3347 return; 3581 return;
3348 } 3582 }
3349 /* It must be a TCP or UDP packet with a valid checksum */ 3583 /* It must be a TCP or UDP packet with a valid checksum */
@@ -3379,46 +3613,87 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3379{ 3613{
3380 struct net_device *netdev = adapter->netdev; 3614 struct net_device *netdev = adapter->netdev;
3381 struct pci_dev *pdev = adapter->pdev; 3615 struct pci_dev *pdev = adapter->pdev;
3382 struct e1000_rx_desc *rx_desc; 3616 struct e1000_rx_desc *rx_desc, *next_rxd;
3383 struct e1000_buffer *buffer_info; 3617 struct e1000_buffer *buffer_info, *next_buffer;
3384 struct sk_buff *skb;
3385 unsigned long flags; 3618 unsigned long flags;
3386 uint32_t length; 3619 uint32_t length;
3387 uint8_t last_byte; 3620 uint8_t last_byte;
3388 unsigned int i; 3621 unsigned int i;
3389 boolean_t cleaned = FALSE; 3622 int cleaned_count = 0;
3623 boolean_t cleaned = FALSE, multi_descriptor = FALSE;
3390 3624
3391 i = rx_ring->next_to_clean; 3625 i = rx_ring->next_to_clean;
3392 rx_desc = E1000_RX_DESC(*rx_ring, i); 3626 rx_desc = E1000_RX_DESC(*rx_ring, i);
3627 buffer_info = &rx_ring->buffer_info[i];
3393 3628
3394 while(rx_desc->status & E1000_RXD_STAT_DD) { 3629 while (rx_desc->status & E1000_RXD_STAT_DD) {
3395 buffer_info = &rx_ring->buffer_info[i]; 3630 struct sk_buff *skb, *next_skb;
3631 u8 status;
3396#ifdef CONFIG_E1000_NAPI 3632#ifdef CONFIG_E1000_NAPI
3397 if(*work_done >= work_to_do) 3633 if (*work_done >= work_to_do)
3398 break; 3634 break;
3399 (*work_done)++; 3635 (*work_done)++;
3400#endif 3636#endif
3401 cleaned = TRUE; 3637 status = rx_desc->status;
3638 skb = buffer_info->skb;
3639 buffer_info->skb = NULL;
3640
3641 if (++i == rx_ring->count) i = 0;
3642 next_rxd = E1000_RX_DESC(*rx_ring, i);
3643 next_buffer = &rx_ring->buffer_info[i];
3644 next_skb = next_buffer->skb;
3402 3645
3646 cleaned = TRUE;
3647 cleaned_count++;
3403 pci_unmap_single(pdev, 3648 pci_unmap_single(pdev,
3404 buffer_info->dma, 3649 buffer_info->dma,
3405 buffer_info->length, 3650 buffer_info->length,
3406 PCI_DMA_FROMDEVICE); 3651 PCI_DMA_FROMDEVICE);
3407 3652
3408 skb = buffer_info->skb;
3409 length = le16_to_cpu(rx_desc->length); 3653 length = le16_to_cpu(rx_desc->length);
3410 3654
3411 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 3655 skb_put(skb, length);
3412 /* All receives must fit into a single buffer */ 3656
3413 E1000_DBG("%s: Receive packet consumed multiple" 3657 if (!(status & E1000_RXD_STAT_EOP)) {
3414 " buffers\n", netdev->name); 3658 if (!rx_ring->rx_skb_top) {
3415 dev_kfree_skb_irq(skb); 3659 rx_ring->rx_skb_top = skb;
3660 rx_ring->rx_skb_top->len = length;
3661 rx_ring->rx_skb_prev = skb;
3662 } else {
3663 if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664 rx_ring->rx_skb_prev->next = skb;
3665 skb->prev = rx_ring->rx_skb_prev;
3666 } else {
3667 skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668 }
3669 rx_ring->rx_skb_prev = skb;
3670 rx_ring->rx_skb_top->data_len += length;
3671 }
3416 goto next_desc; 3672 goto next_desc;
3673 } else {
3674 if (rx_ring->rx_skb_top) {
3675 if (skb_shinfo(rx_ring->rx_skb_top)
3676 ->frag_list) {
3677 rx_ring->rx_skb_prev->next = skb;
3678 skb->prev = rx_ring->rx_skb_prev;
3679 } else
3680 skb_shinfo(rx_ring->rx_skb_top)
3681 ->frag_list = skb;
3682
3683 rx_ring->rx_skb_top->data_len += length;
3684 rx_ring->rx_skb_top->len +=
3685 rx_ring->rx_skb_top->data_len;
3686
3687 skb = rx_ring->rx_skb_top;
3688 multi_descriptor = TRUE;
3689 rx_ring->rx_skb_top = NULL;
3690 rx_ring->rx_skb_prev = NULL;
3691 }
3417 } 3692 }
3418 3693
3419 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3694 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3420 last_byte = *(skb->data + length - 1); 3695 last_byte = *(skb->data + length - 1);
3421 if(TBI_ACCEPT(&adapter->hw, rx_desc->status, 3696 if (TBI_ACCEPT(&adapter->hw, status,
3422 rx_desc->errors, length, last_byte)) { 3697 rx_desc->errors, length, last_byte)) {
3423 spin_lock_irqsave(&adapter->stats_lock, flags); 3698 spin_lock_irqsave(&adapter->stats_lock, flags);
3424 e1000_tbi_adjust_stats(&adapter->hw, 3699 e1000_tbi_adjust_stats(&adapter->hw,
@@ -3433,18 +3708,41 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3433 } 3708 }
3434 } 3709 }
3435 3710
3436 /* Good Receive */ 3711 /* code added for copybreak, this should improve
3437 skb_put(skb, length - ETHERNET_FCS_SIZE); 3712 * performance for small packets with large amounts
3713 * of reassembly being done in the stack */
3714#define E1000_CB_LENGTH 256
3715 if ((length < E1000_CB_LENGTH) &&
3716 !rx_ring->rx_skb_top &&
3717 /* or maybe (status & E1000_RXD_STAT_EOP) && */
3718 !multi_descriptor) {
3719 struct sk_buff *new_skb =
3720 dev_alloc_skb(length + NET_IP_ALIGN);
3721 if (new_skb) {
3722 skb_reserve(new_skb, NET_IP_ALIGN);
3723 new_skb->dev = netdev;
3724 memcpy(new_skb->data - NET_IP_ALIGN,
3725 skb->data - NET_IP_ALIGN,
3726 length + NET_IP_ALIGN);
3727 /* save the skb in buffer_info as good */
3728 buffer_info->skb = skb;
3729 skb = new_skb;
3730 skb_put(skb, length);
3731 }
3732 }
3733
3734 /* end copybreak code */
3438 3735
3439 /* Receive Checksum Offload */ 3736 /* Receive Checksum Offload */
3440 e1000_rx_checksum(adapter, 3737 e1000_rx_checksum(adapter,
3441 (uint32_t)(rx_desc->status) | 3738 (uint32_t)(status) |
3442 ((uint32_t)(rx_desc->errors) << 24), 3739 ((uint32_t)(rx_desc->errors) << 24),
3443 rx_desc->csum, skb); 3740 rx_desc->csum, skb);
3741
3444 skb->protocol = eth_type_trans(skb, netdev); 3742 skb->protocol = eth_type_trans(skb, netdev);
3445#ifdef CONFIG_E1000_NAPI 3743#ifdef CONFIG_E1000_NAPI
3446 if(unlikely(adapter->vlgrp && 3744 if (unlikely(adapter->vlgrp &&
3447 (rx_desc->status & E1000_RXD_STAT_VP))) { 3745 (status & E1000_RXD_STAT_VP))) {
3448 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3746 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3449 le16_to_cpu(rx_desc->special) & 3747 le16_to_cpu(rx_desc->special) &
3450 E1000_RXD_SPC_VLAN_MASK); 3748 E1000_RXD_SPC_VLAN_MASK);
@@ -3452,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3452 netif_receive_skb(skb); 3750 netif_receive_skb(skb);
3453 } 3751 }
3454#else /* CONFIG_E1000_NAPI */ 3752#else /* CONFIG_E1000_NAPI */
3455 if(unlikely(adapter->vlgrp && 3753 if (unlikely(adapter->vlgrp &&
3456 (rx_desc->status & E1000_RXD_STAT_VP))) { 3754 (status & E1000_RXD_STAT_VP))) {
3457 vlan_hwaccel_rx(skb, adapter->vlgrp, 3755 vlan_hwaccel_rx(skb, adapter->vlgrp,
3458 le16_to_cpu(rx_desc->special) & 3756 le16_to_cpu(rx_desc->special) &
3459 E1000_RXD_SPC_VLAN_MASK); 3757 E1000_RXD_SPC_VLAN_MASK);
@@ -3462,17 +3760,28 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3462 } 3760 }
3463#endif /* CONFIG_E1000_NAPI */ 3761#endif /* CONFIG_E1000_NAPI */
3464 netdev->last_rx = jiffies; 3762 netdev->last_rx = jiffies;
3465 rx_ring->pkt++; 3763#ifdef CONFIG_E1000_MQ
3764 rx_ring->rx_stats.packets++;
3765 rx_ring->rx_stats.bytes += length;
3766#endif
3466 3767
3467next_desc: 3768next_desc:
3468 rx_desc->status = 0; 3769 rx_desc->status = 0;
3469 buffer_info->skb = NULL;
3470 if(unlikely(++i == rx_ring->count)) i = 0;
3471 3770
3472 rx_desc = E1000_RX_DESC(*rx_ring, i); 3771 /* return some buffers to hardware, one at a time is too slow */
3772 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3773 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3774 cleaned_count = 0;
3775 }
3776
3777 rx_desc = next_rxd;
3778 buffer_info = next_buffer;
3473 } 3779 }
3474 rx_ring->next_to_clean = i; 3780 rx_ring->next_to_clean = i;
3475 adapter->alloc_rx_buf(adapter, rx_ring); 3781
3782 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3783 if (cleaned_count)
3784 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3476 3785
3477 return cleaned; 3786 return cleaned;
3478} 3787}
@@ -3492,52 +3801,59 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3492 struct e1000_rx_ring *rx_ring) 3801 struct e1000_rx_ring *rx_ring)
3493#endif 3802#endif
3494{ 3803{
3495 union e1000_rx_desc_packet_split *rx_desc; 3804 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
3496 struct net_device *netdev = adapter->netdev; 3805 struct net_device *netdev = adapter->netdev;
3497 struct pci_dev *pdev = adapter->pdev; 3806 struct pci_dev *pdev = adapter->pdev;
3498 struct e1000_buffer *buffer_info; 3807 struct e1000_buffer *buffer_info, *next_buffer;
3499 struct e1000_ps_page *ps_page; 3808 struct e1000_ps_page *ps_page;
3500 struct e1000_ps_page_dma *ps_page_dma; 3809 struct e1000_ps_page_dma *ps_page_dma;
3501 struct sk_buff *skb; 3810 struct sk_buff *skb, *next_skb;
3502 unsigned int i, j; 3811 unsigned int i, j;
3503 uint32_t length, staterr; 3812 uint32_t length, staterr;
3813 int cleaned_count = 0;
3504 boolean_t cleaned = FALSE; 3814 boolean_t cleaned = FALSE;
3505 3815
3506 i = rx_ring->next_to_clean; 3816 i = rx_ring->next_to_clean;
3507 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3817 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3508 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3818 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3819 buffer_info = &rx_ring->buffer_info[i];
3509 3820
3510 while(staterr & E1000_RXD_STAT_DD) { 3821 while (staterr & E1000_RXD_STAT_DD) {
3511 buffer_info = &rx_ring->buffer_info[i];
3512 ps_page = &rx_ring->ps_page[i]; 3822 ps_page = &rx_ring->ps_page[i];
3513 ps_page_dma = &rx_ring->ps_page_dma[i]; 3823 ps_page_dma = &rx_ring->ps_page_dma[i];
3514#ifdef CONFIG_E1000_NAPI 3824#ifdef CONFIG_E1000_NAPI
3515 if(unlikely(*work_done >= work_to_do)) 3825 if (unlikely(*work_done >= work_to_do))
3516 break; 3826 break;
3517 (*work_done)++; 3827 (*work_done)++;
3518#endif 3828#endif
3829 skb = buffer_info->skb;
3830
3831 if (++i == rx_ring->count) i = 0;
3832 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3833 next_buffer = &rx_ring->buffer_info[i];
3834 next_skb = next_buffer->skb;
3835
3519 cleaned = TRUE; 3836 cleaned = TRUE;
3837 cleaned_count++;
3520 pci_unmap_single(pdev, buffer_info->dma, 3838 pci_unmap_single(pdev, buffer_info->dma,
3521 buffer_info->length, 3839 buffer_info->length,
3522 PCI_DMA_FROMDEVICE); 3840 PCI_DMA_FROMDEVICE);
3523 3841
3524 skb = buffer_info->skb; 3842 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3525
3526 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3527 E1000_DBG("%s: Packet Split buffers didn't pick up" 3843 E1000_DBG("%s: Packet Split buffers didn't pick up"
3528 " the full packet\n", netdev->name); 3844 " the full packet\n", netdev->name);
3529 dev_kfree_skb_irq(skb); 3845 dev_kfree_skb_irq(skb);
3530 goto next_desc; 3846 goto next_desc;
3531 } 3847 }
3532 3848
3533 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3849 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3534 dev_kfree_skb_irq(skb); 3850 dev_kfree_skb_irq(skb);
3535 goto next_desc; 3851 goto next_desc;
3536 } 3852 }
3537 3853
3538 length = le16_to_cpu(rx_desc->wb.middle.length0); 3854 length = le16_to_cpu(rx_desc->wb.middle.length0);
3539 3855
3540 if(unlikely(!length)) { 3856 if (unlikely(!length)) {
3541 E1000_DBG("%s: Last part of the packet spanning" 3857 E1000_DBG("%s: Last part of the packet spanning"
3542 " multiple descriptors\n", netdev->name); 3858 " multiple descriptors\n", netdev->name);
3543 dev_kfree_skb_irq(skb); 3859 dev_kfree_skb_irq(skb);
@@ -3547,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3547 /* Good Receive */ 3863 /* Good Receive */
3548 skb_put(skb, length); 3864 skb_put(skb, length);
3549 3865
3550 for(j = 0; j < adapter->rx_ps_pages; j++) { 3866 for (j = 0; j < adapter->rx_ps_pages; j++) {
3551 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3867 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3552 break; 3868 break;
3553 3869
3554 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3870 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3568,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3568 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3884 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3569 skb->protocol = eth_type_trans(skb, netdev); 3885 skb->protocol = eth_type_trans(skb, netdev);
3570 3886
3571 if(likely(rx_desc->wb.upper.header_status & 3887 if (likely(rx_desc->wb.upper.header_status &
3572 E1000_RXDPS_HDRSTAT_HDRSP)) { 3888 E1000_RXDPS_HDRSTAT_HDRSP))
3573 adapter->rx_hdr_split++; 3889 adapter->rx_hdr_split++;
3574#ifdef HAVE_RX_ZERO_COPY
3575 skb_shinfo(skb)->zero_copy = TRUE;
3576#endif
3577 }
3578#ifdef CONFIG_E1000_NAPI 3890#ifdef CONFIG_E1000_NAPI
3579 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3891 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3580 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3892 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3581 le16_to_cpu(rx_desc->wb.middle.vlan) & 3893 le16_to_cpu(rx_desc->wb.middle.vlan) &
3582 E1000_RXD_SPC_VLAN_MASK); 3894 E1000_RXD_SPC_VLAN_MASK);
@@ -3584,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3584 netif_receive_skb(skb); 3896 netif_receive_skb(skb);
3585 } 3897 }
3586#else /* CONFIG_E1000_NAPI */ 3898#else /* CONFIG_E1000_NAPI */
3587 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3899 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3588 vlan_hwaccel_rx(skb, adapter->vlgrp, 3900 vlan_hwaccel_rx(skb, adapter->vlgrp,
3589 le16_to_cpu(rx_desc->wb.middle.vlan) & 3901 le16_to_cpu(rx_desc->wb.middle.vlan) &
3590 E1000_RXD_SPC_VLAN_MASK); 3902 E1000_RXD_SPC_VLAN_MASK);
@@ -3593,18 +3905,31 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3593 } 3905 }
3594#endif /* CONFIG_E1000_NAPI */ 3906#endif /* CONFIG_E1000_NAPI */
3595 netdev->last_rx = jiffies; 3907 netdev->last_rx = jiffies;
3596 rx_ring->pkt++; 3908#ifdef CONFIG_E1000_MQ
3909 rx_ring->rx_stats.packets++;
3910 rx_ring->rx_stats.bytes += length;
3911#endif
3597 3912
3598next_desc: 3913next_desc:
3599 rx_desc->wb.middle.status_error &= ~0xFF; 3914 rx_desc->wb.middle.status_error &= ~0xFF;
3600 buffer_info->skb = NULL; 3915 buffer_info->skb = NULL;
3601 if(unlikely(++i == rx_ring->count)) i = 0;
3602 3916
3603 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3917 /* return some buffers to hardware, one at a time is too slow */
3918 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3919 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3920 cleaned_count = 0;
3921 }
3922
3923 rx_desc = next_rxd;
3924 buffer_info = next_buffer;
3925
3604 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3926 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3605 } 3927 }
3606 rx_ring->next_to_clean = i; 3928 rx_ring->next_to_clean = i;
3607 adapter->alloc_rx_buf(adapter, rx_ring); 3929
3930 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3931 if (cleaned_count)
3932 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3608 3933
3609 return cleaned; 3934 return cleaned;
3610} 3935}
@@ -3616,7 +3941,8 @@ next_desc:
3616 3941
3617static void 3942static void
3618e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 3943e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3619 struct e1000_rx_ring *rx_ring) 3944 struct e1000_rx_ring *rx_ring,
3945 int cleaned_count)
3620{ 3946{
3621 struct net_device *netdev = adapter->netdev; 3947 struct net_device *netdev = adapter->netdev;
3622 struct pci_dev *pdev = adapter->pdev; 3948 struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3955,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3629 i = rx_ring->next_to_use; 3955 i = rx_ring->next_to_use;
3630 buffer_info = &rx_ring->buffer_info[i]; 3956 buffer_info = &rx_ring->buffer_info[i];
3631 3957
3632 while(!buffer_info->skb) { 3958 while (cleaned_count--) {
3633 skb = dev_alloc_skb(bufsz); 3959 if (!(skb = buffer_info->skb))
3960 skb = dev_alloc_skb(bufsz);
3961 else {
3962 skb_trim(skb, 0);
3963 goto map_skb;
3964 }
3965
3634 3966
3635 if(unlikely(!skb)) { 3967 if (unlikely(!skb)) {
3636 /* Better luck next round */ 3968 /* Better luck next round */
3969 adapter->alloc_rx_buff_failed++;
3637 break; 3970 break;
3638 } 3971 }
3639 3972
@@ -3670,6 +4003,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3670 4003
3671 buffer_info->skb = skb; 4004 buffer_info->skb = skb;
3672 buffer_info->length = adapter->rx_buffer_len; 4005 buffer_info->length = adapter->rx_buffer_len;
4006map_skb:
3673 buffer_info->dma = pci_map_single(pdev, 4007 buffer_info->dma = pci_map_single(pdev,
3674 skb->data, 4008 skb->data,
3675 adapter->rx_buffer_len, 4009 adapter->rx_buffer_len,
@@ -3695,20 +4029,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3695 rx_desc = E1000_RX_DESC(*rx_ring, i); 4029 rx_desc = E1000_RX_DESC(*rx_ring, i);
3696 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4030 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3697 4031
3698 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4032 if (unlikely(++i == rx_ring->count))
3699 /* Force memory writes to complete before letting h/w 4033 i = 0;
3700 * know there are new descriptors to fetch. (Only
3701 * applicable for weak-ordered memory model archs,
3702 * such as IA-64). */
3703 wmb();
3704 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3705 }
3706
3707 if(unlikely(++i == rx_ring->count)) i = 0;
3708 buffer_info = &rx_ring->buffer_info[i]; 4034 buffer_info = &rx_ring->buffer_info[i];
3709 } 4035 }
3710 4036
3711 rx_ring->next_to_use = i; 4037 if (likely(rx_ring->next_to_use != i)) {
4038 rx_ring->next_to_use = i;
4039 if (unlikely(i-- == 0))
4040 i = (rx_ring->count - 1);
4041
4042 /* Force memory writes to complete before letting h/w
4043 * know there are new descriptors to fetch. (Only
4044 * applicable for weak-ordered memory model archs,
4045 * such as IA-64). */
4046 wmb();
4047 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4048 }
3712} 4049}
3713 4050
3714/** 4051/**
@@ -3718,7 +4055,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3718 4055
3719static void 4056static void
3720e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 4057e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3721 struct e1000_rx_ring *rx_ring) 4058 struct e1000_rx_ring *rx_ring,
4059 int cleaned_count)
3722{ 4060{
3723 struct net_device *netdev = adapter->netdev; 4061 struct net_device *netdev = adapter->netdev;
3724 struct pci_dev *pdev = adapter->pdev; 4062 struct pci_dev *pdev = adapter->pdev;
@@ -3734,16 +4072,18 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3734 ps_page = &rx_ring->ps_page[i]; 4072 ps_page = &rx_ring->ps_page[i];
3735 ps_page_dma = &rx_ring->ps_page_dma[i]; 4073 ps_page_dma = &rx_ring->ps_page_dma[i];
3736 4074
3737 while(!buffer_info->skb) { 4075 while (cleaned_count--) {
3738 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 4076 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3739 4077
3740 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 4078 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
3741 if (j < adapter->rx_ps_pages) { 4079 if (j < adapter->rx_ps_pages) {
3742 if (likely(!ps_page->ps_page[j])) { 4080 if (likely(!ps_page->ps_page[j])) {
3743 ps_page->ps_page[j] = 4081 ps_page->ps_page[j] =
3744 alloc_page(GFP_ATOMIC); 4082 alloc_page(GFP_ATOMIC);
3745 if (unlikely(!ps_page->ps_page[j])) 4083 if (unlikely(!ps_page->ps_page[j])) {
4084 adapter->alloc_rx_buff_failed++;
3746 goto no_buffers; 4085 goto no_buffers;
4086 }
3747 ps_page_dma->ps_page_dma[j] = 4087 ps_page_dma->ps_page_dma[j] =
3748 pci_map_page(pdev, 4088 pci_map_page(pdev,
3749 ps_page->ps_page[j], 4089 ps_page->ps_page[j],
@@ -3751,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3751 PCI_DMA_FROMDEVICE); 4091 PCI_DMA_FROMDEVICE);
3752 } 4092 }
3753 /* Refresh the desc even if buffer_addrs didn't 4093 /* Refresh the desc even if buffer_addrs didn't
3754 * change because each write-back erases 4094 * change because each write-back erases
3755 * this info. 4095 * this info.
3756 */ 4096 */
3757 rx_desc->read.buffer_addr[j+1] = 4097 rx_desc->read.buffer_addr[j+1] =
@@ -3762,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3762 4102
3763 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4103 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
3764 4104
3765 if(unlikely(!skb)) 4105 if (unlikely(!skb)) {
4106 adapter->alloc_rx_buff_failed++;
3766 break; 4107 break;
4108 }
3767 4109
3768 /* Make buffer alignment 2 beyond a 16 byte boundary 4110 /* Make buffer alignment 2 beyond a 16 byte boundary
3769 * this will result in a 16 byte aligned IP header after 4111 * this will result in a 16 byte aligned IP header after
@@ -3781,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3781 4123
3782 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4124 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
3783 4125
3784 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4126 if (unlikely(++i == rx_ring->count)) i = 0;
3785 /* Force memory writes to complete before letting h/w
3786 * know there are new descriptors to fetch. (Only
3787 * applicable for weak-ordered memory model archs,
3788 * such as IA-64). */
3789 wmb();
3790 /* Hardware increments by 16 bytes, but packet split
3791 * descriptors are 32 bytes...so we increment tail
3792 * twice as much.
3793 */
3794 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
3795 }
3796
3797 if(unlikely(++i == rx_ring->count)) i = 0;
3798 buffer_info = &rx_ring->buffer_info[i]; 4127 buffer_info = &rx_ring->buffer_info[i];
3799 ps_page = &rx_ring->ps_page[i]; 4128 ps_page = &rx_ring->ps_page[i];
3800 ps_page_dma = &rx_ring->ps_page_dma[i]; 4129 ps_page_dma = &rx_ring->ps_page_dma[i];
3801 } 4130 }
3802 4131
3803no_buffers: 4132no_buffers:
3804 rx_ring->next_to_use = i; 4133 if (likely(rx_ring->next_to_use != i)) {
4134 rx_ring->next_to_use = i;
4135 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4136
4137 /* Force memory writes to complete before letting h/w
4138 * know there are new descriptors to fetch. (Only
4139 * applicable for weak-ordered memory model archs,
4140 * such as IA-64). */
4141 wmb();
4142 /* Hardware increments by 16 bytes, but packet split
4143 * descriptors are 32 bytes...so we increment tail
4144 * twice as much.
4145 */
4146 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4147 }
3805} 4148}
3806 4149
3807/** 4150/**
@@ -3815,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
3815 uint16_t phy_status; 4158 uint16_t phy_status;
3816 uint16_t phy_ctrl; 4159 uint16_t phy_ctrl;
3817 4160
3818 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4161 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
3819 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4162 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
3820 return; 4163 return;
3821 4164
3822 if(adapter->smartspeed == 0) { 4165 if (adapter->smartspeed == 0) {
3823 /* If Master/Slave config fault is asserted twice, 4166 /* If Master/Slave config fault is asserted twice,
3824 * we assume back-to-back */ 4167 * we assume back-to-back */
3825 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4168 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3826 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4169 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3827 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4170 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3828 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4171 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3829 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4172 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3830 if(phy_ctrl & CR_1000T_MS_ENABLE) { 4173 if (phy_ctrl & CR_1000T_MS_ENABLE) {
3831 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4174 phy_ctrl &= ~CR_1000T_MS_ENABLE;
3832 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 4175 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
3833 phy_ctrl); 4176 phy_ctrl);
3834 adapter->smartspeed++; 4177 adapter->smartspeed++;
3835 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4178 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
3836 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, 4179 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
3837 &phy_ctrl)) { 4180 &phy_ctrl)) {
3838 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4181 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -3842,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
3842 } 4185 }
3843 } 4186 }
3844 return; 4187 return;
3845 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4188 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
3846 /* If still no link, perhaps using 2/3 pair cable */ 4189 /* If still no link, perhaps using 2/3 pair cable */
3847 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4190 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3848 phy_ctrl |= CR_1000T_MS_ENABLE; 4191 phy_ctrl |= CR_1000T_MS_ENABLE;
3849 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); 4192 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
3850 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4193 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
3851 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { 4194 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
3852 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4195 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3853 MII_CR_RESTART_AUTO_NEG); 4196 MII_CR_RESTART_AUTO_NEG);
@@ -3855,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
3855 } 4198 }
3856 } 4199 }
3857 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4200 /* Restart process after E1000_SMARTSPEED_MAX iterations */
3858 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4201 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
3859 adapter->smartspeed = 0; 4202 adapter->smartspeed = 0;
3860} 4203}
3861 4204
@@ -3896,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3896 uint16_t spddplx; 4239 uint16_t spddplx;
3897 unsigned long flags; 4240 unsigned long flags;
3898 4241
3899 if(adapter->hw.media_type != e1000_media_type_copper) 4242 if (adapter->hw.media_type != e1000_media_type_copper)
3900 return -EOPNOTSUPP; 4243 return -EOPNOTSUPP;
3901 4244
3902 switch (cmd) { 4245 switch (cmd) {
@@ -3904,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3904 data->phy_id = adapter->hw.phy_addr; 4247 data->phy_id = adapter->hw.phy_addr;
3905 break; 4248 break;
3906 case SIOCGMIIREG: 4249 case SIOCGMIIREG:
3907 if(!capable(CAP_NET_ADMIN)) 4250 if (!capable(CAP_NET_ADMIN))
3908 return -EPERM; 4251 return -EPERM;
3909 spin_lock_irqsave(&adapter->stats_lock, flags); 4252 spin_lock_irqsave(&adapter->stats_lock, flags);
3910 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 4253 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
3911 &data->val_out)) { 4254 &data->val_out)) {
3912 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4255 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3913 return -EIO; 4256 return -EIO;
@@ -3915,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3915 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4258 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3916 break; 4259 break;
3917 case SIOCSMIIREG: 4260 case SIOCSMIIREG:
3918 if(!capable(CAP_NET_ADMIN)) 4261 if (!capable(CAP_NET_ADMIN))
3919 return -EPERM; 4262 return -EPERM;
3920 if(data->reg_num & ~(0x1F)) 4263 if (data->reg_num & ~(0x1F))
3921 return -EFAULT; 4264 return -EFAULT;
3922 mii_reg = data->val_in; 4265 mii_reg = data->val_in;
3923 spin_lock_irqsave(&adapter->stats_lock, flags); 4266 spin_lock_irqsave(&adapter->stats_lock, flags);
3924 if(e1000_write_phy_reg(&adapter->hw, data->reg_num, 4267 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
3925 mii_reg)) { 4268 mii_reg)) {
3926 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4269 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3927 return -EIO; 4270 return -EIO;
3928 } 4271 }
3929 if(adapter->hw.phy_type == e1000_phy_m88) { 4272 if (adapter->hw.phy_type == e1000_phy_m88) {
3930 switch (data->reg_num) { 4273 switch (data->reg_num) {
3931 case PHY_CTRL: 4274 case PHY_CTRL:
3932 if(mii_reg & MII_CR_POWER_DOWN) 4275 if (mii_reg & MII_CR_POWER_DOWN)
3933 break; 4276 break;
3934 if(mii_reg & MII_CR_AUTO_NEG_EN) { 4277 if (mii_reg & MII_CR_AUTO_NEG_EN) {
3935 adapter->hw.autoneg = 1; 4278 adapter->hw.autoneg = 1;
3936 adapter->hw.autoneg_advertised = 0x2F; 4279 adapter->hw.autoneg_advertised = 0x2F;
3937 } else { 4280 } else {
@@ -3946,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3946 HALF_DUPLEX; 4289 HALF_DUPLEX;
3947 retval = e1000_set_spd_dplx(adapter, 4290 retval = e1000_set_spd_dplx(adapter,
3948 spddplx); 4291 spddplx);
3949 if(retval) { 4292 if (retval) {
3950 spin_unlock_irqrestore( 4293 spin_unlock_irqrestore(
3951 &adapter->stats_lock, 4294 &adapter->stats_lock,
3952 flags); 4295 flags);
3953 return retval; 4296 return retval;
3954 } 4297 }
3955 } 4298 }
3956 if(netif_running(adapter->netdev)) { 4299 if (netif_running(adapter->netdev)) {
3957 e1000_down(adapter); 4300 e1000_down(adapter);
3958 e1000_up(adapter); 4301 e1000_up(adapter);
3959 } else 4302 } else
@@ -3961,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3961 break; 4304 break;
3962 case M88E1000_PHY_SPEC_CTRL: 4305 case M88E1000_PHY_SPEC_CTRL:
3963 case M88E1000_EXT_PHY_SPEC_CTRL: 4306 case M88E1000_EXT_PHY_SPEC_CTRL:
3964 if(e1000_phy_reset(&adapter->hw)) { 4307 if (e1000_phy_reset(&adapter->hw)) {
3965 spin_unlock_irqrestore( 4308 spin_unlock_irqrestore(
3966 &adapter->stats_lock, flags); 4309 &adapter->stats_lock, flags);
3967 return -EIO; 4310 return -EIO;
@@ -3971,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3971 } else { 4314 } else {
3972 switch (data->reg_num) { 4315 switch (data->reg_num) {
3973 case PHY_CTRL: 4316 case PHY_CTRL:
3974 if(mii_reg & MII_CR_POWER_DOWN) 4317 if (mii_reg & MII_CR_POWER_DOWN)
3975 break; 4318 break;
3976 if(netif_running(adapter->netdev)) { 4319 if (netif_running(adapter->netdev)) {
3977 e1000_down(adapter); 4320 e1000_down(adapter);
3978 e1000_up(adapter); 4321 e1000_up(adapter);
3979 } else 4322 } else
@@ -3995,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
3995 struct e1000_adapter *adapter = hw->back; 4338 struct e1000_adapter *adapter = hw->back;
3996 int ret_val = pci_set_mwi(adapter->pdev); 4339 int ret_val = pci_set_mwi(adapter->pdev);
3997 4340
3998 if(ret_val) 4341 if (ret_val)
3999 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4342 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4000} 4343}
4001 4344
@@ -4044,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4044 e1000_irq_disable(adapter); 4387 e1000_irq_disable(adapter);
4045 adapter->vlgrp = grp; 4388 adapter->vlgrp = grp;
4046 4389
4047 if(grp) { 4390 if (grp) {
4048 /* enable VLAN tag insert/strip */ 4391 /* enable VLAN tag insert/strip */
4049 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4392 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4050 ctrl |= E1000_CTRL_VME; 4393 ctrl |= E1000_CTRL_VME;
@@ -4066,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4066 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4409 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4067 rctl &= ~E1000_RCTL_VFE; 4410 rctl &= ~E1000_RCTL_VFE;
4068 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4411 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4069 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 4412 if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
4070 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4413 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4071 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4414 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4072 } 4415 }
@@ -4080,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4080{ 4423{
4081 struct e1000_adapter *adapter = netdev_priv(netdev); 4424 struct e1000_adapter *adapter = netdev_priv(netdev);
4082 uint32_t vfta, index; 4425 uint32_t vfta, index;
4083 if((adapter->hw.mng_cookie.status & 4426
4084 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4427 if ((adapter->hw.mng_cookie.status &
4085 (vid == adapter->mng_vlan_id)) 4428 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4429 (vid == adapter->mng_vlan_id))
4086 return; 4430 return;
4087 /* add VID to filter table */ 4431 /* add VID to filter table */
4088 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
@@ -4099,15 +4443,19 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4099 4443
4100 e1000_irq_disable(adapter); 4444 e1000_irq_disable(adapter);
4101 4445
4102 if(adapter->vlgrp) 4446 if (adapter->vlgrp)
4103 adapter->vlgrp->vlan_devices[vid] = NULL; 4447 adapter->vlgrp->vlan_devices[vid] = NULL;
4104 4448
4105 e1000_irq_enable(adapter); 4449 e1000_irq_enable(adapter);
4106 4450
4107 if((adapter->hw.mng_cookie.status & 4451 if ((adapter->hw.mng_cookie.status &
4108 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4109 (vid == adapter->mng_vlan_id)) 4453 (vid == adapter->mng_vlan_id)) {
4454 /* release control to f/w */
4455 e1000_release_hw_control(adapter);
4110 return; 4456 return;
4457 }
4458
4111 /* remove VID from filter table */ 4459 /* remove VID from filter table */
4112 index = (vid >> 5) & 0x7F; 4460 index = (vid >> 5) & 0x7F;
4113 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); 4461 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4120,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
4120{ 4468{
4121 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 4469 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4122 4470
4123 if(adapter->vlgrp) { 4471 if (adapter->vlgrp) {
4124 uint16_t vid; 4472 uint16_t vid;
4125 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4473 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4126 if(!adapter->vlgrp->vlan_devices[vid]) 4474 if (!adapter->vlgrp->vlan_devices[vid])
4127 continue; 4475 continue;
4128 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4476 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4129 } 4477 }
@@ -4136,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4136 adapter->hw.autoneg = 0; 4484 adapter->hw.autoneg = 0;
4137 4485
4138 /* Fiber NICs only allow 1000 gbps Full duplex */ 4486 /* Fiber NICs only allow 1000 gbps Full duplex */
4139 if((adapter->hw.media_type == e1000_media_type_fiber) && 4487 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4140 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4488 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4141 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4489 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4142 return -EINVAL; 4490 return -EINVAL;
4143 } 4491 }
4144 4492
4145 switch(spddplx) { 4493 switch (spddplx) {
4146 case SPEED_10 + DUPLEX_HALF: 4494 case SPEED_10 + DUPLEX_HALF:
4147 adapter->hw.forced_speed_duplex = e1000_10_half; 4495 adapter->hw.forced_speed_duplex = e1000_10_half;
4148 break; 4496 break;
@@ -4168,35 +4516,92 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4168} 4516}
4169 4517
4170#ifdef CONFIG_PM 4518#ifdef CONFIG_PM
4519/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
4520 * space versus the 64 bytes that pci_[save|restore]_state handle
4521 */
4522#define PCIE_CONFIG_SPACE_LEN 256
4523#define PCI_CONFIG_SPACE_LEN 64
4524static int
4525e1000_pci_save_state(struct e1000_adapter *adapter)
4526{
4527 struct pci_dev *dev = adapter->pdev;
4528 int size;
4529 int i;
4530 if (adapter->hw.mac_type >= e1000_82571)
4531 size = PCIE_CONFIG_SPACE_LEN;
4532 else
4533 size = PCI_CONFIG_SPACE_LEN;
4534
4535 WARN_ON(adapter->config_space != NULL);
4536
4537 adapter->config_space = kmalloc(size, GFP_KERNEL);
4538 if (!adapter->config_space) {
4539 DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
4540 return -ENOMEM;
4541 }
4542 for (i = 0; i < (size / 4); i++)
4543 pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
4544 return 0;
4545}
4546
4547static void
4548e1000_pci_restore_state(struct e1000_adapter *adapter)
4549{
4550 struct pci_dev *dev = adapter->pdev;
4551 int size;
4552 int i;
4553 if (adapter->config_space == NULL)
4554 return;
4555 if (adapter->hw.mac_type >= e1000_82571)
4556 size = PCIE_CONFIG_SPACE_LEN;
4557 else
4558 size = PCI_CONFIG_SPACE_LEN;
4559 for (i = 0; i < (size / 4); i++)
4560 pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
4561 kfree(adapter->config_space);
4562 adapter->config_space = NULL;
4563 return;
4564}
4565#endif /* CONFIG_PM */
4566
4171static int 4567static int
4172e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4568e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4173{ 4569{
4174 struct net_device *netdev = pci_get_drvdata(pdev); 4570 struct net_device *netdev = pci_get_drvdata(pdev);
4175 struct e1000_adapter *adapter = netdev_priv(netdev); 4571 struct e1000_adapter *adapter = netdev_priv(netdev);
4176 uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; 4572 uint32_t ctrl, ctrl_ext, rctl, manc, status;
4177 uint32_t wufc = adapter->wol; 4573 uint32_t wufc = adapter->wol;
4574 int retval = 0;
4178 4575
4179 netif_device_detach(netdev); 4576 netif_device_detach(netdev);
4180 4577
4181 if(netif_running(netdev)) 4578 if (netif_running(netdev))
4182 e1000_down(adapter); 4579 e1000_down(adapter);
4183 4580
4581#ifdef CONFIG_PM
4582 /* implement our own version of pci_save_state(pdev) because pci
4583 * express adapters have larger 256 byte config spaces */
4584 retval = e1000_pci_save_state(adapter);
4585 if (retval)
4586 return retval;
4587#endif
4588
4184 status = E1000_READ_REG(&adapter->hw, STATUS); 4589 status = E1000_READ_REG(&adapter->hw, STATUS);
4185 if(status & E1000_STATUS_LU) 4590 if (status & E1000_STATUS_LU)
4186 wufc &= ~E1000_WUFC_LNKC; 4591 wufc &= ~E1000_WUFC_LNKC;
4187 4592
4188 if(wufc) { 4593 if (wufc) {
4189 e1000_setup_rctl(adapter); 4594 e1000_setup_rctl(adapter);
4190 e1000_set_multi(netdev); 4595 e1000_set_multi(netdev);
4191 4596
4192 /* turn on all-multi mode if wake on multicast is enabled */ 4597 /* turn on all-multi mode if wake on multicast is enabled */
4193 if(adapter->wol & E1000_WUFC_MC) { 4598 if (adapter->wol & E1000_WUFC_MC) {
4194 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4599 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4195 rctl |= E1000_RCTL_MPE; 4600 rctl |= E1000_RCTL_MPE;
4196 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4601 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4197 } 4602 }
4198 4603
4199 if(adapter->hw.mac_type >= e1000_82540) { 4604 if (adapter->hw.mac_type >= e1000_82540) {
4200 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4605 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4201 /* advertise wake from D3Cold */ 4606 /* advertise wake from D3Cold */
4202 #define E1000_CTRL_ADVD3WUC 0x00100000 4607 #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4207,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4207 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4612 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4208 } 4613 }
4209 4614
4210 if(adapter->hw.media_type == e1000_media_type_fiber || 4615 if (adapter->hw.media_type == e1000_media_type_fiber ||
4211 adapter->hw.media_type == e1000_media_type_internal_serdes) { 4616 adapter->hw.media_type == e1000_media_type_internal_serdes) {
4212 /* keep the laser running in D3 */ 4617 /* keep the laser running in D3 */
4213 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4618 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4220,96 +4625,96 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4220 4625
4221 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 4626 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
4222 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 4627 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
4223 pci_enable_wake(pdev, 3, 1); 4628 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4224 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4629 if (retval)
4630 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4631 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4632 if (retval)
4633 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4225 } else { 4634 } else {
4226 E1000_WRITE_REG(&adapter->hw, WUC, 0); 4635 E1000_WRITE_REG(&adapter->hw, WUC, 0);
4227 E1000_WRITE_REG(&adapter->hw, WUFC, 0); 4636 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
4228 pci_enable_wake(pdev, 3, 0); 4637 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4229 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 4638 if (retval)
4639 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4640 retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
4641 if (retval)
4642 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4230 } 4643 }
4231 4644
4232 pci_save_state(pdev); 4645 if (adapter->hw.mac_type >= e1000_82540 &&
4233
4234 if(adapter->hw.mac_type >= e1000_82540 &&
4235 adapter->hw.media_type == e1000_media_type_copper) { 4646 adapter->hw.media_type == e1000_media_type_copper) {
4236 manc = E1000_READ_REG(&adapter->hw, MANC); 4647 manc = E1000_READ_REG(&adapter->hw, MANC);
4237 if(manc & E1000_MANC_SMBUS_EN) { 4648 if (manc & E1000_MANC_SMBUS_EN) {
4238 manc |= E1000_MANC_ARP_EN; 4649 manc |= E1000_MANC_ARP_EN;
4239 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4650 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4240 pci_enable_wake(pdev, 3, 1); 4651 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4241 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4652 if (retval)
4653 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4654 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4655 if (retval)
4656 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4242 } 4657 }
4243 } 4658 }
4244 4659
4245 switch(adapter->hw.mac_type) { 4660 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4246 case e1000_82571: 4661 * would have already happened in close and is redundant. */
4247 case e1000_82572: 4662 e1000_release_hw_control(adapter);
4248 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4249 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4250 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4251 break;
4252 case e1000_82573:
4253 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4254 E1000_WRITE_REG(&adapter->hw, SWSM,
4255 swsm & ~E1000_SWSM_DRV_LOAD);
4256 break;
4257 default:
4258 break;
4259 }
4260 4663
4261 pci_disable_device(pdev); 4664 pci_disable_device(pdev);
4262 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4665
4666 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
4667 if (retval)
4668 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4263 4669
4264 return 0; 4670 return 0;
4265} 4671}
4266 4672
4673#ifdef CONFIG_PM
4267static int 4674static int
4268e1000_resume(struct pci_dev *pdev) 4675e1000_resume(struct pci_dev *pdev)
4269{ 4676{
4270 struct net_device *netdev = pci_get_drvdata(pdev); 4677 struct net_device *netdev = pci_get_drvdata(pdev);
4271 struct e1000_adapter *adapter = netdev_priv(netdev); 4678 struct e1000_adapter *adapter = netdev_priv(netdev);
4272 uint32_t manc, ret_val, swsm; 4679 int retval;
4273 uint32_t ctrl_ext; 4680 uint32_t manc, ret_val;
4274 4681
4275 pci_set_power_state(pdev, PCI_D0); 4682 retval = pci_set_power_state(pdev, PCI_D0);
4276 pci_restore_state(pdev); 4683 if (retval)
4684 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4685 e1000_pci_restore_state(adapter);
4277 ret_val = pci_enable_device(pdev); 4686 ret_val = pci_enable_device(pdev);
4278 pci_set_master(pdev); 4687 pci_set_master(pdev);
4279 4688
4280 pci_enable_wake(pdev, PCI_D3hot, 0); 4689 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4281 pci_enable_wake(pdev, PCI_D3cold, 0); 4690 if (retval)
4691 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4692 retval = pci_enable_wake(pdev, PCI_D3cold, 0);
4693 if (retval)
4694 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4282 4695
4283 e1000_reset(adapter); 4696 e1000_reset(adapter);
4284 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4697 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4285 4698
4286 if(netif_running(netdev)) 4699 if (netif_running(netdev))
4287 e1000_up(adapter); 4700 e1000_up(adapter);
4288 4701
4289 netif_device_attach(netdev); 4702 netif_device_attach(netdev);
4290 4703
4291 if(adapter->hw.mac_type >= e1000_82540 && 4704 if (adapter->hw.mac_type >= e1000_82540 &&
4292 adapter->hw.media_type == e1000_media_type_copper) { 4705 adapter->hw.media_type == e1000_media_type_copper) {
4293 manc = E1000_READ_REG(&adapter->hw, MANC); 4706 manc = E1000_READ_REG(&adapter->hw, MANC);
4294 manc &= ~(E1000_MANC_ARP_EN); 4707 manc &= ~(E1000_MANC_ARP_EN);
4295 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4708 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4296 } 4709 }
4297 4710
4298 switch(adapter->hw.mac_type) { 4711 /* If the controller is 82573 and f/w is AMT, do not set
4299 case e1000_82571: 4712 * DRV_LOAD until the interface is up. For all other cases,
4300 case e1000_82572: 4713 * let the f/w know that the h/w is now under the control
4301 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4714 * of the driver. */
4302 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 4715 if (adapter->hw.mac_type != e1000_82573 ||
4303 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 4716 !e1000_check_mng_mode(&adapter->hw))
4304 break; 4717 e1000_get_hw_control(adapter);
4305 case e1000_82573:
4306 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4307 E1000_WRITE_REG(&adapter->hw, SWSM,
4308 swsm | E1000_SWSM_DRV_LOAD);
4309 break;
4310 default:
4311 break;
4312 }
4313 4718
4314 return 0; 4719 return 0;
4315} 4720}
@@ -4327,6 +4732,9 @@ e1000_netpoll(struct net_device *netdev)
4327 disable_irq(adapter->pdev->irq); 4732 disable_irq(adapter->pdev->irq);
4328 e1000_intr(adapter->pdev->irq, netdev, NULL); 4733 e1000_intr(adapter->pdev->irq, netdev, NULL);
4329 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4734 e1000_clean_tx_irq(adapter, adapter->tx_ring);
4735#ifndef CONFIG_E1000_NAPI
4736 adapter->clean_rx(adapter, adapter->rx_ring);
4737#endif
4330 enable_irq(adapter->pdev->irq); 4738 enable_irq(adapter->pdev->irq);
4331} 4739}
4332#endif 4740#endif
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38695d5b4637..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
177 * 177 *
178 * Valid Range: 100-100000 (0=off, 1=dynamic) 178 * Valid Range: 100-100000 (0=off, 1=dynamic)
179 * 179 *
180 * Default Value: 1 180 * Default Value: 8000
181 */ 181 */
182 182
183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,12 +315,12 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
322 } 322 }
323 for (i = 0; i < adapter->num_queues; i++) 323 for (i = 0; i < adapter->num_tx_queues; i++)
324 tx_ring[i].count = tx_ring->count; 324 tx_ring[i].count = tx_ring->count;
325 } 325 }
326 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
@@ -341,12 +341,12 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
348 } 348 }
349 for (i = 0; i < adapter->num_queues; i++) 349 for (i = 0; i < adapter->num_rx_queues; i++)
350 rx_ring[i].count = rx_ring->count; 350 rx_ring[i].count = rx_ring->count;
351 } 351 }
352 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
388 e1000_validate_option(&fc, &opt, adapter); 388 e1000_validate_option(&fc, &opt, adapter);
389 adapter->hw.fc = adapter->hw.original_fc = fc; 389 adapter->hw.fc = adapter->hw.original_fc = fc;
390 } else { 390 } else {
391 adapter->hw.fc = opt.def; 391 adapter->hw.fc = adapter->hw.original_fc = opt.def;
392 } 392 }
393 } 393 }
394 { /* Transmit Interrupt Delay */ 394 { /* Transmit Interrupt Delay */
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -545,7 +545,7 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)
545static void __devinit 545static void __devinit
546e1000_check_copper_options(struct e1000_adapter *adapter) 546e1000_check_copper_options(struct e1000_adapter *adapter)
547{ 547{
548 int speed, dplx; 548 int speed, dplx, an;
549 int bd = adapter->bd_number; 549 int bd = adapter->bd_number;
550 550
551 { /* Speed */ 551 { /* Speed */
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
584 .p = dplx_list }} 584 .p = dplx_list }}
585 }; 585 };
586 586
587 if (e1000_check_phy_reset_block(&adapter->hw)) {
588 DPRINTK(PROBE, INFO,
589 "Link active due to SoL/IDER Session. "
590 "Speed/Duplex/AutoNeg parameter ignored.\n");
591 return;
592 }
587 if (num_Duplex > bd) { 593 if (num_Duplex > bd) {
588 dplx = Duplex[bd]; 594 dplx = Duplex[bd];
589 e1000_validate_option(&dplx, &opt, adapter); 595 e1000_validate_option(&dplx, &opt, adapter);
@@ -592,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
592 } 598 }
593 } 599 }
594 600
595 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
596 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
597 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
598 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -641,15 +647,19 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
641 .p = an_list }} 647 .p = an_list }}
642 }; 648 };
643 649
644 int an = AutoNeg[bd]; 650 if (num_AutoNeg > bd) {
645 e1000_validate_option(&an, &opt, adapter); 651 an = AutoNeg[bd];
652 e1000_validate_option(&an, &opt, adapter);
653 } else {
654 an = opt.def;
655 }
646 adapter->hw.autoneg_advertised = an; 656 adapter->hw.autoneg_advertised = an;
647 } 657 }
648 658
649 switch (speed + dplx) { 659 switch (speed + dplx) {
650 case 0: 660 case 0:
651 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
652 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
653 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
654 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
655 break; 665 break;
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index f5a4dd7d8564..e5c5cd2a2712 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -140,13 +140,6 @@ static int __init do_e2100_probe(struct net_device *dev)
140 return -ENODEV; 140 return -ENODEV;
141} 141}
142 142
143static void cleanup_card(struct net_device *dev)
144{
145 /* NB: e21_close() handles free_irq */
146 iounmap(ei_status.mem);
147 release_region(dev->base_addr, E21_IO_EXTENT);
148}
149
150#ifndef MODULE 143#ifndef MODULE
151struct net_device * __init e2100_probe(int unit) 144struct net_device * __init e2100_probe(int unit)
152{ 145{
@@ -463,6 +456,13 @@ init_module(void)
463 return -ENXIO; 456 return -ENXIO;
464} 457}
465 458
459static void cleanup_card(struct net_device *dev)
460{
461 /* NB: e21_close() handles free_irq */
462 iounmap(ei_status.mem);
463 release_region(dev->base_addr, E21_IO_EXTENT);
464}
465
466void 466void
467cleanup_module(void) 467cleanup_module(void)
468{ 468{
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 50f8e23bb9e5..6b0ab1eac3fb 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -155,13 +155,6 @@ static int __init do_es_probe(struct net_device *dev)
155 return -ENODEV; 155 return -ENODEV;
156} 156}
157 157
158static void cleanup_card(struct net_device *dev)
159{
160 free_irq(dev->irq, dev);
161 release_region(dev->base_addr, ES_IO_EXTENT);
162 iounmap(ei_status.mem);
163}
164
165#ifndef MODULE 158#ifndef MODULE
166struct net_device * __init es_probe(int unit) 159struct net_device * __init es_probe(int unit)
167{ 160{
@@ -456,6 +449,13 @@ init_module(void)
456 return -ENXIO; 449 return -ENXIO;
457} 450}
458 451
452static void cleanup_card(struct net_device *dev)
453{
454 free_irq(dev->irq, dev);
455 release_region(dev->base_addr, ES_IO_EXTENT);
456 iounmap(ei_status.mem);
457}
458
459void 459void
460cleanup_module(void) 460cleanup_module(void)
461{ 461{
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index c39344adecce..3682ec61e8a8 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -101,6 +101,7 @@
101 * 0.46: 20 Oct 2005: Add irq optimization modes. 101 * 0.46: 20 Oct 2005: Add irq optimization modes.
102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. 102 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single 103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
104 * 0.49: 10 Dec 2005: Fix tso for large buffers.
104 * 105 *
105 * Known bugs: 106 * Known bugs:
106 * We suspect that on some hardware no TX done interrupts are generated. 107 * We suspect that on some hardware no TX done interrupts are generated.
@@ -112,7 +113,7 @@
112 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 113 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
113 * superfluous timer interrupts from the nic. 114 * superfluous timer interrupts from the nic.
114 */ 115 */
115#define FORCEDETH_VERSION "0.48" 116#define FORCEDETH_VERSION "0.49"
116#define DRV_NAME "forcedeth" 117#define DRV_NAME "forcedeth"
117 118
118#include <linux/module.h> 119#include <linux/module.h>
@@ -349,6 +350,8 @@ typedef union _ring_type {
349#define NV_TX2_VALID (1<<31) 350#define NV_TX2_VALID (1<<31)
350#define NV_TX2_TSO (1<<28) 351#define NV_TX2_TSO (1<<28)
351#define NV_TX2_TSO_SHIFT 14 352#define NV_TX2_TSO_SHIFT 14
353#define NV_TX2_TSO_MAX_SHIFT 14
354#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
352#define NV_TX2_CHECKSUM_L3 (1<<27) 355#define NV_TX2_CHECKSUM_L3 (1<<27)
353#define NV_TX2_CHECKSUM_L4 (1<<26) 356#define NV_TX2_CHECKSUM_L4 (1<<26)
354 357
@@ -408,15 +411,15 @@ typedef union _ring_type {
408#define NV_WATCHDOG_TIMEO (5*HZ) 411#define NV_WATCHDOG_TIMEO (5*HZ)
409 412
410#define RX_RING 128 413#define RX_RING 128
411#define TX_RING 64 414#define TX_RING 256
412/* 415/*
413 * If your nic mysteriously hangs then try to reduce the limits 416 * If your nic mysteriously hangs then try to reduce the limits
414 * to 1/0: It might be required to set NV_TX_LASTPACKET in the 417 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
415 * last valid ring entry. But this would be impossible to 418 * last valid ring entry. But this would be impossible to
416 * implement - probably a disassembly error. 419 * implement - probably a disassembly error.
417 */ 420 */
418#define TX_LIMIT_STOP 63 421#define TX_LIMIT_STOP 255
419#define TX_LIMIT_START 62 422#define TX_LIMIT_START 254
420 423
421/* rx/tx mac addr + type + vlan + align + slack*/ 424/* rx/tx mac addr + type + vlan + align + slack*/
422#define NV_RX_HEADERS (64) 425#define NV_RX_HEADERS (64)
@@ -535,6 +538,7 @@ struct fe_priv {
535 unsigned int next_tx, nic_tx; 538 unsigned int next_tx, nic_tx;
536 struct sk_buff *tx_skbuff[TX_RING]; 539 struct sk_buff *tx_skbuff[TX_RING];
537 dma_addr_t tx_dma[TX_RING]; 540 dma_addr_t tx_dma[TX_RING];
541 unsigned int tx_dma_len[TX_RING];
538 u32 tx_flags; 542 u32 tx_flags;
539}; 543};
540 544
@@ -935,6 +939,7 @@ static void nv_init_tx(struct net_device *dev)
935 else 939 else
936 np->tx_ring.ex[i].FlagLen = 0; 940 np->tx_ring.ex[i].FlagLen = 0;
937 np->tx_skbuff[i] = NULL; 941 np->tx_skbuff[i] = NULL;
942 np->tx_dma[i] = 0;
938 } 943 }
939} 944}
940 945
@@ -945,30 +950,27 @@ static int nv_init_ring(struct net_device *dev)
945 return nv_alloc_rx(dev); 950 return nv_alloc_rx(dev);
946} 951}
947 952
948static void nv_release_txskb(struct net_device *dev, unsigned int skbnr) 953static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
949{ 954{
950 struct fe_priv *np = netdev_priv(dev); 955 struct fe_priv *np = netdev_priv(dev);
951 struct sk_buff *skb = np->tx_skbuff[skbnr]; 956
952 unsigned int j, entry, fragments; 957 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
953 958 dev->name, skbnr);
954 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n", 959
955 dev->name, skbnr, np->tx_skbuff[skbnr]); 960 if (np->tx_dma[skbnr]) {
956 961 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
957 entry = skbnr; 962 np->tx_dma_len[skbnr],
958 if ((fragments = skb_shinfo(skb)->nr_frags) != 0) { 963 PCI_DMA_TODEVICE);
959 for (j = fragments; j >= 1; j--) { 964 np->tx_dma[skbnr] = 0;
960 skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1]; 965 }
961 pci_unmap_page(np->pci_dev, np->tx_dma[entry], 966
962 frag->size, 967 if (np->tx_skbuff[skbnr]) {
963 PCI_DMA_TODEVICE); 968 dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
964 entry = (entry - 1) % TX_RING; 969 np->tx_skbuff[skbnr] = NULL;
965 } 970 return 1;
971 } else {
972 return 0;
966 } 973 }
967 pci_unmap_single(np->pci_dev, np->tx_dma[entry],
968 skb->len - skb->data_len,
969 PCI_DMA_TODEVICE);
970 dev_kfree_skb_irq(skb);
971 np->tx_skbuff[skbnr] = NULL;
972} 974}
973 975
974static void nv_drain_tx(struct net_device *dev) 976static void nv_drain_tx(struct net_device *dev)
@@ -981,10 +983,8 @@ static void nv_drain_tx(struct net_device *dev)
981 np->tx_ring.orig[i].FlagLen = 0; 983 np->tx_ring.orig[i].FlagLen = 0;
982 else 984 else
983 np->tx_ring.ex[i].FlagLen = 0; 985 np->tx_ring.ex[i].FlagLen = 0;
984 if (np->tx_skbuff[i]) { 986 if (nv_release_txskb(dev, i))
985 nv_release_txskb(dev, i);
986 np->stats.tx_dropped++; 987 np->stats.tx_dropped++;
987 }
988 } 988 }
989} 989}
990 990
@@ -1021,68 +1021,105 @@ static void drain_ring(struct net_device *dev)
1021static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 1021static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1022{ 1022{
1023 struct fe_priv *np = netdev_priv(dev); 1023 struct fe_priv *np = netdev_priv(dev);
1024 u32 tx_flags = 0;
1024 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1025 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1025 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1026 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1026 unsigned int nr = (np->next_tx + fragments) % TX_RING; 1027 unsigned int nr = (np->next_tx - 1) % TX_RING;
1028 unsigned int start_nr = np->next_tx % TX_RING;
1027 unsigned int i; 1029 unsigned int i;
1030 u32 offset = 0;
1031 u32 bcnt;
1032 u32 size = skb->len-skb->data_len;
1033 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1034
1035 /* add fragments to entries count */
1036 for (i = 0; i < fragments; i++) {
1037 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1038 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1039 }
1028 1040
1029 spin_lock_irq(&np->lock); 1041 spin_lock_irq(&np->lock);
1030 1042
1031 if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) { 1043 if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
1032 spin_unlock_irq(&np->lock); 1044 spin_unlock_irq(&np->lock);
1033 netif_stop_queue(dev); 1045 netif_stop_queue(dev);
1034 return NETDEV_TX_BUSY; 1046 return NETDEV_TX_BUSY;
1035 } 1047 }
1036 1048
1037 np->tx_skbuff[nr] = skb; 1049 /* setup the header buffer */
1038 1050 do {
1039 if (fragments) { 1051 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1040 dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments); 1052 nr = (nr + 1) % TX_RING;
1041 /* setup descriptors in reverse order */ 1053
1042 for (i = fragments; i >= 1; i--) { 1054 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1043 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; 1055 PCI_DMA_TODEVICE);
1044 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size, 1056 np->tx_dma_len[nr] = bcnt;
1045 PCI_DMA_TODEVICE); 1057
1058 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1059 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1060 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1061 } else {
1062 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1063 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1064 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1065 }
1066 tx_flags = np->tx_flags;
1067 offset += bcnt;
1068 size -= bcnt;
1069 } while(size);
1070
1071 /* setup the fragments */
1072 for (i = 0; i < fragments; i++) {
1073 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1074 u32 size = frag->size;
1075 offset = 0;
1076
1077 do {
1078 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1079 nr = (nr + 1) % TX_RING;
1080
1081 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1082 PCI_DMA_TODEVICE);
1083 np->tx_dma_len[nr] = bcnt;
1046 1084
1047 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1085 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1048 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1086 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1049 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); 1087 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1050 } else { 1088 } else {
1051 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1089 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1052 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1090 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1053 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); 1091 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1054 } 1092 }
1055 1093 offset += bcnt;
1056 nr = (nr - 1) % TX_RING; 1094 size -= bcnt;
1095 } while (size);
1096 }
1057 1097
1058 if (np->desc_ver == DESC_VER_1) 1098 /* set last fragment flag */
1059 tx_flags_extra &= ~NV_TX_LASTPACKET; 1099 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1060 else 1100 np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1061 tx_flags_extra &= ~NV_TX2_LASTPACKET; 1101 } else {
1062 } 1102 np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1063 } 1103 }
1064 1104
1105 np->tx_skbuff[nr] = skb;
1106
1065#ifdef NETIF_F_TSO 1107#ifdef NETIF_F_TSO
1066 if (skb_shinfo(skb)->tso_size) 1108 if (skb_shinfo(skb)->tso_size)
1067 tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT); 1109 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
1068 else 1110 else
1069#endif 1111#endif
1070 tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1112 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1071 1113
1072 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len, 1114 /* set tx flags */
1073 PCI_DMA_TODEVICE);
1074
1075 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1115 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1076 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1116 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1077 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1078 } else { 1117 } else {
1079 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1118 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1080 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1081 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1082 } 1119 }
1083 1120
1084 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. tx_flags_extra: %x\n", 1121 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
1085 dev->name, np->next_tx, tx_flags_extra); 1122 dev->name, np->next_tx, entries, tx_flags_extra);
1086 { 1123 {
1087 int j; 1124 int j;
1088 for (j=0; j<64; j++) { 1125 for (j=0; j<64; j++) {
@@ -1093,7 +1130,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1093 dprintk("\n"); 1130 dprintk("\n");
1094 } 1131 }
1095 1132
1096 np->next_tx += 1 + fragments; 1133 np->next_tx += entries;
1097 1134
1098 dev->trans_start = jiffies; 1135 dev->trans_start = jiffies;
1099 spin_unlock_irq(&np->lock); 1136 spin_unlock_irq(&np->lock);
@@ -1140,7 +1177,6 @@ static void nv_tx_done(struct net_device *dev)
1140 np->stats.tx_packets++; 1177 np->stats.tx_packets++;
1141 np->stats.tx_bytes += skb->len; 1178 np->stats.tx_bytes += skb->len;
1142 } 1179 }
1143 nv_release_txskb(dev, i);
1144 } 1180 }
1145 } else { 1181 } else {
1146 if (Flags & NV_TX2_LASTPACKET) { 1182 if (Flags & NV_TX2_LASTPACKET) {
@@ -1156,9 +1192,9 @@ static void nv_tx_done(struct net_device *dev)
1156 np->stats.tx_packets++; 1192 np->stats.tx_packets++;
1157 np->stats.tx_bytes += skb->len; 1193 np->stats.tx_bytes += skb->len;
1158 } 1194 }
1159 nv_release_txskb(dev, i);
1160 } 1195 }
1161 } 1196 }
1197 nv_release_txskb(dev, i);
1162 np->nic_tx++; 1198 np->nic_tx++;
1163 } 1199 }
1164 if (np->next_tx - np->nic_tx < TX_LIMIT_START) 1200 if (np->next_tx - np->nic_tx < TX_LIMIT_START)
@@ -2456,7 +2492,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2456 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 2492 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
2457 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2493 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2458#ifdef NETIF_F_TSO 2494#ifdef NETIF_F_TSO
2459 /* disabled dev->features |= NETIF_F_TSO; */ 2495 dev->features |= NETIF_F_TSO;
2460#endif 2496#endif
2461 } 2497 }
2462 2498
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 146f9513aea5..0c18dbd67d3b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -84,6 +84,7 @@
84#include <linux/ip.h> 84#include <linux/ip.h>
85#include <linux/tcp.h> 85#include <linux/tcp.h>
86#include <linux/udp.h> 86#include <linux/udp.h>
87#include <linux/in.h>
87 88
88#include <asm/io.h> 89#include <asm/io.h>
89#include <asm/irq.h> 90#include <asm/irq.h>
@@ -398,12 +399,15 @@ static int init_phy(struct net_device *dev)
398 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 399 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
399 SUPPORTED_1000baseT_Full : 0; 400 SUPPORTED_1000baseT_Full : 0;
400 struct phy_device *phydev; 401 struct phy_device *phydev;
402 char phy_id[BUS_ID_SIZE];
401 403
402 priv->oldlink = 0; 404 priv->oldlink = 0;
403 priv->oldspeed = 0; 405 priv->oldspeed = 0;
404 priv->oldduplex = -1; 406 priv->oldduplex = -1;
405 407
406 phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0); 408 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
409
410 phydev = phy_connect(dev, phy_id, &adjust_link, 0);
407 411
408 if (IS_ERR(phydev)) { 412 if (IS_ERR(phydev)) {
409 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 413 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 94a91da84fbb..cb9d66ac3ab9 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -718,14 +718,14 @@ struct gfar_private {
718 uint32_t msg_enable; 718 uint32_t msg_enable;
719}; 719};
720 720
721extern inline u32 gfar_read(volatile unsigned *addr) 721static inline u32 gfar_read(volatile unsigned *addr)
722{ 722{
723 u32 val; 723 u32 val;
724 val = in_be32(addr); 724 val = in_be32(addr);
725 return val; 725 return val;
726} 726}
727 727
728extern inline void gfar_write(volatile unsigned *addr, u32 val) 728static inline void gfar_write(volatile unsigned *addr, u32 val)
729{ 729{
730 out_be32(addr, val); 730 out_be32(addr, val);
731} 731}
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 04a462c2a5b7..74e52fcbf806 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -128,6 +128,7 @@ int gfar_mdio_probe(struct device *dev)
128 struct gianfar_mdio_data *pdata; 128 struct gianfar_mdio_data *pdata;
129 struct gfar_mii *regs; 129 struct gfar_mii *regs;
130 struct mii_bus *new_bus; 130 struct mii_bus *new_bus;
131 struct resource *r;
131 int err = 0; 132 int err = 0;
132 133
133 if (NULL == dev) 134 if (NULL == dev)
@@ -151,8 +152,10 @@ int gfar_mdio_probe(struct device *dev)
151 return -ENODEV; 152 return -ENODEV;
152 } 153 }
153 154
155 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
156
154 /* Set the PHY base address */ 157 /* Set the PHY base address */
155 regs = (struct gfar_mii *) ioremap(pdata->paddr, 158 regs = (struct gfar_mii *) ioremap(r->start,
156 sizeof (struct gfar_mii)); 159 sizeof (struct gfar_mii));
157 160
158 if (NULL == regs) { 161 if (NULL == regs) {
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 10d34cb19192..51ef181b1368 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -7,7 +7,7 @@
7 * Based on 8260_io/fcc_enet.c 7 * Based on 8260_io/fcc_enet.c
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 10 * Maintainer: Kumar Gala (galak@kernel.crashing.org)
11 * 11 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
13 * 13 *
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 90999867a32c..102c1f0b90da 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,11 +456,6 @@ out:
456 456
457/* ----------------------------------------------------------------------- */ 457/* ----------------------------------------------------------------------- */
458 458
459static int sixpack_receive_room(struct tty_struct *tty)
460{
461 return 65536; /* We can handle an infinite amount of data. :-) */
462}
463
464/* 459/*
465 * Handle the 'receiver data ready' interrupt. 460 * Handle the 'receiver data ready' interrupt.
466 * This function is called by the 'tty_io' module in the kernel when 461 * This function is called by the 'tty_io' module in the kernel when
@@ -671,6 +666,7 @@ static int sixpack_open(struct tty_struct *tty)
671 666
672 /* Done. We have linked the TTY line to a channel. */ 667 /* Done. We have linked the TTY line to a channel. */
673 tty->disc_data = sp; 668 tty->disc_data = sp;
669 tty->receive_room = 65536;
674 670
675 /* Now we're ready to register. */ 671 /* Now we're ready to register. */
676 if (register_netdev(dev)) 672 if (register_netdev(dev))
@@ -802,7 +798,6 @@ static struct tty_ldisc sp_ldisc = {
802 .close = sixpack_close, 798 .close = sixpack_close,
803 .ioctl = sixpack_ioctl, 799 .ioctl = sixpack_ioctl,
804 .receive_buf = sixpack_receive_buf, 800 .receive_buf = sixpack_receive_buf,
805 .receive_room = sixpack_receive_room,
806 .write_wakeup = sixpack_write_wakeup, 801 .write_wakeup = sixpack_write_wakeup,
807}; 802};
808 803
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3e9accf137e7..dc5e9d59deed 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
516 } 516 }
517 } 517 }
518 spin_unlock_bh(&ax->buflock);
518 519
519 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 520 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
520 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); 521 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
@@ -752,6 +753,7 @@ static int mkiss_open(struct tty_struct *tty)
752 753
753 ax->tty = tty; 754 ax->tty = tty;
754 tty->disc_data = ax; 755 tty->disc_data = ax;
756 tty->receive_room = 65535;
755 757
756 if (tty->driver->flush_buffer) 758 if (tty->driver->flush_buffer)
757 tty->driver->flush_buffer(tty); 759 tty->driver->flush_buffer(tty);
@@ -939,11 +941,6 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
939 tty->driver->unthrottle(tty); 941 tty->driver->unthrottle(tty);
940} 942}
941 943
942static int mkiss_receive_room(struct tty_struct *tty)
943{
944 return 65536; /* We can handle an infinite amount of data. :-) */
945}
946
947/* 944/*
948 * Called by the driver when there's room for more data. If we have 945 * Called by the driver when there's room for more data. If we have
949 * more packets to send, we send them here. 946 * more packets to send, we send them here.
@@ -982,7 +979,6 @@ static struct tty_ldisc ax_ldisc = {
982 .close = mkiss_close, 979 .close = mkiss_close,
983 .ioctl = mkiss_ioctl, 980 .ioctl = mkiss_ioctl,
984 .receive_buf = mkiss_receive_buf, 981 .receive_buf = mkiss_receive_buf,
985 .receive_room = mkiss_receive_room,
986 .write_wakeup = mkiss_write_wakeup 982 .write_wakeup = mkiss_write_wakeup
987}; 983};
988 984
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 0abf5dd08b4c..74e167e7dea7 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -138,12 +138,6 @@ static int __init do_hpp_probe(struct net_device *dev)
138 return -ENODEV; 138 return -ENODEV;
139} 139}
140 140
141static void cleanup_card(struct net_device *dev)
142{
143 /* NB: hpp_close() handles free_irq */
144 release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
145}
146
147#ifndef MODULE 141#ifndef MODULE
148struct net_device * __init hp_plus_probe(int unit) 142struct net_device * __init hp_plus_probe(int unit)
149{ 143{
@@ -473,6 +467,12 @@ init_module(void)
473 return -ENXIO; 467 return -ENXIO;
474} 468}
475 469
470static void cleanup_card(struct net_device *dev)
471{
472 /* NB: hpp_close() handles free_irq */
473 release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
474}
475
476void 476void
477cleanup_module(void) 477cleanup_module(void)
478{ 478{
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 59cf841b14ab..cf9fb3698a6b 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -102,12 +102,6 @@ static int __init do_hp_probe(struct net_device *dev)
102 return -ENODEV; 102 return -ENODEV;
103} 103}
104 104
105static void cleanup_card(struct net_device *dev)
106{
107 free_irq(dev->irq, dev);
108 release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
109}
110
111#ifndef MODULE 105#ifndef MODULE
112struct net_device * __init hp_probe(int unit) 106struct net_device * __init hp_probe(int unit)
113{ 107{
@@ -444,6 +438,12 @@ init_module(void)
444 return -ENXIO; 438 return -ENXIO;
445} 439}
446 440
441static void cleanup_card(struct net_device *dev)
442{
443 free_irq(dev->irq, dev);
444 release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
445}
446
447void 447void
448cleanup_module(void) 448cleanup_module(void)
449{ 449{
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index e92c17f6931c..55c7ed608391 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -276,7 +276,7 @@ static void hp100_RegisterDump(struct net_device *dev);
276 * Convert an address in a kernel buffer to a bus/phys/dma address. 276 * Convert an address in a kernel buffer to a bus/phys/dma address.
277 * This work *only* for memory fragments part of lp->page_vaddr, 277 * This work *only* for memory fragments part of lp->page_vaddr,
278 * because it was properly DMA allocated via pci_alloc_consistent(), 278 * because it was properly DMA allocated via pci_alloc_consistent(),
279 * so we just need to "retreive" the original mapping to bus/phys/dma 279 * so we just need to "retrieve" the original mapping to bus/phys/dma
280 * address - Jean II */ 280 * address - Jean II */
281static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr) 281static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
282{ 282{
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 08703d6f934c..d8410634bcaf 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -150,7 +150,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
150 lp->lance.name = (char*)d->name; /* discards const, shut up gcc */ 150 lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
151 lp->lance.base = va; 151 lp->lance.base = va;
152 lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */ 152 lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
153 lp->lance.lance_init_block = 0; /* LANCE addr of same RAM */ 153 lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
154 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ 154 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
155 lp->lance.irq = d->ipl; 155 lp->lance.irq = d->ipl;
156 lp->lance.writerap = hplance_writerap; 156 lp->lance.writerap = hplance_writerap;
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 644edbff4f94..c2dae6092c4c 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -110,6 +110,7 @@ struct emac_regs {
110#define EMAC_MR1_TFS_2K 0x00080000 110#define EMAC_MR1_TFS_2K 0x00080000
111#define EMAC_MR1_TR0_MULT 0x00008000 111#define EMAC_MR1_TR0_MULT 0x00008000
112#define EMAC_MR1_JPSM 0x00000000 112#define EMAC_MR1_JPSM 0x00000000
113#define EMAC_MR1_MWSW_001 0x00000000
113#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT) 114#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
114#else 115#else
115#define EMAC_MR1_RFS_4K 0x00180000 116#define EMAC_MR1_RFS_4K 0x00180000
@@ -130,7 +131,7 @@ struct emac_regs {
130 (freq) <= 83 ? EMAC_MR1_OBCI_83 : \ 131 (freq) <= 83 ? EMAC_MR1_OBCI_83 : \
131 (freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P) 132 (freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P)
132#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \ 133#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \
133 EMAC_MR1_MWSW_001 | EMAC_MR1_OBCI(opb)) 134 EMAC_MR1_OBCI(opb))
134#endif 135#endif
135 136
136/* EMACx_TMR0 */ 137/* EMACx_TMR0 */
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 1da8a66f91e1..591c5864ffb1 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -408,7 +408,7 @@ static int emac_configure(struct ocp_enet_private *dev)
408 /* Mode register */ 408 /* Mode register */
409 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST; 409 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
410 if (dev->phy.duplex == DUPLEX_FULL) 410 if (dev->phy.duplex == DUPLEX_FULL)
411 r |= EMAC_MR1_FDE; 411 r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
412 dev->stop_timeout = STOP_TIMEOUT_10; 412 dev->stop_timeout = STOP_TIMEOUT_10;
413 switch (dev->phy.speed) { 413 switch (dev->phy.speed) {
414 case SPEED_1000: 414 case SPEED_1000:
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
new file mode 100644
index 000000000000..1b699259b4ec
--- /dev/null
+++ b/drivers/net/ifb.c
@@ -0,0 +1,294 @@
1/* drivers/net/ifb.c:
2
3 The purpose of this driver is to provide a device that allows
4 for sharing of resources:
5
6 1) qdiscs/policies that are per device as opposed to system wide.
7 ifb allows for a device which can be redirected to thus providing
8 an impression of sharing.
9
10 2) Allows for queueing incoming traffic for shaping instead of
11 dropping.
12
13 The original concept is based on what is known as the IMQ
14 driver initially written by Martin Devera, later rewritten
15 by Patrick McHardy and then maintained by Andre Correa.
16
17 You need the tc action mirror or redirect to feed this device
18 packets.
19
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 as published by the Free Software Foundation; either version
23 2 of the License, or (at your option) any later version.
24
25 Authors: Jamal Hadi Salim (2005)
26
27*/
28
29
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/init.h>
36#include <linux/moduleparam.h>
37#include <net/pkt_sched.h>
38
39#define TX_TIMEOUT (2*HZ)
40
41#define TX_Q_LIMIT 32
42struct ifb_private {
43 struct net_device_stats stats;
44 struct tasklet_struct ifb_tasklet;
45 int tasklet_pending;
46 /* mostly debug stats leave in for now */
47 unsigned long st_task_enter; /* tasklet entered */
48 unsigned long st_txq_refl_try; /* transmit queue refill attempt */
49 unsigned long st_rxq_enter; /* receive queue entered */
50 unsigned long st_rx2tx_tran; /* receive to transmit transfers */
51 unsigned long st_rxq_notenter; /*receiveQ not entered, resched */
52 unsigned long st_rx_frm_egr; /* received from egress path */
53 unsigned long st_rx_frm_ing; /* received from ingress path */
54 unsigned long st_rxq_check;
55 unsigned long st_rxq_rsch;
56 struct sk_buff_head rq;
57 struct sk_buff_head tq;
58};
59
60static int numifbs = 1;
61
62static void ri_tasklet(unsigned long dev);
63static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
64static struct net_device_stats *ifb_get_stats(struct net_device *dev);
65static int ifb_open(struct net_device *dev);
66static int ifb_close(struct net_device *dev);
67
68static void ri_tasklet(unsigned long dev)
69{
70
71 struct net_device *_dev = (struct net_device *)dev;
72 struct ifb_private *dp = netdev_priv(_dev);
73 struct net_device_stats *stats = &dp->stats;
74 struct sk_buff *skb;
75
76 dp->st_task_enter++;
77 if ((skb = skb_peek(&dp->tq)) == NULL) {
78 dp->st_txq_refl_try++;
79 if (spin_trylock(&_dev->xmit_lock)) {
80 dp->st_rxq_enter++;
81 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
82 skb_queue_tail(&dp->tq, skb);
83 dp->st_rx2tx_tran++;
84 }
85 spin_unlock(&_dev->xmit_lock);
86 } else {
87 /* reschedule */
88 dp->st_rxq_notenter++;
89 goto resched;
90 }
91 }
92
93 while ((skb = skb_dequeue(&dp->tq)) != NULL) {
94 u32 from = G_TC_FROM(skb->tc_verd);
95
96 skb->tc_verd = 0;
97 skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len;
100 if (from & AT_EGRESS) {
101 dp->st_rx_frm_egr++;
102 dev_queue_xmit(skb);
103 } else if (from & AT_INGRESS) {
104
105 dp->st_rx_frm_ing++;
106 netif_rx(skb);
107 } else {
108 dev_kfree_skb(skb);
109 stats->tx_dropped++;
110 }
111 }
112
113 if (spin_trylock(&_dev->xmit_lock)) {
114 dp->st_rxq_check++;
115 if ((skb = skb_peek(&dp->rq)) == NULL) {
116 dp->tasklet_pending = 0;
117 if (netif_queue_stopped(_dev))
118 netif_wake_queue(_dev);
119 } else {
120 dp->st_rxq_rsch++;
121 spin_unlock(&_dev->xmit_lock);
122 goto resched;
123 }
124 spin_unlock(&_dev->xmit_lock);
125 } else {
126resched:
127 dp->tasklet_pending = 1;
128 tasklet_schedule(&dp->ifb_tasklet);
129 }
130
131}
132
133static void __init ifb_setup(struct net_device *dev)
134{
135 /* Initialize the device structure. */
136 dev->get_stats = ifb_get_stats;
137 dev->hard_start_xmit = ifb_xmit;
138 dev->open = &ifb_open;
139 dev->stop = &ifb_close;
140
141 /* Fill in device structure with ethernet-generic values. */
142 ether_setup(dev);
143 dev->tx_queue_len = TX_Q_LIMIT;
144 dev->change_mtu = NULL;
145 dev->flags |= IFF_NOARP;
146 dev->flags &= ~IFF_MULTICAST;
147 SET_MODULE_OWNER(dev);
148 random_ether_addr(dev->dev_addr);
149}
150
151static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
152{
153 struct ifb_private *dp = netdev_priv(dev);
154 struct net_device_stats *stats = &dp->stats;
155 int ret = 0;
156 u32 from = G_TC_FROM(skb->tc_verd);
157
158 stats->tx_packets++;
159 stats->tx_bytes+=skb->len;
160
161 if (!from || !skb->input_dev) {
162dropped:
163 dev_kfree_skb(skb);
164 stats->rx_dropped++;
165 return ret;
166 } else {
167 /*
168 * note we could be going
169 * ingress -> egress or
170 * egress -> ingress
171 */
172 skb->dev = skb->input_dev;
173 skb->input_dev = dev;
174 if (from & AT_INGRESS) {
175 skb_pull(skb, skb->dev->hard_header_len);
176 } else {
177 if (!(from & AT_EGRESS)) {
178 goto dropped;
179 }
180 }
181 }
182
183 if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
184 netif_stop_queue(dev);
185 }
186
187 dev->trans_start = jiffies;
188 skb_queue_tail(&dp->rq, skb);
189 if (!dp->tasklet_pending) {
190 dp->tasklet_pending = 1;
191 tasklet_schedule(&dp->ifb_tasklet);
192 }
193
194 return ret;
195}
196
197static struct net_device_stats *ifb_get_stats(struct net_device *dev)
198{
199 struct ifb_private *dp = netdev_priv(dev);
200 struct net_device_stats *stats = &dp->stats;
201
202 pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n",
203 dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
204 dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr,
205 dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch );
206
207 return stats;
208}
209
210static struct net_device **ifbs;
211
212/* Number of ifb devices to be set up by this module. */
213module_param(numifbs, int, 0);
214MODULE_PARM_DESC(numifbs, "Number of ifb devices");
215
216static int ifb_close(struct net_device *dev)
217{
218 struct ifb_private *dp = netdev_priv(dev);
219
220 tasklet_kill(&dp->ifb_tasklet);
221 netif_stop_queue(dev);
222 skb_queue_purge(&dp->rq);
223 skb_queue_purge(&dp->tq);
224 return 0;
225}
226
227static int ifb_open(struct net_device *dev)
228{
229 struct ifb_private *dp = netdev_priv(dev);
230
231 tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
232 skb_queue_head_init(&dp->rq);
233 skb_queue_head_init(&dp->tq);
234 netif_start_queue(dev);
235
236 return 0;
237}
238
239static int __init ifb_init_one(int index)
240{
241 struct net_device *dev_ifb;
242 int err;
243
244 dev_ifb = alloc_netdev(sizeof(struct ifb_private),
245 "ifb%d", ifb_setup);
246
247 if (!dev_ifb)
248 return -ENOMEM;
249
250 if ((err = register_netdev(dev_ifb))) {
251 free_netdev(dev_ifb);
252 dev_ifb = NULL;
253 } else {
254 ifbs[index] = dev_ifb;
255 }
256
257 return err;
258}
259
260static void ifb_free_one(int index)
261{
262 unregister_netdev(ifbs[index]);
263 free_netdev(ifbs[index]);
264}
265
266static int __init ifb_init_module(void)
267{
268 int i, err = 0;
269 ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
270 if (!ifbs)
271 return -ENOMEM;
272 for (i = 0; i < numifbs && !err; i++)
273 err = ifb_init_one(i);
274 if (err) {
275 while (--i >= 0)
276 ifb_free_one(i);
277 }
278
279 return err;
280}
281
282static void __exit ifb_cleanup_module(void)
283{
284 int i;
285
286 for (i = 0; i < numifbs; i++)
287 ifb_free_one(i);
288 kfree(ifbs);
289}
290
291module_init(ifb_init_module);
292module_exit(ifb_cleanup_module);
293MODULE_LICENSE("GPL");
294MODULE_AUTHOR("Jamal Hadi Salim");
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d54156f11e61..7a081346f079 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -1,4 +1,3 @@
1
2menu "Infrared-port device drivers" 1menu "Infrared-port device drivers"
3 depends on IRDA!=n 2 depends on IRDA!=n
4 3
@@ -156,7 +155,7 @@ comment "Old Serial dongle support"
156 155
157config DONGLE_OLD 156config DONGLE_OLD
158 bool "Old Serial dongle support" 157 bool "Old Serial dongle support"
159 depends on (IRTTY_OLD || IRPORT_SIR) && BROKEN_ON_SMP 158 depends on IRPORT_SIR && BROKEN_ON_SMP
160 help 159 help
161 Say Y here if you have an infrared device that connects to your 160 Say Y here if you have an infrared device that connects to your
162 computer's serial port. These devices are called dongles. Then say Y 161 computer's serial port. These devices are called dongles. Then say Y
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e7a8b7f7f5dd..72cbfdc9cfcc 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -45,4 +45,4 @@ obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
45obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o 45obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
46 46
47# The SIR helper module 47# The SIR helper module
48sir-dev-objs := sir_core.o sir_dev.o sir_dongle.o sir_kthread.o 48sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3d016a498e1d..6070195b87bd 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -285,19 +285,6 @@ static void irport_start(struct irport_cb *self)
285} 285}
286 286
287/* 287/*
288 * Function irport_probe (void)
289 *
290 * Start IO port
291 *
292 */
293int irport_probe(int iobase)
294{
295 IRDA_DEBUG(4, "%s(), iobase=%#x\n", __FUNCTION__, iobase);
296
297 return 0;
298}
299
300/*
301 * Function irport_get_fcr (speed) 288 * Function irport_get_fcr (speed)
302 * 289 *
303 * Compute value of fcr 290 * Compute value of fcr
@@ -382,7 +369,7 @@ static void irport_change_speed(void *priv, __u32 speed)
382 * we cannot use schedule_timeout() when we are in interrupt context 369 * we cannot use schedule_timeout() when we are in interrupt context
383 * 370 *
384 */ 371 */
385int __irport_change_speed(struct irda_task *task) 372static int __irport_change_speed(struct irda_task *task)
386{ 373{
387 struct irport_cb *self; 374 struct irport_cb *self;
388 __u32 speed = (__u32) task->param; 375 __u32 speed = (__u32) task->param;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index b8d112348ba4..101750bf210f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -289,22 +289,6 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
289} 289}
290 290
291/* 291/*
292 * Function irtty_receive_room (tty)
293 *
294 * Used by the TTY to find out how much data we can receive at a time
295 *
296*/
297static int irtty_receive_room(struct tty_struct *tty)
298{
299 struct sirtty_cb *priv = tty->disc_data;
300
301 IRDA_ASSERT(priv != NULL, return 0;);
302 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return 0;);
303
304 return 65536; /* We can handle an infinite amount of data. :-) */
305}
306
307/*
308 * Function irtty_write_wakeup (tty) 292 * Function irtty_write_wakeup (tty)
309 * 293 *
310 * Called by the driver when there's room for more data. If we have 294 * Called by the driver when there's room for more data. If we have
@@ -534,6 +518,7 @@ static int irtty_open(struct tty_struct *tty)
534 518
535 dev->priv = priv; 519 dev->priv = priv;
536 tty->disc_data = priv; 520 tty->disc_data = priv;
521 tty->receive_room = 65536;
537 522
538 up(&irtty_sem); 523 up(&irtty_sem);
539 524
@@ -605,7 +590,6 @@ static struct tty_ldisc irda_ldisc = {
605 .ioctl = irtty_ioctl, 590 .ioctl = irtty_ioctl,
606 .poll = NULL, 591 .poll = NULL,
607 .receive_buf = irtty_receive_buf, 592 .receive_buf = irtty_receive_buf,
608 .receive_room = irtty_receive_room,
609 .write_wakeup = irtty_write_wakeup, 593 .write_wakeup = irtty_write_wakeup,
610 .owner = THIS_MODULE, 594 .owner = THIS_MODULE,
611}; 595};
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f0b8bc3637e5..f69fb4cec76f 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -133,8 +133,6 @@ extern int sirdev_put_dongle(struct sir_dev *self);
133 133
134extern void sirdev_enable_rx(struct sir_dev *dev); 134extern void sirdev_enable_rx(struct sir_dev *dev);
135extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param); 135extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
136extern int __init irda_thread_create(void);
137extern void __exit irda_thread_join(void);
138 136
139/* inline helpers */ 137/* inline helpers */
140 138
diff --git a/drivers/net/irda/sir_core.c b/drivers/net/irda/sir_core.c
deleted file mode 100644
index a49f910c835b..000000000000
--- a/drivers/net/irda/sir_core.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/*********************************************************************
2 *
3 * sir_core.c: module core for irda-sir abstraction layer
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17
18#include <net/irda/irda.h>
19
20#include "sir-dev.h"
21
22/***************************************************************************/
23
24MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
25MODULE_DESCRIPTION("IrDA SIR core");
26MODULE_LICENSE("GPL");
27
28/***************************************************************************/
29
30EXPORT_SYMBOL(irda_register_dongle);
31EXPORT_SYMBOL(irda_unregister_dongle);
32
33EXPORT_SYMBOL(sirdev_get_instance);
34EXPORT_SYMBOL(sirdev_put_instance);
35
36EXPORT_SYMBOL(sirdev_set_dongle);
37EXPORT_SYMBOL(sirdev_write_complete);
38EXPORT_SYMBOL(sirdev_receive);
39
40EXPORT_SYMBOL(sirdev_raw_write);
41EXPORT_SYMBOL(sirdev_raw_read);
42EXPORT_SYMBOL(sirdev_set_dtr_rts);
43
44static int __init sir_core_init(void)
45{
46 return irda_thread_create();
47}
48
49static void __exit sir_core_exit(void)
50{
51 irda_thread_join();
52}
53
54module_init(sir_core_init);
55module_exit(sir_core_exit);
56
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index df22b8b532e7..ea7c9464d46a 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -60,6 +60,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
60 up(&dev->fsm.sem); 60 up(&dev->fsm.sem);
61 return err; 61 return err;
62} 62}
63EXPORT_SYMBOL(sirdev_set_dongle);
63 64
64/* used by dongle drivers for dongle programming */ 65/* used by dongle drivers for dongle programming */
65 66
@@ -94,6 +95,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
94 spin_unlock_irqrestore(&dev->tx_lock, flags); 95 spin_unlock_irqrestore(&dev->tx_lock, flags);
95 return ret; 96 return ret;
96} 97}
98EXPORT_SYMBOL(sirdev_raw_write);
97 99
98/* seems some dongle drivers may need this */ 100/* seems some dongle drivers may need this */
99 101
@@ -116,6 +118,7 @@ int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
116 118
117 return count; 119 return count;
118} 120}
121EXPORT_SYMBOL(sirdev_raw_read);
119 122
120int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts) 123int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
121{ 124{
@@ -124,7 +127,8 @@ int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
124 ret = dev->drv->set_dtr_rts(dev, dtr, rts); 127 ret = dev->drv->set_dtr_rts(dev, dtr, rts);
125 return ret; 128 return ret;
126} 129}
127 130EXPORT_SYMBOL(sirdev_set_dtr_rts);
131
128/**********************************************************************/ 132/**********************************************************************/
129 133
130/* called from client driver - likely with bh-context - to indicate 134/* called from client driver - likely with bh-context - to indicate
@@ -227,6 +231,7 @@ void sirdev_write_complete(struct sir_dev *dev)
227done: 231done:
228 spin_unlock_irqrestore(&dev->tx_lock, flags); 232 spin_unlock_irqrestore(&dev->tx_lock, flags);
229} 233}
234EXPORT_SYMBOL(sirdev_write_complete);
230 235
231/* called from client driver - likely with bh-context - to give us 236/* called from client driver - likely with bh-context - to give us
232 * some more received bytes. We put them into the rx-buffer, 237 * some more received bytes. We put them into the rx-buffer,
@@ -279,6 +284,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
279 284
280 return 0; 285 return 0;
281} 286}
287EXPORT_SYMBOL(sirdev_receive);
282 288
283/**********************************************************************/ 289/**********************************************************************/
284 290
@@ -641,6 +647,7 @@ out_freenetdev:
641out: 647out:
642 return NULL; 648 return NULL;
643} 649}
650EXPORT_SYMBOL(sirdev_get_instance);
644 651
645int sirdev_put_instance(struct sir_dev *dev) 652int sirdev_put_instance(struct sir_dev *dev)
646{ 653{
@@ -673,4 +680,5 @@ int sirdev_put_instance(struct sir_dev *dev)
673 680
674 return 0; 681 return 0;
675} 682}
683EXPORT_SYMBOL(sirdev_put_instance);
676 684
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index c5b76746e72b..8d225921ae7b 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -50,6 +50,7 @@ int irda_register_dongle(struct dongle_driver *new)
50 up(&dongle_list_lock); 50 up(&dongle_list_lock);
51 return 0; 51 return 0;
52} 52}
53EXPORT_SYMBOL(irda_register_dongle);
53 54
54int irda_unregister_dongle(struct dongle_driver *drv) 55int irda_unregister_dongle(struct dongle_driver *drv)
55{ 56{
@@ -58,6 +59,7 @@ int irda_unregister_dongle(struct dongle_driver *drv)
58 up(&dongle_list_lock); 59 up(&dongle_list_lock);
59 return 0; 60 return 0;
60} 61}
62EXPORT_SYMBOL(irda_unregister_dongle);
61 63
62int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) 64int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
63{ 65{
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
index c65054364bca..e3904d6bfecd 100644
--- a/drivers/net/irda/sir_kthread.c
+++ b/drivers/net/irda/sir_kthread.c
@@ -466,7 +466,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
466 return 0; 466 return 0;
467} 467}
468 468
469int __init irda_thread_create(void) 469static int __init irda_thread_create(void)
470{ 470{
471 struct completion startup; 471 struct completion startup;
472 int pid; 472 int pid;
@@ -488,7 +488,7 @@ int __init irda_thread_create(void)
488 return 0; 488 return 0;
489} 489}
490 490
491void __exit irda_thread_join(void) 491static void __exit irda_thread_join(void)
492{ 492{
493 if (irda_rq_queue.thread) { 493 if (irda_rq_queue.thread) {
494 flush_irda_queue(); 494 flush_irda_queue();
@@ -499,3 +499,10 @@ void __exit irda_thread_join(void)
499 } 499 }
500} 500}
501 501
502module_init(irda_thread_create);
503module_exit(irda_thread_join);
504
505MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
506MODULE_DESCRIPTION("IrDA SIR core");
507MODULE_LICENSE("GPL");
508
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 741aecc655df..a82a4ba8de4f 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -577,8 +577,8 @@ struct ring_descr_hw {
577 struct { 577 struct {
578 u8 addr_res[3]; 578 u8 addr_res[3];
579 volatile u8 status; /* descriptor status */ 579 volatile u8 status; /* descriptor status */
580 } rd_s __attribute__((packed)); 580 } __attribute__((packed)) rd_s;
581 } rd_u __attribute((packed)); 581 } __attribute((packed)) rd_u;
582} __attribute__ ((packed)); 582} __attribute__ ((packed));
583 583
584#define rd_addr rd_u.addr 584#define rd_addr rd_u.addr
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 77eadf84cb2c..f0f04be989d6 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -590,9 +590,9 @@ static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
590{ 590{
591 struct veth_lpevent *veth_event = (struct veth_lpevent *)event; 591 struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
592 592
593 if (event->xFlags.xFunction == HvLpEvent_Function_Ack) 593 if (hvlpevent_is_ack(event))
594 veth_handle_ack(veth_event); 594 veth_handle_ack(veth_event);
595 else if (event->xFlags.xFunction == HvLpEvent_Function_Int) 595 else
596 veth_handle_int(veth_event); 596 veth_handle_int(veth_event);
597} 597}
598 598
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 1d75ca0bb939..d1d714faa6ce 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -309,17 +309,6 @@ static void lance_tx_timeout (struct net_device *dev);
309 309
310 310
311 311
312static void cleanup_card(struct net_device *dev)
313{
314 struct lance_private *lp = dev->priv;
315 if (dev->dma != 4)
316 free_dma(dev->dma);
317 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
318 kfree(lp->tx_bounce_buffs);
319 kfree((void*)lp->rx_buffs);
320 kfree(lp);
321}
322
323#ifdef MODULE 312#ifdef MODULE
324#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */ 313#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
325 314
@@ -367,6 +356,17 @@ int init_module(void)
367 return -ENXIO; 356 return -ENXIO;
368} 357}
369 358
359static void cleanup_card(struct net_device *dev)
360{
361 struct lance_private *lp = dev->priv;
362 if (dev->dma != 4)
363 free_dma(dev->dma);
364 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
365 kfree(lp->tx_bounce_buffs);
366 kfree((void*)lp->rx_buffs);
367 kfree(lp);
368}
369
370void cleanup_module(void) 370void cleanup_module(void)
371{ 371{
372 int this_dev; 372 int this_dev;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 309d254842cf..646e89fc3562 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -145,13 +145,6 @@ static int __init do_lne390_probe(struct net_device *dev)
145 return -ENODEV; 145 return -ENODEV;
146} 146}
147 147
148static void cleanup_card(struct net_device *dev)
149{
150 free_irq(dev->irq, dev);
151 release_region(dev->base_addr, LNE390_IO_EXTENT);
152 iounmap(ei_status.mem);
153}
154
155#ifndef MODULE 148#ifndef MODULE
156struct net_device * __init lne390_probe(int unit) 149struct net_device * __init lne390_probe(int unit)
157{ 150{
@@ -440,6 +433,13 @@ int init_module(void)
440 return -ENXIO; 433 return -ENXIO;
441} 434}
442 435
436static void cleanup_card(struct net_device *dev)
437{
438 free_irq(dev->irq, dev);
439 release_region(dev->base_addr, LNE390_IO_EXTENT);
440 iounmap(ei_status.mem);
441}
442
443void cleanup_module(void) 443void cleanup_module(void)
444{ 444{
445 int this_dev; 445 int this_dev;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index d8c99f038fa0..06cb460361a8 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -559,55 +559,52 @@ static void mac8390_no_reset(struct net_device *dev)
559/* directly from daynaport.c by Alan Cox */ 559/* directly from daynaport.c by Alan Cox */
560static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count) 560static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
561{ 561{
562 volatile unsigned short *ptr; 562 volatile unsigned char *ptr;
563 unsigned short *target=to; 563 unsigned char *target=to;
564 from<<=1; /* word, skip overhead */ 564 from<<=1; /* word, skip overhead */
565 ptr=(unsigned short *)(dev->mem_start+from); 565 ptr=(unsigned char *)(dev->mem_start+from);
566 /* Leading byte? */ 566 /* Leading byte? */
567 if (from&2) { 567 if (from&2) {
568 *((char *)target)++ = *(((char *)ptr++)-1); 568 *target++ = ptr[-1];
569 ptr += 2;
569 count--; 570 count--;
570 } 571 }
571 while(count>=2) 572 while(count>=2)
572 { 573 {
573 *target++=*ptr++; /* Copy and */ 574 *(unsigned short *)target = *(unsigned short volatile *)ptr;
574 ptr++; /* skip cruft */ 575 ptr += 4; /* skip cruft */
576 target += 2;
575 count-=2; 577 count-=2;
576 } 578 }
577 /* Trailing byte? */ 579 /* Trailing byte? */
578 if(count) 580 if(count)
579 { 581 *target = *ptr;
580 /* Big endian */
581 unsigned short v=*ptr;
582 *((char *)target)=v>>8;
583 }
584} 582}
585 583
586static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count) 584static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
587{ 585{
588 volatile unsigned short *ptr; 586 volatile unsigned short *ptr;
589 const unsigned short *src=from; 587 const unsigned char *src=from;
590 to<<=1; /* word, skip overhead */ 588 to<<=1; /* word, skip overhead */
591 ptr=(unsigned short *)(dev->mem_start+to); 589 ptr=(unsigned short *)(dev->mem_start+to);
592 /* Leading byte? */ 590 /* Leading byte? */
593 if (to&2) { /* avoid a byte write (stomps on other data) */ 591 if (to&2) { /* avoid a byte write (stomps on other data) */
594 ptr[-1] = (ptr[-1]&0xFF00)|*((unsigned char *)src)++; 592 ptr[-1] = (ptr[-1]&0xFF00)|*src++;
595 ptr++; 593 ptr++;
596 count--; 594 count--;
597 } 595 }
598 while(count>=2) 596 while(count>=2)
599 { 597 {
600 *ptr++=*src++; /* Copy and */ 598 *ptr++=*(unsigned short *)src; /* Copy and */
601 ptr++; /* skip cruft */ 599 ptr++; /* skip cruft */
600 src += 2;
602 count-=2; 601 count-=2;
603 } 602 }
604 /* Trailing byte? */ 603 /* Trailing byte? */
605 if(count) 604 if(count)
606 { 605 {
607 /* Big endian */
608 unsigned short v=*src;
609 /* card doesn't like byte writes */ 606 /* card doesn't like byte writes */
610 *ptr=(*ptr&0x00FF)|(v&0xFF00); 607 *ptr=(*ptr&0x00FF)|(*src << 8);
611 } 608 }
612} 609}
613 610
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3cb9b3fe0cf1..40ae36b20c9d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -6,7 +6,7 @@
6 * Copyright (C) 2002 rabeeh@galileo.co.il 6 * Copyright (C) 2002 rabeeh@galileo.co.il
7 * 7 *
8 * Copyright (C) 2003 PMC-Sierra, Inc., 8 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani (lachwani@pmc-sierra.com) 9 * written by Manish Lachwani
10 * 10 *
11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
12 * 12 *
@@ -35,6 +35,8 @@
35#include <linux/tcp.h> 35#include <linux/tcp.h>
36#include <linux/udp.h> 36#include <linux/udp.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <linux/in.h>
39#include <linux/ip.h>
38 40
39#include <linux/bitops.h> 41#include <linux/bitops.h>
40#include <linux/delay.h> 42#include <linux/delay.h>
@@ -55,13 +57,15 @@
55/* Constants */ 57/* Constants */
56#define VLAN_HLEN 4 58#define VLAN_HLEN 4
57#define FCS_LEN 4 59#define FCS_LEN 4
58#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN 60#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
61#define HW_IP_ALIGN 2 /* hw aligns IP header */
62#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
59#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) 63#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
60 64
61#define INT_CAUSE_UNMASK_ALL 0x0007ffff 65#define INT_UNMASK_ALL 0x0007ffff
62#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff 66#define INT_UNMASK_ALL_EXT 0x0011ffff
63#define INT_CAUSE_MASK_ALL 0x00000000 67#define INT_MASK_ALL 0x00000000
64#define INT_CAUSE_MASK_ALL_EXT 0x00000000 68#define INT_MASK_ALL_EXT 0x00000000
65#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL 69#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
66#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT 70#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
67 71
@@ -78,8 +82,9 @@
78static int eth_port_link_is_up(unsigned int eth_port_num); 82static int eth_port_link_is_up(unsigned int eth_port_num);
79static void eth_port_uc_addr_get(struct net_device *dev, 83static void eth_port_uc_addr_get(struct net_device *dev,
80 unsigned char *MacAddr); 84 unsigned char *MacAddr);
81static int mv643xx_eth_real_open(struct net_device *); 85static void eth_port_set_multicast_list(struct net_device *);
82static int mv643xx_eth_real_stop(struct net_device *); 86static int mv643xx_eth_open(struct net_device *);
87static int mv643xx_eth_stop(struct net_device *);
83static int mv643xx_eth_change_mtu(struct net_device *, int); 88static int mv643xx_eth_change_mtu(struct net_device *, int);
84static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); 89static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
85static void eth_port_init_mac_tables(unsigned int eth_port_num); 90static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
124 */ 129 */
125static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 130static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
126{ 131{
127 struct mv643xx_private *mp = netdev_priv(dev); 132 if ((new_mtu > 9500) || (new_mtu < 64))
128 unsigned long flags;
129
130 spin_lock_irqsave(&mp->lock, flags);
131
132 if ((new_mtu > 9500) || (new_mtu < 64)) {
133 spin_unlock_irqrestore(&mp->lock, flags);
134 return -EINVAL; 133 return -EINVAL;
135 }
136 134
137 dev->mtu = new_mtu; 135 dev->mtu = new_mtu;
138 /* 136 /*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
142 * to memory is full, which might fail the open function. 140 * to memory is full, which might fail the open function.
143 */ 141 */
144 if (netif_running(dev)) { 142 if (netif_running(dev)) {
145 if (mv643xx_eth_real_stop(dev)) 143 mv643xx_eth_stop(dev);
146 printk(KERN_ERR 144 if (mv643xx_eth_open(dev))
147 "%s: Fatal error on stopping device\n",
148 dev->name);
149 if (mv643xx_eth_real_open(dev))
150 printk(KERN_ERR 145 printk(KERN_ERR
151 "%s: Fatal error on opening device\n", 146 "%s: Fatal error on opening device\n",
152 dev->name); 147 dev->name);
153 } 148 }
154 149
155 spin_unlock_irqrestore(&mp->lock, flags);
156 return 0; 150 return 0;
157} 151}
158 152
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
170 struct mv643xx_private *mp = netdev_priv(dev); 164 struct mv643xx_private *mp = netdev_priv(dev);
171 struct pkt_info pkt_info; 165 struct pkt_info pkt_info;
172 struct sk_buff *skb; 166 struct sk_buff *skb;
167 int unaligned;
173 168
174 if (test_and_set_bit(0, &mp->rx_task_busy)) 169 if (test_and_set_bit(0, &mp->rx_task_busy))
175 panic("%s: Error in test_set_bit / clear_bit", dev->name); 170 panic("%s: Error in test_set_bit / clear_bit", dev->name);
176 171
177 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { 172 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
178 skb = dev_alloc_skb(RX_SKB_SIZE); 173 skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
179 if (!skb) 174 if (!skb)
180 break; 175 break;
181 mp->rx_ring_skbs++; 176 mp->rx_ring_skbs++;
177 unaligned = (u32)skb->data & (DMA_ALIGN - 1);
178 if (unaligned)
179 skb_reserve(skb, DMA_ALIGN - unaligned);
182 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; 180 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
183 pkt_info.byte_cnt = RX_SKB_SIZE; 181 pkt_info.byte_cnt = RX_SKB_SIZE;
184 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, 182 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
189 "%s: Error allocating RX Ring\n", dev->name); 187 "%s: Error allocating RX Ring\n", dev->name);
190 break; 188 break;
191 } 189 }
192 skb_reserve(skb, 2); 190 skb_reserve(skb, HW_IP_ALIGN);
193 } 191 }
194 clear_bit(0, &mp->rx_task_busy); 192 clear_bit(0, &mp->rx_task_busy);
195 /* 193 /*
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
207 else { 205 else {
208 /* Return interrupts */ 206 /* Return interrupts */
209 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), 207 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
210 INT_CAUSE_UNMASK_ALL); 208 INT_UNMASK_ALL);
211 } 209 }
212#endif 210#endif
213} 211}
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
267 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 265 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
268 266
269 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); 267 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
268
269 eth_port_set_multicast_list(dev);
270} 270}
271 271
272/* 272/*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
342 if (!(eth_int_cause_ext & (BIT0 | BIT8))) 342 if (!(eth_int_cause_ext & (BIT0 | BIT8)))
343 return released; 343 return released;
344 344
345 spin_lock(&mp->lock);
346
347 /* Check only queue 0 */ 345 /* Check only queue 0 */
348 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 346 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
349 if (pkt_info.cmd_sts & BIT0) { 347 if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
351 stats->tx_errors++; 349 stats->tx_errors++;
352 } 350 }
353 351
354 /* 352 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
355 * If return_info is different than 0, release the skb. 353 dma_unmap_single(NULL, pkt_info.buf_ptr,
356 * The case where return_info is not 0 is only in case 354 pkt_info.byte_cnt,
357 * when transmitted a scatter/gather packet, where only 355 DMA_TO_DEVICE);
358 * last skb releases the whole chain. 356 else
359 */ 357 dma_unmap_page(NULL, pkt_info.buf_ptr,
360 if (pkt_info.return_info) { 358 pkt_info.byte_cnt,
361 if (skb_shinfo(pkt_info.return_info)->nr_frags) 359 DMA_TO_DEVICE);
362 dma_unmap_page(NULL, pkt_info.buf_ptr,
363 pkt_info.byte_cnt,
364 DMA_TO_DEVICE);
365 else
366 dma_unmap_single(NULL, pkt_info.buf_ptr,
367 pkt_info.byte_cnt,
368 DMA_TO_DEVICE);
369 360
361 if (pkt_info.return_info) {
370 dev_kfree_skb_irq(pkt_info.return_info); 362 dev_kfree_skb_irq(pkt_info.return_info);
371 released = 0; 363 released = 0;
372 } else 364 }
373 dma_unmap_page(NULL, pkt_info.buf_ptr,
374 pkt_info.byte_cnt, DMA_TO_DEVICE);
375 } 365 }
376 366
377 spin_unlock(&mp->lock);
378
379 return released; 367 return released;
380} 368}
381 369
@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
482 470
483 /* Read interrupt cause registers */ 471 /* Read interrupt cause registers */
484 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 472 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
485 INT_CAUSE_UNMASK_ALL; 473 INT_UNMASK_ALL;
486 474
487 if (eth_int_cause & BIT1) 475 if (eth_int_cause & BIT1)
488 eth_int_cause_ext = mv_read( 476 eth_int_cause_ext = mv_read(
489 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 477 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
490 INT_CAUSE_UNMASK_ALL_EXT; 478 INT_UNMASK_ALL_EXT;
491 479
492#ifdef MV643XX_NAPI 480#ifdef MV643XX_NAPI
493 if (!(eth_int_cause & 0x0007fffd)) { 481 if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
512 } else { 500 } else {
513 if (netif_rx_schedule_prep(dev)) { 501 if (netif_rx_schedule_prep(dev)) {
514 /* Mask all the interrupts */ 502 /* Mask all the interrupts */
515 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 503 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
516 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG 504 INT_MASK_ALL);
517 (port_num), 0); 505 /* wait for previous write to complete */
506 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
518 __netif_rx_schedule(dev); 507 __netif_rx_schedule(dev);
519 } 508 }
520#else 509#else
@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
527 * with skb's. 516 * with skb's.
528 */ 517 */
529#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK 518#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
530 /* Unmask all interrupts on ethernet port */ 519 /* Mask all interrupts on ethernet port */
531 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 520 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
532 INT_CAUSE_MASK_ALL); 521 INT_MASK_ALL);
522 /* wait for previous write to take effect */
523 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
524
533 queue_task(&mp->rx_task, &tq_immediate); 525 queue_task(&mp->rx_task, &tq_immediate);
534 mark_bh(IMMEDIATE_BH); 526 mark_bh(IMMEDIATE_BH);
535#else 527#else
@@ -636,56 +628,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
636} 628}
637 629
638/* 630/*
639 * mv643xx_eth_open
640 *
641 * This function is called when openning the network device. The function
642 * should initialize all the hardware, initialize cyclic Rx/Tx
643 * descriptors chain and buffers and allocate an IRQ to the network
644 * device.
645 *
646 * Input : a pointer to the network device structure
647 *
648 * Output : zero of success , nonzero if fails.
649 */
650
651static int mv643xx_eth_open(struct net_device *dev)
652{
653 struct mv643xx_private *mp = netdev_priv(dev);
654 unsigned int port_num = mp->port_num;
655 int err;
656
657 spin_lock_irq(&mp->lock);
658
659 err = request_irq(dev->irq, mv643xx_eth_int_handler,
660 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
661
662 if (err) {
663 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
664 port_num);
665 err = -EAGAIN;
666 goto out;
667 }
668
669 if (mv643xx_eth_real_open(dev)) {
670 printk("%s: Error opening interface\n", dev->name);
671 err = -EBUSY;
672 goto out_free;
673 }
674
675 spin_unlock_irq(&mp->lock);
676
677 return 0;
678
679out_free:
680 free_irq(dev->irq, dev);
681
682out:
683 spin_unlock_irq(&mp->lock);
684
685 return err;
686}
687
688/*
689 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory. 631 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
690 * 632 *
691 * DESCRIPTION: 633 * DESCRIPTION:
@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
777 mp->port_tx_queue_command |= 1; 719 mp->port_tx_queue_command |= 1;
778} 720}
779 721
780/* Helper function for mv643xx_eth_open */ 722/*
781static int mv643xx_eth_real_open(struct net_device *dev) 723 * mv643xx_eth_open
724 *
725 * This function is called when openning the network device. The function
726 * should initialize all the hardware, initialize cyclic Rx/Tx
727 * descriptors chain and buffers and allocate an IRQ to the network
728 * device.
729 *
730 * Input : a pointer to the network device structure
731 *
732 * Output : zero of success , nonzero if fails.
733 */
734
735static int mv643xx_eth_open(struct net_device *dev)
782{ 736{
783 struct mv643xx_private *mp = netdev_priv(dev); 737 struct mv643xx_private *mp = netdev_priv(dev);
784 unsigned int port_num = mp->port_num; 738 unsigned int port_num = mp->port_num;
785 unsigned int size; 739 unsigned int size;
740 int err;
741
742 err = request_irq(dev->irq, mv643xx_eth_int_handler,
743 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
744 if (err) {
745 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
746 port_num);
747 return -EAGAIN;
748 }
786 749
787 /* Stop RX Queues */ 750 /* Stop RX Queues */
788 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 751 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
789 752
790 /* Clear the ethernet port interrupts */
791 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
792 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
793
794 /* Unmask RX buffer and TX end interrupt */
795 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
796 INT_CAUSE_UNMASK_ALL);
797
798 /* Unmask phy and link status changes interrupts */
799 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
800 INT_CAUSE_UNMASK_ALL_EXT);
801
802 /* Set the MAC Address */ 753 /* Set the MAC Address */
803 memcpy(mp->port_mac_addr, dev->dev_addr, 6); 754 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
804 755
@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
818 GFP_KERNEL); 769 GFP_KERNEL);
819 if (!mp->rx_skb) { 770 if (!mp->rx_skb) {
820 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name); 771 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
821 return -ENOMEM; 772 err = -ENOMEM;
773 goto out_free_irq;
822 } 774 }
823 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, 775 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
824 GFP_KERNEL); 776 GFP_KERNEL);
825 if (!mp->tx_skb) { 777 if (!mp->tx_skb) {
826 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); 778 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
827 kfree(mp->rx_skb); 779 err = -ENOMEM;
828 return -ENOMEM; 780 goto out_free_rx_skb;
829 } 781 }
830 782
831 /* Allocate TX ring */ 783 /* Allocate TX ring */
@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
845 if (!mp->p_tx_desc_area) { 797 if (!mp->p_tx_desc_area) {
846 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", 798 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
847 dev->name, size); 799 dev->name, size);
848 kfree(mp->rx_skb); 800 err = -ENOMEM;
849 kfree(mp->tx_skb); 801 goto out_free_tx_skb;
850 return -ENOMEM;
851 } 802 }
852 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */ 803 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
853 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size); 804 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
874 printk(KERN_ERR "%s: Freeing previously allocated TX queues...", 825 printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
875 dev->name); 826 dev->name);
876 if (mp->rx_sram_size) 827 if (mp->rx_sram_size)
877 iounmap(mp->p_rx_desc_area); 828 iounmap(mp->p_tx_desc_area);
878 else 829 else
879 dma_free_coherent(NULL, mp->tx_desc_area_size, 830 dma_free_coherent(NULL, mp->tx_desc_area_size,
880 mp->p_tx_desc_area, mp->tx_desc_dma); 831 mp->p_tx_desc_area, mp->tx_desc_dma);
881 kfree(mp->rx_skb); 832 err = -ENOMEM;
882 kfree(mp->tx_skb); 833 goto out_free_tx_skb;
883 return -ENOMEM;
884 } 834 }
885 memset((void *)mp->p_rx_desc_area, 0, size); 835 memset((void *)mp->p_rx_desc_area, 0, size);
886 836
@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
900 mp->tx_int_coal = 850 mp->tx_int_coal =
901 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 851 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
902 852
903 netif_start_queue(dev); 853 /* Clear any pending ethernet port interrupts */
854 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
855 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
856
857 /* Unmask phy and link status changes interrupts */
858 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
859 INT_UNMASK_ALL_EXT);
904 860
861 /* Unmask RX buffer and TX end interrupt */
862 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
905 return 0; 863 return 0;
864
865out_free_tx_skb:
866 kfree(mp->tx_skb);
867out_free_rx_skb:
868 kfree(mp->rx_skb);
869out_free_irq:
870 free_irq(dev->irq, dev);
871
872 return err;
906} 873}
907 874
908static void mv643xx_eth_free_tx_rings(struct net_device *dev) 875static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
910 struct mv643xx_private *mp = netdev_priv(dev); 877 struct mv643xx_private *mp = netdev_priv(dev);
911 unsigned int port_num = mp->port_num; 878 unsigned int port_num = mp->port_num;
912 unsigned int curr; 879 unsigned int curr;
880 struct sk_buff *skb;
913 881
914 /* Stop Tx Queues */ 882 /* Stop Tx Queues */
915 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); 883 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
916 884
917 /* Free outstanding skb's on TX rings */ 885 /* Free outstanding skb's on TX rings */
918 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { 886 for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
919 if (mp->tx_skb[curr]) { 887 skb = mp->tx_skb[curr];
920 dev_kfree_skb(mp->tx_skb[curr]); 888 if (skb) {
889 mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
890 dev_kfree_skb(skb);
921 mp->tx_ring_skbs--; 891 mp->tx_ring_skbs--;
922 } 892 }
923 } 893 }
@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
973 * Output : zero if success , nonzero if fails 943 * Output : zero if success , nonzero if fails
974 */ 944 */
975 945
976/* Helper function for mv643xx_eth_stop */ 946static int mv643xx_eth_stop(struct net_device *dev)
977
978static int mv643xx_eth_real_stop(struct net_device *dev)
979{ 947{
980 struct mv643xx_private *mp = netdev_priv(dev); 948 struct mv643xx_private *mp = netdev_priv(dev);
981 unsigned int port_num = mp->port_num; 949 unsigned int port_num = mp->port_num;
982 950
951 /* Mask all interrupts on ethernet port */
952 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
953 /* wait for previous write to complete */
954 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
955
956#ifdef MV643XX_NAPI
957 netif_poll_disable(dev);
958#endif
983 netif_carrier_off(dev); 959 netif_carrier_off(dev);
984 netif_stop_queue(dev); 960 netif_stop_queue(dev);
985 961
986 mv643xx_eth_free_tx_rings(dev);
987 mv643xx_eth_free_rx_rings(dev);
988
989 eth_port_reset(mp->port_num); 962 eth_port_reset(mp->port_num);
990 963
991 /* Disable ethernet port interrupts */ 964 mv643xx_eth_free_tx_rings(dev);
992 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 965 mv643xx_eth_free_rx_rings(dev);
993 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
994
995 /* Mask RX buffer and TX end interrupt */
996 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
997
998 /* Mask phy and link status changes interrupts */
999 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
1000
1001 return 0;
1002}
1003
1004static int mv643xx_eth_stop(struct net_device *dev)
1005{
1006 struct mv643xx_private *mp = netdev_priv(dev);
1007
1008 spin_lock_irq(&mp->lock);
1009 966
1010 mv643xx_eth_real_stop(dev); 967#ifdef MV643XX_NAPI
968 netif_poll_enable(dev);
969#endif
1011 970
1012 free_irq(dev->irq, dev); 971 free_irq(dev->irq, dev);
1013 spin_unlock_irq(&mp->lock);
1014 972
1015 return 0; 973 return 0;
1016} 974}
@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
1022 struct pkt_info pkt_info; 980 struct pkt_info pkt_info;
1023 981
1024 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { 982 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
1025 if (pkt_info.return_info) { 983 if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
1026 if (skb_shinfo(pkt_info.return_info)->nr_frags) 984 dma_unmap_single(NULL, pkt_info.buf_ptr,
1027 dma_unmap_page(NULL, pkt_info.buf_ptr, 985 pkt_info.byte_cnt,
1028 pkt_info.byte_cnt, 986 DMA_TO_DEVICE);
1029 DMA_TO_DEVICE); 987 else
1030 else 988 dma_unmap_page(NULL, pkt_info.buf_ptr,
1031 dma_unmap_single(NULL, pkt_info.buf_ptr, 989 pkt_info.byte_cnt,
1032 pkt_info.byte_cnt, 990 DMA_TO_DEVICE);
1033 DMA_TO_DEVICE);
1034 991
992 if (pkt_info.return_info)
1035 dev_kfree_skb_irq(pkt_info.return_info); 993 dev_kfree_skb_irq(pkt_info.return_info);
1036 } else
1037 dma_unmap_page(NULL, pkt_info.buf_ptr,
1038 pkt_info.byte_cnt, DMA_TO_DEVICE);
1039 } 994 }
1040 995
1041 if (netif_queue_stopped(dev) && 996 if (netif_queue_stopped(dev) &&
@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1053 struct mv643xx_private *mp = netdev_priv(dev); 1008 struct mv643xx_private *mp = netdev_priv(dev);
1054 int done = 1, orig_budget, work_done; 1009 int done = 1, orig_budget, work_done;
1055 unsigned int port_num = mp->port_num; 1010 unsigned int port_num = mp->port_num;
1056 unsigned long flags;
1057 1011
1058#ifdef MV643XX_TX_FAST_REFILL 1012#ifdef MV643XX_TX_FAST_REFILL
1059 if (++mp->tx_clean_threshold > 5) { 1013 if (++mp->tx_clean_threshold > 5) {
1060 spin_lock_irqsave(&mp->lock, flags);
1061 mv643xx_tx(dev); 1014 mv643xx_tx(dev);
1062 mp->tx_clean_threshold = 0; 1015 mp->tx_clean_threshold = 0;
1063 spin_unlock_irqrestore(&mp->lock, flags);
1064 } 1016 }
1065#endif 1017#endif
1066 1018
@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
1078 } 1030 }
1079 1031
1080 if (done) { 1032 if (done) {
1081 spin_lock_irqsave(&mp->lock, flags); 1033 netif_rx_complete(dev);
1082 __netif_rx_complete(dev);
1083 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1034 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1084 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1035 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1085 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1036 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1086 INT_CAUSE_UNMASK_ALL); 1037 INT_UNMASK_ALL);
1087 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1088 INT_CAUSE_UNMASK_ALL_EXT);
1089 spin_unlock_irqrestore(&mp->lock, flags);
1090 } 1038 }
1091 1039
1092 return done ? 0 : 1; 1040 return done ? 0 : 1;
1093} 1041}
1094#endif 1042#endif
1095 1043
1044/* Hardware can't handle unaligned fragments smaller than 9 bytes.
1045 * This helper function detects that case.
1046 */
1047
1048static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1049{
1050 unsigned int frag;
1051 skb_frag_t *fragp;
1052
1053 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1054 fragp = &skb_shinfo(skb)->frags[frag];
1055 if (fragp->size <= 8 && fragp->page_offset & 0x7)
1056 return 1;
1057
1058 }
1059 return 0;
1060}
1061
1062
1096/* 1063/*
1097 * mv643xx_eth_start_xmit 1064 * mv643xx_eth_start_xmit
1098 * 1065 *
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1136 return 1; 1103 return 1;
1137 } 1104 }
1138 1105
1106#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1107 if (has_tiny_unaligned_frags(skb)) {
1108 if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
1109 stats->tx_dropped++;
1110 printk(KERN_DEBUG "%s: failed to linearize tiny "
1111 "unaligned fragment\n", dev->name);
1112 return 1;
1113 }
1114 }
1115
1139 spin_lock_irqsave(&mp->lock, flags); 1116 spin_lock_irqsave(&mp->lock, flags);
1140 1117
1141 /* Update packet info data structure -- DMA owned, first last */
1142#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1143 if (!skb_shinfo(skb)->nr_frags) { 1118 if (!skb_shinfo(skb)->nr_frags) {
1144linear:
1145 if (skb->ip_summed != CHECKSUM_HW) { 1119 if (skb->ip_summed != CHECKSUM_HW) {
1146 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 1120 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1147 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1121 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
1150 5 << ETH_TX_IHL_SHIFT; 1124 5 << ETH_TX_IHL_SHIFT;
1151 pkt_info.l4i_chk = 0; 1125 pkt_info.l4i_chk = 0;
1152 } else { 1126 } else {
1153
1154 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1127 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1155 ETH_TX_FIRST_DESC | 1128 ETH_TX_FIRST_DESC |
1156 ETH_TX_LAST_DESC | 1129 ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
1158 ETH_GEN_IP_V_4_CHECKSUM | 1131 ETH_GEN_IP_V_4_CHECKSUM |
1159 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1132 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1160 /* CPU already calculated pseudo header checksum. */ 1133 /* CPU already calculated pseudo header checksum. */
1161 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1134 if ((skb->protocol == ETH_P_IP) &&
1135 (skb->nh.iph->protocol == IPPROTO_UDP) ) {
1162 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1136 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1163 pkt_info.l4i_chk = skb->h.uh->check; 1137 pkt_info.l4i_chk = skb->h.uh->check;
1164 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1138 } else if ((skb->protocol == ETH_P_IP) &&
1139 (skb->nh.iph->protocol == IPPROTO_TCP))
1165 pkt_info.l4i_chk = skb->h.th->check; 1140 pkt_info.l4i_chk = skb->h.th->check;
1166 else { 1141 else {
1167 printk(KERN_ERR 1142 printk(KERN_ERR
1168 "%s: chksum proto != TCP or UDP\n", 1143 "%s: chksum proto != IPv4 TCP or UDP\n",
1169 dev->name); 1144 dev->name);
1170 spin_unlock_irqrestore(&mp->lock, flags); 1145 spin_unlock_irqrestore(&mp->lock, flags);
1171 return 1; 1146 return 1;
@@ -1183,26 +1158,6 @@ linear:
1183 } else { 1158 } else {
1184 unsigned int frag; 1159 unsigned int frag;
1185 1160
1186 /* Since hardware can't handle unaligned fragments smaller
1187 * than 9 bytes, if we find any, we linearize the skb
1188 * and start again. When I've seen it, it's always been
1189 * the first frag (probably near the end of the page),
1190 * but we check all frags to be safe.
1191 */
1192 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1193 skb_frag_t *fragp;
1194
1195 fragp = &skb_shinfo(skb)->frags[frag];
1196 if (fragp->size <= 8 && fragp->page_offset & 0x7) {
1197 skb_linearize(skb, GFP_ATOMIC);
1198 printk(KERN_DEBUG "%s: unaligned tiny fragment"
1199 "%d of %d, fixed\n",
1200 dev->name, frag,
1201 skb_shinfo(skb)->nr_frags);
1202 goto linear;
1203 }
1204 }
1205
1206 /* first frag which is skb header */ 1161 /* first frag which is skb header */
1207 pkt_info.byte_cnt = skb_headlen(skb); 1162 pkt_info.byte_cnt = skb_headlen(skb);
1208 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, 1163 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
1221 ETH_GEN_IP_V_4_CHECKSUM | 1176 ETH_GEN_IP_V_4_CHECKSUM |
1222 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1177 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1223 /* CPU already calculated pseudo header checksum. */ 1178 /* CPU already calculated pseudo header checksum. */
1224 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1179 if ((skb->protocol == ETH_P_IP) &&
1180 (skb->nh.iph->protocol == IPPROTO_UDP)) {
1225 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1181 pkt_info.cmd_sts |= ETH_UDP_FRAME;
1226 pkt_info.l4i_chk = skb->h.uh->check; 1182 pkt_info.l4i_chk = skb->h.uh->check;
1227 } else if (skb->nh.iph->protocol == IPPROTO_TCP) 1183 } else if ((skb->protocol == ETH_P_IP) &&
1184 (skb->nh.iph->protocol == IPPROTO_TCP))
1228 pkt_info.l4i_chk = skb->h.th->check; 1185 pkt_info.l4i_chk = skb->h.th->check;
1229 else { 1186 else {
1230 printk(KERN_ERR 1187 printk(KERN_ERR
1231 "%s: chksum proto != TCP or UDP\n", 1188 "%s: chksum proto != IPv4 TCP or UDP\n",
1232 dev->name); 1189 dev->name);
1233 spin_unlock_irqrestore(&mp->lock, flags); 1190 spin_unlock_irqrestore(&mp->lock, flags);
1234 return 1; 1191 return 1;
@@ -1288,6 +1245,8 @@ linear:
1288 } 1245 }
1289 } 1246 }
1290#else 1247#else
1248 spin_lock_irqsave(&mp->lock, flags);
1249
1291 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | 1250 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
1292 ETH_TX_LAST_DESC; 1251 ETH_TX_LAST_DESC;
1293 pkt_info.l4i_chk = 0; 1252 pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1340} 1299}
1341 1300
1342#ifdef CONFIG_NET_POLL_CONTROLLER 1301#ifdef CONFIG_NET_POLL_CONTROLLER
1343static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
1344{
1345 int port_num = mp->port_num;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(&mp->lock, flags);
1349 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1350 INT_CAUSE_UNMASK_ALL);
1351 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1352 INT_CAUSE_UNMASK_ALL_EXT);
1353 spin_unlock_irqrestore(&mp->lock, flags);
1354}
1355
1356static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
1357{
1358 int port_num = mp->port_num;
1359 unsigned long flags;
1360
1361 spin_lock_irqsave(&mp->lock, flags);
1362 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1363 INT_CAUSE_MASK_ALL);
1364 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1365 INT_CAUSE_MASK_ALL_EXT);
1366 spin_unlock_irqrestore(&mp->lock, flags);
1367}
1368
1369static void mv643xx_netpoll(struct net_device *netdev) 1302static void mv643xx_netpoll(struct net_device *netdev)
1370{ 1303{
1371 struct mv643xx_private *mp = netdev_priv(netdev); 1304 struct mv643xx_private *mp = netdev_priv(netdev);
1305 int port_num = mp->port_num;
1306
1307 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
1308 /* wait for previous write to complete */
1309 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
1372 1310
1373 mv643xx_disable_irq(mp);
1374 mv643xx_eth_int_handler(netdev->irq, netdev, NULL); 1311 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1375 mv643xx_enable_irq(mp); 1312
1313 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
1376} 1314}
1377#endif 1315#endif
1378 1316
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1441 * Zero copy can only work if we use Discovery II memory. Else, we will 1379 * Zero copy can only work if we use Discovery II memory. Else, we will
1442 * have to map the buffers to ISA memory which is only 16 MB 1380 * have to map the buffers to ISA memory which is only 16 MB
1443 */ 1381 */
1444 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; 1382 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
1445#endif 1383#endif
1446#endif 1384#endif
1447 1385
@@ -2054,6 +1992,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
2054} 1992}
2055 1993
2056/* 1994/*
1995 * The entries in each table are indexed by a hash of a packet's MAC
1996 * address. One bit in each entry determines whether the packet is
1997 * accepted. There are 4 entries (each 8 bits wide) in each register
1998 * of the table. The bits in each entry are defined as follows:
1999 * 0 Accept=1, Drop=0
2000 * 3-1 Queue (ETH_Q0=0)
2001 * 7-4 Reserved = 0;
2002 */
2003static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2004{
2005 unsigned int table_reg;
2006 unsigned int tbl_offset;
2007 unsigned int reg_offset;
2008
2009 tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
2010 reg_offset = entry % 4; /* Entry offset within the register */
2011
2012 /* Set "accepts frame bit" at specified table entry */
2013 table_reg = mv_read(table + tbl_offset);
2014 table_reg |= 0x01 << (8 * reg_offset);
2015 mv_write(table + tbl_offset, table_reg);
2016}
2017
2018/*
2019 * eth_port_mc_addr - Multicast address settings.
2020 *
2021 * The MV device supports multicast using two tables:
2022 * 1) Special Multicast Table for MAC addresses of the form
2023 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF).
2024 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2025 * Table entries in the DA-Filter table.
2026 * 2) Other Multicast Table for multicast of another type. A CRC-8bit
2027 * is used as an index to the Other Multicast Table entries in the
2028 * DA-Filter table. This function calculates the CRC-8bit value.
2029 * In either case, eth_port_set_filter_table_entry() is then called
2030 * to set to set the actual table entry.
2031 */
2032static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2033{
2034 unsigned int mac_h;
2035 unsigned int mac_l;
2036 unsigned char crc_result = 0;
2037 int table;
2038 int mac_array[48];
2039 int crc[8];
2040 int i;
2041
2042 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
2043 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
2044 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2045 (eth_port_num);
2046 eth_port_set_filter_table_entry(table, p_addr[5]);
2047 return;
2048 }
2049
2050 /* Calculate CRC-8 out of the given address */
2051 mac_h = (p_addr[0] << 8) | (p_addr[1]);
2052 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
2053 (p_addr[4] << 8) | (p_addr[5] << 0);
2054
2055 for (i = 0; i < 32; i++)
2056 mac_array[i] = (mac_l >> i) & 0x1;
2057 for (i = 32; i < 48; i++)
2058 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
2059
2060 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
2061 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
2062 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
2063 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
2064 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
2065
2066 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2067 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
2068 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
2069 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
2070 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
2071 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
2072 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
2073
2074 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
2075 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
2076 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
2077 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
2078 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
2079 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
2080
2081 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
2082 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
2083 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
2084 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
2085 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
2086 mac_array[3] ^ mac_array[2] ^ mac_array[1];
2087
2088 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
2089 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
2090 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
2091 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
2092 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
2093 mac_array[3] ^ mac_array[2];
2094
2095 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
2096 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
2097 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
2098 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
2099 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
2100 mac_array[4] ^ mac_array[3];
2101
2102 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
2103 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
2104 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
2105 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
2106 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
2107 mac_array[4];
2108
2109 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
2110 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
2111 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
2112 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
2113 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
2114
2115 for (i = 0; i < 8; i++)
2116 crc_result = crc_result | (crc[i] << i);
2117
2118 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
2119 eth_port_set_filter_table_entry(table, crc_result);
2120}
2121
2122/*
2123 * Set the entire multicast list based on dev->mc_list.
2124 */
2125static void eth_port_set_multicast_list(struct net_device *dev)
2126{
2127
2128 struct dev_mc_list *mc_list;
2129 int i;
2130 int table_index;
2131 struct mv643xx_private *mp = netdev_priv(dev);
2132 unsigned int eth_port_num = mp->port_num;
2133
2134 /* If the device is in promiscuous mode or in all multicast mode,
2135 * we will fully populate both multicast tables with accept.
2136 * This is guaranteed to yield a match on all multicast addresses...
2137 */
2138 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
2139 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2140 /* Set all entries in DA filter special multicast
2141 * table (Ex_dFSMT)
2142 * Set for ETH_Q0 for now
2143 * Bits
2144 * 0 Accept=1, Drop=0
2145 * 3-1 Queue ETH_Q0=0
2146 * 7-4 Reserved = 0;
2147 */
2148 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2149
2150 /* Set all entries in DA filter other multicast
2151 * table (Ex_dFOMT)
2152 * Set for ETH_Q0 for now
2153 * Bits
2154 * 0 Accept=1, Drop=0
2155 * 3-1 Queue ETH_Q0=0
2156 * 7-4 Reserved = 0;
2157 */
2158 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2159 }
2160 return;
2161 }
2162
2163 /* We will clear out multicast tables every time we get the list.
2164 * Then add the entire new list...
2165 */
2166 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2167 /* Clear DA filter special multicast table (Ex_dFSMT) */
2168 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2169 (eth_port_num) + table_index, 0);
2170
2171 /* Clear DA filter other multicast table (Ex_dFOMT) */
2172 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2173 (eth_port_num) + table_index, 0);
2174 }
2175
2176 /* Get pointer to net_device multicast list and add each one... */
2177 for (i = 0, mc_list = dev->mc_list;
2178 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2179 i++, mc_list = mc_list->next)
2180 if (mc_list->dmi_addrlen == 6)
2181 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
2182}
2183
2184/*
2057 * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables 2185 * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
2058 * 2186 *
2059 * DESCRIPTION: 2187 * DESCRIPTION:
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2080 2208
2081 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2209 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2082 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2210 /* Clear DA filter special multicast table (Ex_dFSMT) */
2083 mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2211 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2084 (eth_port_num) + table_index), 0); 2212 (eth_port_num) + table_index, 0);
2085 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2213 /* Clear DA filter other multicast table (Ex_dFOMT) */
2086 mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2214 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2087 (eth_port_num) + table_index), 0); 2215 (eth_port_num) + table_index, 0);
2088 } 2216 }
2089} 2217}
2090 2218
@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2489 struct eth_tx_desc *current_descriptor; 2617 struct eth_tx_desc *current_descriptor;
2490 struct eth_tx_desc *first_descriptor; 2618 struct eth_tx_desc *first_descriptor;
2491 u32 command; 2619 u32 command;
2620 unsigned long flags;
2492 2621
2493 /* Do not process Tx ring in case of Tx ring resource error */ 2622 /* Do not process Tx ring in case of Tx ring resource error */
2494 if (mp->tx_resource_err) 2623 if (mp->tx_resource_err)
@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2505 return ETH_ERROR; 2634 return ETH_ERROR;
2506 } 2635 }
2507 2636
2637 spin_lock_irqsave(&mp->lock, flags);
2638
2508 mp->tx_ring_skbs++; 2639 mp->tx_ring_skbs++;
2509 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2640 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2510 2641
@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2554 mp->tx_resource_err = 1; 2685 mp->tx_resource_err = 1;
2555 mp->tx_curr_desc_q = tx_first_desc; 2686 mp->tx_curr_desc_q = tx_first_desc;
2556 2687
2688 spin_unlock_irqrestore(&mp->lock, flags);
2689
2557 return ETH_QUEUE_LAST_RESOURCE; 2690 return ETH_QUEUE_LAST_RESOURCE;
2558 } 2691 }
2559 2692
2560 mp->tx_curr_desc_q = tx_next_desc; 2693 mp->tx_curr_desc_q = tx_next_desc;
2561 2694
2695 spin_unlock_irqrestore(&mp->lock, flags);
2696
2562 return ETH_OK; 2697 return ETH_OK;
2563} 2698}
2564#else 2699#else
@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2569 int tx_desc_used; 2704 int tx_desc_used;
2570 struct eth_tx_desc *current_descriptor; 2705 struct eth_tx_desc *current_descriptor;
2571 unsigned int command_status; 2706 unsigned int command_status;
2707 unsigned long flags;
2572 2708
2573 /* Do not process Tx ring in case of Tx ring resource error */ 2709 /* Do not process Tx ring in case of Tx ring resource error */
2574 if (mp->tx_resource_err) 2710 if (mp->tx_resource_err)
2575 return ETH_QUEUE_FULL; 2711 return ETH_QUEUE_FULL;
2576 2712
2713 spin_lock_irqsave(&mp->lock, flags);
2714
2577 mp->tx_ring_skbs++; 2715 mp->tx_ring_skbs++;
2578 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); 2716 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2579 2717
@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2604 /* Check for ring index overlap in the Tx desc ring */ 2742 /* Check for ring index overlap in the Tx desc ring */
2605 if (tx_desc_curr == tx_desc_used) { 2743 if (tx_desc_curr == tx_desc_used) {
2606 mp->tx_resource_err = 1; 2744 mp->tx_resource_err = 1;
2745
2746 spin_unlock_irqrestore(&mp->lock, flags);
2607 return ETH_QUEUE_LAST_RESOURCE; 2747 return ETH_QUEUE_LAST_RESOURCE;
2608 } 2748 }
2609 2749
2750 spin_unlock_irqrestore(&mp->lock, flags);
2610 return ETH_OK; 2751 return ETH_OK;
2611} 2752}
2612#endif 2753#endif
@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2629 * Tx ring 'first' and 'used' indexes are updated. 2770 * Tx ring 'first' and 'used' indexes are updated.
2630 * 2771 *
2631 * RETURN: 2772 * RETURN:
2632 * ETH_ERROR in case the routine can not access Tx desc ring. 2773 * ETH_OK on success
2633 * ETH_RETRY in case there is transmission in process. 2774 * ETH_ERROR otherwise.
2634 * ETH_END_OF_JOB if the routine has nothing to release.
2635 * ETH_OK otherwise.
2636 * 2775 *
2637 */ 2776 */
2638static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, 2777static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2639 struct pkt_info *p_pkt_info) 2778 struct pkt_info *p_pkt_info)
2640{ 2779{
2641 int tx_desc_used; 2780 int tx_desc_used;
2781 int tx_busy_desc;
2782 struct eth_tx_desc *p_tx_desc_used;
2783 unsigned int command_status;
2784 unsigned long flags;
2785 int err = ETH_OK;
2786
2787 spin_lock_irqsave(&mp->lock, flags);
2788
2642#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 2789#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2643 int tx_busy_desc = mp->tx_first_desc_q; 2790 tx_busy_desc = mp->tx_first_desc_q;
2644#else 2791#else
2645 int tx_busy_desc = mp->tx_curr_desc_q; 2792 tx_busy_desc = mp->tx_curr_desc_q;
2646#endif 2793#endif
2647 struct eth_tx_desc *p_tx_desc_used;
2648 unsigned int command_status;
2649 2794
2650 /* Get the Tx Desc ring indexes */ 2795 /* Get the Tx Desc ring indexes */
2651 tx_desc_used = mp->tx_used_desc_q; 2796 tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2653 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; 2798 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
2654 2799
2655 /* Sanity check */ 2800 /* Sanity check */
2656 if (p_tx_desc_used == NULL) 2801 if (p_tx_desc_used == NULL) {
2657 return ETH_ERROR; 2802 err = ETH_ERROR;
2803 goto out;
2804 }
2658 2805
2659 /* Stop release. About to overlap the current available Tx descriptor */ 2806 /* Stop release. About to overlap the current available Tx descriptor */
2660 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) 2807 if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
2661 return ETH_END_OF_JOB; 2808 err = ETH_ERROR;
2809 goto out;
2810 }
2662 2811
2663 command_status = p_tx_desc_used->cmd_sts; 2812 command_status = p_tx_desc_used->cmd_sts;
2664 2813
2665 /* Still transmitting... */ 2814 /* Still transmitting... */
2666 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2815 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2667 return ETH_RETRY; 2816 err = ETH_ERROR;
2817 goto out;
2818 }
2668 2819
2669 /* Pass the packet information to the caller */ 2820 /* Pass the packet information to the caller */
2670 p_pkt_info->cmd_sts = command_status; 2821 p_pkt_info->cmd_sts = command_status;
2671 p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; 2822 p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
2823 p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
2824 p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
2672 mp->tx_skb[tx_desc_used] = NULL; 2825 mp->tx_skb[tx_desc_used] = NULL;
2673 2826
2674 /* Update the next descriptor to release. */ 2827 /* Update the next descriptor to release. */
@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2680 BUG_ON(mp->tx_ring_skbs == 0); 2833 BUG_ON(mp->tx_ring_skbs == 0);
2681 mp->tx_ring_skbs--; 2834 mp->tx_ring_skbs--;
2682 2835
2683 return ETH_OK; 2836out:
2837 spin_unlock_irqrestore(&mp->lock, flags);
2838
2839 return err;
2684} 2840}
2685 2841
2686/* 2842/*
@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2712 int rx_next_curr_desc, rx_curr_desc, rx_used_desc; 2868 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
2713 volatile struct eth_rx_desc *p_rx_desc; 2869 volatile struct eth_rx_desc *p_rx_desc;
2714 unsigned int command_status; 2870 unsigned int command_status;
2871 unsigned long flags;
2715 2872
2716 /* Do not process Rx ring in case of Rx ring resource error */ 2873 /* Do not process Rx ring in case of Rx ring resource error */
2717 if (mp->rx_resource_err) 2874 if (mp->rx_resource_err)
2718 return ETH_QUEUE_FULL; 2875 return ETH_QUEUE_FULL;
2719 2876
2877 spin_lock_irqsave(&mp->lock, flags);
2878
2720 /* Get the Rx Desc ring 'curr and 'used' indexes */ 2879 /* Get the Rx Desc ring 'curr and 'used' indexes */
2721 rx_curr_desc = mp->rx_curr_desc_q; 2880 rx_curr_desc = mp->rx_curr_desc_q;
2722 rx_used_desc = mp->rx_used_desc_q; 2881 rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2728 rmb(); 2887 rmb();
2729 2888
2730 /* Nothing to receive... */ 2889 /* Nothing to receive... */
2731 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) 2890 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2891 spin_unlock_irqrestore(&mp->lock, flags);
2732 return ETH_END_OF_JOB; 2892 return ETH_END_OF_JOB;
2893 }
2733 2894
2734 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; 2895 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2735 p_pkt_info->cmd_sts = command_status; 2896 p_pkt_info->cmd_sts = command_status;
@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2749 if (rx_next_curr_desc == rx_used_desc) 2910 if (rx_next_curr_desc == rx_used_desc)
2750 mp->rx_resource_err = 1; 2911 mp->rx_resource_err = 1;
2751 2912
2913 spin_unlock_irqrestore(&mp->lock, flags);
2914
2752 return ETH_OK; 2915 return ETH_OK;
2753} 2916}
2754 2917
@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2777{ 2940{
2778 int used_rx_desc; /* Where to return Rx resource */ 2941 int used_rx_desc; /* Where to return Rx resource */
2779 volatile struct eth_rx_desc *p_used_rx_desc; 2942 volatile struct eth_rx_desc *p_used_rx_desc;
2943 unsigned long flags;
2944
2945 spin_lock_irqsave(&mp->lock, flags);
2780 2946
2781 /* Get 'used' Rx descriptor */ 2947 /* Get 'used' Rx descriptor */
2782 used_rx_desc = mp->rx_used_desc_q; 2948 used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2800 /* Any Rx return cancels the Rx resource error status */ 2966 /* Any Rx return cancels the Rx resource error status */
2801 mp->rx_resource_err = 0; 2967 mp->rx_resource_err = 0;
2802 2968
2969 spin_unlock_irqrestore(&mp->lock, flags);
2970
2803 return ETH_OK; 2971 return ETH_OK;
2804} 2972}
2805 2973
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 0de8fdd2aa86..94f782d51f0f 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -212,15 +212,6 @@ static int __init do_ne_probe(struct net_device *dev)
212 return -ENODEV; 212 return -ENODEV;
213} 213}
214 214
215static void cleanup_card(struct net_device *dev)
216{
217 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
218 if (idev)
219 pnp_device_detach(idev);
220 free_irq(dev->irq, dev);
221 release_region(dev->base_addr, NE_IO_EXTENT);
222}
223
224#ifndef MODULE 215#ifndef MODULE
225struct net_device * __init ne_probe(int unit) 216struct net_device * __init ne_probe(int unit)
226{ 217{
@@ -859,6 +850,15 @@ int init_module(void)
859 return -ENODEV; 850 return -ENODEV;
860} 851}
861 852
853static void cleanup_card(struct net_device *dev)
854{
855 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
856 if (idev)
857 pnp_device_detach(idev);
858 free_irq(dev->irq, dev);
859 release_region(dev->base_addr, NE_IO_EXTENT);
860}
861
862void cleanup_module(void) 862void cleanup_module(void)
863{ 863{
864 int this_dev; 864 int this_dev;
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 6d62ada85de6..e6df375a1d4b 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -278,14 +278,6 @@ static int __init do_ne2_probe(struct net_device *dev)
278 return -ENODEV; 278 return -ENODEV;
279} 279}
280 280
281static void cleanup_card(struct net_device *dev)
282{
283 mca_mark_as_unused(ei_status.priv);
284 mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
285 free_irq(dev->irq, dev);
286 release_region(dev->base_addr, NE_IO_EXTENT);
287}
288
289#ifndef MODULE 281#ifndef MODULE
290struct net_device * __init ne2_probe(int unit) 282struct net_device * __init ne2_probe(int unit)
291{ 283{
@@ -812,6 +804,14 @@ int init_module(void)
812 return -ENXIO; 804 return -ENXIO;
813} 805}
814 806
807static void cleanup_card(struct net_device *dev)
808{
809 mca_mark_as_unused(ei_status.priv);
810 mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
811 free_irq(dev->irq, dev);
812 release_region(dev->base_addr, NE_IO_EXTENT);
813}
814
815void cleanup_module(void) 815void cleanup_module(void)
816{ 816{
817 int this_dev; 817 int this_dev;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index a1ac4bd1696e..a7bb54df75a8 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -415,7 +415,7 @@ typedef enum {
415 415
416 416
417/* directly indexed by chip_t, above */ 417/* directly indexed by chip_t, above */
418const static struct { 418static const struct {
419 const char *name; 419 const char *name;
420 u8 version; /* from RTL8139C docs */ 420 u8 version; /* from RTL8139C docs */
421 u32 RxConfigMask; /* should clear the bits supported by this chip */ 421 u32 RxConfigMask; /* should clear the bits supported by this chip */
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 049c34b37067..593d8adee891 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1598,7 +1598,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1598 switch(cmd) { 1598 switch(cmd) {
1599 case SIOCGMIIPHY: /* Get the address of the PHY in use. */ 1599 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
1600 data[0] = 0; /* we have only this address */ 1600 data[0] = 0; /* we have only this address */
1601 /* fall trough */ 1601 /* fall through */
1602 case SIOCGMIIREG: /* Read the specified MII register. */ 1602 case SIOCGMIIREG: /* Read the specified MII register. */
1603 data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f); 1603 data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
1604 break; 1604 break;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02940c0fef68..459443b572ce 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -81,7 +81,7 @@ int mdiobus_register(struct mii_bus *bus)
81 81
82 phydev->dev.parent = bus->dev; 82 phydev->dev.parent = bus->dev;
83 phydev->dev.bus = &mdio_bus_type; 83 phydev->dev.bus = &mdio_bus_type;
84 sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i); 84 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
85 85
86 phydev->bus = bus; 86 phydev->bus = bus;
87 87
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b8686e47f899..1474b7c5ac0b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -42,7 +42,7 @@
42 */ 42 */
43void phy_print_status(struct phy_device *phydev) 43void phy_print_status(struct phy_device *phydev)
44{ 44{
45 pr_info("%s: Link is %s", phydev->dev.bus_id, 45 pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
46 phydev->link ? "Up" : "Down"); 46 phydev->link ? "Up" : "Down");
47 if (phydev->link) 47 if (phydev->link)
48 printk(" - %d/%s", phydev->speed, 48 printk(" - %d/%s", phydev->speed,
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 400f652282d7..aa6540b39466 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -189,7 +189,7 @@ ppp_asynctty_open(struct tty_struct *tty)
189 goto out_free; 189 goto out_free;
190 190
191 tty->disc_data = ap; 191 tty->disc_data = ap;
192 192 tty->receive_room = 65536;
193 return 0; 193 return 0;
194 194
195 out_free: 195 out_free:
@@ -343,12 +343,6 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
343 return 0; 343 return 0;
344} 344}
345 345
346static int
347ppp_asynctty_room(struct tty_struct *tty)
348{
349 return 65535;
350}
351
352/* 346/*
353 * This can now be called from hard interrupt level as well 347 * This can now be called from hard interrupt level as well
354 * as soft interrupt level or mainline. 348 * as soft interrupt level or mainline.
@@ -398,7 +392,6 @@ static struct tty_ldisc ppp_ldisc = {
398 .write = ppp_asynctty_write, 392 .write = ppp_asynctty_write,
399 .ioctl = ppp_asynctty_ioctl, 393 .ioctl = ppp_asynctty_ioctl,
400 .poll = ppp_asynctty_poll, 394 .poll = ppp_asynctty_poll,
401 .receive_room = ppp_asynctty_room,
402 .receive_buf = ppp_asynctty_receive, 395 .receive_buf = ppp_asynctty_receive,
403 .write_wakeup = ppp_asynctty_wakeup, 396 .write_wakeup = ppp_asynctty_wakeup,
404}; 397};
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4d51c0c8023d..33cb8254e79d 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -237,7 +237,7 @@ ppp_sync_open(struct tty_struct *tty)
237 goto out_free; 237 goto out_free;
238 238
239 tty->disc_data = ap; 239 tty->disc_data = ap;
240 240 tty->receive_room = 65536;
241 return 0; 241 return 0;
242 242
243 out_free: 243 out_free:
@@ -384,12 +384,6 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
384 return 0; 384 return 0;
385} 385}
386 386
387static int
388ppp_sync_room(struct tty_struct *tty)
389{
390 return 65535;
391}
392
393/* 387/*
394 * This can now be called from hard interrupt level as well 388 * This can now be called from hard interrupt level as well
395 * as soft interrupt level or mainline. 389 * as soft interrupt level or mainline.
@@ -439,7 +433,6 @@ static struct tty_ldisc ppp_sync_ldisc = {
439 .write = ppp_sync_write, 433 .write = ppp_sync_write,
440 .ioctl = ppp_synctty_ioctl, 434 .ioctl = ppp_synctty_ioctl,
441 .poll = ppp_sync_poll, 435 .poll = ppp_sync_poll,
442 .receive_room = ppp_sync_room,
443 .receive_buf = ppp_sync_receive, 436 .receive_buf = ppp_sync_receive,
444 .write_wakeup = ppp_sync_wakeup, 437 .write_wakeup = ppp_sync_wakeup,
445}; 438};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 14a76f7cf900..2e1bed153c39 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -170,7 +170,7 @@ enum phy_version {
170#define _R(NAME,MAC,MASK) \ 170#define _R(NAME,MAC,MASK) \
171 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK } 171 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
172 172
173const static struct { 173static const struct {
174 const char *name; 174 const char *name;
175 u8 mac_version; 175 u8 mac_version;
176 u32 RxConfigMask; /* Clears the bits supported by this chip */ 176 u32 RxConfigMask; /* Clears the bits supported by this chip */
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index d6388e1533f0..76139478c3df 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -94,7 +94,7 @@ static inline int card_wait_for_busy_clear(const int ioaddr[],
94 const char* name); 94 const char* name);
95static inline int card_wait_for_ready(const int ioaddr[], const char* name, 95static inline int card_wait_for_ready(const int ioaddr[], const char* name,
96 unsigned char in[]); 96 unsigned char in[]);
97static inline int card_send_command(const int ioaddr[], const char* name, 97static int card_send_command(const int ioaddr[], const char* name,
98 const unsigned char out[], unsigned char in[]); 98 const unsigned char out[], unsigned char in[]);
99 99
100/* SB1000 hardware routines to be used during frame rx interrupt */ 100/* SB1000 hardware routines to be used during frame rx interrupt */
@@ -309,7 +309,7 @@ card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
309} 309}
310 310
311/* Card Send Command (cannot be used during an interrupt) */ 311/* Card Send Command (cannot be used during an interrupt) */
312static inline int 312static int
313card_send_command(const int ioaddr[], const char* name, 313card_send_command(const int ioaddr[], const char* name,
314 const unsigned char out[], unsigned char in[]) 314 const unsigned char out[], unsigned char in[])
315{ 315{
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 478791e09bf7..b420182eec4b 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -329,7 +329,7 @@ static struct mii_chip_info {
329 { NULL, } 329 { NULL, }
330}; 330};
331 331
332const static struct { 332static const struct {
333 const char *name; 333 const char *name;
334} sis_chip_info[] = { 334} sis_chip_info[] = {
335 { "SiS 190 PCI Fast Ethernet adapter" }, 335 { "SiS 190 PCI Fast Ethernet adapter" },
diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c
index 0fddf61047b4..07c1b4c8699d 100644
--- a/drivers/net/sk98lin/skdim.c
+++ b/drivers/net/sk98lin/skdim.c
@@ -180,7 +180,7 @@ SkDimModerate(SK_AC *pAC) {
180 /* 180 /*
181 ** The number of interrupts per sec is the same as expected. 181 ** The number of interrupts per sec is the same as expected.
182 ** Evalulate the descriptor-ratio. If it has changed, a resize 182 ** Evalulate the descriptor-ratio. If it has changed, a resize
183 ** in the moderation timer might be usefull 183 ** in the moderation timer might be useful
184 */ 184 */
185 if (M_DIMINFO.AutoSizing) { 185 if (M_DIMINFO.AutoSizing) {
186 ResizeDimTimerDuration(pAC); 186 ResizeDimTimerDuration(pAC);
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 9a76ac180b11..a5f2b1ee0752 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -282,26 +282,22 @@ SK_U32 Val) /* pointer to store the read value */
282 * Description: 282 * Description:
283 * This function initialize the PCI resources and IO 283 * This function initialize the PCI resources and IO
284 * 284 *
285 * Returns: N/A 285 * Returns:
286 * 286 * 0 - indicate everything worked ok.
287 * != 0 - error indication
287 */ 288 */
288int SkGeInitPCI(SK_AC *pAC) 289static __devinit int SkGeInitPCI(SK_AC *pAC)
289{ 290{
290 struct SK_NET_DEVICE *dev = pAC->dev[0]; 291 struct SK_NET_DEVICE *dev = pAC->dev[0];
291 struct pci_dev *pdev = pAC->PciDev; 292 struct pci_dev *pdev = pAC->PciDev;
292 int retval; 293 int retval;
293 294
294 if (pci_enable_device(pdev) != 0) {
295 return 1;
296 }
297
298 dev->mem_start = pci_resource_start (pdev, 0); 295 dev->mem_start = pci_resource_start (pdev, 0);
299 pci_set_master(pdev); 296 pci_set_master(pdev);
300 297
301 if (pci_request_regions(pdev, "sk98lin") != 0) { 298 retval = pci_request_regions(pdev, "sk98lin");
302 retval = 2; 299 if (retval)
303 goto out_disable; 300 goto out;
304 }
305 301
306#ifdef SK_BIG_ENDIAN 302#ifdef SK_BIG_ENDIAN
307 /* 303 /*
@@ -320,9 +316,8 @@ int SkGeInitPCI(SK_AC *pAC)
320 * Remap the regs into kernel space. 316 * Remap the regs into kernel space.
321 */ 317 */
322 pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000); 318 pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000);
323 319 if (!pAC->IoBase) {
324 if (!pAC->IoBase){ 320 retval = -EIO;
325 retval = 3;
326 goto out_release; 321 goto out_release;
327 } 322 }
328 323
@@ -330,8 +325,7 @@ int SkGeInitPCI(SK_AC *pAC)
330 325
331 out_release: 326 out_release:
332 pci_release_regions(pdev); 327 pci_release_regions(pdev);
333 out_disable: 328 out:
334 pci_disable_device(pdev);
335 return retval; 329 return retval;
336} 330}
337 331
@@ -492,7 +486,7 @@ module_param_array(AutoSizing, charp, NULL, 0);
492 * 0, if everything is ok 486 * 0, if everything is ok
493 * !=0, on error 487 * !=0, on error
494 */ 488 */
495static int __init SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC) 489static int __devinit SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC)
496{ 490{
497short i; 491short i;
498unsigned long Flags; 492unsigned long Flags;
@@ -529,7 +523,7 @@ SK_BOOL DualNet;
529 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) { 523 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) {
530 printk("HWInit (0) failed.\n"); 524 printk("HWInit (0) failed.\n");
531 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); 525 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
532 return(-EAGAIN); 526 return -EIO;
533 } 527 }
534 SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA); 528 SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA);
535 SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA); 529 SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA);
@@ -551,7 +545,7 @@ SK_BOOL DualNet;
551 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) { 545 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
552 printk("sk98lin: HWInit (1) failed.\n"); 546 printk("sk98lin: HWInit (1) failed.\n");
553 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); 547 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
554 return(-EAGAIN); 548 return -EIO;
555 } 549 }
556 SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO); 550 SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
557 SkEventInit(pAC, pAC->IoBase, SK_INIT_IO); 551 SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
@@ -583,20 +577,20 @@ SK_BOOL DualNet;
583 } else { 577 } else {
584 printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n", 578 printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
585 pAC->GIni.GIMacsFound); 579 pAC->GIni.GIMacsFound);
586 return -EAGAIN; 580 return -EIO;
587 } 581 }
588 582
589 if (Ret) { 583 if (Ret) {
590 printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n", 584 printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n",
591 dev->irq); 585 dev->irq);
592 return -EAGAIN; 586 return Ret;
593 } 587 }
594 pAC->AllocFlag |= SK_ALLOC_IRQ; 588 pAC->AllocFlag |= SK_ALLOC_IRQ;
595 589
596 /* Alloc memory for this board (Mem for RxD/TxD) : */ 590 /* Alloc memory for this board (Mem for RxD/TxD) : */
597 if(!BoardAllocMem(pAC)) { 591 if(!BoardAllocMem(pAC)) {
598 printk("No memory for descriptor rings.\n"); 592 printk("No memory for descriptor rings.\n");
599 return(-EAGAIN); 593 return -ENOMEM;
600 } 594 }
601 595
602 BoardInitMem(pAC); 596 BoardInitMem(pAC);
@@ -612,7 +606,7 @@ SK_BOOL DualNet;
612 DualNet)) { 606 DualNet)) {
613 BoardFreeMem(pAC); 607 BoardFreeMem(pAC);
614 printk("sk98lin: SkGeInitAssignRamToQueues failed.\n"); 608 printk("sk98lin: SkGeInitAssignRamToQueues failed.\n");
615 return(-EAGAIN); 609 return -EIO;
616 } 610 }
617 611
618 return (0); 612 return (0);
@@ -633,8 +627,7 @@ SK_BOOL DualNet;
633 * SK_TRUE, if all memory could be allocated 627 * SK_TRUE, if all memory could be allocated
634 * SK_FALSE, if not 628 * SK_FALSE, if not
635 */ 629 */
636static SK_BOOL BoardAllocMem( 630static __devinit SK_BOOL BoardAllocMem(SK_AC *pAC)
637SK_AC *pAC)
638{ 631{
639caddr_t pDescrMem; /* pointer to descriptor memory area */ 632caddr_t pDescrMem; /* pointer to descriptor memory area */
640size_t AllocLength; /* length of complete descriptor area */ 633size_t AllocLength; /* length of complete descriptor area */
@@ -727,8 +720,7 @@ size_t AllocLength; /* length of complete descriptor area */
727 * 720 *
728 * Returns: N/A 721 * Returns: N/A
729 */ 722 */
730static void BoardInitMem( 723static __devinit void BoardInitMem(SK_AC *pAC)
731SK_AC *pAC) /* pointer to adapter context */
732{ 724{
733int i; /* loop counter */ 725int i; /* loop counter */
734int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/ 726int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/
@@ -2859,7 +2851,7 @@ unsigned long Flags; /* for spin lock */
2859 * Description: 2851 * Description:
2860 * This function is called if an ioctl is issued on the device. 2852 * This function is called if an ioctl is issued on the device.
2861 * There are three subfunction for reading, writing and test-writing 2853 * There are three subfunction for reading, writing and test-writing
2862 * the private MIB data structure (usefull for SysKonnect-internal tools). 2854 * the private MIB data structure (useful for SysKonnect-internal tools).
2863 * 2855 *
2864 * Returns: 2856 * Returns:
2865 * 0, if everything is ok 2857 * 0, if everything is ok
@@ -4776,32 +4768,47 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4776 struct net_device *dev = NULL; 4768 struct net_device *dev = NULL;
4777 static int boards_found = 0; 4769 static int boards_found = 0;
4778 int error = -ENODEV; 4770 int error = -ENODEV;
4771 int using_dac = 0;
4779 char DeviceStr[80]; 4772 char DeviceStr[80];
4780 4773
4781 if (pci_enable_device(pdev)) 4774 if (pci_enable_device(pdev))
4782 goto out; 4775 goto out;
4783 4776
4784 /* Configure DMA attributes. */ 4777 /* Configure DMA attributes. */
4785 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) && 4778 if (sizeof(dma_addr_t) > sizeof(u32) &&
4786 pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 4779 !(error = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
4787 goto out_disable_device; 4780 using_dac = 1;
4788 4781 error = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4782 if (error < 0) {
4783 printk(KERN_ERR "sk98lin %s unable to obtain 64 bit DMA "
4784 "for consistent allocations\n", pci_name(pdev));
4785 goto out_disable_device;
4786 }
4787 } else {
4788 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4789 if (error) {
4790 printk(KERN_ERR "sk98lin %s no usable DMA configuration\n",
4791 pci_name(pdev));
4792 goto out_disable_device;
4793 }
4794 }
4789 4795
4790 if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) { 4796 error = -ENOMEM;
4791 printk(KERN_ERR "Unable to allocate etherdev " 4797 dev = alloc_etherdev(sizeof(DEV_NET));
4798 if (!dev) {
4799 printk(KERN_ERR "sk98lin: unable to allocate etherdev "
4792 "structure!\n"); 4800 "structure!\n");
4793 goto out_disable_device; 4801 goto out_disable_device;
4794 } 4802 }
4795 4803
4796 pNet = netdev_priv(dev); 4804 pNet = netdev_priv(dev);
4797 pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL); 4805 pNet->pAC = kzalloc(sizeof(SK_AC), GFP_KERNEL);
4798 if (!pNet->pAC) { 4806 if (!pNet->pAC) {
4799 printk(KERN_ERR "Unable to allocate adapter " 4807 printk(KERN_ERR "sk98lin: unable to allocate adapter "
4800 "structure!\n"); 4808 "structure!\n");
4801 goto out_free_netdev; 4809 goto out_free_netdev;
4802 } 4810 }
4803 4811
4804 memset(pNet->pAC, 0, sizeof(SK_AC));
4805 pAC = pNet->pAC; 4812 pAC = pNet->pAC;
4806 pAC->PciDev = pdev; 4813 pAC->PciDev = pdev;
4807 4814
@@ -4810,6 +4817,7 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4810 pAC->CheckQueue = SK_FALSE; 4817 pAC->CheckQueue = SK_FALSE;
4811 4818
4812 dev->irq = pdev->irq; 4819 dev->irq = pdev->irq;
4820
4813 error = SkGeInitPCI(pAC); 4821 error = SkGeInitPCI(pAC);
4814 if (error) { 4822 if (error) {
4815 printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error); 4823 printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error);
@@ -4844,19 +4852,25 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4844#endif 4852#endif
4845 } 4853 }
4846 4854
4855 if (using_dac)
4856 dev->features |= NETIF_F_HIGHDMA;
4857
4847 pAC->Index = boards_found++; 4858 pAC->Index = boards_found++;
4848 4859
4849 if (SkGeBoardInit(dev, pAC)) 4860 error = SkGeBoardInit(dev, pAC);
4861 if (error)
4850 goto out_free_netdev; 4862 goto out_free_netdev;
4851 4863
4852 /* Read Adapter name from VPD */ 4864 /* Read Adapter name from VPD */
4853 if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) { 4865 if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) {
4866 error = -EIO;
4854 printk(KERN_ERR "sk98lin: Could not read VPD data.\n"); 4867 printk(KERN_ERR "sk98lin: Could not read VPD data.\n");
4855 goto out_free_resources; 4868 goto out_free_resources;
4856 } 4869 }
4857 4870
4858 /* Register net device */ 4871 /* Register net device */
4859 if (register_netdev(dev)) { 4872 error = register_netdev(dev);
4873 if (error) {
4860 printk(KERN_ERR "sk98lin: Could not register device.\n"); 4874 printk(KERN_ERR "sk98lin: Could not register device.\n");
4861 goto out_free_resources; 4875 goto out_free_resources;
4862 } 4876 }
@@ -4883,15 +4897,17 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4883 4897
4884 boards_found++; 4898 boards_found++;
4885 4899
4900 pci_set_drvdata(pdev, dev);
4901
4886 /* More then one port found */ 4902 /* More then one port found */
4887 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { 4903 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
4888 if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) { 4904 dev = alloc_etherdev(sizeof(DEV_NET));
4889 printk(KERN_ERR "Unable to allocate etherdev " 4905 if (!dev) {
4906 printk(KERN_ERR "sk98lin: unable to allocate etherdev "
4890 "structure!\n"); 4907 "structure!\n");
4891 goto out; 4908 goto single_port;
4892 } 4909 }
4893 4910
4894 pAC->dev[1] = dev;
4895 pNet = netdev_priv(dev); 4911 pNet = netdev_priv(dev);
4896 pNet->PortNr = 1; 4912 pNet->PortNr = 1;
4897 pNet->NetNr = 1; 4913 pNet->NetNr = 1;
@@ -4920,20 +4936,28 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4920#endif 4936#endif
4921 } 4937 }
4922 4938
4923 if (register_netdev(dev)) { 4939 if (using_dac)
4924 printk(KERN_ERR "sk98lin: Could not register device for seconf port.\n"); 4940 dev->features |= NETIF_F_HIGHDMA;
4941
4942 error = register_netdev(dev);
4943 if (error) {
4944 printk(KERN_ERR "sk98lin: Could not register device"
4945 " for second port. (%d)\n", error);
4925 free_netdev(dev); 4946 free_netdev(dev);
4926 pAC->dev[1] = pAC->dev[0]; 4947 goto single_port;
4927 } else {
4928 memcpy(&dev->dev_addr,
4929 &pAC->Addr.Net[1].CurrentMacAddress, 6);
4930 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4931
4932 printk("%s: %s\n", dev->name, DeviceStr);
4933 printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
4934 } 4948 }
4949
4950 pAC->dev[1] = dev;
4951 memcpy(&dev->dev_addr,
4952 &pAC->Addr.Net[1].CurrentMacAddress, 6);
4953 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4954
4955 printk("%s: %s\n", dev->name, DeviceStr);
4956 printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
4935 } 4957 }
4936 4958
4959single_port:
4960
4937 /* Save the hardware revision */ 4961 /* Save the hardware revision */
4938 pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) + 4962 pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
4939 (pAC->GIni.GIPciHwRev & 0x0F); 4963 (pAC->GIni.GIPciHwRev & 0x0F);
@@ -4945,7 +4969,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
4945 memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA)); 4969 memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA));
4946 memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA)); 4970 memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA));
4947 4971
4948 pci_set_drvdata(pdev, dev);
4949 return 0; 4972 return 0;
4950 4973
4951 out_free_resources: 4974 out_free_resources:
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c
index 58e1a5be913f..a386172107e8 100644
--- a/drivers/net/sk98lin/skgepnmi.c
+++ b/drivers/net/sk98lin/skgepnmi.c
@@ -611,7 +611,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
611 * Description: 611 * Description:
612 * Calls a general sub-function for all this stuff. The preset does 612 * Calls a general sub-function for all this stuff. The preset does
613 * the same as a set, but returns just before finally setting the 613 * the same as a set, but returns just before finally setting the
614 * new value. This is usefull to check if a set might be successfull. 614 * new value. This is useful to check if a set might be successfull.
615 * If the instance -1 is passed, an array of values is supposed and 615 * If the instance -1 is passed, an array of values is supposed and
616 * all instances of the OID will be set. 616 * all instances of the OID will be set.
617 * 617 *
@@ -654,7 +654,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
654 * Description: 654 * Description:
655 * Calls a general sub-function for all this stuff. The preset does 655 * Calls a general sub-function for all this stuff. The preset does
656 * the same as a set, but returns just before finally setting the 656 * the same as a set, but returns just before finally setting the
657 * new value. This is usefull to check if a set might be successfull. 657 * new value. This is useful to check if a set might be successfull.
658 * If the instance -1 is passed, an array of values is supposed and 658 * If the instance -1 is passed, an array of values is supposed and
659 * all instances of the OID will be set. 659 * all instances of the OID will be set.
660 * 660 *
@@ -870,7 +870,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
870 * Description: 870 * Description:
871 * Calls a general sub-function for all this set stuff. The preset does 871 * Calls a general sub-function for all this set stuff. The preset does
872 * the same as a set, but returns just before finally setting the 872 * the same as a set, but returns just before finally setting the
873 * new value. This is usefull to check if a set might be successfull. 873 * new value. This is useful to check if a set might be successfull.
874 * The sub-function runs through the IdTable, checks which OIDs are able 874 * The sub-function runs through the IdTable, checks which OIDs are able
875 * to set, and calls the handler function of the OID to perform the 875 * to set, and calls the handler function of the OID to perform the
876 * preset. The return value of the function will also be stored in 876 * preset. The return value of the function will also be stored in
@@ -6473,7 +6473,7 @@ unsigned int PhysPortIndex) /* Physical port index */
6473 * 6473 *
6474 * Description: 6474 * Description:
6475 * The COMMON module only tells us if the mode is half or full duplex. 6475 * The COMMON module only tells us if the mode is half or full duplex.
6476 * But in the decade of auto sensing it is usefull for the user to 6476 * But in the decade of auto sensing it is useful for the user to
6477 * know if the mode was negotiated or forced. Therefore we have a 6477 * know if the mode was negotiated or forced. Therefore we have a
6478 * look to the mode, which was last used by the negotiation process. 6478 * look to the mode, which was last used by the negotiation process.
6479 * 6479 *
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b538e3038058..bf55a4cfb3d2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3243 3243
3244 pci_set_master(pdev); 3244 pci_set_master(pdev);
3245 3245
3246 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) 3246 if (sizeof(dma_addr_t) > sizeof(u32) &&
3247 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3247 using_dac = 1; 3248 using_dac = 1;
3248 else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3249 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3249 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3250 if (err < 0) {
3250 pci_name(pdev)); 3251 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3251 goto err_out_free_regions; 3252 "for consistent allocations\n", pci_name(pdev));
3253 goto err_out_free_regions;
3254 }
3255 } else {
3256 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3257 if (err) {
3258 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3259 pci_name(pdev));
3260 goto err_out_free_regions;
3261 }
3252 } 3262 }
3253 3263
3254#ifdef __BIG_ENDIAN 3264#ifdef __BIG_ENDIAN
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index f5d697c0c031..f8b973a04b65 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -57,7 +57,7 @@
57#include "sky2.h" 57#include "sky2.h"
58 58
59#define DRV_NAME "sky2" 59#define DRV_NAME "sky2"
60#define DRV_VERSION "0.11" 60#define DRV_VERSION "0.13"
61#define PFX DRV_NAME " " 61#define PFX DRV_NAME " "
62 62
63/* 63/*
@@ -75,6 +75,7 @@
75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 75#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) 76#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
77#define RX_DEF_PENDING RX_MAX_PENDING 77#define RX_DEF_PENDING RX_MAX_PENDING
78#define RX_SKB_ALIGN 8
78 79
79#define TX_RING_SIZE 512 80#define TX_RING_SIZE 512
80#define TX_DEF_PENDING (TX_RING_SIZE - 1) 81#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
91static const u32 default_msg = 92static const u32 default_msg =
92 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 93 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
93 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 94 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
94 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR; 95 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
95 96
96static int debug = -1; /* defaults above */ 97static int debug = -1; /* defaults above */
97module_param(debug, int, 0); 98module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
624 625
625} 626}
626 627
627static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) 628/* Assign Ram Buffer allocation.
629 * start and end are in units of 4k bytes
630 * ram registers are in units of 64bit words
631 */
632static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
628{ 633{
629 u32 end; 634 u32 start, end;
630 635
631 start /= 8; 636 start = startk * 4096/8;
632 len /= 8; 637 end = (endk * 4096/8) - 1;
633 end = start + len - 1;
634 638
635 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 639 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
636 sky2_write32(hw, RB_ADDR(q, RB_START), start); 640 sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
639 sky2_write32(hw, RB_ADDR(q, RB_RP), start); 643 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
640 644
641 if (q == Q_R1 || q == Q_R2) { 645 if (q == Q_R1 || q == Q_R2) {
642 u32 rxup, rxlo; 646 u32 space = (endk - startk) * 4096/8;
647 u32 tp = space - space/4;
643 648
644 rxlo = len/2; 649 /* On receive queue's set the thresholds
645 rxup = rxlo + len/4; 650 * give receiver priority when > 3/4 full
651 * send pause when down to 2K
652 */
653 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
654 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
646 655
647 /* Set thresholds on receive queue's */ 656 tp = space - 2048/8;
648 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup); 657 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
649 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo); 658 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
650 } else { 659 } else {
651 /* Enable store & forward on Tx queue's because 660 /* Enable store & forward on Tx queue's because
652 * Tx FIFO is only 1K on Yukon 661 * Tx FIFO is only 1K on Yukon
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
695 * This is a workaround code taken from SysKonnect sk98lin driver 704 * This is a workaround code taken from SysKonnect sk98lin driver
696 * to deal with chip bug on Yukon EC rev 0 in the wraparound case. 705 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
697 */ 706 */
698static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, 707static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
699 u16 idx, u16 *last, u16 size) 708 u16 idx, u16 *last, u16 size)
700{ 709{
710 wmb();
701 if (is_ec_a1(hw) && idx < *last) { 711 if (is_ec_a1(hw) && idx < *last) {
702 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); 712 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
703 713
@@ -721,6 +731,7 @@ setnew:
721 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); 731 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
722 } 732 }
723 *last = idx; 733 *last = idx;
734 mmiowb();
724} 735}
725 736
726 737
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
734/* Return high part of DMA address (could be 32 or 64 bit) */ 745/* Return high part of DMA address (could be 32 or 64 bit) */
735static inline u32 high32(dma_addr_t a) 746static inline u32 high32(dma_addr_t a)
736{ 747{
737 return (a >> 16) >> 16; 748 return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
738} 749}
739 750
740/* Build description to hardware about buffer */ 751/* Build description to hardware about buffer */
741static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) 752static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
742{ 753{
743 struct sky2_rx_le *le; 754 struct sky2_rx_le *le;
744 u32 hi = high32(map); 755 u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
878 struct sky2_hw *hw = sky2->hw; 889 struct sky2_hw *hw = sky2->hw;
879 u16 port = sky2->port; 890 u16 port = sky2->port;
880 891
881 spin_lock(&sky2->tx_lock); 892 spin_lock_bh(&sky2->tx_lock);
882 893
883 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); 894 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
884 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); 895 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
885 sky2->vlgrp = grp; 896 sky2->vlgrp = grp;
886 897
887 spin_unlock(&sky2->tx_lock); 898 spin_unlock_bh(&sky2->tx_lock);
888} 899}
889 900
890static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 901static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
893 struct sky2_hw *hw = sky2->hw; 904 struct sky2_hw *hw = sky2->hw;
894 u16 port = sky2->port; 905 u16 port = sky2->port;
895 906
896 spin_lock(&sky2->tx_lock); 907 spin_lock_bh(&sky2->tx_lock);
897 908
898 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); 909 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
899 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); 910 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
900 if (sky2->vlgrp) 911 if (sky2->vlgrp)
901 sky2->vlgrp->vlan_devices[vid] = NULL; 912 sky2->vlgrp->vlan_devices[vid] = NULL;
902 913
903 spin_unlock(&sky2->tx_lock); 914 spin_unlock_bh(&sky2->tx_lock);
904} 915}
905#endif 916#endif
906 917
907/* 918/*
919 * It appears the hardware has a bug in the FIFO logic that
920 * cause it to hang if the FIFO gets overrun and the receive buffer
921 * is not aligned. ALso alloc_skb() won't align properly if slab
922 * debugging is enabled.
923 */
924static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
925{
926 struct sk_buff *skb;
927
928 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
929 if (likely(skb)) {
930 unsigned long p = (unsigned long) skb->data;
931 skb_reserve(skb,
932 ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
933 }
934
935 return skb;
936}
937
938/*
908 * Allocate and setup receiver buffer pool. 939 * Allocate and setup receiver buffer pool.
909 * In case of 64 bit dma, there are 2X as many list elements 940 * In case of 64 bit dma, there are 2X as many list elements
910 * available as ring entries 941 * available as ring entries
911 * and need to reserve one list element so we don't wrap around. 942 * and need to reserve one list element so we don't wrap around.
912 *
913 * It appears the hardware has a bug in the FIFO logic that
914 * cause it to hang if the FIFO gets overrun and the receive buffer
915 * is not aligned. This means we can't use skb_reserve to align
916 * the IP header.
917 */ 943 */
918static int sky2_rx_start(struct sky2_port *sky2) 944static int sky2_rx_start(struct sky2_port *sky2)
919{ 945{
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
929 for (i = 0; i < sky2->rx_pending; i++) { 955 for (i = 0; i < sky2->rx_pending; i++) {
930 struct ring_info *re = sky2->rx_ring + i; 956 struct ring_info *re = sky2->rx_ring + i;
931 957
932 re->skb = dev_alloc_skb(sky2->rx_bufsize); 958 re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
933 if (!re->skb) 959 if (!re->skb)
934 goto nomem; 960 goto nomem;
935 961
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
986 1012
987 sky2_mac_init(hw, port); 1013 sky2_mac_init(hw, port);
988 1014
989 /* Configure RAM buffers */ 1015 /* Determine available ram buffer space (in 4K blocks).
990 if (hw->chip_id == CHIP_ID_YUKON_FE || 1016 * Note: not sure about the FE setting below yet
991 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) 1017 */
992 ramsize = 4096; 1018 if (hw->chip_id == CHIP_ID_YUKON_FE)
993 else { 1019 ramsize = 4;
994 u8 e0 = sky2_read8(hw, B2_E_0); 1020 else
995 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096); 1021 ramsize = sky2_read8(hw, B2_E_0);
996 } 1022
1023 /* Give transmitter one third (rounded up) */
1024 rxspace = ramsize - (ramsize + 2) / 3;
997 1025
998 /* 2/3 for Rx */
999 rxspace = (2 * ramsize) / 3;
1000 sky2_ramset(hw, rxqaddr[port], 0, rxspace); 1026 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1001 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); 1027 sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
1002 1028
1003 /* Make sure SyncQ is disabled */ 1029 /* Make sure SyncQ is disabled */
1004 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL), 1030 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
1054} 1080}
1055 1081
1056/* Estimate of number of transmit list elements required */ 1082/* Estimate of number of transmit list elements required */
1057static inline unsigned tx_le_req(const struct sk_buff *skb) 1083static unsigned tx_le_req(const struct sk_buff *skb)
1058{ 1084{
1059 unsigned count; 1085 unsigned count;
1060 1086
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1090 u16 mss; 1116 u16 mss;
1091 u8 ctrl; 1117 u8 ctrl;
1092 1118
1119 /* No BH disabling for tx_lock here. We are running in BH disabled
1120 * context and TX reclaim runs via poll inside of a software
1121 * interrupt, and no related locks in IRQ processing.
1122 */
1093 if (!spin_trylock(&sky2->tx_lock)) 1123 if (!spin_trylock(&sky2->tx_lock))
1094 return NETDEV_TX_LOCKED; 1124 return NETDEV_TX_LOCKED;
1095 1125
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1099 */ 1129 */
1100 if (!netif_queue_stopped(dev)) { 1130 if (!netif_queue_stopped(dev)) {
1101 netif_stop_queue(dev); 1131 netif_stop_queue(dev);
1102 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 1132 if (net_ratelimit())
1103 dev->name); 1133 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1134 dev->name);
1104 } 1135 }
1105 spin_unlock(&sky2->tx_lock); 1136 spin_unlock(&sky2->tx_lock);
1106 1137
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1199 1230
1200 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, 1231 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1201 frag->size, PCI_DMA_TODEVICE); 1232 frag->size, PCI_DMA_TODEVICE);
1202 addr64 = (mapping >> 16) >> 16; 1233 addr64 = high32(mapping);
1203 if (addr64 != sky2->tx_addr64) { 1234 if (addr64 != sky2->tx_addr64) {
1204 le = get_tx_le(sky2); 1235 le = get_tx_le(sky2);
1205 le->tx.addr = cpu_to_le32(addr64); 1236 le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1229 netif_stop_queue(dev); 1260 netif_stop_queue(dev);
1230 1261
1231out_unlock: 1262out_unlock:
1232 mmiowb();
1233 spin_unlock(&sky2->tx_lock); 1263 spin_unlock(&sky2->tx_lock);
1234 1264
1235 dev->trans_start = jiffies; 1265 dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1282 dev_kfree_skb_any(skb); 1312 dev_kfree_skb_any(skb);
1283 } 1313 }
1284 1314
1285 spin_lock(&sky2->tx_lock);
1286 sky2->tx_cons = put; 1315 sky2->tx_cons = put;
1287 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) 1316 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1288 netif_wake_queue(dev); 1317 netif_wake_queue(dev);
1289 spin_unlock(&sky2->tx_lock);
1290} 1318}
1291 1319
1292/* Cleanup all untransmitted buffers, assume transmitter not running */ 1320/* Cleanup all untransmitted buffers, assume transmitter not running */
1293static void sky2_tx_clean(struct sky2_port *sky2) 1321static void sky2_tx_clean(struct sky2_port *sky2)
1294{ 1322{
1323 spin_lock_bh(&sky2->tx_lock);
1295 sky2_tx_complete(sky2, sky2->tx_prod); 1324 sky2_tx_complete(sky2, sky2->tx_prod);
1325 spin_unlock_bh(&sky2->tx_lock);
1296} 1326}
1297 1327
1298/* Network shutdown */ 1328/* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
1582 local_irq_enable(); 1612 local_irq_enable();
1583} 1613}
1584 1614
1615
1616/* Transmit timeout is only called if we are running, carries is up
1617 * and tx queue is full (stopped).
1618 */
1585static void sky2_tx_timeout(struct net_device *dev) 1619static void sky2_tx_timeout(struct net_device *dev)
1586{ 1620{
1587 struct sky2_port *sky2 = netdev_priv(dev); 1621 struct sky2_port *sky2 = netdev_priv(dev);
1588 struct sky2_hw *hw = sky2->hw; 1622 struct sky2_hw *hw = sky2->hw;
1589 unsigned txq = txqaddr[sky2->port]; 1623 unsigned txq = txqaddr[sky2->port];
1624 u16 ridx;
1625
1626 /* Maybe we just missed an status interrupt */
1627 spin_lock(&sky2->tx_lock);
1628 ridx = sky2_read16(hw,
1629 sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1630 sky2_tx_complete(sky2, ridx);
1631 spin_unlock(&sky2->tx_lock);
1632
1633 if (!netif_queue_stopped(dev)) {
1634 if (net_ratelimit())
1635 pr_info(PFX "transmit interrupt missed? recovered\n");
1636 return;
1637 }
1590 1638
1591 if (netif_msg_timer(sky2)) 1639 if (netif_msg_timer(sky2))
1592 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1640 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1593 1641
1594 netif_stop_queue(dev);
1595
1596 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1642 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1597 sky2_read32(hw, Q_ADDR(txq, Q_CSR));
1598
1599 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1643 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1600 1644
1601 sky2_tx_clean(sky2); 1645 sky2_tx_clean(sky2);
1602 1646
1603 sky2_qset(hw, txq); 1647 sky2_qset(hw, txq);
1604 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); 1648 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1605
1606 netif_wake_queue(dev);
1607} 1649}
1608 1650
1609 1651
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1713 } else { 1755 } else {
1714 struct sk_buff *nskb; 1756 struct sk_buff *nskb;
1715 1757
1716 nskb = dev_alloc_skb(sky2->rx_bufsize); 1758 nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
1717 if (!nskb) 1759 if (!nskb)
1718 goto resubmit; 1760 goto resubmit;
1719 1761
@@ -1745,7 +1787,7 @@ oversize:
1745error: 1787error:
1746 ++sky2->net_stats.rx_errors; 1788 ++sky2->net_stats.rx_errors;
1747 1789
1748 if (netif_msg_rx_err(sky2)) 1790 if (netif_msg_rx_err(sky2) && net_ratelimit())
1749 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 1791 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1750 sky2->netdev->name, status, length); 1792 sky2->netdev->name, status, length);
1751 1793
@@ -1766,13 +1808,16 @@ error:
1766 */ 1808 */
1767#define TX_NO_STATUS 0xffff 1809#define TX_NO_STATUS 0xffff
1768 1810
1769static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) 1811static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1770{ 1812{
1771 if (last != TX_NO_STATUS) { 1813 if (last != TX_NO_STATUS) {
1772 struct net_device *dev = hw->dev[port]; 1814 struct net_device *dev = hw->dev[port];
1773 if (dev && netif_running(dev)) { 1815 if (dev && netif_running(dev)) {
1774 struct sky2_port *sky2 = netdev_priv(dev); 1816 struct sky2_port *sky2 = netdev_priv(dev);
1817
1818 spin_lock(&sky2->tx_lock);
1775 sky2_tx_complete(sky2, last); 1819 sky2_tx_complete(sky2, last);
1820 spin_unlock(&sky2->tx_lock);
1776 } 1821 }
1777 } 1822 }
1778} 1823}
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1800 struct sk_buff *skb; 1845 struct sk_buff *skb;
1801 u32 status; 1846 u32 status;
1802 u16 length; 1847 u16 length;
1803 u8 op;
1804 1848
1805 le = hw->st_le + hw->st_idx; 1849 le = hw->st_le + hw->st_idx;
1806 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1850 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1814 sky2 = netdev_priv(dev); 1858 sky2 = netdev_priv(dev);
1815 status = le32_to_cpu(le->status); 1859 status = le32_to_cpu(le->status);
1816 length = le16_to_cpu(le->length); 1860 length = le16_to_cpu(le->length);
1817 op = le->opcode & ~HW_OWNER;
1818 le->opcode = 0;
1819 1861
1820 switch (op) { 1862 switch (le->opcode & ~HW_OWNER) {
1821 case OP_RXSTAT: 1863 case OP_RXSTAT:
1822 skb = sky2_receive(sky2, length, status); 1864 skb = sky2_receive(sky2, length, status);
1823 if (!skb) 1865 if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1865 default: 1907 default:
1866 if (net_ratelimit()) 1908 if (net_ratelimit())
1867 printk(KERN_WARNING PFX 1909 printk(KERN_WARNING PFX
1868 "unknown status opcode 0x%x\n", op); 1910 "unknown status opcode 0x%x\n", le->opcode);
1869 break; 1911 break;
1870 } 1912 }
1871 } 1913 }
1872 1914
1873exit_loop: 1915exit_loop:
1874 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 1916 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1875 mmiowb();
1876 1917
1877 sky2_tx_check(hw, 0, tx_done[0]); 1918 sky2_tx_check(hw, 0, tx_done[0]);
1878 sky2_tx_check(hw, 1, tx_done[1]); 1919 sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
1887 netif_rx_complete(dev0); 1928 netif_rx_complete(dev0);
1888 hw->intr_mask |= Y2_IS_STAT_BMU; 1929 hw->intr_mask |= Y2_IS_STAT_BMU;
1889 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1930 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1890 mmiowb();
1891 return 0; 1931 return 0;
1892 } else { 1932 } else {
1893 *budget -= work_done; 1933 *budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
1900{ 1940{
1901 struct net_device *dev = hw->dev[port]; 1941 struct net_device *dev = hw->dev[port];
1902 1942
1903 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", 1943 if (net_ratelimit())
1904 dev->name, status); 1944 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
1945 dev->name, status);
1905 1946
1906 if (status & Y2_IS_PAR_RD1) { 1947 if (status & Y2_IS_PAR_RD1) {
1907 printk(KERN_ERR PFX "%s: ram data read parity error\n", 1948 if (net_ratelimit())
1908 dev->name); 1949 printk(KERN_ERR PFX "%s: ram data read parity error\n",
1950 dev->name);
1909 /* Clear IRQ */ 1951 /* Clear IRQ */
1910 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); 1952 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
1911 } 1953 }
1912 1954
1913 if (status & Y2_IS_PAR_WR1) { 1955 if (status & Y2_IS_PAR_WR1) {
1914 printk(KERN_ERR PFX "%s: ram data write parity error\n", 1956 if (net_ratelimit())
1915 dev->name); 1957 printk(KERN_ERR PFX "%s: ram data write parity error\n",
1958 dev->name);
1916 1959
1917 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); 1960 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
1918 } 1961 }
1919 1962
1920 if (status & Y2_IS_PAR_MAC1) { 1963 if (status & Y2_IS_PAR_MAC1) {
1921 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); 1964 if (net_ratelimit())
1965 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
1922 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); 1966 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
1923 } 1967 }
1924 1968
1925 if (status & Y2_IS_PAR_RX1) { 1969 if (status & Y2_IS_PAR_RX1) {
1926 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); 1970 if (net_ratelimit())
1971 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
1927 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); 1972 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
1928 } 1973 }
1929 1974
1930 if (status & Y2_IS_TCP_TXA1) { 1975 if (status & Y2_IS_TCP_TXA1) {
1931 printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name); 1976 if (net_ratelimit())
1977 printk(KERN_ERR PFX "%s: TCP segmentation error\n",
1978 dev->name);
1932 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); 1979 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
1933 } 1980 }
1934} 1981}
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1944 u16 pci_err; 1991 u16 pci_err;
1945 1992
1946 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); 1993 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1947 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 1994 if (net_ratelimit())
1948 pci_name(hw->pdev), pci_err); 1995 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1996 pci_name(hw->pdev), pci_err);
1949 1997
1950 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1998 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1951 pci_write_config_word(hw->pdev, PCI_STATUS, 1999 pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1959 2007
1960 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); 2008 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1961 2009
1962 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2010 if (net_ratelimit())
1963 pci_name(hw->pdev), pex_err); 2011 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
2012 pci_name(hw->pdev), pex_err);
1964 2013
1965 /* clear the interrupt */ 2014 /* clear the interrupt */
1966 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2015 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
2250 return 0; 2299 return 0;
2251} 2300}
2252 2301
2253static inline u32 sky2_supported_modes(const struct sky2_hw *hw) 2302static u32 sky2_supported_modes(const struct sky2_hw *hw)
2254{ 2303{
2255 u32 modes; 2304 u32 modes;
2256 if (hw->copper) { 2305 if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2995 return dev; 3044 return dev;
2996} 3045}
2997 3046
2998static inline void sky2_show_addr(struct net_device *dev) 3047static void __devinit sky2_show_addr(struct net_device *dev)
2999{ 3048{
3000 const struct sky2_port *sky2 = netdev_priv(dev); 3049 const struct sky2_port *sky2 = netdev_priv(dev);
3001 3050
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3038 goto err_out_free_regions; 3087 goto err_out_free_regions;
3039 } 3088 }
3040 3089
3041 if (sizeof(dma_addr_t) > sizeof(u32)) { 3090 if (sizeof(dma_addr_t) > sizeof(u32) &&
3042 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 3091 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3043 if (!err) 3092 using_dac = 1;
3044 using_dac = 1; 3093 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3045 } 3094 if (err < 0) {
3095 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3096 "for consistent allocations\n", pci_name(pdev));
3097 goto err_out_free_regions;
3098 }
3046 3099
3047 if (!using_dac) { 3100 } else {
3048 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3101 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3049 if (err) { 3102 if (err) {
3050 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3103 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3052 goto err_out_free_regions; 3105 goto err_out_free_regions;
3053 } 3106 }
3054 } 3107 }
3108
3055#ifdef __BIG_ENDIAN 3109#ifdef __BIG_ENDIAN
3056 /* byte swap descriptors in hardware */ 3110 /* byte swap descriptors in hardware */
3057 { 3111 {
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3064#endif 3118#endif
3065 3119
3066 err = -ENOMEM; 3120 err = -ENOMEM;
3067 hw = kmalloc(sizeof(*hw), GFP_KERNEL); 3121 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3068 if (!hw) { 3122 if (!hw) {
3069 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3123 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3070 pci_name(pdev)); 3124 pci_name(pdev));
3071 goto err_out_free_regions; 3125 goto err_out_free_regions;
3072 } 3126 }
3073 3127
3074 memset(hw, 0, sizeof(*hw));
3075 hw->pdev = pdev; 3128 hw->pdev = pdev;
3076 3129
3077 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3130 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 404ea4297e32..b2e18d28850d 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -651,11 +651,6 @@ static void sl_setup(struct net_device *dev)
651 ******************************************/ 651 ******************************************/
652 652
653 653
654static int slip_receive_room(struct tty_struct *tty)
655{
656 return 65536; /* We can handle an infinite amount of data. :-) */
657}
658
659/* 654/*
660 * Handle the 'receiver data ready' interrupt. 655 * Handle the 'receiver data ready' interrupt.
661 * This function is called by the 'tty_io' module in the kernel when 656 * This function is called by the 'tty_io' module in the kernel when
@@ -869,10 +864,6 @@ static int slip_open(struct tty_struct *tty)
869 sl->line = tty_devnum(tty); 864 sl->line = tty_devnum(tty);
870 sl->pid = current->pid; 865 sl->pid = current->pid;
871 866
872 /* FIXME: already done before we were called - seems this can go */
873 if (tty->driver->flush_buffer)
874 tty->driver->flush_buffer(tty);
875
876 if (!test_bit(SLF_INUSE, &sl->flags)) { 867 if (!test_bit(SLF_INUSE, &sl->flags)) {
877 /* Perform the low-level SLIP initialization. */ 868 /* Perform the low-level SLIP initialization. */
878 if ((err = sl_alloc_bufs(sl, SL_MTU)) != 0) 869 if ((err = sl_alloc_bufs(sl, SL_MTU)) != 0)
@@ -897,6 +888,7 @@ static int slip_open(struct tty_struct *tty)
897 888
898 /* Done. We have linked the TTY line to a channel. */ 889 /* Done. We have linked the TTY line to a channel. */
899 rtnl_unlock(); 890 rtnl_unlock();
891 tty->receive_room = 65536; /* We don't flow control */
900 return sl->dev->base_addr; 892 return sl->dev->base_addr;
901 893
902err_free_bufs: 894err_free_bufs:
@@ -1329,7 +1321,6 @@ static struct tty_ldisc sl_ldisc = {
1329 .close = slip_close, 1321 .close = slip_close,
1330 .ioctl = slip_ioctl, 1322 .ioctl = slip_ioctl,
1331 .receive_buf = slip_receive_buf, 1323 .receive_buf = slip_receive_buf,
1332 .receive_room = slip_receive_room,
1333 .write_wakeup = slip_write_wakeup, 1324 .write_wakeup = slip_write_wakeup,
1334}; 1325};
1335 1326
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index ba8593ac3f8a..3db30cd0625e 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -168,18 +168,6 @@ static int __init do_ultra_probe(struct net_device *dev)
168 return -ENODEV; 168 return -ENODEV;
169} 169}
170 170
171static void cleanup_card(struct net_device *dev)
172{
173 /* NB: ultra_close_card() does free_irq */
174#ifdef __ISAPNP__
175 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
176 if (idev)
177 pnp_device_detach(idev);
178#endif
179 release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
180 iounmap(ei_status.mem);
181}
182
183#ifndef MODULE 171#ifndef MODULE
184struct net_device * __init ultra_probe(int unit) 172struct net_device * __init ultra_probe(int unit)
185{ 173{
@@ -594,6 +582,18 @@ init_module(void)
594 return -ENXIO; 582 return -ENXIO;
595} 583}
596 584
585static void cleanup_card(struct net_device *dev)
586{
587 /* NB: ultra_close_card() does free_irq */
588#ifdef __ISAPNP__
589 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
590 if (idev)
591 pnp_device_detach(idev);
592#endif
593 release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
594 iounmap(ei_status.mem);
595}
596
597void 597void
598cleanup_module(void) 598cleanup_module(void)
599{ 599{
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 28bf2e69eb5e..7ec08127c9d6 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -88,7 +88,6 @@ static const char version[] =
88#include <linux/skbuff.h> 88#include <linux/skbuff.h>
89 89
90#include <asm/io.h> 90#include <asm/io.h>
91#include <asm/irq.h>
92 91
93#include "smc91x.h" 92#include "smc91x.h"
94 93
@@ -2007,12 +2006,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
2007 } 2006 }
2008 2007
2009 /* Grab the IRQ */ 2008 /* Grab the IRQ */
2010 retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev); 2009 retval = request_irq(dev->irq, &smc_interrupt, SMC_IRQ_FLAGS, dev->name, dev);
2011 if (retval) 2010 if (retval)
2012 goto err_out; 2011 goto err_out;
2013 2012
2014 set_irq_type(dev->irq, SMC_IRQ_TRIGGER_TYPE);
2015
2016#ifdef SMC_USE_PXA_DMA 2013#ifdef SMC_USE_PXA_DMA
2017 { 2014 {
2018 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, 2015 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 5c2824be4ee6..e0efd1964e72 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -90,7 +90,7 @@
90 __l--; \ 90 __l--; \
91 } \ 91 } \
92 } while (0) 92 } while (0)
93#define set_irq_type(irq, type) 93#define SMC_IRQ_FLAGS (0)
94 94
95#elif defined(CONFIG_SA1100_PLEB) 95#elif defined(CONFIG_SA1100_PLEB)
96/* We can only do 16-bit reads and writes in the static memory space. */ 96/* We can only do 16-bit reads and writes in the static memory space. */
@@ -109,7 +109,7 @@
109#define SMC_outw(v, a, r) writew(v, (a) + (r)) 109#define SMC_outw(v, a, r) writew(v, (a) + (r))
110#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 110#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
111 111
112#define set_irq_type(irq, type) do {} while (0) 112#define SMC_IRQ_FLAGS (0)
113 113
114#elif defined(CONFIG_SA1100_ASSABET) 114#elif defined(CONFIG_SA1100_ASSABET)
115 115
@@ -185,11 +185,11 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
185#include <asm/mach-types.h> 185#include <asm/mach-types.h>
186#include <asm/arch/cpu.h> 186#include <asm/arch/cpu.h>
187 187
188#define SMC_IRQ_TRIGGER_TYPE (( \ 188#define SMC_IRQ_FLAGS (( \
189 machine_is_omap_h2() \ 189 machine_is_omap_h2() \
190 || machine_is_omap_h3() \ 190 || machine_is_omap_h3() \
191 || (machine_is_omap_innovator() && !cpu_is_omap1510()) \ 191 || (machine_is_omap_innovator() && !cpu_is_omap1510()) \
192 ) ? IRQT_FALLING : IRQT_RISING) 192 ) ? SA_TRIGGER_FALLING : SA_TRIGGER_RISING)
193 193
194 194
195#elif defined(CONFIG_SH_SH4202_MICRODEV) 195#elif defined(CONFIG_SH_SH4202_MICRODEV)
@@ -209,7 +209,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
209#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) 209#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
210#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) 210#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
211 211
212#define set_irq_type(irq, type) do {} while(0) 212#define SMC_IRQ_FLAGS (0)
213 213
214#elif defined(CONFIG_ISA) 214#elif defined(CONFIG_ISA)
215 215
@@ -237,7 +237,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l) 237#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
238#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l) 238#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
239 239
240#define set_irq_type(irq, type) do {} while(0) 240#define SMC_IRQ_FLAGS (0)
241 241
242#define RPC_LSA_DEFAULT RPC_LED_TX_RX 242#define RPC_LSA_DEFAULT RPC_LED_TX_RX
243#define RPC_LSB_DEFAULT RPC_LED_100_10 243#define RPC_LSB_DEFAULT RPC_LED_100_10
@@ -319,7 +319,7 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
319 au_writew(*_p++ , _a); \ 319 au_writew(*_p++ , _a); \
320 } while(0) 320 } while(0)
321 321
322#define set_irq_type(irq, type) do {} while (0) 322#define SMC_IRQ_FLAGS (0)
323 323
324#else 324#else
325 325
@@ -342,8 +342,8 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
342 342
343#endif 343#endif
344 344
345#ifndef SMC_IRQ_TRIGGER_TYPE 345#ifndef SMC_IRQ_FLAGS
346#define SMC_IRQ_TRIGGER_TYPE IRQT_RISING 346#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
347#endif 347#endif
348 348
349#ifdef SMC_USE_PXA_DMA 349#ifdef SMC_USE_PXA_DMA
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 0d765f1733b5..1f5975a61e1f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/config.h> 24#include <linux/config.h>
25
26#include <linux/compiler.h> 25#include <linux/compiler.h>
27#include <linux/crc32.h> 26#include <linux/crc32.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
@@ -30,6 +29,7 @@
30#include <linux/ethtool.h> 29#include <linux/ethtool.h>
31#include <linux/firmware.h> 30#include <linux/firmware.h>
32#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/in.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/ip.h> 35#include <linux/ip.h>
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/tcp.h> 44#include <linux/tcp.h>
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/vmalloc.h>
46#include <linux/wait.h> 47#include <linux/wait.h>
47#include <linux/workqueue.h> 48#include <linux/workqueue.h>
48#include <asm/bitops.h> 49#include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
108 writel(value, card->regs + reg); 109 writel(value, card->regs + reg);
109} 110}
110 111
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register 112/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to 113 * @netdev: adapter to be written to
149 * @mii_id: id of MII 114 * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
199} 164}
200 165
201/** 166/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card 167 * spider_net_rx_irq_off - switch off rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure 168 * @card: device structure
223 * 169 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register 170 * switches off rx irq by masking them out in the GHIINTnMSK register
225 */ 171 */
226static void 172static void
227spider_net_tx_irq_off(struct spider_net_card *card) 173spider_net_rx_irq_off(struct spider_net_card *card)
228{ 174{
229 u32 regvalue; 175 u32 regvalue;
230 unsigned long flags;
231 176
232 spin_lock_irqsave(&card->intmask_lock, flags); 177 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 178 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237} 179}
238 180
239/** 181/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card 182 * spider_net_rx_irq_on - switch on rx irq on this spider card
241 * @card: device structure 183 * @card: device structure
242 * 184 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register 185 * switches on rx irq by enabling them in the GHIINTnMSK register
244 */ 186 */
245static void 187static void
246spider_net_tx_irq_on(struct spider_net_card *card) 188spider_net_rx_irq_on(struct spider_net_card *card)
247{ 189{
248 u32 regvalue; 190 u32 regvalue;
249 unsigned long flags;
250 191
251 spin_lock_irqsave(&card->intmask_lock, flags); 192 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 193 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256} 194}
257 195
258/** 196/**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr) 264spider_net_get_descr_status(struct spider_net_descr *descr)
327{ 265{
328 u32 cmd_status; 266 u32 cmd_status;
329 rmb(); 267
330 cmd_status = descr->dmac_cmd_status; 268 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; 269 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only 270 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */ 271 * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
349{ 286{
350 u32 cmd_status; 287 u32 cmd_status;
351 /* read the status */ 288 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status; 289 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */ 290 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; 291 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; 293 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */ 294 /* and write it back */
359 descr->dmac_cmd_status = cmd_status; 295 descr->dmac_cmd_status = cmd_status;
360 wmb();
361} 296}
362 297
363/** 298/**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
398{ 333{
399 int i; 334 int i;
400 struct spider_net_descr *descr; 335 struct spider_net_descr *descr;
336 dma_addr_t buf;
401 337
402 spin_lock_init(&card->chain_lock); 338 atomic_set(&card->rx_chain_refill,0);
403 339
404 descr = start_descr; 340 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no); 341 memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
408 for (i=0; i<no; i++, descr++) { 344 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 345 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410 346
411 descr->bus_addr = 347 buf = pci_map_single(card->pdev, descr,
412 pci_map_single(card->pdev, descr, 348 SPIDER_NET_DESCR_SIZE,
413 SPIDER_NET_DESCR_SIZE, 349 PCI_DMA_BIDIRECTIONAL);
414 PCI_DMA_BIDIRECTIONAL);
415 350
416 if (descr->bus_addr == DMA_ERROR_CODE) 351 if (buf == DMA_ERROR_CODE)
417 goto iommu_error; 352 goto iommu_error;
418 353
354 descr->bus_addr = buf;
419 descr->next = descr + 1; 355 descr->next = descr + 1;
420 descr->prev = descr - 1; 356 descr->prev = descr - 1;
421 357
@@ -439,7 +375,8 @@ iommu_error:
439 for (i=0; i < no; i++, descr++) 375 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr) 376 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr, 377 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); 378 SPIDER_NET_DESCR_SIZE,
379 PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM; 380 return -ENOMEM;
444} 381}
445 382
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
459 if (descr->skb) { 396 if (descr->skb) {
460 dev_kfree_skb(descr->skb); 397 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr, 398 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU, 399 SPIDER_NET_MAX_FRAME,
463 PCI_DMA_BIDIRECTIONAL); 400 PCI_DMA_BIDIRECTIONAL);
464 } 401 }
465 descr = descr->next; 402 descr = descr->next;
@@ -480,12 +417,13 @@ static int
480spider_net_prepare_rx_descr(struct spider_net_card *card, 417spider_net_prepare_rx_descr(struct spider_net_card *card,
481 struct spider_net_descr *descr) 418 struct spider_net_descr *descr)
482{ 419{
420 dma_addr_t buf;
483 int error = 0; 421 int error = 0;
484 int offset; 422 int offset;
485 int bufsize; 423 int bufsize;
486 424
487 /* we need to round up the buffer size to a multiple of 128 */ 425 /* we need to round up the buffer size to a multiple of 128 */
488 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & 426 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
489 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 427 (~(SPIDER_NET_RXBUF_ALIGN - 1));
490 428
491 /* and we need to have it 128 byte aligned, therefore we allocate a 429 /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
493 /* allocate an skb */ 431 /* allocate an skb */
494 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 432 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
495 if (!descr->skb) { 433 if (!descr->skb) {
496 if (net_ratelimit()) 434 if (netif_msg_rx_err(card) && net_ratelimit())
497 if (netif_msg_rx_err(card)) 435 pr_err("Not enough memory to allocate rx buffer\n");
498 pr_err("Not enough memory to allocate "
499 "rx buffer\n");
500 return -ENOMEM; 436 return -ENOMEM;
501 } 437 }
502 descr->buf_size = bufsize; 438 descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
510 if (offset) 446 if (offset)
511 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 447 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
512 /* io-mmu-map the skb */ 448 /* io-mmu-map the skb */
513 descr->buf_addr = pci_map_single(card->pdev, descr->skb->data, 449 buf = pci_map_single(card->pdev, descr->skb->data,
514 SPIDER_NET_MAX_MTU, 450 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
515 PCI_DMA_BIDIRECTIONAL); 451 descr->buf_addr = buf;
516 if (descr->buf_addr == DMA_ERROR_CODE) { 452 if (buf == DMA_ERROR_CODE) {
517 dev_kfree_skb_any(descr->skb); 453 dev_kfree_skb_any(descr->skb);
518 if (netif_msg_rx_err(card)) 454 if (netif_msg_rx_err(card) && net_ratelimit())
519 pr_err("Could not iommu-map rx buffer\n"); 455 pr_err("Could not iommu-map rx buffer\n");
520 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 456 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
521 } else { 457 } else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
526} 462}
527 463
528/** 464/**
529 * spider_net_enable_rxctails - sets RX dmac chain tail addresses 465 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
530 * @card: card structure 466 * @card: card structure
531 * 467 *
532 * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the 468 * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
533 * chip by writing to the appropriate register. DMA is enabled in 469 * chip by writing to the appropriate register. DMA is enabled in
534 * spider_net_enable_rxdmac. 470 * spider_net_enable_rxdmac.
535 */ 471 */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
551static void 487static void
552spider_net_enable_rxdmac(struct spider_net_card *card) 488spider_net_enable_rxdmac(struct spider_net_card *card)
553{ 489{
490 wmb();
554 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 491 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
555 SPIDER_NET_DMA_RX_VALUE); 492 SPIDER_NET_DMA_RX_VALUE);
556} 493}
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
559 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 496 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
560 * @card: card structure 497 * @card: card structure
561 * 498 *
562 * refills descriptors in all chains (last used chain first): allocates skbs 499 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
563 * and iommu-maps them.
564 */ 500 */
565static void 501static void
566spider_net_refill_rx_chain(struct spider_net_card *card) 502spider_net_refill_rx_chain(struct spider_net_card *card)
567{ 503{
568 struct spider_net_descr_chain *chain; 504 struct spider_net_descr_chain *chain;
569 int count = 0;
570 unsigned long flags;
571 505
572 chain = &card->rx_chain; 506 chain = &card->rx_chain;
573 507
574 spin_lock_irqsave(&card->chain_lock, flags); 508 /* one context doing the refill (and a second context seeing that
575 while (spider_net_get_descr_status(chain->head) == 509 * and omitting it) is ok. If called by NAPI, we'll be called again
576 SPIDER_NET_DESCR_NOT_IN_USE) { 510 * as spider_net_decode_one_descr is called several times. If some
577 if (spider_net_prepare_rx_descr(card, chain->head)) 511 * interrupt calls us, the NAPI is about to clean up anyway. */
578 break; 512 if (atomic_inc_return(&card->rx_chain_refill) == 1)
579 count++; 513 while (spider_net_get_descr_status(chain->head) ==
580 chain->head = chain->head->next; 514 SPIDER_NET_DESCR_NOT_IN_USE) {
581 } 515 if (spider_net_prepare_rx_descr(card, chain->head))
582 spin_unlock_irqrestore(&card->chain_lock, flags); 516 break;
517 chain->head = chain->head->next;
518 }
583 519
584 /* could be optimized, only do that, if we know the DMA processing 520 atomic_dec(&card->rx_chain_refill);
585 * has terminated */
586 if (count)
587 spider_net_enable_rxdmac(card);
588} 521}
589 522
590/** 523/**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
613 /* this will allocate the rest of the rx buffers; if not, it's 546 /* this will allocate the rest of the rx buffers; if not, it's
614 * business as usual later on */ 547 * business as usual later on */
615 spider_net_refill_rx_chain(card); 548 spider_net_refill_rx_chain(card);
549 spider_net_enable_rxdmac(card);
616 return 0; 550 return 0;
617 551
618error: 552error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
649 * @card: adapter structure 583 * @card: adapter structure
650 * @brutal: if set, don't care about whether descriptor seems to be in use 584 * @brutal: if set, don't care about whether descriptor seems to be in use
651 * 585 *
652 * releases the tx descriptors that spider has finished with (if non-brutal) 586 * returns 0 if the tx ring is empty, otherwise 1.
653 * or simply release tx descriptors (if brutal) 587 *
588 * spider_net_release_tx_chain releases the tx descriptors that spider has
589 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
590 * If some other context is calling this function, we return 1 so that we're
591 * scheduled again (if we were scheduled) and will not loose initiative.
654 */ 592 */
655static void 593static int
656spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 594spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
657{ 595{
658 struct spider_net_descr_chain *tx_chain = &card->tx_chain; 596 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
659 enum spider_net_descr_status status; 597 enum spider_net_descr_status status;
660 598
661 spider_net_tx_irq_off(card); 599 if (atomic_inc_return(&card->tx_chain_release) != 1) {
600 atomic_dec(&card->tx_chain_release);
601 return 1;
602 }
662 603
663 /* no lock for chain needed, if this is only executed once at a time */
664again:
665 for (;;) { 604 for (;;) {
666 status = spider_net_get_descr_status(tx_chain->tail); 605 status = spider_net_get_descr_status(tx_chain->tail);
667 switch (status) { 606 switch (status) {
668 case SPIDER_NET_DESCR_CARDOWNED: 607 case SPIDER_NET_DESCR_CARDOWNED:
669 if (!brutal) goto out; 608 if (!brutal)
609 goto out;
670 /* fallthrough, if we release the descriptors 610 /* fallthrough, if we release the descriptors
671 * brutally (then we don't care about 611 * brutally (then we don't care about
672 * SPIDER_NET_DESCR_CARDOWNED) */ 612 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
693 tx_chain->tail = tx_chain->tail->next; 633 tx_chain->tail = tx_chain->tail->next;
694 } 634 }
695out: 635out:
636 atomic_dec(&card->tx_chain_release);
637
696 netif_wake_queue(card->netdev); 638 netif_wake_queue(card->netdev);
697 639
698 if (!brutal) { 640 if (status == SPIDER_NET_DESCR_CARDOWNED)
699 /* switch on tx irqs (while we are still in the interrupt 641 return 1;
700 * handler, so we don't get an interrupt), check again 642 return 0;
701 * for done descriptors. This results in fewer interrupts */ 643}
702 spider_net_tx_irq_on(card);
703 status = spider_net_get_descr_status(tx_chain->tail);
704 switch (status) {
705 case SPIDER_NET_DESCR_RESPONSE_ERROR:
706 case SPIDER_NET_DESCR_PROTECTION_ERROR:
707 case SPIDER_NET_DESCR_FORCE_END:
708 case SPIDER_NET_DESCR_COMPLETE:
709 goto again;
710 default:
711 break;
712 }
713 }
714 644
645/**
646 * spider_net_cleanup_tx_ring - cleans up the TX ring
647 * @card: card structure
648 *
649 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
650 * interrupts to cleanup our TX ring) and returns sent packets to the stack
651 * by freeing them
652 */
653static void
654spider_net_cleanup_tx_ring(struct spider_net_card *card)
655{
656 if ( (spider_net_release_tx_chain(card, 0)) &&
657 (card->netdev->flags & IFF_UP) ) {
658 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
659 }
715} 660}
716 661
717/** 662/**
@@ -726,16 +671,22 @@ out:
726static u8 671static u8
727spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 672spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
728{ 673{
729 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
730 * ff:ff:ff:ff:ff:ff must result in 0xfd */
731 u32 crc; 674 u32 crc;
732 u8 hash; 675 u8 hash;
676 char addr_for_crc[ETH_ALEN] = { 0, };
677 int i, bit;
733 678
734 crc = crc32_be(~0, addr, netdev->addr_len); 679 for (i = 0; i < ETH_ALEN * 8; i++) {
680 bit = (addr[i / 8] >> (i % 8)) & 1;
681 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
682 }
683
684 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
735 685
736 hash = (crc >> 27); 686 hash = (crc >> 27);
737 hash <<= 3; 687 hash <<= 3;
738 hash |= crc & 7; 688 hash |= crc & 7;
689 hash &= 0xff;
739 690
740 return hash; 691 return hash;
741} 692}
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
821{ 772{
822 struct spider_net_card *card = netdev_priv(netdev); 773 struct spider_net_card *card = netdev_priv(netdev);
823 774
775 tasklet_kill(&card->rxram_full_tl);
824 netif_poll_disable(netdev); 776 netif_poll_disable(netdev);
825 netif_carrier_off(netdev); 777 netif_carrier_off(netdev);
826 netif_stop_queue(netdev); 778 netif_stop_queue(netdev);
779 del_timer_sync(&card->tx_timer);
827 780
828 /* disable/mask all interrupts */ 781 /* disable/mask all interrupts */
829 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 782 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
872 * @skb: packet to consider 825 * @skb: packet to consider
873 * 826 *
874 * fills out the command and status field of the descriptor structure, 827 * fills out the command and status field of the descriptor structure,
875 * depending on hardware checksum settings. This function assumes a wmb() 828 * depending on hardware checksum settings.
876 * has executed before.
877 */ 829 */
878static void 830static void
879spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, 831spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
880 struct sk_buff *skb) 832 struct sk_buff *skb)
881{ 833{
834 /* make sure the other fields in the descriptor are written */
835 wmb();
836
882 if (skb->ip_summed != CHECKSUM_HW) { 837 if (skb->ip_summed != CHECKSUM_HW) {
883 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 838 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
884 return; 839 return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
887 /* is packet ip? 842 /* is packet ip?
888 * if yes: tcp? udp? */ 843 * if yes: tcp? udp? */
889 if (skb->protocol == htons(ETH_P_IP)) { 844 if (skb->protocol == htons(ETH_P_IP)) {
890 if (skb->nh.iph->protocol == IPPROTO_TCP) { 845 if (skb->nh.iph->protocol == IPPROTO_TCP)
891 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; 846 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
892 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 847 else if (skb->nh.iph->protocol == IPPROTO_UDP)
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; 848 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
894 } else { /* the stack should checksum non-tcp and non-udp 849 else /* the stack should checksum non-tcp and non-udp
895 packets on his own: NETIF_F_IP_CSUM */ 850 packets on his own: NETIF_F_IP_CSUM */
896 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 851 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
897 }
898 } 852 }
899} 853}
900 854
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
914 struct spider_net_descr *descr, 868 struct spider_net_descr *descr,
915 struct sk_buff *skb) 869 struct sk_buff *skb)
916{ 870{
917 descr->buf_addr = pci_map_single(card->pdev, skb->data, 871 dma_addr_t buf;
918 skb->len, PCI_DMA_BIDIRECTIONAL); 872
919 if (descr->buf_addr == DMA_ERROR_CODE) { 873 buf = pci_map_single(card->pdev, skb->data,
920 if (netif_msg_tx_err(card)) 874 skb->len, PCI_DMA_BIDIRECTIONAL);
875 if (buf == DMA_ERROR_CODE) {
876 if (netif_msg_tx_err(card) && net_ratelimit())
921 pr_err("could not iommu-map packet (%p, %i). " 877 pr_err("could not iommu-map packet (%p, %i). "
922 "Dropping packet\n", skb->data, skb->len); 878 "Dropping packet\n", skb->data, skb->len);
923 return -ENOMEM; 879 return -ENOMEM;
924 } 880 }
925 881
882 descr->buf_addr = buf;
926 descr->buf_size = skb->len; 883 descr->buf_size = skb->len;
927 descr->skb = skb; 884 descr->skb = skb;
928 descr->data_status = 0; 885 descr->data_status = 0;
929 886
930 /* make sure the above values are in memory before we change the
931 * status */
932 wmb();
933
934 spider_net_set_txdescr_cmdstat(descr,skb); 887 spider_net_set_txdescr_cmdstat(descr,skb);
935 888
936 return 0; 889 return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
972 struct spider_net_descr *descr; 925 struct spider_net_descr *descr;
973 int result; 926 int result;
974 927
975 descr = spider_net_get_next_tx_descr(card); 928 spider_net_release_tx_chain(card, 0);
976 929
977 if (!descr) { 930 descr = spider_net_get_next_tx_descr(card);
978 netif_stop_queue(netdev);
979 931
980 descr = spider_net_get_next_tx_descr(card); 932 if (!descr)
981 if (!descr) 933 goto error;
982 goto error;
983 else
984 netif_start_queue(netdev);
985 }
986 934
987 result = spider_net_prepare_tx_descr(card, descr, skb); 935 result = spider_net_prepare_tx_descr(card, descr, skb);
988 if (result) 936 if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
990 938
991 card->tx_chain.head = card->tx_chain.head->next; 939 card->tx_chain.head = card->tx_chain.head->next;
992 940
993 /* make sure the status from spider_net_prepare_tx_descr is in
994 * memory before we check out the previous descriptor */
995 wmb();
996
997 if (spider_net_get_descr_status(descr->prev) != 941 if (spider_net_get_descr_status(descr->prev) !=
998 SPIDER_NET_DESCR_CARDOWNED) 942 SPIDER_NET_DESCR_CARDOWNED) {
999 spider_net_kick_tx_dma(card, descr); 943 /* make sure the current descriptor is in memory. Then
944 * kicking it on again makes sense, if the previous is not
945 * card-owned anymore. Check the previous descriptor twice
946 * to omit an mb() in heavy traffic cases */
947 mb();
948 if (spider_net_get_descr_status(descr->prev) !=
949 SPIDER_NET_DESCR_CARDOWNED)
950 spider_net_kick_tx_dma(card, descr);
951 }
952
953 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
1000 954
1001 return NETDEV_TX_OK; 955 return NETDEV_TX_OK;
1002 956
1003error: 957error:
1004 card->netdev_stats.tx_dropped++; 958 card->netdev_stats.tx_dropped++;
1005 return NETDEV_TX_LOCKED; 959 return NETDEV_TX_BUSY;
1006} 960}
1007 961
1008/** 962/**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1027 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 981 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1028 * @descr: descriptor to process 982 * @descr: descriptor to process
1029 * @card: card structure 983 * @card: card structure
984 * @napi: whether caller is in NAPI context
1030 * 985 *
1031 * returns 1 on success, 0 if no packet was passed to the stack 986 * returns 1 on success, 0 if no packet was passed to the stack
1032 * 987 *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1035 */ 990 */
1036static int 991static int
1037spider_net_pass_skb_up(struct spider_net_descr *descr, 992spider_net_pass_skb_up(struct spider_net_descr *descr,
1038 struct spider_net_card *card) 993 struct spider_net_card *card, int napi)
1039{ 994{
1040 struct sk_buff *skb; 995 struct sk_buff *skb;
1041 struct net_device *netdev; 996 struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1046 1001
1047 netdev = card->netdev; 1002 netdev = card->netdev;
1048 1003
1049 /* check for errors in the data_error flag */ 1004 /* unmap descriptor */
1050 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && 1005 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1051 netif_msg_rx_err(card))
1052 pr_err("error in received descriptor found, "
1053 "data_status=x%08x, data_error=x%08x\n",
1054 data_status, data_error);
1055
1056 /* prepare skb, unmap descriptor */
1057 skb = descr->skb;
1058 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1059 PCI_DMA_BIDIRECTIONAL); 1006 PCI_DMA_BIDIRECTIONAL);
1060 1007
1061 /* the cases we'll throw away the packet immediately */ 1008 /* the cases we'll throw away the packet immediately */
1062 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) 1009 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1010 if (netif_msg_rx_err(card))
1011 pr_err("error in received descriptor found, "
1012 "data_status=x%08x, data_error=x%08x\n",
1013 data_status, data_error);
1063 return 0; 1014 return 0;
1015 }
1064 1016
1017 skb = descr->skb;
1065 skb->dev = netdev; 1018 skb->dev = netdev;
1066 skb_put(skb, descr->valid_size); 1019 skb_put(skb, descr->valid_size);
1067 1020
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1073 1026
1074 /* checksum offload */ 1027 /* checksum offload */
1075 if (card->options.rx_csum) { 1028 if (card->options.rx_csum) {
1076 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && 1029 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1077 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) 1030 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1031 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1078 skb->ip_summed = CHECKSUM_UNNECESSARY; 1032 skb->ip_summed = CHECKSUM_UNNECESSARY;
1079 else 1033 else
1080 skb->ip_summed = CHECKSUM_NONE; 1034 skb->ip_summed = CHECKSUM_NONE;
1081 } else { 1035 } else
1082 skb->ip_summed = CHECKSUM_NONE; 1036 skb->ip_summed = CHECKSUM_NONE;
1083 }
1084 1037
1085 if (data_status & SPIDER_NET_VLAN_PACKET) { 1038 if (data_status & SPIDER_NET_VLAN_PACKET) {
1086 /* further enhancements: HW-accel VLAN 1039 /* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1089 } 1042 }
1090 1043
1091 /* pass skb up to stack */ 1044 /* pass skb up to stack */
1092 netif_receive_skb(skb); 1045 if (napi)
1046 netif_receive_skb(skb);
1047 else
1048 netif_rx_ni(skb);
1093 1049
1094 /* update netdevice statistics */ 1050 /* update netdevice statistics */
1095 card->netdev_stats.rx_packets++; 1051 card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1099} 1055}
1100 1056
1101/** 1057/**
1102 * spider_net_decode_descr - processes an rx descriptor 1058 * spider_net_decode_one_descr - processes an rx descriptor
1103 * @card: card structure 1059 * @card: card structure
1060 * @napi: whether caller is in NAPI context
1104 * 1061 *
1105 * returns 1 if a packet has been sent to the stack, otherwise 0 1062 * returns 1 if a packet has been sent to the stack, otherwise 0
1106 * 1063 *
1107 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1064 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1108 * the packet up to the stack 1065 * the packet up to the stack. This function is called in softirq
1066 * context, e.g. either bottom half from interrupt or NAPI polling context
1109 */ 1067 */
1110static int 1068static int
1111spider_net_decode_one_descr(struct spider_net_card *card) 1069spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1112{ 1070{
1113 enum spider_net_descr_status status; 1071 enum spider_net_descr_status status;
1114 struct spider_net_descr *descr; 1072 struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1122 1080
1123 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1081 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1124 /* nothing in the descriptor yet */ 1082 /* nothing in the descriptor yet */
1125 return 0; 1083 result=0;
1084 goto out;
1126 } 1085 }
1127 1086
1128 if (status == SPIDER_NET_DESCR_NOT_IN_USE) { 1087 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1129 /* not initialized yet, I bet chain->tail == chain->head 1088 /* not initialized yet, the ring must be empty */
1130 * and the ring is empty */
1131 spider_net_refill_rx_chain(card); 1089 spider_net_refill_rx_chain(card);
1132 return 0; 1090 spider_net_enable_rxdmac(card);
1091 result=0;
1092 goto out;
1133 } 1093 }
1134 1094
1135 /* descriptor definitively used -- move on head */ 1095 /* descriptor definitively used -- move on tail */
1136 chain->tail = descr->next; 1096 chain->tail = descr->next;
1137 1097
1138 result = 0; 1098 result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1143 pr_err("%s: dropping RX descriptor with state %d\n", 1103 pr_err("%s: dropping RX descriptor with state %d\n",
1144 card->netdev->name, status); 1104 card->netdev->name, status);
1145 card->netdev_stats.rx_dropped++; 1105 card->netdev_stats.rx_dropped++;
1106 pci_unmap_single(card->pdev, descr->buf_addr,
1107 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
1108 dev_kfree_skb_irq(descr->skb);
1146 goto refill; 1109 goto refill;
1147 } 1110 }
1148 1111
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1155 } 1118 }
1156 1119
1157 /* ok, we've got a packet in descr */ 1120 /* ok, we've got a packet in descr */
1158 result = spider_net_pass_skb_up(descr, card); 1121 result = spider_net_pass_skb_up(descr, card, napi);
1159refill: 1122refill:
1160 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1123 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1161 /* change the descriptor state: */ 1124 /* change the descriptor state: */
1162 spider_net_refill_rx_chain(card); 1125 if (!napi)
1163 1126 spider_net_refill_rx_chain(card);
1127out:
1164 return result; 1128 return result;
1165} 1129}
1166 1130
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1186 packets_to_do = min(*budget, netdev->quota); 1150 packets_to_do = min(*budget, netdev->quota);
1187 1151
1188 while (packets_to_do) { 1152 while (packets_to_do) {
1189 if (spider_net_decode_one_descr(card)) { 1153 if (spider_net_decode_one_descr(card, 1)) {
1190 packets_done++; 1154 packets_done++;
1191 packets_to_do--; 1155 packets_to_do--;
1192 } else { 1156 } else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1198 1162
1199 netdev->quota -= packets_done; 1163 netdev->quota -= packets_done;
1200 *budget -= packets_done; 1164 *budget -= packets_done;
1165 spider_net_refill_rx_chain(card);
1201 1166
1202 /* if all packets are in the stack, enable interrupts and return 0 */ 1167 /* if all packets are in the stack, enable interrupts and return 0 */
1203 /* if not, return 1 */ 1168 /* if not, return 1 */
@@ -1342,6 +1307,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
1342} 1307}
1343 1308
1344/** 1309/**
1310 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1311 * @card: card structure
1312 *
1313 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1314 * more packets in it and empty its RX RAM. This is called in bottom half
1315 * context
1316 */
1317static void
1318spider_net_handle_rxram_full(struct spider_net_card *card)
1319{
1320 while (spider_net_decode_one_descr(card, 0))
1321 ;
1322 spider_net_enable_rxchtails(card);
1323 spider_net_enable_rxdmac(card);
1324 netif_rx_schedule(card->netdev);
1325}
1326
1327/**
1345 * spider_net_handle_error_irq - handles errors raised by an interrupt 1328 * spider_net_handle_error_irq - handles errors raised by an interrupt
1346 * @card: card structure 1329 * @card: card structure
1347 * @status_reg: interrupt status register 0 (GHIINT0STS) 1330 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1449 switch (i) 1432 switch (i)
1450 { 1433 {
1451 case SPIDER_NET_GTMFLLINT: 1434 case SPIDER_NET_GTMFLLINT:
1452 if (netif_msg_intr(card)) 1435 if (netif_msg_intr(card) && net_ratelimit())
1453 pr_err("Spider TX RAM full\n"); 1436 pr_err("Spider TX RAM full\n");
1454 show_error = 0; 1437 show_error = 0;
1455 break; 1438 break;
1439 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1440 case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1441 case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1442 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1456 case SPIDER_NET_GRMFLLINT: 1443 case SPIDER_NET_GRMFLLINT:
1457 if (netif_msg_intr(card)) 1444 if (netif_msg_intr(card) && net_ratelimit())
1458 pr_err("Spider RX RAM full, incoming packets " 1445 pr_err("Spider RX RAM full, incoming packets "
1459 "might be discarded !\n"); 1446 "might be discarded!\n");
1460 netif_rx_schedule(card->netdev); 1447 spider_net_rx_irq_off(card);
1461 spider_net_enable_rxchtails(card); 1448 tasklet_schedule(&card->rxram_full_tl);
1462 spider_net_enable_rxdmac(card); 1449 show_error = 0;
1463 break; 1450 break;
1464 1451
1465 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1452 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1467 /* allrighty. tx from previous descr ok */ 1454 /* allrighty. tx from previous descr ok */
1468 show_error = 0; 1455 show_error = 0;
1469 break; 1456 break;
1470 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1471 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1472 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1473 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1474 1457
1475 /* chain end */ 1458 /* chain end */
1476 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1459 case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1482 "restarting DMAC %c.\n", 1465 "restarting DMAC %c.\n",
1483 'D'+i-SPIDER_NET_GDDDCEINT); 1466 'D'+i-SPIDER_NET_GDDDCEINT);
1484 spider_net_refill_rx_chain(card); 1467 spider_net_refill_rx_chain(card);
1468 spider_net_enable_rxdmac(card);
1485 show_error = 0; 1469 show_error = 0;
1486 break; 1470 break;
1487 1471
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1492 case SPIDER_NET_GDAINVDINT: 1476 case SPIDER_NET_GDAINVDINT:
1493 /* could happen when rx chain is full */ 1477 /* could happen when rx chain is full */
1494 spider_net_refill_rx_chain(card); 1478 spider_net_refill_rx_chain(card);
1479 spider_net_enable_rxdmac(card);
1495 show_error = 0; 1480 show_error = 0;
1496 break; 1481 break;
1497 1482
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1580 if (!status_reg) 1565 if (!status_reg)
1581 return IRQ_NONE; 1566 return IRQ_NONE;
1582 1567
1583 if (status_reg & SPIDER_NET_TXINT)
1584 spider_net_release_tx_chain(card, 0);
1585
1586 if (status_reg & SPIDER_NET_RXINT ) { 1568 if (status_reg & SPIDER_NET_RXINT ) {
1587 spider_net_rx_irq_off(card); 1569 spider_net_rx_irq_off(card);
1588 netif_rx_schedule(netdev); 1570 netif_rx_schedule(netdev);
1589 } 1571 }
1590 1572
1591 /* we do this after rx and tx processing, as we want the tx chain 1573 if (status_reg & SPIDER_NET_ERRINT )
1592 * processed to see, whether we should restart tx dma processing */ 1574 spider_net_handle_error_irq(card, status_reg);
1593 spider_net_handle_error_irq(card, status_reg);
1594 1575
1595 /* clear interrupt sources */ 1576 /* clear interrupt sources */
1596 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1577 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
1831/** 1812/**
1832 * spider_net_download_firmware - loads firmware into the adapter 1813 * spider_net_download_firmware - loads firmware into the adapter
1833 * @card: card structure 1814 * @card: card structure
1834 * @firmware: firmware pointer 1815 * @firmware_ptr: pointer to firmware data
1835 * 1816 *
1836 * spider_net_download_firmware loads the firmware opened by 1817 * spider_net_download_firmware loads the firmware data into the
1837 * spider_net_init_firmware into the adapter. 1818 * adapter. It assumes the length etc. to be allright.
1838 */ 1819 */
1839static void 1820static int
1840spider_net_download_firmware(struct spider_net_card *card, 1821spider_net_download_firmware(struct spider_net_card *card,
1841 const struct firmware *firmware) 1822 u8 *firmware_ptr)
1842{ 1823{
1843 int sequencer, i; 1824 int sequencer, i;
1844 u32 *fw_ptr = (u32 *)firmware->data; 1825 u32 *fw_ptr = (u32 *)firmware_ptr;
1845 1826
1846 /* stop sequencers */ 1827 /* stop sequencers */
1847 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1828 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1848 SPIDER_NET_STOP_SEQ_VALUE); 1829 SPIDER_NET_STOP_SEQ_VALUE);
1849 1830
1850 for (sequencer = 0; sequencer < 6; sequencer++) { 1831 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1832 sequencer++) {
1851 spider_net_write_reg(card, 1833 spider_net_write_reg(card,
1852 SPIDER_NET_GSnPRGADR + sequencer * 8, 0); 1834 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1853 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1835 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1854 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1836 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1855 sequencer * 8, *fw_ptr); 1837 sequencer * 8, *fw_ptr);
1856 fw_ptr++; 1838 fw_ptr++;
1857 } 1839 }
1858 } 1840 }
1859 1841
1842 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1843 return -EIO;
1844
1860 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1845 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1861 SPIDER_NET_RUN_SEQ_VALUE); 1846 SPIDER_NET_RUN_SEQ_VALUE);
1847
1848 return 0;
1862} 1849}
1863 1850
1864/** 1851/**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
1890static int 1877static int
1891spider_net_init_firmware(struct spider_net_card *card) 1878spider_net_init_firmware(struct spider_net_card *card)
1892{ 1879{
1893 const struct firmware *firmware; 1880 struct firmware *firmware = NULL;
1894 int err = -EIO; 1881 struct device_node *dn;
1882 u8 *fw_prop = NULL;
1883 int err = -ENOENT;
1884 int fw_size;
1885
1886 if (request_firmware((const struct firmware **)&firmware,
1887 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1888 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1889 netif_msg_probe(card) ) {
1890 pr_err("Incorrect size of spidernet firmware in " \
1891 "filesystem. Looking in host firmware...\n");
1892 goto try_host_fw;
1893 }
1894 err = spider_net_download_firmware(card, firmware->data);
1895 1895
1896 if (request_firmware(&firmware, 1896 release_firmware(firmware);
1897 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { 1897 if (err)
1898 if (netif_msg_probe(card)) 1898 goto try_host_fw;
1899 pr_err("Couldn't read in sequencer data file %s.\n",
1900 SPIDER_NET_FIRMWARE_NAME);
1901 firmware = NULL;
1902 goto out;
1903 }
1904 1899
1905 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { 1900 goto done;
1906 if (netif_msg_probe(card))
1907 pr_err("Invalid size of sequencer data file %s.\n",
1908 SPIDER_NET_FIRMWARE_NAME);
1909 goto out;
1910 } 1901 }
1911 1902
1912 spider_net_download_firmware(card, firmware); 1903try_host_fw:
1904 dn = pci_device_to_OF_node(card->pdev);
1905 if (!dn)
1906 goto out_err;
1913 1907
1914 err = 0; 1908 fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
1915out: 1909 if (!fw_prop)
1916 release_firmware(firmware); 1910 goto out_err;
1911
1912 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1913 netif_msg_probe(card) ) {
1914 pr_err("Incorrect size of spidernet firmware in " \
1915 "host firmware\n");
1916 goto done;
1917 }
1917 1918
1919 err = spider_net_download_firmware(card, fw_prop);
1920
1921done:
1922 return err;
1923out_err:
1924 if (netif_msg_probe(card))
1925 pr_err("Couldn't find spidernet firmware in filesystem " \
1926 "or host firmware\n");
1918 return err; 1927 return err;
1919} 1928}
1920 1929
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1934 SPIDER_NET_CKRCTRL_RUN_VALUE); 1943 SPIDER_NET_CKRCTRL_RUN_VALUE);
1935 1944
1936 /* empty sequencer data */ 1945 /* empty sequencer data */
1937 for (sequencer = 0; sequencer < 6; sequencer++) { 1946 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1947 sequencer++) {
1938 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1948 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1939 sequencer * 8, 0x0); 1949 sequencer * 8, 0x0);
1940 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1950 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1951 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1942 sequencer * 8, 0x0); 1952 sequencer * 8, 0x0);
1943 } 1953 }
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
2061 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2071 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2062 2072
2063 pci_set_drvdata(card->pdev, netdev); 2073 pci_set_drvdata(card->pdev, netdev);
2064 spin_lock_init(&card->intmask_lock); 2074
2075 atomic_set(&card->tx_chain_release,0);
2076 card->rxram_full_tl.data = (unsigned long) card;
2077 card->rxram_full_tl.func =
2078 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2079 init_timer(&card->tx_timer);
2080 card->tx_timer.function =
2081 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2082 card->tx_timer.data = (unsigned long) card;
2065 netdev->irq = card->pdev->irq; 2083 netdev->irq = card->pdev->irq;
2066 2084
2067 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2085 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 22b2f2347351..5922b529a048 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
33 33
34extern char spider_net_driver_name[]; 34extern char spider_net_driver_name[];
35 35
36#define SPIDER_NET_MAX_MTU 2308 36#define SPIDER_NET_MAX_FRAME 2312
37#define SPIDER_NET_MAX_MTU 2294
37#define SPIDER_NET_MIN_MTU 64 38#define SPIDER_NET_MIN_MTU 64
38 39
39#define SPIDER_NET_RXBUF_ALIGN 128 40#define SPIDER_NET_RXBUF_ALIGN 128
40 41
41#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64 42#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
42#define SPIDER_NET_RX_DESCRIPTORS_MIN 16 43#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
43#define SPIDER_NET_RX_DESCRIPTORS_MAX 256 44#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
44 45
45#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64 46#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
46#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 47#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
47#define SPIDER_NET_TX_DESCRIPTORS_MAX 256 48#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
49
50#define SPIDER_NET_TX_TIMER 20
48 51
49#define SPIDER_NET_RX_CSUM_DEFAULT 1 52#define SPIDER_NET_RX_CSUM_DEFAULT 1
50 53
51#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ 54#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
52#define SPIDER_NET_NAPI_WEIGHT 64 55#define SPIDER_NET_NAPI_WEIGHT 64
53 56
54#define SPIDER_NET_FIRMWARE_LEN 1024 57#define SPIDER_NET_FIRMWARE_SEQS 6
58#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
59#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
60 SPIDER_NET_FIRMWARE_SEQWORDS * \
61 sizeof(u32))
55#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin" 62#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
56 63
57/** spider_net SMMIO registers */ 64/** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
142/** SCONFIG registers */ 149/** SCONFIG registers */
143#define SPIDER_NET_SCONFIG_IOACTE 0x00002810 150#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
144 151
145/** hardcoded register values */ 152/** interrupt mask registers */
146#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff 153#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
147#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff 154#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
148/* no MAC aborts -> auto retransmission */ 155/* no MAC aborts -> auto retransmission */
149#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1 156#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
150 157
151/* clear counter when interrupt sources are cleared
152#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
153/* we rely on flagged descriptor interrupts */ 158/* we rely on flagged descriptor interrupts */
154#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 159#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
155/* set this first, then the FRAMENUM_VALUE */ 160/* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
168#if 0 173#if 0
169#define SPIDER_NET_WOL_VALUE 0x00000000 174#define SPIDER_NET_WOL_VALUE 0x00000000
170#endif 175#endif
171#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8 176#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
172 177
173/* pause frames: automatic, no upper retransmission count */ 178/* pause frames: automatic, no upper retransmission count */
174/* outside loopback mode: ETOMOD signal dont matter, not connected */ 179/* outside loopback mode: ETOMOD signal dont matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
318#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ 323#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
319 (1 << SPIDER_NET_GRMFLLINT) ) 324 (1 << SPIDER_NET_GRMFLLINT) )
320 325
326#define SPIDER_NET_ERRINT ( 0xffffffff & \
327 (~SPIDER_NET_TXINT) & \
328 (~SPIDER_NET_RXINT) )
329
321#define SPIDER_NET_GPREXEC 0x80000000 330#define SPIDER_NET_GPREXEC 0x80000000
322#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 331#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
323 332
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
358/* descr ready, descr is in middle of chain, get interrupt on completion */ 367/* descr ready, descr is in middle of chain, get interrupt on completion */
359#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 368#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
360 369
361/* multicast is no problem */
362#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
363
364enum spider_net_descr_status { 370enum spider_net_descr_status {
365 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ 371 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
366 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ 372 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
373 379
374struct spider_net_descr { 380struct spider_net_descr {
375 /* as defined by the hardware */ 381 /* as defined by the hardware */
376 dma_addr_t buf_addr; 382 u32 buf_addr;
377 u32 buf_size; 383 u32 buf_size;
378 dma_addr_t next_descr_addr; 384 u32 next_descr_addr;
379 u32 dmac_cmd_status; 385 u32 dmac_cmd_status;
380 u32 result_size; 386 u32 result_size;
381 u32 valid_size; /* all zeroes for tx */ 387 u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
384 390
385 /* used in the driver */ 391 /* used in the driver */
386 struct sk_buff *skb; 392 struct sk_buff *skb;
387 dma_addr_t bus_addr; 393 u32 bus_addr;
388 struct spider_net_descr *next; 394 struct spider_net_descr *next;
389 struct spider_net_descr *prev; 395 struct spider_net_descr *prev;
390} __attribute__((aligned(32))); 396} __attribute__((aligned(32)));
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
396}; 402};
397 403
398/* descriptor data_status bits */ 404/* descriptor data_status bits */
399#define SPIDER_NET_RXIPCHK 29 405#define SPIDER_NET_RX_IPCHK 29
400#define SPIDER_NET_TCPUDPIPCHK 28 406#define SPIDER_NET_RX_TCPCHK 28
401#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
402 1 << SPIDER_NET_TCPUDPIPCHK)
403
404#define SPIDER_NET_VLAN_PACKET 21 407#define SPIDER_NET_VLAN_PACKET 21
408#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
409 (1 << SPIDER_NET_RX_TCPCHK) )
405 410
406/* descriptor data_error bits */ 411/* descriptor data_error bits */
407#define SPIDER_NET_RXIPCHKERR 27 412#define SPIDER_NET_RX_IPCHKERR 27
408#define SPIDER_NET_RXTCPCHKERR 26 413#define SPIDER_NET_RX_RXTCPCHKERR 28
409#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \ 414
410 1 << SPIDER_NET_RXTCPCHKERR) 415#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
411 416
412/* the cases we don't pass the packet to the stack */ 417/* the cases we don't pass the packet to the stack.
413#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000 418 * 701b8000 would be correct, but every packets gets that flag */
419#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
414 420
415#define SPIDER_NET_DESCR_SIZE 32 421#define SPIDER_NET_DESCR_SIZE 32
416 422
@@ -445,13 +451,16 @@ struct spider_net_card {
445 451
446 struct spider_net_descr_chain tx_chain; 452 struct spider_net_descr_chain tx_chain;
447 struct spider_net_descr_chain rx_chain; 453 struct spider_net_descr_chain rx_chain;
448 spinlock_t chain_lock; 454 atomic_t rx_chain_refill;
455 atomic_t tx_chain_release;
449 456
450 struct net_device_stats netdev_stats; 457 struct net_device_stats netdev_stats;
451 458
452 struct spider_net_options options; 459 struct spider_net_options options;
453 460
454 spinlock_t intmask_lock; 461 spinlock_t intmask_lock;
462 struct tasklet_struct rxram_full_tl;
463 struct timer_list tx_timer;
455 464
456 struct work_struct tx_timeout_task; 465 struct work_struct tx_timeout_task;
457 atomic_t tx_timeout_task_counter; 466 atomic_t tx_timeout_task_counter;
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index d42e60ba74ce..a5bb0b7633af 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
113 return 0; 113 return 0;
114} 114}
115 115
116static uint32_t
117spider_net_ethtool_get_tx_csum(struct net_device *netdev)
118{
119 return (netdev->features & NETIF_F_HW_CSUM) != 0;
120}
121
122static int
123spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
124{
125 if (data)
126 netdev->features |= NETIF_F_HW_CSUM;
127 else
128 netdev->features &= ~NETIF_F_HW_CSUM;
129
130 return 0;
131}
132
116struct ethtool_ops spider_net_ethtool_ops = { 133struct ethtool_ops spider_net_ethtool_ops = {
117 .get_settings = spider_net_ethtool_get_settings, 134 .get_settings = spider_net_ethtool_get_settings,
118 .get_drvinfo = spider_net_ethtool_get_drvinfo, 135 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
122 .nway_reset = spider_net_ethtool_nway_reset, 139 .nway_reset = spider_net_ethtool_nway_reset,
123 .get_rx_csum = spider_net_ethtool_get_rx_csum, 140 .get_rx_csum = spider_net_ethtool_get_rx_csum,
124 .set_rx_csum = spider_net_ethtool_set_rx_csum, 141 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum,
125}; 144};
126 145
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 5c8fcd40ef4d..01bdb2334058 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -389,7 +389,7 @@ static int __init lance_probe( struct net_device *dev)
389 dev->stop = &lance_close; 389 dev->stop = &lance_close;
390 dev->get_stats = &lance_get_stats; 390 dev->get_stats = &lance_get_stats;
391 dev->set_multicast_list = &set_multicast_list; 391 dev->set_multicast_list = &set_multicast_list;
392 dev->set_mac_address = 0; 392 dev->set_mac_address = NULL;
393// KLUDGE -- REMOVE ME 393// KLUDGE -- REMOVE ME
394 set_bit(__LINK_STATE_PRESENT, &dev->state); 394 set_bit(__LINK_STATE_PRESENT, &dev->state);
395 395
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eb86b059809b..f2d1dafde087 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.47" 72#define DRV_MODULE_VERSION "3.48"
73#define DRV_MODULE_RELDATE "Dec 28, 2005" 73#define DRV_MODULE_RELDATE "Jan 16, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 1325 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326 tw32(0x7d00, val); 1326 tw32(0x7d00, val);
1327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 1327 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328 tg3_nvram_lock(tp); 1328 int err;
1329
1330 err = tg3_nvram_lock(tp);
1329 tg3_halt_cpu(tp, RX_CPU_BASE); 1331 tg3_halt_cpu(tp, RX_CPU_BASE);
1330 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0); 1332 if (!err)
1331 tg3_nvram_unlock(tp); 1333 tg3_nvram_unlock(tp);
1332 } 1334 }
1333 } 1335 }
1334 1336
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
4193 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 4195 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4194 int i; 4196 int i;
4195 4197
4196 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 4198 if (tp->nvram_lock_cnt == 0) {
4197 for (i = 0; i < 8000; i++) { 4199 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4198 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 4200 for (i = 0; i < 8000; i++) {
4199 break; 4201 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4200 udelay(20); 4202 break;
4203 udelay(20);
4204 }
4205 if (i == 8000) {
4206 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4207 return -ENODEV;
4208 }
4201 } 4209 }
4202 if (i == 8000) 4210 tp->nvram_lock_cnt++;
4203 return -ENODEV;
4204 } 4211 }
4205 return 0; 4212 return 0;
4206} 4213}
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
4208/* tp->lock is held. */ 4215/* tp->lock is held. */
4209static void tg3_nvram_unlock(struct tg3 *tp) 4216static void tg3_nvram_unlock(struct tg3 *tp)
4210{ 4217{
4211 if (tp->tg3_flags & TG3_FLAG_NVRAM) 4218 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4212 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 4219 if (tp->nvram_lock_cnt > 0)
4220 tp->nvram_lock_cnt--;
4221 if (tp->nvram_lock_cnt == 0)
4222 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4223 }
4213} 4224}
4214 4225
4215/* tp->lock is held. */ 4226/* tp->lock is held. */
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
4320 void (*write_op)(struct tg3 *, u32, u32); 4331 void (*write_op)(struct tg3 *, u32, u32);
4321 int i; 4332 int i;
4322 4333
4323 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4334 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4324 tg3_nvram_lock(tp); 4335 tg3_nvram_lock(tp);
4336 /* No matching tg3_nvram_unlock() after this because
4337 * chip reset below will undo the nvram lock.
4338 */
4339 tp->nvram_lock_cnt = 0;
4340 }
4325 4341
4326 /* 4342 /*
4327 * We must avoid the readl() that normally takes place. 4343 * We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4717 (offset == RX_CPU_BASE ? "RX" : "TX")); 4733 (offset == RX_CPU_BASE ? "RX" : "TX"));
4718 return -ENODEV; 4734 return -ENODEV;
4719 } 4735 }
4736
4737 /* Clear firmware's nvram arbitration. */
4738 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4739 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4720 return 0; 4740 return 0;
4721} 4741}
4722 4742
@@ -4736,7 +4756,7 @@ struct fw_info {
4736static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, 4756static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4737 int cpu_scratch_size, struct fw_info *info) 4757 int cpu_scratch_size, struct fw_info *info)
4738{ 4758{
4739 int err, i; 4759 int err, lock_err, i;
4740 void (*write_op)(struct tg3 *, u32, u32); 4760 void (*write_op)(struct tg3 *, u32, u32);
4741 4761
4742 if (cpu_base == TX_CPU_BASE && 4762 if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4755 /* It is possible that bootcode is still loading at this point. 4775 /* It is possible that bootcode is still loading at this point.
4756 * Get the nvram lock first before halting the cpu. 4776 * Get the nvram lock first before halting the cpu.
4757 */ 4777 */
4758 tg3_nvram_lock(tp); 4778 lock_err = tg3_nvram_lock(tp);
4759 err = tg3_halt_cpu(tp, cpu_base); 4779 err = tg3_halt_cpu(tp, cpu_base);
4760 tg3_nvram_unlock(tp); 4780 if (!lock_err)
4781 tg3_nvram_unlock(tp);
4761 if (err) 4782 if (err)
4762 goto out; 4783 goto out;
4763 4784
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8182 data[1] = 1; 8203 data[1] = 1;
8183 } 8204 }
8184 if (etest->flags & ETH_TEST_FL_OFFLINE) { 8205 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8185 int irq_sync = 0; 8206 int err, irq_sync = 0;
8186 8207
8187 if (netif_running(dev)) { 8208 if (netif_running(dev)) {
8188 tg3_netif_stop(tp); 8209 tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8192 tg3_full_lock(tp, irq_sync); 8213 tg3_full_lock(tp, irq_sync);
8193 8214
8194 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 8215 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8195 tg3_nvram_lock(tp); 8216 err = tg3_nvram_lock(tp);
8196 tg3_halt_cpu(tp, RX_CPU_BASE); 8217 tg3_halt_cpu(tp, RX_CPU_BASE);
8197 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8218 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8198 tg3_halt_cpu(tp, TX_CPU_BASE); 8219 tg3_halt_cpu(tp, TX_CPU_BASE);
8199 tg3_nvram_unlock(tp); 8220 if (!err)
8221 tg3_nvram_unlock(tp);
8200 8222
8201 if (tg3_test_registers(tp) != 0) { 8223 if (tg3_test_registers(tp) != 0) {
8202 etest->flags |= ETH_TEST_FL_FAILED; 8224 etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
8588 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 8610 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8589 tp->tg3_flags |= TG3_FLAG_NVRAM; 8611 tp->tg3_flags |= TG3_FLAG_NVRAM;
8590 8612
8591 tg3_nvram_lock(tp); 8613 if (tg3_nvram_lock(tp)) {
8614 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
8615 "tg3_nvram_init failed.\n", tp->dev->name);
8616 return;
8617 }
8592 tg3_enable_nvram_access(tp); 8618 tg3_enable_nvram_access(tp);
8593 8619
8594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 8620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8686 if (offset > NVRAM_ADDR_MSK) 8712 if (offset > NVRAM_ADDR_MSK)
8687 return -EINVAL; 8713 return -EINVAL;
8688 8714
8689 tg3_nvram_lock(tp); 8715 ret = tg3_nvram_lock(tp);
8716 if (ret)
8717 return ret;
8690 8718
8691 tg3_enable_nvram_access(tp); 8719 tg3_enable_nvram_access(tp);
8692 8720
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8785 8813
8786 offset = offset + (pagesize - page_off); 8814 offset = offset + (pagesize - page_off);
8787 8815
8788 /* Nvram lock released by tg3_nvram_read() above,
8789 * so need to get it again.
8790 */
8791 tg3_nvram_lock(tp);
8792 tg3_enable_nvram_access(tp); 8816 tg3_enable_nvram_access(tp);
8793 8817
8794 /* 8818 /*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8925 else { 8949 else {
8926 u32 grc_mode; 8950 u32 grc_mode;
8927 8951
8928 tg3_nvram_lock(tp); 8952 ret = tg3_nvram_lock(tp);
8953 if (ret)
8954 return ret;
8929 8955
8930 tg3_enable_nvram_access(tp); 8956 tg3_enable_nvram_access(tp);
8931 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 8957 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 890e1635996b..e8243305f0e8 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2275,6 +2275,7 @@ struct tg3 {
2275 dma_addr_t stats_mapping; 2275 dma_addr_t stats_mapping;
2276 struct work_struct reset_task; 2276 struct work_struct reset_task;
2277 2277
2278 int nvram_lock_cnt;
2278 u32 nvram_size; 2279 u32 nvram_size;
2279 u32 nvram_pagesize; 2280 u32 nvram_pagesize;
2280 u32 nvram_jedecnum; 2281 u32 nvram_jedecnum;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 125ed00e95a5..c67c91251d04 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1564,7 +1564,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1564 dev->dev_addr, 6); 1564 dev->dev_addr, 6);
1565 } 1565 }
1566#endif 1566#endif
1567#if defined(__i386__) /* Patch up x86 BIOS bug. */ 1567#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1568 if (last_irq) 1568 if (last_irq)
1569 irq = last_irq; 1569 irq = last_irq;
1570#endif 1570#endif
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 1a4316336256..983981666800 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1689,9 +1689,9 @@ MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1689MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver"); 1689MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1690MODULE_LICENSE("GPL"); 1690MODULE_LICENSE("GPL");
1691 1691
1692MODULE_PARM(debug, "i"); 1692module_param(debug, int, 0644);
1693MODULE_PARM(mode, "i"); 1693module_param(mode, int, 0);
1694MODULE_PARM(cr6set, "i"); 1694module_param(cr6set, int, 0);
1695MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)"); 1695MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1696MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); 1696MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
1697 1697
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 82c6b757d306..c2d5907dc8e0 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -791,7 +791,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
791#endif 791#endif
792 792
793 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { 793 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
794 dev->features |= NETIF_F_HW_CSUM; 794 dev->features |= NETIF_F_IP_CSUM;
795 } 795 }
796 796
797 ret = register_netdev(dev); 797 ret = register_netdev(dev);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 2b948ea397d5..40926d779161 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -641,7 +641,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
641 spin_lock_irqsave(&sc->lmc_lock, flags); 641 spin_lock_irqsave(&sc->lmc_lock, flags);
642 642
643 if(sc->check != 0xBEAFCAFE){ 643 if(sc->check != 0xBEAFCAFE){
644 printk("LMC: Corrupt net_device stuct, breaking out\n"); 644 printk("LMC: Corrupt net_device struct, breaking out\n");
645 spin_unlock_irqrestore(&sc->lmc_lock, flags); 645 spin_unlock_irqrestore(&sc->lmc_lock, flags);
646 return; 646 return;
647 } 647 }
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 52f26b9c69d2..931cbdf6d791 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -689,7 +689,7 @@ static void cpc_tty_rx_work(void * data)
689 } 689 }
690 } 690 }
691 cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; 691 cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
692 kfree(buf); 692 kfree((void *)buf);
693 buf = cpc_tty->buf_rx.first; 693 buf = cpc_tty->buf_rx.first;
694 flg_rx = 1; 694 flg_rx = 1;
695 } 695 }
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 036adc4f8ba7..22e794071cf4 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -329,9 +329,9 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
329 329
330struct _dlci_stat 330struct _dlci_stat
331{ 331{
332 short dlci __attribute__((packed)); 332 short dlci;
333 char flags __attribute__((packed)); 333 char flags;
334}; 334} __attribute__((packed));
335 335
336struct _frad_stat 336struct _frad_stat
337{ 337{
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index bdf672c48182..9c3ccc669143 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -515,11 +515,6 @@ static int x25_asy_close(struct net_device *dev)
515 return 0; 515 return 0;
516} 516}
517 517
518static int x25_asy_receive_room(struct tty_struct *tty)
519{
520 return 65536; /* We can handle an infinite amount of data. :-) */
521}
522
523/* 518/*
524 * Handle the 'receiver data ready' interrupt. 519 * Handle the 'receiver data ready' interrupt.
525 * This function is called by the 'tty_io' module in the kernel when 520 * This function is called by the 'tty_io' module in the kernel when
@@ -573,6 +568,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
573 568
574 sl->tty = tty; 569 sl->tty = tty;
575 tty->disc_data = sl; 570 tty->disc_data = sl;
571 tty->receive_room = 65536;
576 if (tty->driver->flush_buffer) { 572 if (tty->driver->flush_buffer) {
577 tty->driver->flush_buffer(tty); 573 tty->driver->flush_buffer(tty);
578 } 574 }
@@ -779,7 +775,6 @@ static struct tty_ldisc x25_ldisc = {
779 .close = x25_asy_close_tty, 775 .close = x25_asy_close_tty,
780 .ioctl = x25_asy_ioctl, 776 .ioctl = x25_asy_ioctl,
781 .receive_buf = x25_asy_receive_buf, 777 .receive_buf = x25_asy_receive_buf,
782 .receive_room = x25_asy_receive_room,
783 .write_wakeup = x25_asy_write_wakeup, 778 .write_wakeup = x25_asy_write_wakeup,
784}; 779};
785 780
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index b03feae459fc..7caa8dc88a58 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -127,13 +127,6 @@ static int __init do_wd_probe(struct net_device *dev)
127 return -ENODEV; 127 return -ENODEV;
128} 128}
129 129
130static void cleanup_card(struct net_device *dev)
131{
132 free_irq(dev->irq, dev);
133 release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
134 iounmap(ei_status.mem);
135}
136
137#ifndef MODULE 130#ifndef MODULE
138struct net_device * __init wd_probe(int unit) 131struct net_device * __init wd_probe(int unit)
139{ 132{
@@ -538,6 +531,13 @@ init_module(void)
538 return -ENXIO; 531 return -ENXIO;
539} 532}
540 533
534static void cleanup_card(struct net_device *dev)
535{
536 free_irq(dev->irq, dev);
537 release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
538 iounmap(ei_status.mem);
539}
540
541void 541void
542cleanup_module(void) 542cleanup_module(void)
543{ 543{
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 24f7967aab67..233a4f608084 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -24,10 +24,6 @@ config NET_RADIO
24 the tools from 24 the tools from
25 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 25 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
26 26
27 Some user-level drivers for scarab devices which don't require
28 special kernel support are available from
29 <ftp://shadow.cabi.net/pub/Linux/>.
30
31# Note : the cards are obsolete (can't buy them anymore), but the drivers 27# Note : the cards are obsolete (can't buy them anymore), but the drivers
32# are not, as people are still using them... 28# are not, as people are still using them...
33comment "Obsolete Wireless cards support (pre-802.11)" 29comment "Obsolete Wireless cards support (pre-802.11)"
@@ -160,7 +156,7 @@ config IPW2100
160 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 156 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
161 157
162 If you want to compile the driver as a module ( = code which can be 158 If you want to compile the driver as a module ( = code which can be
163 inserted in and remvoed from the running kernel whenever you want), 159 inserted in and removed from the running kernel whenever you want),
164 say M here and read <file:Documentation/modules.txt>. The module 160 say M here and read <file:Documentation/modules.txt>. The module
165 will be called ipw2100.ko. 161 will be called ipw2100.ko.
166 162
@@ -213,7 +209,7 @@ config IPW2200
213 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 209 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
214 210
215 If you want to compile the driver as a module ( = code which can be 211 If you want to compile the driver as a module ( = code which can be
216 inserted in and remvoed from the running kernel whenever you want), 212 inserted in and removed from the running kernel whenever you want),
217 say M here and read <file:Documentation/modules.txt>. The module 213 say M here and read <file:Documentation/modules.txt>. The module
218 will be called ipw2200.ko. 214 will be called ipw2200.ko.
219 215
@@ -243,7 +239,7 @@ config IPW2200_DEBUG
243 239
244config AIRO 240config AIRO
245 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 241 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
246 depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) 242 depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN)
247 ---help--- 243 ---help---
248 This is the standard Linux driver to support Cisco/Aironet ISA and 244 This is the standard Linux driver to support Cisco/Aironet ISA and
249 PCI 802.11 wireless cards. 245 PCI 802.11 wireless cards.
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ee866fd6957d..a4c7ae94614d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
5668 int channel = fwrq->m; 5668 int channel = fwrq->m;
5669 /* We should do a better check than that, 5669 /* We should do a better check than that,
5670 * based on the card capability !!! */ 5670 * based on the card capability !!! */
5671 if((channel < 1) || (channel > 16)) { 5671 if((channel < 1) || (channel > 14)) {
5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); 5672 printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
5673 rc = -EINVAL; 5673 rc = -EINVAL;
5674 } else { 5674 } else {
5675 readConfigRid(local, 1); 5675 readConfigRid(local, 1);
5676 /* Yes ! We can set it !!! */ 5676 /* Yes ! We can set it !!! */
5677 local->config.channelSet = (u16)(channel - 1); 5677 local->config.channelSet = (u16) channel;
5678 set_bit (FLAG_COMMIT, &local->flags); 5678 set_bit (FLAG_COMMIT, &local->flags);
5679 } 5679 }
5680 } 5680 }
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
5692{ 5692{
5693 struct airo_info *local = dev->priv; 5693 struct airo_info *local = dev->priv;
5694 StatusRid status_rid; /* Card status info */ 5694 StatusRid status_rid; /* Card status info */
5695 int ch;
5695 5696
5696 readConfigRid(local, 1); 5697 readConfigRid(local, 1);
5697 if ((local->config.opmode & 0xFF) == MODE_STA_ESS) 5698 if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
5699 else 5700 else
5700 readStatusRid(local, &status_rid, 1); 5701 readStatusRid(local, &status_rid, 1);
5701 5702
5702#ifdef WEXT_USECHANNELS 5703 ch = (int)status_rid.channel;
5703 fwrq->m = ((int)status_rid.channel) + 1; 5704 if((ch > 0) && (ch < 15)) {
5704 fwrq->e = 0; 5705 fwrq->m = frequency_list[ch - 1] * 100000;
5705#else
5706 {
5707 int f = (int)status_rid.channel;
5708 fwrq->m = frequency_list[f] * 100000;
5709 fwrq->e = 1; 5706 fwrq->e = 1;
5707 } else {
5708 fwrq->m = ch;
5709 fwrq->e = 0;
5710 } 5710 }
5711#endif
5712 5711
5713 return 0; 5712 return 0;
5714} 5713}
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
5783 /* If none, we may want to get the one that was set */ 5782 /* If none, we may want to get the one that was set */
5784 5783
5785 /* Push it out ! */ 5784 /* Push it out ! */
5786 dwrq->length = status_rid.SSIDlen + 1; 5785 dwrq->length = status_rid.SSIDlen;
5787 dwrq->flags = 1; /* active */ 5786 dwrq->flags = 1; /* active */
5788 5787
5789 return 0; 5788 return 0;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index e4729ddf29fd..98a76f10a0f7 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1407,6 +1407,17 @@ static int atmel_close(struct net_device *dev)
1407{ 1407{
1408 struct atmel_private *priv = netdev_priv(dev); 1408 struct atmel_private *priv = netdev_priv(dev);
1409 1409
1410 /* Send event to userspace that we are disassociating */
1411 if (priv->station_state == STATION_STATE_READY) {
1412 union iwreq_data wrqu;
1413
1414 wrqu.data.length = 0;
1415 wrqu.data.flags = 0;
1416 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1417 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1418 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1419 }
1420
1410 atmel_enter_state(priv, STATION_STATE_DOWN); 1421 atmel_enter_state(priv, STATION_STATE_DOWN);
1411 1422
1412 if (priv->bus_type == BUS_TYPE_PCCARD) 1423 if (priv->bus_type == BUS_TYPE_PCCARD)
@@ -1707,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
1707 if (priv->new_SSID_size != 0) { 1718 if (priv->new_SSID_size != 0) {
1708 memcpy(extra, priv->new_SSID, priv->new_SSID_size); 1719 memcpy(extra, priv->new_SSID, priv->new_SSID_size);
1709 extra[priv->new_SSID_size] = '\0'; 1720 extra[priv->new_SSID_size] = '\0';
1710 dwrq->length = priv->new_SSID_size + 1; 1721 dwrq->length = priv->new_SSID_size;
1711 } else { 1722 } else {
1712 memcpy(extra, priv->SSID, priv->SSID_size); 1723 memcpy(extra, priv->SSID, priv->SSID_size);
1713 extra[priv->SSID_size] = '\0'; 1724 extra[priv->SSID_size] = '\0';
1714 dwrq->length = priv->SSID_size + 1; 1725 dwrq->length = priv->SSID_size;
1715 } 1726 }
1716 1727
1717 dwrq->flags = !priv->connect_to_any_BSS; /* active */ 1728 dwrq->flags = !priv->connect_to_any_BSS; /* active */
@@ -1780,10 +1791,10 @@ static int atmel_set_encode(struct net_device *dev,
1780 priv->wep_is_on = 1; 1791 priv->wep_is_on = 1;
1781 priv->exclude_unencrypted = 1; 1792 priv->exclude_unencrypted = 1;
1782 if (priv->wep_key_len[index] > 5) { 1793 if (priv->wep_key_len[index] > 5) {
1783 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; 1794 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
1784 priv->encryption_level = 2; 1795 priv->encryption_level = 2;
1785 } else { 1796 } else {
1786 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; 1797 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
1787 priv->encryption_level = 1; 1798 priv->encryption_level = 1;
1788 } 1799 }
1789 } 1800 }
@@ -1853,6 +1864,181 @@ static int atmel_get_encode(struct net_device *dev,
1853 return 0; 1864 return 0;
1854} 1865}
1855 1866
1867static int atmel_set_encodeext(struct net_device *dev,
1868 struct iw_request_info *info,
1869 union iwreq_data *wrqu,
1870 char *extra)
1871{
1872 struct atmel_private *priv = netdev_priv(dev);
1873 struct iw_point *encoding = &wrqu->encoding;
1874 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1875 int idx, key_len;
1876
1877 /* Determine and validate the key index */
1878 idx = encoding->flags & IW_ENCODE_INDEX;
1879 if (idx) {
1880 if (idx < 1 || idx > WEP_KEYS)
1881 return -EINVAL;
1882 idx--;
1883 } else
1884 idx = priv->default_key;
1885
1886 if ((encoding->flags & IW_ENCODE_DISABLED) ||
1887 ext->alg == IW_ENCODE_ALG_NONE) {
1888 priv->wep_is_on = 0;
1889 priv->encryption_level = 0;
1890 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
1891 }
1892
1893 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
1894 priv->default_key = idx;
1895
1896 /* Set the requested key */
1897 switch (ext->alg) {
1898 case IW_ENCODE_ALG_NONE:
1899 break;
1900 case IW_ENCODE_ALG_WEP:
1901 if (ext->key_len > 5) {
1902 priv->wep_key_len[idx] = 13;
1903 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
1904 priv->encryption_level = 2;
1905 } else if (ext->key_len > 0) {
1906 priv->wep_key_len[idx] = 5;
1907 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
1908 priv->encryption_level = 1;
1909 } else {
1910 return -EINVAL;
1911 }
1912 priv->wep_is_on = 1;
1913 memset(priv->wep_keys[idx], 0, 13);
1914 key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
1915 memcpy(priv->wep_keys[idx], ext->key, key_len);
1916 break;
1917 default:
1918 return -EINVAL;
1919 }
1920
1921 return -EINPROGRESS;
1922}
1923
1924static int atmel_get_encodeext(struct net_device *dev,
1925 struct iw_request_info *info,
1926 union iwreq_data *wrqu,
1927 char *extra)
1928{
1929 struct atmel_private *priv = netdev_priv(dev);
1930 struct iw_point *encoding = &wrqu->encoding;
1931 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1932 int idx, max_key_len;
1933
1934 max_key_len = encoding->length - sizeof(*ext);
1935 if (max_key_len < 0)
1936 return -EINVAL;
1937
1938 idx = encoding->flags & IW_ENCODE_INDEX;
1939 if (idx) {
1940 if (idx < 1 || idx > WEP_KEYS)
1941 return -EINVAL;
1942 idx--;
1943 } else
1944 idx = priv->default_key;
1945
1946 encoding->flags = idx + 1;
1947 memset(ext, 0, sizeof(*ext));
1948
1949 if (!priv->wep_is_on) {
1950 ext->alg = IW_ENCODE_ALG_NONE;
1951 ext->key_len = 0;
1952 encoding->flags |= IW_ENCODE_DISABLED;
1953 } else {
1954 if (priv->encryption_level > 0)
1955 ext->alg = IW_ENCODE_ALG_WEP;
1956 else
1957 return -EINVAL;
1958
1959 ext->key_len = priv->wep_key_len[idx];
1960 memcpy(ext->key, priv->wep_keys[idx], ext->key_len);
1961 encoding->flags |= IW_ENCODE_ENABLED;
1962 }
1963
1964 return 0;
1965}
1966
1967static int atmel_set_auth(struct net_device *dev,
1968 struct iw_request_info *info,
1969 union iwreq_data *wrqu, char *extra)
1970{
1971 struct atmel_private *priv = netdev_priv(dev);
1972 struct iw_param *param = &wrqu->param;
1973
1974 switch (param->flags & IW_AUTH_INDEX) {
1975 case IW_AUTH_WPA_VERSION:
1976 case IW_AUTH_CIPHER_PAIRWISE:
1977 case IW_AUTH_CIPHER_GROUP:
1978 case IW_AUTH_KEY_MGMT:
1979 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1980 case IW_AUTH_PRIVACY_INVOKED:
1981 /*
1982 * atmel does not use these parameters
1983 */
1984 break;
1985
1986 case IW_AUTH_DROP_UNENCRYPTED:
1987 priv->exclude_unencrypted = param->value ? 1 : 0;
1988 break;
1989
1990 case IW_AUTH_80211_AUTH_ALG: {
1991 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
1992 priv->exclude_unencrypted = 1;
1993 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
1994 priv->exclude_unencrypted = 0;
1995 } else
1996 return -EINVAL;
1997 break;
1998 }
1999
2000 case IW_AUTH_WPA_ENABLED:
2001 /* Silently accept disable of WPA */
2002 if (param->value > 0)
2003 return -EOPNOTSUPP;
2004 break;
2005
2006 default:
2007 return -EOPNOTSUPP;
2008 }
2009 return -EINPROGRESS;
2010}
2011
2012static int atmel_get_auth(struct net_device *dev,
2013 struct iw_request_info *info,
2014 union iwreq_data *wrqu, char *extra)
2015{
2016 struct atmel_private *priv = netdev_priv(dev);
2017 struct iw_param *param = &wrqu->param;
2018
2019 switch (param->flags & IW_AUTH_INDEX) {
2020 case IW_AUTH_DROP_UNENCRYPTED:
2021 param->value = priv->exclude_unencrypted;
2022 break;
2023
2024 case IW_AUTH_80211_AUTH_ALG:
2025 if (priv->exclude_unencrypted == 1)
2026 param->value = IW_AUTH_ALG_SHARED_KEY;
2027 else
2028 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
2029 break;
2030
2031 case IW_AUTH_WPA_ENABLED:
2032 param->value = 0;
2033 break;
2034
2035 default:
2036 return -EOPNOTSUPP;
2037 }
2038 return 0;
2039}
2040
2041
1856static int atmel_get_name(struct net_device *dev, 2042static int atmel_get_name(struct net_device *dev,
1857 struct iw_request_info *info, 2043 struct iw_request_info *info,
1858 char *cwrq, 2044 char *cwrq,
@@ -2289,13 +2475,15 @@ static int atmel_set_wap(struct net_device *dev,
2289{ 2475{
2290 struct atmel_private *priv = netdev_priv(dev); 2476 struct atmel_private *priv = netdev_priv(dev);
2291 int i; 2477 int i;
2292 static const u8 bcast[] = { 255, 255, 255, 255, 255, 255 }; 2478 static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2479 static const u8 off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
2293 unsigned long flags; 2480 unsigned long flags;
2294 2481
2295 if (awrq->sa_family != ARPHRD_ETHER) 2482 if (awrq->sa_family != ARPHRD_ETHER)
2296 return -EINVAL; 2483 return -EINVAL;
2297 2484
2298 if (memcmp(bcast, awrq->sa_data, 6) == 0) { 2485 if (!memcmp(any, awrq->sa_data, 6) ||
2486 !memcmp(off, awrq->sa_data, 6)) {
2299 del_timer_sync(&priv->management_timer); 2487 del_timer_sync(&priv->management_timer);
2300 spin_lock_irqsave(&priv->irqlock, flags); 2488 spin_lock_irqsave(&priv->irqlock, flags);
2301 atmel_scan(priv, 1); 2489 atmel_scan(priv, 1);
@@ -2378,6 +2566,15 @@ static const iw_handler atmel_handler[] =
2378 (iw_handler) atmel_get_encode, /* SIOCGIWENCODE */ 2566 (iw_handler) atmel_get_encode, /* SIOCGIWENCODE */
2379 (iw_handler) atmel_set_power, /* SIOCSIWPOWER */ 2567 (iw_handler) atmel_set_power, /* SIOCSIWPOWER */
2380 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */ 2568 (iw_handler) atmel_get_power, /* SIOCGIWPOWER */
2569 (iw_handler) NULL, /* -- hole -- */
2570 (iw_handler) NULL, /* -- hole -- */
2571 (iw_handler) NULL, /* SIOCSIWGENIE */
2572 (iw_handler) NULL, /* SIOCGIWGENIE */
2573 (iw_handler) atmel_set_auth, /* SIOCSIWAUTH */
2574 (iw_handler) atmel_get_auth, /* SIOCGIWAUTH */
2575 (iw_handler) atmel_set_encodeext, /* SIOCSIWENCODEEXT */
2576 (iw_handler) atmel_get_encodeext, /* SIOCGIWENCODEEXT */
2577 (iw_handler) NULL, /* SIOCSIWPMKSA */
2381}; 2578};
2382 2579
2383static const iw_handler atmel_private_handler[] = 2580static const iw_handler atmel_private_handler[] =
@@ -2924,6 +3121,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2924 u16 ass_id = le16_to_cpu(ass_resp->ass_id); 3121 u16 ass_id = le16_to_cpu(ass_resp->ass_id);
2925 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length; 3122 u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
2926 3123
3124 union iwreq_data wrqu;
3125
2927 if (frame_len < 8 + rates_len) 3126 if (frame_len < 8 + rates_len)
2928 return; 3127 return;
2929 3128
@@ -2954,6 +3153,14 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
2954 priv->station_is_associated = 1; 3153 priv->station_is_associated = 1;
2955 priv->station_was_associated = 1; 3154 priv->station_was_associated = 1;
2956 atmel_enter_state(priv, STATION_STATE_READY); 3155 atmel_enter_state(priv, STATION_STATE_READY);
3156
3157 /* Send association event to userspace */
3158 wrqu.data.length = 0;
3159 wrqu.data.flags = 0;
3160 memcpy(wrqu.ap_addr.sa_data, priv->CurrentBSSID, ETH_ALEN);
3161 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
3162 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
3163
2957 return; 3164 return;
2958 } 3165 }
2959 3166
@@ -3632,6 +3839,7 @@ static int reset_atmel_card(struct net_device *dev)
3632 3839
3633 struct atmel_private *priv = netdev_priv(dev); 3840 struct atmel_private *priv = netdev_priv(dev);
3634 u8 configuration; 3841 u8 configuration;
3842 int old_state = priv->station_state;
3635 3843
3636 /* data to add to the firmware names, in priority order 3844 /* data to add to the firmware names, in priority order
3637 this implemenents firmware versioning */ 3845 this implemenents firmware versioning */
@@ -3792,6 +4000,17 @@ static int reset_atmel_card(struct net_device *dev)
3792 else 4000 else
3793 build_wep_mib(priv); 4001 build_wep_mib(priv);
3794 4002
4003 if (old_state == STATION_STATE_READY)
4004 {
4005 union iwreq_data wrqu;
4006
4007 wrqu.data.length = 0;
4008 wrqu.data.flags = 0;
4009 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
4010 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
4011 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
4012 }
4013
3795 return 1; 4014 return 1;
3796} 4015}
3797 4016
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 56f41c714d38..c8f6286dd35f 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
26 depends on HOSTAP 26 depends on HOSTAP
27 ---help--- 27 ---help---
28 Configure Host AP driver to include support for firmware image 28 Configure Host AP driver to include support for firmware image
29 download. Current version supports only downloading to volatile, i.e., 29 download. This option by itself only enables downloading to the
30 RAM memory. Flash upgrade is not yet supported. 30 volatile memory, i.e. the card RAM. This option is required to
31 support cards that don't have firmware in flash, such as D-Link
32 DWL-520 rev E and D-Link DWL-650 rev P.
31 33
32 Firmware image downloading needs user space tool, prism2_srec. It is 34 Firmware image downloading needs a user space tool, prism2_srec.
33 available from http://hostap.epitest.fi/. 35 It is available from http://hostap.epitest.fi/.
36
37config HOSTAP_FIRMWARE_NVRAM
38 bool "Support for non-volatile firmware download"
39 depends on HOSTAP_FIRMWARE
40 ---help---
41 Allow Host AP driver to write firmware images to the non-volatile
42 card memory, i.e. flash memory that survives power cycling.
43 Enable this option if you want to be able to change card firmware
44 permanently.
45
46 Firmware image downloading needs a user space tool, prism2_srec.
47 It is available from http://hostap.epitest.fi/.
34 48
35config HOSTAP_PLX 49config HOSTAP_PLX
36 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors" 50 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index 353ccb93134b..b8e41a702c00 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,4 +1,5 @@
1hostap-y := hostap_main.o 1hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
2 hostap_ioctl.o hostap_main.o hostap_proc.o
2obj-$(CONFIG_HOSTAP) += hostap.o 3obj-$(CONFIG_HOSTAP) += hostap.o
3 4
4obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o 5obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index 5fac89b8ce3a..5e63765219fe 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -1,6 +1,15 @@
1#ifndef HOSTAP_H 1#ifndef HOSTAP_H
2#define HOSTAP_H 2#define HOSTAP_H
3 3
4#include <linux/ethtool.h>
5
6#include "hostap_wlan.h"
7#include "hostap_ap.h"
8
9static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
10 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
11#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
12
4/* hostap.c */ 13/* hostap.c */
5 14
6extern struct proc_dir_entry *hostap_proc; 15extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
40int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, 49int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
41 u8 *body, size_t bodylen); 50 u8 *body, size_t bodylen);
42int prism2_sta_deauth(local_info_t *local, u16 reason); 51int prism2_sta_deauth(local_info_t *local, u16 reason);
52int prism2_wds_add(local_info_t *local, u8 *remote_addr,
53 int rtnl_locked);
54int prism2_wds_del(local_info_t *local, u8 *remote_addr,
55 int rtnl_locked, int do_not_remove);
56
57
58/* hostap_ap.c */
59
60int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
61int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
62void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
63int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
64void ap_control_kickall(struct ap_data *ap);
65void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
66 struct ieee80211_crypt_data ***crypt);
67int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
68 struct iw_quality qual[], int buf_size,
69 int aplist);
70int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
71int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
43 72
44 73
45/* hostap_proc.c */ 74/* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
54void hostap_info_process(local_info_t *local, struct sk_buff *skb); 83void hostap_info_process(local_info_t *local, struct sk_buff *skb);
55 84
56 85
86/* hostap_ioctl.c */
87
88extern const struct iw_handler_def hostap_iw_handler_def;
89extern struct ethtool_ops prism2_ethtool_ops;
90
91int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
92
93
57#endif /* HOSTAP_H */ 94#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index bf506f50d722..1fc72fe511e9 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -1,6 +1,9 @@
1#ifndef HOSTAP_80211_H 1#ifndef HOSTAP_80211_H
2#define HOSTAP_80211_H 2#define HOSTAP_80211_H
3 3
4#include <linux/types.h>
5#include <net/ieee80211_crypt.h>
6
4struct hostap_ieee80211_mgmt { 7struct hostap_ieee80211_mgmt {
5 u16 frame_control; 8 u16 frame_control;
6 u16 duration; 9 u16 duration;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index ffac50899454..7e04dc94b3bc 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,7 +1,18 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <net/ieee80211_crypt.h>
2 3
3#include "hostap_80211.h" 4#include "hostap_80211.h"
4#include "hostap.h" 5#include "hostap.h"
6#include "hostap_ap.h"
7
8/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
9/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
10static unsigned char rfc1042_header[] =
11{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
12/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
13static unsigned char bridge_tunnel_header[] =
14{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
15/* No encapsulation header if EtherType < 0x600 (=length) */
5 16
6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, 17void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
7 struct hostap_80211_rx_status *rx_stats) 18 struct hostap_80211_rx_status *rx_stats)
@@ -435,7 +446,7 @@ static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
435} 446}
436 447
437 448
438static inline int 449static int
439hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb, 450hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
440 struct hostap_80211_rx_status *rx_stats, u16 type, 451 struct hostap_80211_rx_status *rx_stats, u16 type,
441 u16 stype) 452 u16 stype)
@@ -499,7 +510,7 @@ hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
499 510
500 511
501/* Called only as a tasklet (software IRQ) */ 512/* Called only as a tasklet (software IRQ) */
502static inline struct net_device *prism2_rx_get_wds(local_info_t *local, 513static struct net_device *prism2_rx_get_wds(local_info_t *local,
503 u8 *addr) 514 u8 *addr)
504{ 515{
505 struct hostap_interface *iface = NULL; 516 struct hostap_interface *iface = NULL;
@@ -519,7 +530,7 @@ static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
519} 530}
520 531
521 532
522static inline int 533static int
523hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, 534hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
524 u16 fc, struct net_device **wds) 535 u16 fc, struct net_device **wds)
525{ 536{
@@ -615,7 +626,7 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
615 626
616 627
617/* Called only as a tasklet (software IRQ) */ 628/* Called only as a tasklet (software IRQ) */
618static inline int 629static int
619hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, 630hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
620 struct ieee80211_crypt_data *crypt) 631 struct ieee80211_crypt_data *crypt)
621{ 632{
@@ -654,7 +665,7 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
654 665
655 666
656/* Called only as a tasklet (software IRQ) */ 667/* Called only as a tasklet (software IRQ) */
657static inline int 668static int
658hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, 669hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
659 int keyidx, struct ieee80211_crypt_data *crypt) 670 int keyidx, struct ieee80211_crypt_data *crypt)
660{ 671{
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 9d24f8a38ac5..4a85e63906f1 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,3 +1,18 @@
1#include "hostap_80211.h"
2#include "hostap_common.h"
3#include "hostap_wlan.h"
4#include "hostap.h"
5#include "hostap_ap.h"
6
7/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
8/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
9static unsigned char rfc1042_header[] =
10{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
11/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
12static unsigned char bridge_tunnel_header[] =
13{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
14/* No encapsulation header if EtherType < 0x600 (=length) */
15
1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) 16void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
2{ 17{
3 struct ieee80211_hdr_4addr *hdr; 18 struct ieee80211_hdr_4addr *hdr;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 9da94ab7f05f..753a1de6664b 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -16,6 +16,14 @@
16 * (8802.11: 5.5) 16 * (8802.11: 5.5)
17 */ 17 */
18 18
19#include <linux/proc_fs.h>
20#include <linux/delay.h>
21#include <linux/random.h>
22
23#include "hostap_wlan.h"
24#include "hostap.h"
25#include "hostap_ap.h"
26
19static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL, 27static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
20 DEF_INTS }; 28 DEF_INTS };
21module_param_array(other_ap_policy, int, NULL, 0444); 29module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
360} 368}
361 369
362 370
363static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, 371int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
364 u8 *mac)
365{ 372{
366 struct mac_entry *entry; 373 struct mac_entry *entry;
367 374
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
380} 387}
381 388
382 389
383static int ap_control_del_mac(struct mac_restrictions *mac_restrictions, 390int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
384 u8 *mac)
385{ 391{
386 struct list_head *ptr; 392 struct list_head *ptr;
387 struct mac_entry *entry; 393 struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
433} 439}
434 440
435 441
436static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) 442void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
437{ 443{
438 struct list_head *ptr, *n; 444 struct list_head *ptr, *n;
439 struct mac_entry *entry; 445 struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
454} 460}
455 461
456 462
457static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, 463int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
458 u8 *mac)
459{ 464{
460 struct sta_info *sta; 465 struct sta_info *sta;
461 u16 resp; 466 u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
486#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 491#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
487 492
488 493
489static void ap_control_kickall(struct ap_data *ap) 494void ap_control_kickall(struct ap_data *ap)
490{ 495{
491 struct list_head *ptr, *n; 496 struct list_head *ptr, *n;
492 struct sta_info *sta; 497 struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2321} 2326}
2322 2327
2323 2328
2324static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], 2329int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2325 struct iw_quality qual[], int buf_size, 2330 struct iw_quality qual[], int buf_size,
2326 int aplist) 2331 int aplist)
2327{ 2332{
2328 struct ap_data *ap = local->ap; 2333 struct ap_data *ap = local->ap;
2329 struct list_head *ptr; 2334 struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2363 2368
2364/* Translate our list of Access Points & Stations to a card independant 2369/* Translate our list of Access Points & Stations to a card independant
2365 * format that the Wireless Tools will understand - Jean II */ 2370 * format that the Wireless Tools will understand - Jean II */
2366static int prism2_ap_translate_scan(struct net_device *dev, char *buffer) 2371int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
2367{ 2372{
2368 struct hostap_interface *iface; 2373 struct hostap_interface *iface;
2369 local_info_t *local; 2374 local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
2608} 2613}
2609 2614
2610 2615
2611static int prism2_hostapd(struct ap_data *ap, 2616int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
2612 struct prism2_hostapd_param *param)
2613{ 2617{
2614 switch (param->cmd) { 2618 switch (param->cmd) {
2615 case PRISM2_HOSTAPD_FLUSH: 2619 case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
3207} 3211}
3208 3212
3209 3213
3210static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, 3214void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
3211 struct ieee80211_crypt_data ***crypt) 3215 struct ieee80211_crypt_data ***crypt)
3212{ 3216{
3213 struct sta_info *sta; 3217 struct sta_info *sta;
3214 3218
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 6d00df69c2e3..2fa2452b6b07 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -1,6 +1,8 @@
1#ifndef HOSTAP_AP_H 1#ifndef HOSTAP_AP_H
2#define HOSTAP_AP_H 2#define HOSTAP_AP_H
3 3
4#include "hostap_80211.h"
5
4/* AP data structures for STAs */ 6/* AP data structures for STAs */
5 7
6/* maximum number of frames to buffer per STA */ 8/* maximum number of frames to buffer per STA */
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 6f4fa9dc308f..01624005d808 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -1,6 +1,9 @@
1#ifndef HOSTAP_COMMON_H 1#ifndef HOSTAP_COMMON_H
2#define HOSTAP_COMMON_H 2#define HOSTAP_COMMON_H
3 3
4#include <linux/types.h>
5#include <linux/if_ether.h>
6
4#define BIT(x) (1 << (x)) 7#define BIT(x) (1 << (x))
5 8
6#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] 9#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 7ed3425d08c1..c090a5aebb58 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -21,15 +21,10 @@
21#define PRISM2_DOWNLOAD_SUPPORT 21#define PRISM2_DOWNLOAD_SUPPORT
22#endif 22#endif
23 23
24#ifdef PRISM2_DOWNLOAD_SUPPORT 24/* Allow kernel configuration to enable non-volatile download support. */
25/* Allow writing firmware images into flash, i.e., to non-volatile storage. 25#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
26 * Before you enable this option, you should make absolutely sure that you are 26#define PRISM2_NON_VOLATILE_DOWNLOAD
27 * using prism2_srec utility that comes with THIS version of the driver! 27#endif
28 * In addition, please note that it is possible to kill your card with
29 * non-volatile download if you are using incorrect image. This feature has not
30 * been fully tested, so please be careful with it. */
31/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
32#endif /* PRISM2_DOWNLOAD_SUPPORT */
33 28
34/* Save low-level I/O for debugging. This should not be enabled in normal use. 29/* Save low-level I/O for debugging. This should not be enabled in normal use.
35 */ 30 */
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index abfae7fedebc..b1f142d9e232 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -253,7 +253,7 @@ static void prism2_clear_cmd_queue(local_info_t *local)
253 * @dev: pointer to net_device 253 * @dev: pointer to net_device
254 * @entry: Prism2 command queue entry to be issued 254 * @entry: Prism2 command queue entry to be issued
255 */ 255 */
256static inline int hfa384x_cmd_issue(struct net_device *dev, 256static int hfa384x_cmd_issue(struct net_device *dev,
257 struct hostap_cmd_queue *entry) 257 struct hostap_cmd_queue *entry)
258{ 258{
259 struct hostap_interface *iface; 259 struct hostap_interface *iface;
@@ -743,7 +743,7 @@ static void prism2_cmd_ev(struct net_device *dev)
743} 743}
744 744
745 745
746static inline int hfa384x_wait_offset(struct net_device *dev, u16 o_off) 746static int hfa384x_wait_offset(struct net_device *dev, u16 o_off)
747{ 747{
748 int tries = HFA384X_BAP_BUSY_TIMEOUT; 748 int tries = HFA384X_BAP_BUSY_TIMEOUT;
749 int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY; 749 int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
@@ -1904,7 +1904,7 @@ fail:
1904 * and will try to get the correct fid eventually. */ 1904 * and will try to get the correct fid eventually. */
1905#define EXTRA_FID_READ_TESTS 1905#define EXTRA_FID_READ_TESTS
1906 1906
1907static inline u16 prism2_read_fid_reg(struct net_device *dev, u16 reg) 1907static u16 prism2_read_fid_reg(struct net_device *dev, u16 reg)
1908{ 1908{
1909#ifdef EXTRA_FID_READ_TESTS 1909#ifdef EXTRA_FID_READ_TESTS
1910 u16 val, val2, val3; 1910 u16 val, val2, val3;
@@ -2581,7 +2581,7 @@ static void prism2_ev_tick(struct net_device *dev)
2581 2581
2582 2582
2583/* Called only from hardware IRQ */ 2583/* Called only from hardware IRQ */
2584static inline void prism2_check_magic(local_info_t *local) 2584static void prism2_check_magic(local_info_t *local)
2585{ 2585{
2586 /* at least PCI Prism2.5 with bus mastering seems to sometimes 2586 /* at least PCI Prism2.5 with bus mastering seems to sometimes
2587 * return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the 2587 * return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 5aa998fdf1c4..50f72d831cf4 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,5 +1,8 @@
1/* Host AP driver Info Frame processing (part of hostap.o module) */ 1/* Host AP driver Info Frame processing (part of hostap.o module) */
2 2
3#include "hostap_wlan.h"
4#include "hostap.h"
5#include "hostap_ap.h"
3 6
4/* Called only as a tasklet (software IRQ) */ 7/* Called only as a tasklet (software IRQ) */
5static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf, 8static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 2617d70bcda9..f3e0ce1ee037 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,11 +1,13 @@
1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ 1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
2 2
3#ifdef in_atomic 3#include <linux/types.h>
4/* Get kernel_locked() for in_atomic() */
5#include <linux/smp_lock.h> 4#include <linux/smp_lock.h>
6#endif
7#include <linux/ethtool.h> 5#include <linux/ethtool.h>
6#include <net/ieee80211_crypt.h>
8 7
8#include "hostap_wlan.h"
9#include "hostap.h"
10#include "hostap_ap.h"
9 11
10static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) 12static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
11{ 13{
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
3910 local->sta_fw_ver & 0xff); 3912 local->sta_fw_ver & 0xff);
3911} 3913}
3912 3914
3913static struct ethtool_ops prism2_ethtool_ops = { 3915struct ethtool_ops prism2_ethtool_ops = {
3914 .get_drvinfo = prism2_get_drvinfo 3916 .get_drvinfo = prism2_get_drvinfo
3915}; 3917};
3916 3918
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
3985 (iw_handler) prism2_ioctl_priv_readmif, /* 3 */ 3987 (iw_handler) prism2_ioctl_priv_readmif, /* 3 */
3986}; 3988};
3987 3989
3988static const struct iw_handler_def hostap_iw_handler_def = 3990const struct iw_handler_def hostap_iw_handler_def =
3989{ 3991{
3990 .num_standard = sizeof(prism2_handler) / sizeof(iw_handler), 3992 .num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
3991 .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), 3993 .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..8dd4c4446a64 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -24,6 +24,7 @@
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/rtnetlink.h> 25#include <linux/rtnetlink.h>
26#include <linux/wireless.h> 26#include <linux/wireless.h>
27#include <linux/etherdevice.h>
27#include <net/iw_handler.h> 28#include <net/iw_handler.h>
28#include <net/ieee80211.h> 29#include <net/ieee80211.h>
29#include <net/ieee80211_crypt.h> 30#include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
47#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */)) 48#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
48 49
49 50
50/* hostap.c */
51static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
52 int rtnl_locked);
53static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
54 int rtnl_locked, int do_not_remove);
55
56/* hostap_ap.c */
57static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
58 struct iw_quality qual[], int buf_size,
59 int aplist);
60static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
61static int prism2_hostapd(struct ap_data *ap,
62 struct prism2_hostapd_param *param);
63static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
64 struct ieee80211_crypt_data ***crypt);
65static void ap_control_kickall(struct ap_data *ap);
66#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
67static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
68 u8 *mac);
69static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
70 u8 *mac);
71static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
72static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
73 u8 *mac);
74#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
75
76
77static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
78 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
79#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
80
81
82/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
83/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
84static unsigned char rfc1042_header[] =
85{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
86/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
87static unsigned char bridge_tunnel_header[] =
88{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
89/* No encapsulation header if EtherType < 0x600 (=length) */
90
91
92/* FIX: these could be compiled separately and linked together to hostap.o */
93#include "hostap_ap.c"
94#include "hostap_info.c"
95#include "hostap_ioctl.c"
96#include "hostap_proc.c"
97#include "hostap_80211_rx.c"
98#include "hostap_80211_tx.c"
99
100
101struct net_device * hostap_add_interface(struct local_info *local, 51struct net_device * hostap_add_interface(struct local_info *local,
102 int type, int rtnl_locked, 52 int type, int rtnl_locked,
103 const char *prefix, 53 const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
196} 146}
197 147
198 148
199static int prism2_wds_add(local_info_t *local, u8 *remote_addr, 149int prism2_wds_add(local_info_t *local, u8 *remote_addr,
200 int rtnl_locked) 150 int rtnl_locked)
201{ 151{
202 struct net_device *dev; 152 struct net_device *dev;
203 struct list_head *ptr; 153 struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
258} 208}
259 209
260 210
261static int prism2_wds_del(local_info_t *local, u8 *remote_addr, 211int prism2_wds_del(local_info_t *local, u8 *remote_addr,
262 int rtnl_locked, int do_not_remove) 212 int rtnl_locked, int do_not_remove)
263{ 213{
264 unsigned long flags; 214 unsigned long flags;
265 struct list_head *ptr; 215 struct list_head *ptr;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index a0a4cbd4937a..d1d8ce022e63 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -1,5 +1,12 @@
1/* /proc routines for Host AP driver */ 1/* /proc routines for Host AP driver */
2 2
3#include <linux/types.h>
4#include <linux/proc_fs.h>
5#include <net/ieee80211_crypt.h>
6
7#include "hostap_wlan.h"
8#include "hostap.h"
9
3#define PROC_LIMIT (PAGE_SIZE - 80) 10#define PROC_LIMIT (PAGE_SIZE - 80)
4 11
5 12
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cfd801559492..87a54aa6f4dd 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,10 @@
1#ifndef HOSTAP_WLAN_H 1#ifndef HOSTAP_WLAN_H
2#define HOSTAP_WLAN_H 2#define HOSTAP_WLAN_H
3 3
4#include <linux/wireless.h>
5#include <linux/netdevice.h>
6#include <net/iw_handler.h>
7
4#include "hostap_config.h" 8#include "hostap_config.h"
5#include "hostap_common.h" 9#include "hostap_common.h"
6 10
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 44cd3fcd1572..8bf02763b5c7 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -411,7 +411,7 @@ static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val)
411 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val); 411 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val);
412} 412}
413 413
414static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len, 414static void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
415 const u8 * buf) 415 const u8 * buf)
416{ 416{
417 u32 aligned_addr; 417 u32 aligned_addr;
@@ -449,7 +449,7 @@ static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
449 *buf); 449 *buf);
450} 450}
451 451
452static inline void read_nic_memory(struct net_device *dev, u32 addr, u32 len, 452static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
453 u8 * buf) 453 u8 * buf)
454{ 454{
455 u32 aligned_addr; 455 u32 aligned_addr;
@@ -657,7 +657,7 @@ static void printk_buf(int level, const u8 * data, u32 len)
657 657
658#define MAX_RESET_BACKOFF 10 658#define MAX_RESET_BACKOFF 10
659 659
660static inline void schedule_reset(struct ipw2100_priv *priv) 660static void schedule_reset(struct ipw2100_priv *priv)
661{ 661{
662 unsigned long now = get_seconds(); 662 unsigned long now = get_seconds();
663 663
@@ -1130,7 +1130,7 @@ static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
1130 write_register(priv->net_dev, IPW_REG_GPIO, reg); 1130 write_register(priv->net_dev, IPW_REG_GPIO, reg);
1131} 1131}
1132 1132
1133static inline int rf_kill_active(struct ipw2100_priv *priv) 1133static int rf_kill_active(struct ipw2100_priv *priv)
1134{ 1134{
1135#define MAX_RF_KILL_CHECKS 5 1135#define MAX_RF_KILL_CHECKS 5
1136#define RF_KILL_CHECK_DELAY 40 1136#define RF_KILL_CHECK_DELAY 40
@@ -2177,7 +2177,7 @@ static const char *frame_types[] = {
2177}; 2177};
2178#endif 2178#endif
2179 2179
2180static inline int ipw2100_alloc_skb(struct ipw2100_priv *priv, 2180static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
2181 struct ipw2100_rx_packet *packet) 2181 struct ipw2100_rx_packet *packet)
2182{ 2182{
2183 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx)); 2183 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
@@ -2201,7 +2201,7 @@ static inline int ipw2100_alloc_skb(struct ipw2100_priv *priv,
2201#define SEARCH_SNAPSHOT 1 2201#define SEARCH_SNAPSHOT 1
2202 2202
2203#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff)) 2203#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
2204static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv) 2204static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2205{ 2205{
2206 int i; 2206 int i;
2207 if (priv->snapshot[0]) 2207 if (priv->snapshot[0])
@@ -2221,7 +2221,7 @@ static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2221 return 1; 2221 return 1;
2222} 2222}
2223 2223
2224static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv) 2224static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
2225{ 2225{
2226 int i; 2226 int i;
2227 if (!priv->snapshot[0]) 2227 if (!priv->snapshot[0])
@@ -2231,7 +2231,7 @@ static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv)
2231 priv->snapshot[0] = NULL; 2231 priv->snapshot[0] = NULL;
2232} 2232}
2233 2233
2234static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf, 2234static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
2235 size_t len, int mode) 2235 size_t len, int mode)
2236{ 2236{
2237 u32 i, j; 2237 u32 i, j;
@@ -2288,7 +2288,7 @@ static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
2288static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH]; 2288static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
2289#endif 2289#endif
2290 2290
2291static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) 2291static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
2292{ 2292{
2293#ifdef CONFIG_IPW2100_DEBUG_C3 2293#ifdef CONFIG_IPW2100_DEBUG_C3
2294 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2294 struct ipw2100_status *status = &priv->status_queue.drv[i];
@@ -2346,7 +2346,7 @@ static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
2346 schedule_reset(priv); 2346 schedule_reset(priv);
2347} 2347}
2348 2348
2349static inline void isr_rx(struct ipw2100_priv *priv, int i, 2349static void isr_rx(struct ipw2100_priv *priv, int i,
2350 struct ieee80211_rx_stats *stats) 2350 struct ieee80211_rx_stats *stats)
2351{ 2351{
2352 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2352 struct ipw2100_status *status = &priv->status_queue.drv[i];
@@ -2425,7 +2425,7 @@ static inline void isr_rx(struct ipw2100_priv *priv, int i,
2425 priv->rx_queue.drv[i].host_addr = packet->dma_addr; 2425 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
2426} 2426}
2427 2427
2428static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) 2428static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
2429{ 2429{
2430 struct ipw2100_status *status = &priv->status_queue.drv[i]; 2430 struct ipw2100_status *status = &priv->status_queue.drv[i];
2431 struct ipw2100_rx *u = priv->rx_buffers[i].rxp; 2431 struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
@@ -2481,7 +2481,7 @@ static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
2481 * The WRITE index is cached in the variable 'priv->rx_queue.next'. 2481 * The WRITE index is cached in the variable 'priv->rx_queue.next'.
2482 * 2482 *
2483 */ 2483 */
2484static inline void __ipw2100_rx_process(struct ipw2100_priv *priv) 2484static void __ipw2100_rx_process(struct ipw2100_priv *priv)
2485{ 2485{
2486 struct ipw2100_bd_queue *rxq = &priv->rx_queue; 2486 struct ipw2100_bd_queue *rxq = &priv->rx_queue;
2487 struct ipw2100_status_queue *sq = &priv->status_queue; 2487 struct ipw2100_status_queue *sq = &priv->status_queue;
@@ -2634,7 +2634,7 @@ static inline void __ipw2100_rx_process(struct ipw2100_priv *priv)
2634 * for use by future command and data packets. 2634 * for use by future command and data packets.
2635 * 2635 *
2636 */ 2636 */
2637static inline int __ipw2100_tx_process(struct ipw2100_priv *priv) 2637static int __ipw2100_tx_process(struct ipw2100_priv *priv)
2638{ 2638{
2639 struct ipw2100_bd_queue *txq = &priv->tx_queue; 2639 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2640 struct ipw2100_bd *tbd; 2640 struct ipw2100_bd *tbd;
@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
5735 return &priv->ieee->stats; 5735 return &priv->ieee->stats;
5736} 5736}
5737 5737
5738#if WIRELESS_EXT < 18
5739/* Support for wpa_supplicant before WE-18, deprecated. */
5740
5741/* following definitions must match definitions in driver_ipw.c */
5742
5743#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
5744
5745#define IPW2100_CMD_SET_WPA_PARAM 1
5746#define IPW2100_CMD_SET_WPA_IE 2
5747#define IPW2100_CMD_SET_ENCRYPTION 3
5748#define IPW2100_CMD_MLME 4
5749
5750#define IPW2100_PARAM_WPA_ENABLED 1
5751#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
5752#define IPW2100_PARAM_DROP_UNENCRYPTED 3
5753#define IPW2100_PARAM_PRIVACY_INVOKED 4
5754#define IPW2100_PARAM_AUTH_ALGS 5
5755#define IPW2100_PARAM_IEEE_802_1X 6
5756
5757#define IPW2100_MLME_STA_DEAUTH 1
5758#define IPW2100_MLME_STA_DISASSOC 2
5759
5760#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
5761#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
5762#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
5763#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
5764#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
5765#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
5766
5767#define IPW2100_CRYPT_ALG_NAME_LEN 16
5768
5769struct ipw2100_param {
5770 u32 cmd;
5771 u8 sta_addr[ETH_ALEN];
5772 union {
5773 struct {
5774 u8 name;
5775 u32 value;
5776 } wpa_param;
5777 struct {
5778 u32 len;
5779 u8 reserved[32];
5780 u8 data[0];
5781 } wpa_ie;
5782 struct {
5783 u32 command;
5784 u32 reason_code;
5785 } mlme;
5786 struct {
5787 u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
5788 u8 set_tx;
5789 u32 err;
5790 u8 idx;
5791 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
5792 u16 key_len;
5793 u8 key[0];
5794 } crypt;
5795
5796 } u;
5797};
5798
5799/* end of driver_ipw.c code */
5800#endif /* WIRELESS_EXT < 18 */
5801
5802static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) 5738static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
5803{ 5739{
5804 /* This is called when wpa_supplicant loads and closes the driver 5740 /* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
5807 return 0; 5743 return 0;
5808} 5744}
5809 5745
5810#if WIRELESS_EXT < 18
5811#define IW_AUTH_ALG_OPEN_SYSTEM 0x1
5812#define IW_AUTH_ALG_SHARED_KEY 0x2
5813#endif
5814
5815static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) 5746static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
5816{ 5747{
5817 5748
@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
5855 ipw2100_set_wpa_ie(priv, &frame, 0); 5786 ipw2100_set_wpa_ie(priv, &frame, 0);
5856} 5787}
5857 5788
5858#if WIRELESS_EXT < 18
5859static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
5860{
5861 struct ipw2100_priv *priv = ieee80211_priv(dev);
5862 struct ieee80211_crypt_data *crypt;
5863 unsigned long flags;
5864 int ret = 0;
5865
5866 switch (name) {
5867 case IPW2100_PARAM_WPA_ENABLED:
5868 ret = ipw2100_wpa_enable(priv, value);
5869 break;
5870
5871 case IPW2100_PARAM_TKIP_COUNTERMEASURES:
5872 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
5873 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
5874 break;
5875
5876 flags = crypt->ops->get_flags(crypt->priv);
5877
5878 if (value)
5879 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5880 else
5881 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5882
5883 crypt->ops->set_flags(flags, crypt->priv);
5884
5885 break;
5886
5887 case IPW2100_PARAM_DROP_UNENCRYPTED:{
5888 /* See IW_AUTH_DROP_UNENCRYPTED handling for details */
5889 struct ieee80211_security sec = {
5890 .flags = SEC_ENABLED,
5891 .enabled = value,
5892 };
5893 priv->ieee->drop_unencrypted = value;
5894 /* We only change SEC_LEVEL for open mode. Others
5895 * are set by ipw_wpa_set_encryption.
5896 */
5897 if (!value) {
5898 sec.flags |= SEC_LEVEL;
5899 sec.level = SEC_LEVEL_0;
5900 } else {
5901 sec.flags |= SEC_LEVEL;
5902 sec.level = SEC_LEVEL_1;
5903 }
5904 if (priv->ieee->set_security)
5905 priv->ieee->set_security(priv->ieee->dev, &sec);
5906 break;
5907 }
5908
5909 case IPW2100_PARAM_PRIVACY_INVOKED:
5910 priv->ieee->privacy_invoked = value;
5911 break;
5912
5913 case IPW2100_PARAM_AUTH_ALGS:
5914 ret = ipw2100_wpa_set_auth_algs(priv, value);
5915 break;
5916
5917 case IPW2100_PARAM_IEEE_802_1X:
5918 priv->ieee->ieee802_1x = value;
5919 break;
5920
5921 default:
5922 printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
5923 dev->name, name);
5924 ret = -EOPNOTSUPP;
5925 }
5926
5927 return ret;
5928}
5929
5930static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
5931{
5932
5933 struct ipw2100_priv *priv = ieee80211_priv(dev);
5934 int ret = 0;
5935
5936 switch (command) {
5937 case IPW2100_MLME_STA_DEAUTH:
5938 // silently ignore
5939 break;
5940
5941 case IPW2100_MLME_STA_DISASSOC:
5942 ipw2100_disassociate_bssid(priv);
5943 break;
5944
5945 default:
5946 printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
5947 dev->name, command);
5948 ret = -EOPNOTSUPP;
5949 }
5950
5951 return ret;
5952}
5953
5954static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
5955 struct ipw2100_param *param, int plen)
5956{
5957
5958 struct ipw2100_priv *priv = ieee80211_priv(dev);
5959 struct ieee80211_device *ieee = priv->ieee;
5960 u8 *buf;
5961
5962 if (!ieee->wpa_enabled)
5963 return -EOPNOTSUPP;
5964
5965 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5966 (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
5967 return -EINVAL;
5968
5969 if (param->u.wpa_ie.len) {
5970 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5971 if (buf == NULL)
5972 return -ENOMEM;
5973
5974 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5975
5976 kfree(ieee->wpa_ie);
5977 ieee->wpa_ie = buf;
5978 ieee->wpa_ie_len = param->u.wpa_ie.len;
5979
5980 } else {
5981 kfree(ieee->wpa_ie);
5982 ieee->wpa_ie = NULL;
5983 ieee->wpa_ie_len = 0;
5984 }
5985
5986 ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5987
5988 return 0;
5989}
5990
5991/* implementation borrowed from hostap driver */
5992
5993static int ipw2100_wpa_set_encryption(struct net_device *dev,
5994 struct ipw2100_param *param,
5995 int param_len)
5996{
5997 int ret = 0;
5998 struct ipw2100_priv *priv = ieee80211_priv(dev);
5999 struct ieee80211_device *ieee = priv->ieee;
6000 struct ieee80211_crypto_ops *ops;
6001 struct ieee80211_crypt_data **crypt;
6002
6003 struct ieee80211_security sec = {
6004 .flags = 0,
6005 };
6006
6007 param->u.crypt.err = 0;
6008 param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
6009
6010 if (param_len !=
6011 (int)((char *)param->u.crypt.key - (char *)param) +
6012 param->u.crypt.key_len) {
6013 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
6014 param->u.crypt.key_len);
6015 return -EINVAL;
6016 }
6017 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
6018 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
6019 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
6020 if (param->u.crypt.idx >= WEP_KEYS)
6021 return -EINVAL;
6022 crypt = &ieee->crypt[param->u.crypt.idx];
6023 } else {
6024 return -EINVAL;
6025 }
6026
6027 sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
6028 if (strcmp(param->u.crypt.alg, "none") == 0) {
6029 if (crypt) {
6030 sec.enabled = 0;
6031 sec.encrypt = 0;
6032 sec.level = SEC_LEVEL_0;
6033 sec.flags |= SEC_LEVEL;
6034 ieee80211_crypt_delayed_deinit(ieee, crypt);
6035 }
6036 goto done;
6037 }
6038 sec.enabled = 1;
6039 sec.encrypt = 1;
6040
6041 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6042 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
6043 request_module("ieee80211_crypt_wep");
6044 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6045 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
6046 request_module("ieee80211_crypt_tkip");
6047 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6048 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
6049 request_module("ieee80211_crypt_ccmp");
6050 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6051 }
6052 if (ops == NULL) {
6053 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
6054 dev->name, param->u.crypt.alg);
6055 param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
6056 ret = -EINVAL;
6057 goto done;
6058 }
6059
6060 if (*crypt == NULL || (*crypt)->ops != ops) {
6061 struct ieee80211_crypt_data *new_crypt;
6062
6063 ieee80211_crypt_delayed_deinit(ieee, crypt);
6064
6065 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
6066 if (new_crypt == NULL) {
6067 ret = -ENOMEM;
6068 goto done;
6069 }
6070 new_crypt->ops = ops;
6071 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
6072 new_crypt->priv =
6073 new_crypt->ops->init(param->u.crypt.idx);
6074
6075 if (new_crypt->priv == NULL) {
6076 kfree(new_crypt);
6077 param->u.crypt.err =
6078 IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
6079 ret = -EINVAL;
6080 goto done;
6081 }
6082
6083 *crypt = new_crypt;
6084 }
6085
6086 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
6087 (*crypt)->ops->set_key(param->u.crypt.key,
6088 param->u.crypt.key_len, param->u.crypt.seq,
6089 (*crypt)->priv) < 0) {
6090 IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
6091 param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
6092 ret = -EINVAL;
6093 goto done;
6094 }
6095
6096 if (param->u.crypt.set_tx) {
6097 ieee->tx_keyidx = param->u.crypt.idx;
6098 sec.active_key = param->u.crypt.idx;
6099 sec.flags |= SEC_ACTIVE_KEY;
6100 }
6101
6102 if (ops->name != NULL) {
6103
6104 if (strcmp(ops->name, "WEP") == 0) {
6105 memcpy(sec.keys[param->u.crypt.idx],
6106 param->u.crypt.key, param->u.crypt.key_len);
6107 sec.key_sizes[param->u.crypt.idx] =
6108 param->u.crypt.key_len;
6109 sec.flags |= (1 << param->u.crypt.idx);
6110 sec.flags |= SEC_LEVEL;
6111 sec.level = SEC_LEVEL_1;
6112 } else if (strcmp(ops->name, "TKIP") == 0) {
6113 sec.flags |= SEC_LEVEL;
6114 sec.level = SEC_LEVEL_2;
6115 } else if (strcmp(ops->name, "CCMP") == 0) {
6116 sec.flags |= SEC_LEVEL;
6117 sec.level = SEC_LEVEL_3;
6118 }
6119 }
6120 done:
6121 if (ieee->set_security)
6122 ieee->set_security(ieee->dev, &sec);
6123
6124 /* Do not reset port if card is in Managed mode since resetting will
6125 * generate new IEEE 802.11 authentication which may end up in looping
6126 * with IEEE 802.1X. If your hardware requires a reset after WEP
6127 * configuration (for example... Prism2), implement the reset_port in
6128 * the callbacks structures used to initialize the 802.11 stack. */
6129 if (ieee->reset_on_keychange &&
6130 ieee->iw_mode != IW_MODE_INFRA &&
6131 ieee->reset_port && ieee->reset_port(dev)) {
6132 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
6133 param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
6134 return -EINVAL;
6135 }
6136
6137 return ret;
6138}
6139
6140static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
6141{
6142
6143 struct ipw2100_param *param;
6144 int ret = 0;
6145
6146 IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
6147
6148 if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
6149 return -EINVAL;
6150
6151 param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
6152 if (param == NULL)
6153 return -ENOMEM;
6154
6155 if (copy_from_user(param, p->pointer, p->length)) {
6156 kfree(param);
6157 return -EFAULT;
6158 }
6159
6160 switch (param->cmd) {
6161
6162 case IPW2100_CMD_SET_WPA_PARAM:
6163 ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
6164 param->u.wpa_param.value);
6165 break;
6166
6167 case IPW2100_CMD_SET_WPA_IE:
6168 ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
6169 break;
6170
6171 case IPW2100_CMD_SET_ENCRYPTION:
6172 ret = ipw2100_wpa_set_encryption(dev, param, p->length);
6173 break;
6174
6175 case IPW2100_CMD_MLME:
6176 ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
6177 param->u.mlme.reason_code);
6178 break;
6179
6180 default:
6181 printk(KERN_ERR DRV_NAME
6182 ": %s: Unknown WPA supplicant request: %d\n", dev->name,
6183 param->cmd);
6184 ret = -EOPNOTSUPP;
6185
6186 }
6187
6188 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
6189 ret = -EFAULT;
6190
6191 kfree(param);
6192 return ret;
6193}
6194
6195static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6196{
6197 struct iwreq *wrq = (struct iwreq *)rq;
6198 int ret = -1;
6199 switch (cmd) {
6200 case IPW2100_IOCTL_WPA_SUPPLICANT:
6201 ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
6202 return ret;
6203
6204 default:
6205 return -EOPNOTSUPP;
6206 }
6207
6208 return -EOPNOTSUPP;
6209}
6210#endif /* WIRELESS_EXT < 18 */
6211
6212static void ipw_ethtool_get_drvinfo(struct net_device *dev, 5789static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6213 struct ethtool_drvinfo *info) 5790 struct ethtool_drvinfo *info)
6214{ 5791{
@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6337 dev->open = ipw2100_open; 5914 dev->open = ipw2100_open;
6338 dev->stop = ipw2100_close; 5915 dev->stop = ipw2100_close;
6339 dev->init = ipw2100_net_init; 5916 dev->init = ipw2100_net_init;
6340#if WIRELESS_EXT < 18
6341 dev->do_ioctl = ipw2100_ioctl;
6342#endif
6343 dev->get_stats = ipw2100_stats; 5917 dev->get_stats = ipw2100_stats;
6344 dev->ethtool_ops = &ipw2100_ethtool_ops; 5918 dev->ethtool_ops = &ipw2100_ethtool_ops;
6345 dev->tx_timeout = ipw2100_tx_timeout; 5919 dev->tx_timeout = ipw2100_tx_timeout;
@@ -7153,7 +6727,7 @@ static int ipw2100_wx_get_range(struct net_device *dev,
7153 6727
7154 /* Set the Wireless Extension versions */ 6728 /* Set the Wireless Extension versions */
7155 range->we_version_compiled = WIRELESS_EXT; 6729 range->we_version_compiled = WIRELESS_EXT;
7156 range->we_version_source = 16; 6730 range->we_version_source = 18;
7157 6731
7158// range->retry_capa; /* What retry options are supported */ 6732// range->retry_capa; /* What retry options are supported */
7159// range->retry_flags; /* How to decode max/min retry limit */ 6733// range->retry_flags; /* How to decode max/min retry limit */
@@ -7184,6 +6758,9 @@ static int ipw2100_wx_get_range(struct net_device *dev,
7184 IW_EVENT_CAPA_MASK(SIOCGIWAP)); 6758 IW_EVENT_CAPA_MASK(SIOCGIWAP));
7185 range->event_capa[1] = IW_EVENT_CAPA_K_1; 6759 range->event_capa[1] = IW_EVENT_CAPA_K_1;
7186 6760
6761 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
6762 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
6763
7187 IPW_DEBUG_WX("GET Range\n"); 6764 IPW_DEBUG_WX("GET Range\n");
7188 6765
7189 return 0; 6766 return 0;
@@ -7852,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
7852 return 0; 7429 return 0;
7853} 7430}
7854 7431
7855#if WIRELESS_EXT > 17
7856/* 7432/*
7857 * WE-18 WPA support 7433 * WE-18 WPA support
7858 */ 7434 */
@@ -8114,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
8114 } 7690 }
8115 return 0; 7691 return 0;
8116} 7692}
8117#endif /* WIRELESS_EXT > 17 */
8118 7693
8119/* 7694/*
8120 * 7695 *
@@ -8347,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
8347 NULL, /* SIOCWIWTHRSPY */ 7922 NULL, /* SIOCWIWTHRSPY */
8348 ipw2100_wx_set_wap, /* SIOCSIWAP */ 7923 ipw2100_wx_set_wap, /* SIOCSIWAP */
8349 ipw2100_wx_get_wap, /* SIOCGIWAP */ 7924 ipw2100_wx_get_wap, /* SIOCGIWAP */
8350#if WIRELESS_EXT > 17
8351 ipw2100_wx_set_mlme, /* SIOCSIWMLME */ 7925 ipw2100_wx_set_mlme, /* SIOCSIWMLME */
8352#else
8353 NULL, /* -- hole -- */
8354#endif
8355 NULL, /* SIOCGIWAPLIST -- deprecated */ 7926 NULL, /* SIOCGIWAPLIST -- deprecated */
8356 ipw2100_wx_set_scan, /* SIOCSIWSCAN */ 7927 ipw2100_wx_set_scan, /* SIOCSIWSCAN */
8357 ipw2100_wx_get_scan, /* SIOCGIWSCAN */ 7928 ipw2100_wx_get_scan, /* SIOCGIWSCAN */
@@ -8375,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
8375 ipw2100_wx_get_encode, /* SIOCGIWENCODE */ 7946 ipw2100_wx_get_encode, /* SIOCGIWENCODE */
8376 ipw2100_wx_set_power, /* SIOCSIWPOWER */ 7947 ipw2100_wx_set_power, /* SIOCSIWPOWER */
8377 ipw2100_wx_get_power, /* SIOCGIWPOWER */ 7948 ipw2100_wx_get_power, /* SIOCGIWPOWER */
8378#if WIRELESS_EXT > 17
8379 NULL, /* -- hole -- */ 7949 NULL, /* -- hole -- */
8380 NULL, /* -- hole -- */ 7950 NULL, /* -- hole -- */
8381 ipw2100_wx_set_genie, /* SIOCSIWGENIE */ 7951 ipw2100_wx_set_genie, /* SIOCSIWGENIE */
@@ -8385,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
8385 ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */ 7955 ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
8386 ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */ 7956 ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
8387 NULL, /* SIOCSIWPMKSA */ 7957 NULL, /* SIOCSIWPMKSA */
8388#endif
8389}; 7958};
8390 7959
8391#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV 7960#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index cdfe50207757..4c28e332ecc3 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -813,7 +813,7 @@ static void ipw_bg_led_link_off(void *data)
813 up(&priv->sem); 813 up(&priv->sem);
814} 814}
815 815
816static inline void __ipw_led_activity_on(struct ipw_priv *priv) 816static void __ipw_led_activity_on(struct ipw_priv *priv)
817{ 817{
818 u32 led; 818 u32 led;
819 819
@@ -1508,7 +1508,7 @@ static ssize_t store_direct_dword(struct device *d,
1508static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO, 1508static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1509 show_direct_dword, store_direct_dword); 1509 show_direct_dword, store_direct_dword);
1510 1510
1511static inline int rf_kill_active(struct ipw_priv *priv) 1511static int rf_kill_active(struct ipw_priv *priv)
1512{ 1512{
1513 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) 1513 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1514 priv->status |= STATUS_RF_KILL_HW; 1514 priv->status |= STATUS_RF_KILL_HW;
@@ -2359,7 +2359,7 @@ static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2359} 2359}
2360 2360
2361/* perform a chip select operation */ 2361/* perform a chip select operation */
2362static inline void eeprom_cs(struct ipw_priv *priv) 2362static void eeprom_cs(struct ipw_priv *priv)
2363{ 2363{
2364 eeprom_write_reg(priv, 0); 2364 eeprom_write_reg(priv, 0);
2365 eeprom_write_reg(priv, EEPROM_BIT_CS); 2365 eeprom_write_reg(priv, EEPROM_BIT_CS);
@@ -2368,7 +2368,7 @@ static inline void eeprom_cs(struct ipw_priv *priv)
2368} 2368}
2369 2369
2370/* perform a chip select operation */ 2370/* perform a chip select operation */
2371static inline void eeprom_disable_cs(struct ipw_priv *priv) 2371static void eeprom_disable_cs(struct ipw_priv *priv)
2372{ 2372{
2373 eeprom_write_reg(priv, EEPROM_BIT_CS); 2373 eeprom_write_reg(priv, EEPROM_BIT_CS);
2374 eeprom_write_reg(priv, 0); 2374 eeprom_write_reg(priv, 0);
@@ -2475,7 +2475,7 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2475 IPW_DEBUG_TRACE("<<\n"); 2475 IPW_DEBUG_TRACE("<<\n");
2476} 2476}
2477 2477
2478static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2478static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2479{ 2479{
2480 count >>= 2; 2480 count >>= 2;
2481 if (!count) 2481 if (!count)
@@ -2772,7 +2772,7 @@ static inline int ipw_alive(struct ipw_priv *priv)
2772 return ipw_read32(priv, 0x90) == 0xd55555d5; 2772 return ipw_read32(priv, 0x90) == 0xd55555d5;
2773} 2773}
2774 2774
2775static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 2775static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2776 int timeout) 2776 int timeout)
2777{ 2777{
2778 int i = 0; 2778 int i = 0;
@@ -3150,7 +3150,7 @@ static int ipw_get_fw(struct ipw_priv *priv,
3150 3150
3151#define IPW_RX_BUF_SIZE (3000) 3151#define IPW_RX_BUF_SIZE (3000)
3152 3152
3153static inline void ipw_rx_queue_reset(struct ipw_priv *priv, 3153static void ipw_rx_queue_reset(struct ipw_priv *priv,
3154 struct ipw_rx_queue *rxq) 3154 struct ipw_rx_queue *rxq)
3155{ 3155{
3156 unsigned long flags; 3156 unsigned long flags;
@@ -3608,7 +3608,7 @@ static void ipw_tx_queue_free(struct ipw_priv *priv)
3608 ipw_queue_tx_free(priv, &priv->txq[3]); 3608 ipw_queue_tx_free(priv, &priv->txq[3]);
3609} 3609}
3610 3610
3611static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3611static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3612{ 3612{
3613 /* First 3 bytes are manufacturer */ 3613 /* First 3 bytes are manufacturer */
3614 bssid[0] = priv->mac_addr[0]; 3614 bssid[0] = priv->mac_addr[0];
@@ -3622,7 +3622,7 @@ static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3622 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3622 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3623} 3623}
3624 3624
3625static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3625static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3626{ 3626{
3627 struct ipw_station_entry entry; 3627 struct ipw_station_entry entry;
3628 int i; 3628 int i;
@@ -3655,7 +3655,7 @@ static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3655 return i; 3655 return i;
3656} 3656}
3657 3657
3658static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3658static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3659{ 3659{
3660 int i; 3660 int i;
3661 3661
@@ -3794,7 +3794,7 @@ static void inline average_init(struct average *avg)
3794 memset(avg, 0, sizeof(*avg)); 3794 memset(avg, 0, sizeof(*avg));
3795} 3795}
3796 3796
3797static void inline average_add(struct average *avg, s16 val) 3797static void average_add(struct average *avg, s16 val)
3798{ 3798{
3799 avg->sum -= avg->entries[avg->pos]; 3799 avg->sum -= avg->entries[avg->pos];
3800 avg->sum += val; 3800 avg->sum += val;
@@ -3805,7 +3805,7 @@ static void inline average_add(struct average *avg, s16 val)
3805 } 3805 }
3806} 3806}
3807 3807
3808static s16 inline average_value(struct average *avg) 3808static s16 average_value(struct average *avg)
3809{ 3809{
3810 if (!unlikely(avg->init)) { 3810 if (!unlikely(avg->init)) {
3811 if (avg->pos) 3811 if (avg->pos)
@@ -3847,7 +3847,7 @@ static void ipw_reset_stats(struct ipw_priv *priv)
3847 3847
3848} 3848}
3849 3849
3850static inline u32 ipw_get_max_rate(struct ipw_priv *priv) 3850static u32 ipw_get_max_rate(struct ipw_priv *priv)
3851{ 3851{
3852 u32 i = 0x80000000; 3852 u32 i = 0x80000000;
3853 u32 mask = priv->rates_mask; 3853 u32 mask = priv->rates_mask;
@@ -4087,7 +4087,7 @@ static void ipw_bg_gather_stats(void *data)
4087 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4087 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4088 * Above disassociate threshold, give up and stop scanning. 4088 * Above disassociate threshold, give up and stop scanning.
4089 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4089 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4090static inline void ipw_handle_missed_beacon(struct ipw_priv *priv, 4090static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4091 int missed_count) 4091 int missed_count)
4092{ 4092{
4093 priv->notif_missed_beacons = missed_count; 4093 priv->notif_missed_beacons = missed_count;
@@ -4157,7 +4157,7 @@ static inline void ipw_handle_missed_beacon(struct ipw_priv *priv,
4157 * Handle host notification packet. 4157 * Handle host notification packet.
4158 * Called from interrupt routine 4158 * Called from interrupt routine
4159 */ 4159 */
4160static inline void ipw_rx_notification(struct ipw_priv *priv, 4160static void ipw_rx_notification(struct ipw_priv *priv,
4161 struct ipw_rx_notification *notif) 4161 struct ipw_rx_notification *notif)
4162{ 4162{
4163 notif->size = le16_to_cpu(notif->size); 4163 notif->size = le16_to_cpu(notif->size);
@@ -5095,7 +5095,7 @@ static int ipw_compatible_rates(struct ipw_priv *priv,
5095 return 1; 5095 return 1;
5096} 5096}
5097 5097
5098static inline void ipw_copy_rates(struct ipw_supported_rates *dest, 5098static void ipw_copy_rates(struct ipw_supported_rates *dest,
5099 const struct ipw_supported_rates *src) 5099 const struct ipw_supported_rates *src)
5100{ 5100{
5101 u8 i; 5101 u8 i;
@@ -5856,7 +5856,7 @@ static void ipw_debug_config(struct ipw_priv *priv)
5856#define ipw_debug_config(x) do {} while (0) 5856#define ipw_debug_config(x) do {} while (0)
5857#endif 5857#endif
5858 5858
5859static inline void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 5859static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5860{ 5860{
5861 /* TODO: Verify that this works... */ 5861 /* TODO: Verify that this works... */
5862 struct ipw_fixed_rate fr = { 5862 struct ipw_fixed_rate fr = {
@@ -7634,7 +7634,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7634} 7634}
7635#endif 7635#endif
7636 7636
7637static inline int is_network_packet(struct ipw_priv *priv, 7637static int is_network_packet(struct ipw_priv *priv,
7638 struct ieee80211_hdr_4addr *header) 7638 struct ieee80211_hdr_4addr *header)
7639{ 7639{
7640 /* Filter incoming packets to determine if they are targetted toward 7640 /* Filter incoming packets to determine if they are targetted toward
@@ -7672,7 +7672,7 @@ static inline int is_network_packet(struct ipw_priv *priv,
7672 7672
7673#define IPW_PACKET_RETRY_TIME HZ 7673#define IPW_PACKET_RETRY_TIME HZ
7674 7674
7675static inline int is_duplicate_packet(struct ipw_priv *priv, 7675static int is_duplicate_packet(struct ipw_priv *priv,
7676 struct ieee80211_hdr_4addr *header) 7676 struct ieee80211_hdr_4addr *header)
7677{ 7677{
7678 u16 sc = le16_to_cpu(header->seq_ctl); 7678 u16 sc = le16_to_cpu(header->seq_ctl);
@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8936 IPW_DEBUG_HC("starting request direct scan!\n"); 8936 IPW_DEBUG_HC("starting request direct scan!\n");
8937 8937
8938 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 8938 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8939 err = wait_event_interruptible(priv->wait_state, 8939 /* We should not sleep here; otherwise we will block most
8940 !(priv-> 8940 * of the system (for instance, we hold rtnl_lock when we
8941 status & (STATUS_SCANNING | 8941 * get here).
8942 STATUS_SCAN_ABORTING))); 8942 */
8943 if (err) { 8943 err = -EAGAIN;
8944 IPW_DEBUG_HC("aborting direct scan"); 8944 goto done;
8945 goto done;
8946 }
8947 } 8945 }
8948 memset(&scan, 0, sizeof(scan)); 8946 memset(&scan, 0, sizeof(scan));
8949 8947
@@ -9581,7 +9579,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9581 9579
9582/* net device stuff */ 9580/* net device stuff */
9583 9581
9584static inline void init_sys_config(struct ipw_sys_config *sys_config) 9582static void init_sys_config(struct ipw_sys_config *sys_config)
9585{ 9583{
9586 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 9584 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9587 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ 9585 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
@@ -9627,7 +9625,7 @@ modify to send one tfd per fragment instead of using chunking. otherwise
9627we need to heavily modify the ieee80211_skb_to_txb. 9625we need to heavily modify the ieee80211_skb_to_txb.
9628*/ 9626*/
9629 9627
9630static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, 9628static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9631 int pri) 9629 int pri)
9632{ 9630{
9633 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *) 9631 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 135a156db25d..c5cd61c7f927 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
748 if (essid->length) { 748 if (essid->length) {
749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ 749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
750 /* if it is to big, trunk it */ 750 /* if it is to big, trunk it */
751 dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1); 751 dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
752 } else { 752 } else {
753 dwrq->flags = 0; 753 dwrq->flags = 0;
754 dwrq->length = 0; 754 dwrq->length = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 33d64d2ee53f..a8261d8454dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
177#endif 177#endif
178 178
179 newskb->dev = skb->dev; 179 newskb->dev = skb->dev;
180 dev_kfree_skb(skb); 180 dev_kfree_skb_irq(skb);
181 skb = newskb; 181 skb = newskb;
182 } 182 }
183 } 183 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 319180ca7e71..7880d8c31aad 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
1256 extra[IW_ESSID_MAX_SIZE] = '\0'; 1256 extra[IW_ESSID_MAX_SIZE] = '\0';
1257 1257
1258 /* Push it out ! */ 1258 /* Push it out ! */
1259 dwrq->length = strlen(extra) + 1; 1259 dwrq->length = strlen(extra);
1260 dwrq->flags = 1; /* active */ 1260 dwrq->flags = 1; /* active */
1261 1261
1262 return 0; 1262 return 0;
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index d25264ba0c0e..18baacfc5a2c 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1675,11 +1675,6 @@ static int strip_rebuild_header(struct sk_buff *skb)
1675/************************************************************************/ 1675/************************************************************************/
1676/* Receiving routines */ 1676/* Receiving routines */
1677 1677
1678static int strip_receive_room(struct tty_struct *tty)
1679{
1680 return 0x10000; /* We can handle an infinite amount of data. :-) */
1681}
1682
1683/* 1678/*
1684 * This function parses the response to the ATS300? command, 1679 * This function parses the response to the ATS300? command,
1685 * extracting the radio version and serial number. 1680 * extracting the radio version and serial number.
@@ -2424,7 +2419,7 @@ static struct net_device_stats *strip_get_stats(struct net_device *dev)
2424/* 2419/*
2425 * Here's the order things happen: 2420 * Here's the order things happen:
2426 * When the user runs "slattach -p strip ..." 2421 * When the user runs "slattach -p strip ..."
2427 * 1. The TTY module calls strip_open 2422 * 1. The TTY module calls strip_open;;
2428 * 2. strip_open calls strip_alloc 2423 * 2. strip_open calls strip_alloc
2429 * 3. strip_alloc calls register_netdev 2424 * 3. strip_alloc calls register_netdev
2430 * 4. register_netdev calls strip_dev_init 2425 * 4. register_netdev calls strip_dev_init
@@ -2652,6 +2647,8 @@ static int strip_open(struct tty_struct *tty)
2652 2647
2653 strip_info->tty = tty; 2648 strip_info->tty = tty;
2654 tty->disc_data = strip_info; 2649 tty->disc_data = strip_info;
2650 tty->receive_room = 65536;
2651
2655 if (tty->driver->flush_buffer) 2652 if (tty->driver->flush_buffer)
2656 tty->driver->flush_buffer(tty); 2653 tty->driver->flush_buffer(tty);
2657 2654
@@ -2762,7 +2759,6 @@ static struct tty_ldisc strip_ldisc = {
2762 .close = strip_close, 2759 .close = strip_close,
2763 .ioctl = strip_ioctl, 2760 .ioctl = strip_ioctl,
2764 .receive_buf = strip_receive_buf, 2761 .receive_buf = strip_receive_buf,
2765 .receive_room = strip_receive_room,
2766 .write_wakeup = strip_write_some_more, 2762 .write_wakeup = strip_write_some_more,
2767}; 2763};
2768 2764
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index b0d8b5b03152..ff192e96268a 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -102,7 +102,7 @@ static inline void hacr_write(unsigned long ioaddr, u16 hacr)
102 * Write to card's Host Adapter Command Register. Include a delay for 102 * Write to card's Host Adapter Command Register. Include a delay for
103 * those times when it is needed. 103 * those times when it is needed.
104 */ 104 */
105static inline void hacr_write_slow(unsigned long ioaddr, u16 hacr) 105static void hacr_write_slow(unsigned long ioaddr, u16 hacr)
106{ 106{
107 hacr_write(ioaddr, hacr); 107 hacr_write(ioaddr, hacr);
108 /* delay might only be needed sometimes */ 108 /* delay might only be needed sometimes */
@@ -242,7 +242,7 @@ static void psa_write(unsigned long ioaddr, u16 hacr, int o, /* Offset in PSA */
242 * The Windows drivers don't use the CRC, but the AP and the PtP tool 242 * The Windows drivers don't use the CRC, but the AP and the PtP tool
243 * depend on it. 243 * depend on it.
244 */ 244 */
245static inline u16 psa_crc(u8 * psa, /* The PSA */ 245static u16 psa_crc(u8 * psa, /* The PSA */
246 int size) 246 int size)
247{ /* Number of short for CRC */ 247{ /* Number of short for CRC */
248 int byte_cnt; /* Loop on the PSA */ 248 int byte_cnt; /* Loop on the PSA */
@@ -310,7 +310,7 @@ static void update_psa_checksum(struct net_device * dev, unsigned long ioaddr, u
310/* 310/*
311 * Write 1 byte to the MMC. 311 * Write 1 byte to the MMC.
312 */ 312 */
313static inline void mmc_out(unsigned long ioaddr, u16 o, u8 d) 313static void mmc_out(unsigned long ioaddr, u16 o, u8 d)
314{ 314{
315 int count = 0; 315 int count = 0;
316 316
@@ -326,7 +326,7 @@ static inline void mmc_out(unsigned long ioaddr, u16 o, u8 d)
326 * Routine to write bytes to the Modem Management Controller. 326 * Routine to write bytes to the Modem Management Controller.
327 * We start at the end because it is the way it should be! 327 * We start at the end because it is the way it should be!
328 */ 328 */
329static inline void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n) 329static void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n)
330{ 330{
331 o += n; 331 o += n;
332 b += n; 332 b += n;
@@ -340,7 +340,7 @@ static inline void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n)
340 * Read a byte from the MMC. 340 * Read a byte from the MMC.
341 * Optimised version for 1 byte, avoid using memory. 341 * Optimised version for 1 byte, avoid using memory.
342 */ 342 */
343static inline u8 mmc_in(unsigned long ioaddr, u16 o) 343static u8 mmc_in(unsigned long ioaddr, u16 o)
344{ 344{
345 int count = 0; 345 int count = 0;
346 346
@@ -587,7 +587,7 @@ static void wv_ack(struct net_device * dev)
587 * Set channel attention bit and busy wait until command has 587 * Set channel attention bit and busy wait until command has
588 * completed, then acknowledge completion of the command. 588 * completed, then acknowledge completion of the command.
589 */ 589 */
590static inline int wv_synchronous_cmd(struct net_device * dev, const char *str) 590static int wv_synchronous_cmd(struct net_device * dev, const char *str)
591{ 591{
592 net_local *lp = (net_local *) dev->priv; 592 net_local *lp = (net_local *) dev->priv;
593 unsigned long ioaddr = dev->base_addr; 593 unsigned long ioaddr = dev->base_addr;
@@ -633,7 +633,7 @@ static inline int wv_synchronous_cmd(struct net_device * dev, const char *str)
633 * Configuration commands completion interrupt. 633 * Configuration commands completion interrupt.
634 * Check if done, and if OK. 634 * Check if done, and if OK.
635 */ 635 */
636static inline int 636static int
637wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp) 637wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp)
638{ 638{
639 unsigned short mcs_addr; 639 unsigned short mcs_addr;
@@ -843,7 +843,7 @@ if (lp->tx_n_in_use > 0)
843 * wavelan_interrupt is not an option), so you may experience 843 * wavelan_interrupt is not an option), so you may experience
844 * delays sometimes. 844 * delays sometimes.
845 */ 845 */
846static inline void wv_82586_reconfig(struct net_device * dev) 846static void wv_82586_reconfig(struct net_device * dev)
847{ 847{
848 net_local *lp = (net_local *) dev->priv; 848 net_local *lp = (net_local *) dev->priv;
849 unsigned long flags; 849 unsigned long flags;
@@ -1281,7 +1281,7 @@ static inline void wv_packet_info(u8 * p, /* Packet to dump */
1281 * This is the information which is displayed by the driver at startup. 1281 * This is the information which is displayed by the driver at startup.
1282 * There are lots of flags for configuring it to your liking. 1282 * There are lots of flags for configuring it to your liking.
1283 */ 1283 */
1284static inline void wv_init_info(struct net_device * dev) 1284static void wv_init_info(struct net_device * dev)
1285{ 1285{
1286 short ioaddr = dev->base_addr; 1286 short ioaddr = dev->base_addr;
1287 net_local *lp = (net_local *) dev->priv; 1287 net_local *lp = (net_local *) dev->priv;
@@ -1502,7 +1502,7 @@ static int wavelan_set_mac_address(struct net_device * dev, void *addr)
1502 * It's a bit complicated and you don't really want to look into it. 1502 * It's a bit complicated and you don't really want to look into it.
1503 * (called in wavelan_ioctl) 1503 * (called in wavelan_ioctl)
1504 */ 1504 */
1505static inline int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */ 1505static int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */
1506 iw_freq * frequency) 1506 iw_freq * frequency)
1507{ 1507{
1508 const int BAND_NUM = 10; /* Number of bands */ 1508 const int BAND_NUM = 10; /* Number of bands */
@@ -1677,7 +1677,7 @@ static inline int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card
1677/* 1677/*
1678 * Give the list of available frequencies. 1678 * Give the list of available frequencies.
1679 */ 1679 */
1680static inline int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */ 1680static int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */
1681 iw_freq * list, /* List of frequencies to fill */ 1681 iw_freq * list, /* List of frequencies to fill */
1682 int max) 1682 int max)
1683{ /* Maximum number of frequencies */ 1683{ /* Maximum number of frequencies */
@@ -2489,7 +2489,7 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
2489 * Note: if any errors occur, the packet is "dropped on the floor". 2489 * Note: if any errors occur, the packet is "dropped on the floor".
2490 * (called by wv_packet_rcv()) 2490 * (called by wv_packet_rcv())
2491 */ 2491 */
2492static inline void 2492static void
2493wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) 2493wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2494{ 2494{
2495 net_local *lp = (net_local *) dev->priv; 2495 net_local *lp = (net_local *) dev->priv;
@@ -2585,7 +2585,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2585 * (called in wavelan_interrupt()). 2585 * (called in wavelan_interrupt()).
2586 * Note : the spinlock is already grabbed for us. 2586 * Note : the spinlock is already grabbed for us.
2587 */ 2587 */
2588static inline void wv_receive(struct net_device * dev) 2588static void wv_receive(struct net_device * dev)
2589{ 2589{
2590 unsigned long ioaddr = dev->base_addr; 2590 unsigned long ioaddr = dev->base_addr;
2591 net_local *lp = (net_local *) dev->priv; 2591 net_local *lp = (net_local *) dev->priv;
@@ -2768,7 +2768,7 @@ static inline void wv_receive(struct net_device * dev)
2768 * 2768 *
2769 * (called in wavelan_packet_xmit()) 2769 * (called in wavelan_packet_xmit())
2770 */ 2770 */
2771static inline int wv_packet_write(struct net_device * dev, void *buf, short length) 2771static int wv_packet_write(struct net_device * dev, void *buf, short length)
2772{ 2772{
2773 net_local *lp = (net_local *) dev->priv; 2773 net_local *lp = (net_local *) dev->priv;
2774 unsigned long ioaddr = dev->base_addr; 2774 unsigned long ioaddr = dev->base_addr;
@@ -2964,7 +2964,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
2964 * Routine to initialize the Modem Management Controller. 2964 * Routine to initialize the Modem Management Controller.
2965 * (called by wv_hw_reset()) 2965 * (called by wv_hw_reset())
2966 */ 2966 */
2967static inline int wv_mmc_init(struct net_device * dev) 2967static int wv_mmc_init(struct net_device * dev)
2968{ 2968{
2969 unsigned long ioaddr = dev->base_addr; 2969 unsigned long ioaddr = dev->base_addr;
2970 net_local *lp = (net_local *) dev->priv; 2970 net_local *lp = (net_local *) dev->priv;
@@ -3136,7 +3136,7 @@ static inline int wv_mmc_init(struct net_device * dev)
3136 * Start the receive unit. 3136 * Start the receive unit.
3137 * (called by wv_hw_reset()) 3137 * (called by wv_hw_reset())
3138 */ 3138 */
3139static inline int wv_ru_start(struct net_device * dev) 3139static int wv_ru_start(struct net_device * dev)
3140{ 3140{
3141 net_local *lp = (net_local *) dev->priv; 3141 net_local *lp = (net_local *) dev->priv;
3142 unsigned long ioaddr = dev->base_addr; 3142 unsigned long ioaddr = dev->base_addr;
@@ -3228,7 +3228,7 @@ static inline int wv_ru_start(struct net_device * dev)
3228 * 3228 *
3229 * (called by wv_hw_reset()) 3229 * (called by wv_hw_reset())
3230 */ 3230 */
3231static inline int wv_cu_start(struct net_device * dev) 3231static int wv_cu_start(struct net_device * dev)
3232{ 3232{
3233 net_local *lp = (net_local *) dev->priv; 3233 net_local *lp = (net_local *) dev->priv;
3234 unsigned long ioaddr = dev->base_addr; 3234 unsigned long ioaddr = dev->base_addr;
@@ -3329,7 +3329,7 @@ static inline int wv_cu_start(struct net_device * dev)
3329 * 3329 *
3330 * (called by wv_hw_reset()) 3330 * (called by wv_hw_reset())
3331 */ 3331 */
3332static inline int wv_82586_start(struct net_device * dev) 3332static int wv_82586_start(struct net_device * dev)
3333{ 3333{
3334 net_local *lp = (net_local *) dev->priv; 3334 net_local *lp = (net_local *) dev->priv;
3335 unsigned long ioaddr = dev->base_addr; 3335 unsigned long ioaddr = dev->base_addr;
@@ -3641,7 +3641,7 @@ static void wv_82586_config(struct net_device * dev)
3641 * WaveLAN controller (i82586). 3641 * WaveLAN controller (i82586).
3642 * (called by wavelan_close()) 3642 * (called by wavelan_close())
3643 */ 3643 */
3644static inline void wv_82586_stop(struct net_device * dev) 3644static void wv_82586_stop(struct net_device * dev)
3645{ 3645{
3646 net_local *lp = (net_local *) dev->priv; 3646 net_local *lp = (net_local *) dev->priv;
3647 unsigned long ioaddr = dev->base_addr; 3647 unsigned long ioaddr = dev->base_addr;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 7e2039f52c49..cf373625fc70 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
2280 extra[IW_ESSID_MAX_SIZE] = '\0'; 2280 extra[IW_ESSID_MAX_SIZE] = '\0';
2281 2281
2282 /* Set the length */ 2282 /* Set the length */
2283 wrqu->data.length = strlen(extra) + 1; 2283 wrqu->data.length = strlen(extra);
2284 2284
2285 return 0; 2285 return 0;
2286} 2286}