aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/net/davicom-dm9000.txt26
-rw-r--r--Documentation/devicetree/bindings/net/via-velocity.txt20
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/networking/scaling.txt58
-rw-r--r--Documentation/sysctl/net.txt3
-rw-r--r--arch/arm/net/bpf_jit_32.c18
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c19
-rw-r--r--arch/sparc/net/bpf_jit_comp.c20
-rw-r--r--arch/x86/net/bpf_jit_comp.c61
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/flexcan.c1
-rw-r--r--drivers/net/can/grcan.c7
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/ethernet/3com/3c509.c19
-rw-r--r--drivers/net/ethernet/3com/3c59x.c17
-rw-r--r--drivers/net/ethernet/alteon/acenic.c15
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c13
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c25
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c25
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c27
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c54
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c38
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c240
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c7
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c105
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c51
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c62
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c11
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c30
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h34
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c55
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c120
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h36
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c45
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c124
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h20
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c74
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/korina.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c17
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c13
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/sis190.c13
-rw-r--r--drivers/net/ethernet/smsc/Kconfig2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c5
-rw-r--r--drivers/net/ethernet/sun/sungem.c13
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/ti/tlan.h1
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c14
-rw-r--r--drivers/net/ethernet/via/Kconfig3
-rw-r--r--drivers/net/ethernet/via/via-velocity.c507
-rw-r--r--drivers/net/ethernet/via/via-velocity.h8
-rw-r--r--drivers/net/fddi/skfp/skfddi.c13
-rw-r--r--drivers/net/hippi/rrunner.c13
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/phy/phy_device.c8
-rw-r--r--drivers/net/usb/r8152.c14
-rw-r--r--drivers/net/vxlan.c263
-rw-r--r--drivers/net/xen-netback/common.h14
-rw-r--r--drivers/net/xen-netback/interface.c102
-rw-r--r--drivers/net/xen-netback/netback.c38
-rw-r--r--drivers/net/xen-netback/xenbus.c46
-rw-r--r--drivers/net/xen-netfront.c205
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/netdevice.h17
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/tcp.h9
-rw-r--r--include/xen/interface/io/netif.h12
-rw-r--r--net/Kconfig12
-rw-r--r--net/bridge/br_device.c13
-rw-r--r--net/bridge/br_multicast.c70
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/bridge/br_sysfs_br.c26
-rw-r--r--net/core/dev.c49
-rw-r--r--net/core/net-procfs.c16
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sysctl_net_core.c104
-rw-r--r--net/ipv4/tcp.c98
-rw-r--r--net/ipv4/tcp_input.c69
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_minisocks.c6
-rw-r--r--net/ipv6/addrconf.c67
-rw-r--r--net/sched/sch_tbf.c47
117 files changed, 2487 insertions, 1362 deletions
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
new file mode 100644
index 000000000000..2d39c990e641
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -0,0 +1,26 @@
1Davicom DM9000 Fast Ethernet controller
2
3Required properties:
4- compatible = "davicom,dm9000";
5- reg : physical addresses and sizes of registers, must contain 2 entries:
6 first entry : address register,
7 second entry : data register.
8- interrupt-parent : interrupt controller to which the device is connected
9- interrupts : interrupt specifier specific to interrupt controller
10
11Optional properties:
12- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address
13 to use (from firmware or bootloader)
14- davicom,no-eeprom : Configuration EEPROM is not available
15- davicom,ext-phy : Use external PHY
16
17Example:
18
19 ethernet@18000000 {
20 compatible = "davicom,dm9000";
21 reg = <0x18000000 0x2 0x18000004 0x2>;
22 interrupt-parent = <&gpn>;
23 interrupts = <7 4>;
24 local-mac-address = [00 00 de ad be ef];
25 davicom,no-eeprom;
26 };
diff --git a/Documentation/devicetree/bindings/net/via-velocity.txt b/Documentation/devicetree/bindings/net/via-velocity.txt
new file mode 100644
index 000000000000..b3db469b1ad7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/via-velocity.txt
@@ -0,0 +1,20 @@
1* VIA Velocity 10/100/1000 Network Controller
2
3Required properties:
4- compatible : Should be "via,velocity-vt6110"
5- reg : Address and length of the io space
6- interrupts : Should contain the controller interrupt line
7
8Optional properties:
9- no-eeprom : PCI network cards use an external EEPROM to store data. Embedded
10 devices quite often set this data in uboot and do not provide an eeprom.
11 Specify this option if you have no external eeprom.
12
13Examples:
14
15eth0@d8004000 {
16 compatible = "via,velocity-vt6110";
17 reg = <0xd8004000 0x400>;
18 interrupts = <10>;
19 no-eeprom;
20};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 6931c4348d24..2fe74e6ec209 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -18,6 +18,7 @@ chrp Common Hardware Reference Platform
18cirrus Cirrus Logic, Inc. 18cirrus Cirrus Logic, Inc.
19cortina Cortina Systems, Inc. 19cortina Cortina Systems, Inc.
20dallas Maxim Integrated Products (formerly Dallas Semiconductor) 20dallas Maxim Integrated Products (formerly Dallas Semiconductor)
21davicom DAVICOM Semiconductor, Inc.
21denx Denx Software Engineering 22denx Denx Software Engineering
22emmicro EM Microelectronic 23emmicro EM Microelectronic
23epson Seiko Epson Corp. 24epson Seiko Epson Corp.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 579994afbe06..ca6977f5b2ed 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -163,6 +163,64 @@ and unnecessary. If there are fewer hardware queues than CPUs, then
163RPS might be beneficial if the rps_cpus for each queue are the ones that 163RPS might be beneficial if the rps_cpus for each queue are the ones that
164share the same memory domain as the interrupting CPU for that queue. 164share the same memory domain as the interrupting CPU for that queue.
165 165
166==== RPS Flow Limit
167
168RPS scales kernel receive processing across CPUs without introducing
169reordering. The trade-off to sending all packets from the same flow
170to the same CPU is CPU load imbalance if flows vary in packet rate.
171In the extreme case a single flow dominates traffic. Especially on
172common server workloads with many concurrent connections, such
173behavior indicates a problem such as a misconfiguration or spoofed
174source Denial of Service attack.
175
176Flow Limit is an optional RPS feature that prioritizes small flows
177during CPU contention by dropping packets from large flows slightly
178ahead of those from small flows. It is active only when an RPS or RFS
179destination CPU approaches saturation. Once a CPU's input packet
180queue exceeds half the maximum queue length (as set by sysctl
181net.core.netdev_max_backlog), the kernel starts a per-flow packet
182count over the last 256 packets. If a flow exceeds a set ratio (by
183default, half) of these packets when a new packet arrives, then the
184new packet is dropped. Packets from other flows are still only
185dropped once the input packet queue reaches netdev_max_backlog.
186No packets are dropped when the input packet queue length is below
187the threshold, so flow limit does not sever connections outright:
188even large flows maintain connectivity.
189
190== Interface
191
192Flow limit is compiled in by default (CONFIG_NET_FLOW_LIMIT), but not
193turned on. It is implemented for each CPU independently (to avoid lock
194and cache contention) and toggled per CPU by setting the relevant bit
195in sysctl net.core.flow_limit_cpu_bitmap. It exposes the same CPU
196bitmap interface as rps_cpus (see above) when called from procfs:
197
198 /proc/sys/net/core/flow_limit_cpu_bitmap
199
200Per-flow rate is calculated by hashing each packet into a hashtable
201bucket and incrementing a per-bucket counter. The hash function is
202the same that selects a CPU in RPS, but as the number of buckets can
203be much larger than the number of CPUs, flow limit has finer-grained
204identification of large flows and fewer false positives. The default
205table has 4096 buckets. This value can be modified through sysctl
206
207 net.core.flow_limit_table_len
208
209The value is only consulted when a new table is allocated. Modifying
210it does not update active tables.
211
212== Suggested Configuration
213
214Flow limit is useful on systems with many concurrent connections,
215where a single connection taking up 50% of a CPU indicates a problem.
216In such environments, enable the feature on all CPUs that handle
217network rx interrupts (as set in /proc/irq/N/smp_affinity).
218
219The feature depends on the input packet queue length to exceed
220the flow limit threshold (50%) + the flow history length (256).
221Setting net.core.netdev_max_backlog to either 1000 or 10000
222performed well in experiments.
223
166 224
167RFS: Receive Flow Steering 225RFS: Receive Flow Steering
168========================== 226==========================
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 98335b7a5337..c1f8640c2fc8 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -93,8 +93,7 @@ netdev_budget
93 93
94Maximum number of packets taken from all interfaces in one polling cycle (NAPI 94Maximum number of packets taken from all interfaces in one polling cycle (NAPI
95poll). In one polling cycle interfaces which are registered to polling are 95poll). In one polling cycle interfaces which are registered to polling are
96probed in a round-robin manner. The limit of packets in one such probe can be 96probed in a round-robin manner.
97set per-device via sysfs class/net/<device>/weight .
98 97
99netdev_max_backlog 98netdev_max_backlog
100------------------ 99------------------
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 1a643ee8e082..f50d223a0bd3 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -900,8 +900,7 @@ void bpf_jit_compile(struct sk_filter *fp)
900#endif 900#endif
901 901
902 alloc_size = 4 * ctx.idx; 902 alloc_size = 4 * ctx.idx;
903 ctx.target = module_alloc(max(sizeof(struct work_struct), 903 ctx.target = module_alloc(alloc_size);
904 alloc_size));
905 if (unlikely(ctx.target == NULL)) 904 if (unlikely(ctx.target == NULL))
906 goto out; 905 goto out;
907 906
@@ -927,19 +926,8 @@ out:
927 return; 926 return;
928} 927}
929 928
930static void bpf_jit_free_worker(struct work_struct *work)
931{
932 module_free(NULL, work);
933}
934
935void bpf_jit_free(struct sk_filter *fp) 929void bpf_jit_free(struct sk_filter *fp)
936{ 930{
937 struct work_struct *work; 931 if (fp->bpf_func != sk_run_filter)
938 932 module_free(NULL, fp->bpf_func);
939 if (fp->bpf_func != sk_run_filter) {
940 work = (struct work_struct *)fp->bpf_func;
941
942 INIT_WORK(work, bpf_jit_free_worker);
943 schedule_work(work);
944 }
945} 933}
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index c427ae36374a..bf56e33f8257 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -650,8 +650,7 @@ void bpf_jit_compile(struct sk_filter *fp)
650 650
651 proglen = cgctx.idx * 4; 651 proglen = cgctx.idx * 4;
652 alloclen = proglen + FUNCTION_DESCR_SIZE; 652 alloclen = proglen + FUNCTION_DESCR_SIZE;
653 image = module_alloc(max_t(unsigned int, alloclen, 653 image = module_alloc(alloclen);
654 sizeof(struct work_struct)));
655 if (!image) 654 if (!image)
656 goto out; 655 goto out;
657 656
@@ -688,20 +687,8 @@ out:
688 return; 687 return;
689} 688}
690 689
691static void jit_free_defer(struct work_struct *arg)
692{
693 module_free(NULL, arg);
694}
695
696/* run from softirq, we must use a work_struct to call
697 * module_free() from process context
698 */
699void bpf_jit_free(struct sk_filter *fp) 690void bpf_jit_free(struct sk_filter *fp)
700{ 691{
701 if (fp->bpf_func != sk_run_filter) { 692 if (fp->bpf_func != sk_run_filter)
702 struct work_struct *work = (struct work_struct *)fp->bpf_func; 693 module_free(NULL, fp->bpf_func);
703
704 INIT_WORK(work, jit_free_defer);
705 schedule_work(work);
706 }
707} 694}
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index d36a85ebb5e0..9c7be59e6f5a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -785,9 +785,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
785 break; 785 break;
786 } 786 }
787 if (proglen == oldproglen) { 787 if (proglen == oldproglen) {
788 image = module_alloc(max_t(unsigned int, 788 image = module_alloc(proglen);
789 proglen,
790 sizeof(struct work_struct)));
791 if (!image) 789 if (!image)
792 goto out; 790 goto out;
793 } 791 }
@@ -806,20 +804,8 @@ out:
806 return; 804 return;
807} 805}
808 806
809static void jit_free_defer(struct work_struct *arg)
810{
811 module_free(NULL, arg);
812}
813
814/* run from softirq, we must use a work_struct to call
815 * module_free() from process context
816 */
817void bpf_jit_free(struct sk_filter *fp) 807void bpf_jit_free(struct sk_filter *fp)
818{ 808{
819 if (fp->bpf_func != sk_run_filter) { 809 if (fp->bpf_func != sk_run_filter)
820 struct work_struct *work = (struct work_struct *)fp->bpf_func; 810 module_free(NULL, fp->bpf_func);
821
822 INIT_WORK(work, jit_free_defer);
823 schedule_work(work);
824 }
825} 811}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f66b54086ce5..79c216aa0e2b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/filter.h> 13#include <linux/filter.h>
14#include <linux/if_vlan.h> 14#include <linux/if_vlan.h>
15#include <linux/random.h>
15 16
16/* 17/*
17 * Conventions : 18 * Conventions :
@@ -144,6 +145,39 @@ static int pkt_type_offset(void)
144 return -1; 145 return -1;
145} 146}
146 147
148struct bpf_binary_header {
149 unsigned int pages;
150 /* Note : for security reasons, bpf code will follow a randomly
151 * sized amount of int3 instructions
152 */
153 u8 image[];
154};
155
156static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
157 u8 **image_ptr)
158{
159 unsigned int sz, hole;
160 struct bpf_binary_header *header;
161
162 /* Most of BPF filters are really small,
163 * but if some of them fill a page, allow at least
164 * 128 extra bytes to insert a random section of int3
165 */
166 sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
167 header = module_alloc(sz);
168 if (!header)
169 return NULL;
170
171 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
172
173 header->pages = sz / PAGE_SIZE;
174 hole = sz - (proglen + sizeof(*header));
175
176 /* insert a random number of int3 instructions before BPF code */
177 *image_ptr = &header->image[prandom_u32() % hole];
178 return header;
179}
180
147void bpf_jit_compile(struct sk_filter *fp) 181void bpf_jit_compile(struct sk_filter *fp)
148{ 182{
149 u8 temp[64]; 183 u8 temp[64];
@@ -153,6 +187,7 @@ void bpf_jit_compile(struct sk_filter *fp)
153 int t_offset, f_offset; 187 int t_offset, f_offset;
154 u8 t_op, f_op, seen = 0, pass; 188 u8 t_op, f_op, seen = 0, pass;
155 u8 *image = NULL; 189 u8 *image = NULL;
190 struct bpf_binary_header *header = NULL;
156 u8 *func; 191 u8 *func;
157 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ 192 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
158 unsigned int cleanup_addr; /* epilogue code offset */ 193 unsigned int cleanup_addr; /* epilogue code offset */
@@ -693,7 +728,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
693 if (unlikely(proglen + ilen > oldproglen)) { 728 if (unlikely(proglen + ilen > oldproglen)) {
694 pr_err("bpb_jit_compile fatal error\n"); 729 pr_err("bpb_jit_compile fatal error\n");
695 kfree(addrs); 730 kfree(addrs);
696 module_free(NULL, image); 731 module_free(NULL, header);
697 return; 732 return;
698 } 733 }
699 memcpy(image + proglen, temp, ilen); 734 memcpy(image + proglen, temp, ilen);
@@ -717,10 +752,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
717 break; 752 break;
718 } 753 }
719 if (proglen == oldproglen) { 754 if (proglen == oldproglen) {
720 image = module_alloc(max_t(unsigned int, 755 header = bpf_alloc_binary(proglen, &image);
721 proglen, 756 if (!header)
722 sizeof(struct work_struct)));
723 if (!image)
724 goto out; 757 goto out;
725 } 758 }
726 oldproglen = proglen; 759 oldproglen = proglen;
@@ -730,7 +763,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
730 bpf_jit_dump(flen, proglen, pass, image); 763 bpf_jit_dump(flen, proglen, pass, image);
731 764
732 if (image) { 765 if (image) {
733 bpf_flush_icache(image, image + proglen); 766 bpf_flush_icache(header, image + proglen);
767 set_memory_ro((unsigned long)header, header->pages);
734 fp->bpf_func = (void *)image; 768 fp->bpf_func = (void *)image;
735 } 769 }
736out: 770out:
@@ -738,20 +772,13 @@ out:
738 return; 772 return;
739} 773}
740 774
741static void jit_free_defer(struct work_struct *arg)
742{
743 module_free(NULL, arg);
744}
745
746/* run from softirq, we must use a work_struct to call
747 * module_free() from process context
748 */
749void bpf_jit_free(struct sk_filter *fp) 775void bpf_jit_free(struct sk_filter *fp)
750{ 776{
751 if (fp->bpf_func != sk_run_filter) { 777 if (fp->bpf_func != sk_run_filter) {
752 struct work_struct *work = (struct work_struct *)fp->bpf_func; 778 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
779 struct bpf_binary_header *header = (void *)addr;
753 780
754 INIT_WORK(work, jit_free_defer); 781 set_memory_rw(addr, header->pages);
755 schedule_work(work); 782 module_free(NULL, header);
756 } 783 }
757} 784}
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index db52f4414def..556656661d6b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1393,8 +1393,6 @@ static int at91_can_remove(struct platform_device *pdev)
1393 1393
1394 unregister_netdev(dev); 1394 unregister_netdev(dev);
1395 1395
1396 platform_set_drvdata(pdev, NULL);
1397
1398 iounmap(priv->reg_base); 1396 iounmap(priv->reg_base);
1399 1397
1400 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1398 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index d63b91904f82..6b6130b8bdc4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -234,7 +234,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
234 return 0; 234 return 0;
235 235
236exit_free_device: 236exit_free_device:
237 platform_set_drvdata(pdev, NULL);
238 free_c_can_dev(dev); 237 free_c_can_dev(dev);
239exit_iounmap: 238exit_iounmap:
240 iounmap(addr); 239 iounmap(addr);
@@ -255,7 +254,6 @@ static int c_can_plat_remove(struct platform_device *pdev)
255 struct resource *mem; 254 struct resource *mem;
256 255
257 unregister_c_can_dev(dev); 256 unregister_c_can_dev(dev);
258 platform_set_drvdata(pdev, NULL);
259 257
260 free_c_can_dev(dev); 258 free_c_can_dev(dev);
261 iounmap(priv->base); 259 iounmap(priv->base);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 769d29ed106d..4a40a186c8c8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1127,7 +1127,6 @@ static int flexcan_remove(struct platform_device *pdev)
1127 struct resource *mem; 1127 struct resource *mem;
1128 1128
1129 unregister_flexcandev(dev); 1129 unregister_flexcandev(dev);
1130 platform_set_drvdata(pdev, NULL);
1131 iounmap(priv->base); 1130 iounmap(priv->base);
1132 1131
1133 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1132 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 17fbc7a09224..9e9d0d628e7b 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1683,10 +1683,9 @@ static int grcan_probe(struct platform_device *ofdev)
1683 } 1683 }
1684 1684
1685 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 1685 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
1686 base = devm_request_and_ioremap(&ofdev->dev, res); 1686 base = devm_ioremap_resource(&ofdev->dev, res);
1687 if (!base) { 1687 if (IS_ERR(base)) {
1688 dev_err(&ofdev->dev, "couldn't map IO resource\n"); 1688 err = PTR_ERR(base);
1689 err = -EADDRNOTAVAIL;
1690 goto exit_error; 1689 goto exit_error;
1691 } 1690 }
1692 1691
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f21fc37ec578..3a349a22d5bc 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1001,7 +1001,6 @@ static int ti_hecc_remove(struct platform_device *pdev)
1001 iounmap(priv->base); 1001 iounmap(priv->base);
1002 release_mem_region(res->start, resource_size(res)); 1002 release_mem_region(res->start, resource_size(res));
1003 free_candev(ndev); 1003 free_candev(ndev);
1004 platform_set_drvdata(pdev, NULL);
1005 1004
1006 return 0; 1005 return 0;
1007} 1006}
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index adb4bf5eb4b4..ede8daa68275 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -723,25 +723,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
723 pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", 723 pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
724 dev->name, skb->len, inw(ioaddr + EL3_STATUS)); 724 dev->name, skb->len, inw(ioaddr + EL3_STATUS));
725 } 725 }
726#if 0
727#ifndef final_version
728 { /* Error-checking code, delete someday. */
729 ushort status = inw(ioaddr + EL3_STATUS);
730 if (status & 0x0001 && /* IRQ line active, missed one. */
731 inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
732 pr_debug("%s: Missed interrupt, status then %04x now %04x"
733 " Tx %2.2x Rx %4.4x.\n", dev->name, status,
734 inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
735 inw(ioaddr + RX_STATUS));
736 /* Fake interrupt trigger by masking, acknowledge interrupts. */
737 outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
738 outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
739 ioaddr + EL3_CMD);
740 outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
741 }
742 }
743#endif
744#endif
745 /* 726 /*
746 * We lock the driver against other processors. Note 727 * We lock the driver against other processors. Note
747 * we don't need to lock versus the IRQ as we suspended 728 * we don't need to lock versus the IRQ as we suspended
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 072c6f14e8fc..30e74211a755 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1473,7 +1473,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1473 1473
1474 if (pdev) { 1474 if (pdev) {
1475 vp->pm_state_valid = 1; 1475 vp->pm_state_valid = 1;
1476 pci_save_state(VORTEX_PCI(vp)); 1476 pci_save_state(pdev);
1477 acpi_set_WOL(dev); 1477 acpi_set_WOL(dev);
1478 } 1478 }
1479 retval = register_netdev(dev); 1479 retval = register_netdev(dev);
@@ -3233,21 +3233,20 @@ static void vortex_remove_one(struct pci_dev *pdev)
3233 vp = netdev_priv(dev); 3233 vp = netdev_priv(dev);
3234 3234
3235 if (vp->cb_fn_base) 3235 if (vp->cb_fn_base)
3236 pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); 3236 pci_iounmap(pdev, vp->cb_fn_base);
3237 3237
3238 unregister_netdev(dev); 3238 unregister_netdev(dev);
3239 3239
3240 if (VORTEX_PCI(vp)) { 3240 pci_set_power_state(pdev, PCI_D0); /* Go active */
3241 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ 3241 if (vp->pm_state_valid)
3242 if (vp->pm_state_valid) 3242 pci_restore_state(pdev);
3243 pci_restore_state(VORTEX_PCI(vp)); 3243 pci_disable_device(pdev);
3244 pci_disable_device(VORTEX_PCI(vp)); 3244
3245 }
3246 /* Should really use issue_and_wait() here */ 3245 /* Should really use issue_and_wait() here */
3247 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), 3246 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3248 vp->ioaddr + EL3_CMD); 3247 vp->ioaddr + EL3_CMD);
3249 3248
3250 pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); 3249 pci_iounmap(pdev, vp->ioaddr);
3251 3250
3252 pci_free_consistent(pdev, 3251 pci_free_consistent(pdev,
3253 sizeof(struct boom_rx_desc) * RX_RING_SIZE 3252 sizeof(struct boom_rx_desc) * RX_RING_SIZE
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b7894f8af9d1..219be1bf3cfc 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -702,19 +702,6 @@ static struct pci_driver acenic_pci_driver = {
702 .remove = acenic_remove_one, 702 .remove = acenic_remove_one,
703}; 703};
704 704
705static int __init acenic_init(void)
706{
707 return pci_register_driver(&acenic_pci_driver);
708}
709
710static void __exit acenic_exit(void)
711{
712 pci_unregister_driver(&acenic_pci_driver);
713}
714
715module_init(acenic_init);
716module_exit(acenic_exit);
717
718static void ace_free_descriptors(struct net_device *dev) 705static void ace_free_descriptors(struct net_device *dev)
719{ 706{
720 struct ace_private *ap = netdev_priv(dev); 707 struct ace_private *ap = netdev_priv(dev);
@@ -3199,3 +3186,5 @@ static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
3199 ap->name, offset); 3186 ap->name, offset);
3200 goto out; 3187 goto out;
3201} 3188}
3189
3190module_pci_driver(acenic_pci_driver);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 8e6b665a6726..bc71aec1159d 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1981,15 +1981,4 @@ static struct pci_driver amd8111e_driver = {
1981 .resume = amd8111e_resume 1981 .resume = amd8111e_resume
1982}; 1982};
1983 1983
1984static int __init amd8111e_init(void) 1984module_pci_driver(amd8111e_driver);
1985{
1986 return pci_register_driver(&amd8111e_driver);
1987}
1988
1989static void __exit amd8111e_cleanup(void)
1990{
1991 pci_unregister_driver(&amd8111e_driver);
1992}
1993
1994module_init(amd8111e_init);
1995module_exit(amd8111e_cleanup);
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index f36bbd6d5085..714dcfe3a469 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1030,14 +1030,12 @@ static void bmac_set_multicast(struct net_device *dev)
1030 rx_cfg |= RxPromiscEnable; 1030 rx_cfg |= RxPromiscEnable;
1031 bmwrite(dev, RXCFG, rx_cfg); 1031 bmwrite(dev, RXCFG, rx_cfg);
1032 } else { 1032 } else {
1033 u16 hash_table[4]; 1033 u16 hash_table[4] = { 0 };
1034 1034
1035 rx_cfg = bmread(dev, RXCFG); 1035 rx_cfg = bmread(dev, RXCFG);
1036 rx_cfg &= ~RxPromiscEnable; 1036 rx_cfg &= ~RxPromiscEnable;
1037 bmwrite(dev, RXCFG, rx_cfg); 1037 bmwrite(dev, RXCFG, rx_cfg);
1038 1038
1039 for(i = 0; i < 4; i++) hash_table[i] = 0;
1040
1041 netdev_for_each_mc_addr(ha, dev) { 1039 netdev_for_each_mc_addr(ha, dev) {
1042 crc = ether_crc_le(6, ha->addr); 1040 crc = ether_crc_le(6, ha->addr);
1043 crc >>= 26; 1041 crc >>= 26;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0ba900762b13..786a87483298 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2755,27 +2755,4 @@ static struct pci_driver atl1c_driver = {
2755 .driver.pm = &atl1c_pm_ops, 2755 .driver.pm = &atl1c_pm_ops,
2756}; 2756};
2757 2757
2758/** 2758module_pci_driver(atl1c_driver);
2759 * atl1c_init_module - Driver Registration Routine
2760 *
2761 * atl1c_init_module is the first routine called when the driver is
2762 * loaded. All it does is register with the PCI subsystem.
2763 */
2764static int __init atl1c_init_module(void)
2765{
2766 return pci_register_driver(&atl1c_driver);
2767}
2768
2769/**
2770 * atl1c_exit_module - Driver Exit Cleanup Routine
2771 *
2772 * atl1c_exit_module is called just before the driver is removed
2773 * from memory.
2774 */
2775static void __exit atl1c_exit_module(void)
2776{
2777 pci_unregister_driver(&atl1c_driver);
2778}
2779
2780module_init(atl1c_init_module);
2781module_exit(atl1c_exit_module);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 0688bb82b442..895f5377ad1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2489,27 +2489,4 @@ static struct pci_driver atl1e_driver = {
2489 .err_handler = &atl1e_err_handler 2489 .err_handler = &atl1e_err_handler
2490}; 2490};
2491 2491
2492/** 2492module_pci_driver(atl1e_driver);
2493 * atl1e_init_module - Driver Registration Routine
2494 *
2495 * atl1e_init_module is the first routine called when the driver is
2496 * loaded. All it does is register with the PCI subsystem.
2497 */
2498static int __init atl1e_init_module(void)
2499{
2500 return pci_register_driver(&atl1e_driver);
2501}
2502
2503/**
2504 * atl1e_exit_module - Driver Exit Cleanup Routine
2505 *
2506 * atl1e_exit_module is called just before the driver is removed
2507 * from memory.
2508 */
2509static void __exit atl1e_exit_module(void)
2510{
2511 pci_unregister_driver(&atl1e_driver);
2512}
2513
2514module_init(atl1e_init_module);
2515module_exit(atl1e_exit_module);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index fa0915f3999b..538211d6f7d9 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3145,31 +3145,6 @@ static struct pci_driver atl1_driver = {
3145 .driver.pm = &atl1_pm_ops, 3145 .driver.pm = &atl1_pm_ops,
3146}; 3146};
3147 3147
3148/**
3149 * atl1_exit_module - Driver Exit Cleanup Routine
3150 *
3151 * atl1_exit_module is called just before the driver is removed
3152 * from memory.
3153 */
3154static void __exit atl1_exit_module(void)
3155{
3156 pci_unregister_driver(&atl1_driver);
3157}
3158
3159/**
3160 * atl1_init_module - Driver Registration Routine
3161 *
3162 * atl1_init_module is the first routine called when the driver is
3163 * loaded. All it does is register with the PCI subsystem.
3164 */
3165static int __init atl1_init_module(void)
3166{
3167 return pci_register_driver(&atl1_driver);
3168}
3169
3170module_init(atl1_init_module);
3171module_exit(atl1_exit_module);
3172
3173struct atl1_stats { 3148struct atl1_stats {
3174 char stat_string[ETH_GSTRING_LEN]; 3149 char stat_string[ETH_GSTRING_LEN];
3175 int sizeof_stat; 3150 int sizeof_stat;
@@ -3705,3 +3680,5 @@ static const struct ethtool_ops atl1_ethtool_ops = {
3705 .get_ethtool_stats = atl1_get_ethtool_stats, 3680 .get_ethtool_stats = atl1_get_ethtool_stats,
3706 .get_sset_count = atl1_get_sset_count, 3681 .get_sset_count = atl1_get_sset_count,
3707}; 3682};
3683
3684module_pci_driver(atl1_driver);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 5d204492c603..1a1b23eb13dc 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = {
8764 .err_handler = &bnx2_err_handler, 8764 .err_handler = &bnx2_err_handler,
8765}; 8765};
8766 8766
8767static int __init bnx2_init(void) 8767module_pci_driver(bnx2_pci_driver);
8768{
8769 return pci_register_driver(&bnx2_pci_driver);
8770}
8771
8772static void __exit bnx2_cleanup(void)
8773{
8774 pci_unregister_driver(&bnx2_pci_driver);
8775}
8776
8777module_init(bnx2_init);
8778module_exit(bnx2_cleanup);
8779
8780
8781
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 3dba2a70a00e..946450d6d988 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1937,6 +1937,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1937void bnx2x_update_coalesce(struct bnx2x *bp); 1937void bnx2x_update_coalesce(struct bnx2x *bp);
1938int bnx2x_get_cur_phy_idx(struct bnx2x *bp); 1938int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
1939 1939
1940bool bnx2x_port_after_undi(struct bnx2x *bp);
1941
1940static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 1942static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1941 int wait) 1943 int wait)
1942{ 1944{
@@ -2137,6 +2139,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2137#define ATTN_HARD_WIRED_MASK 0xff00 2139#define ATTN_HARD_WIRED_MASK 0xff00
2138#define ATTENTION_ID 4 2140#define ATTENTION_ID 4
2139 2141
2142#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
2143 IS_MF_FCOE_AFEX(bp))
2140 2144
2141/* stuff added to make the code fit 80Col */ 2145/* stuff added to make the code fit 80Col */
2142 2146
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index be59ec4b2c30..c80f1d26f40d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2183,6 +2183,8 @@ alloc_mem_err:
2183/* send load request to mcp and analyze response */ 2183/* send load request to mcp and analyze response */
2184static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) 2184static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185{ 2185{
2186 u32 param;
2187
2186 /* init fw_seq */ 2188 /* init fw_seq */
2187 bp->fw_seq = 2189 bp->fw_seq =
2188 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 2190 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -2195,9 +2197,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2195 DRV_PULSE_SEQ_MASK); 2197 DRV_PULSE_SEQ_MASK);
2196 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2198 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2197 2199
2200 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2201
2202 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2203 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2204
2198 /* load request */ 2205 /* load request */
2199 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 2206 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2200 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2201 2207
2202 /* if mcp fails to respond we must abort */ 2208 /* if mcp fails to respond we must abort */
2203 if (!(*load_code)) { 2209 if (!(*load_code)) {
@@ -4604,6 +4610,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4604{ 4610{
4605 struct bnx2x *bp = netdev_priv(dev); 4611 struct bnx2x *bp = netdev_priv(dev);
4606 u32 flags = bp->flags; 4612 u32 flags = bp->flags;
4613 u32 changes;
4607 bool bnx2x_reload = false; 4614 bool bnx2x_reload = false;
4608 4615
4609 if (features & NETIF_F_LRO) 4616 if (features & NETIF_F_LRO)
@@ -4628,10 +4635,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4628 } 4635 }
4629 } 4636 }
4630 4637
4631 if (flags ^ bp->flags) { 4638 changes = flags ^ bp->flags;
4632 bp->flags = flags; 4639
4640 /* if GRO is changed while LRO is enabled, dont force a reload */
4641 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4642 changes &= ~GRO_ENABLE_FLAG;
4643
4644 if (changes)
4633 bnx2x_reload = true; 4645 bnx2x_reload = true;
4634 } 4646
4647 bp->flags = flags;
4635 4648
4636 if (bnx2x_reload) { 4649 if (bnx2x_reload) {
4637 if (bp->recovery_state == BNX2X_RECOVERY_DONE) 4650 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ce1a91618677..32aa88f520dc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1921,6 +1921,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
1921 "link_test (online) " 1921 "link_test (online) "
1922}; 1922};
1923 1923
1924enum {
1925 BNX2X_PRI_FLAG_ISCSI,
1926 BNX2X_PRI_FLAG_FCOE,
1927 BNX2X_PRI_FLAG_STORAGE,
1928 BNX2X_PRI_FLAG_LEN,
1929};
1930
1931static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
1932 "iSCSI offload support",
1933 "FCoE offload support",
1934 "Storage only interface"
1935};
1936
1924static u32 bnx2x_eee_to_adv(u32 eee_adv) 1937static u32 bnx2x_eee_to_adv(u32 eee_adv)
1925{ 1938{
1926 u32 modes = 0; 1939 u32 modes = 0;
@@ -2978,32 +2991,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp)
2978static int bnx2x_get_sset_count(struct net_device *dev, int stringset) 2991static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2979{ 2992{
2980 struct bnx2x *bp = netdev_priv(dev); 2993 struct bnx2x *bp = netdev_priv(dev);
2981 int i, num_stats; 2994 int i, num_strings = 0;
2982 2995
2983 switch (stringset) { 2996 switch (stringset) {
2984 case ETH_SS_STATS: 2997 case ETH_SS_STATS:
2985 if (is_multi(bp)) { 2998 if (is_multi(bp)) {
2986 num_stats = bnx2x_num_stat_queues(bp) * 2999 num_strings = bnx2x_num_stat_queues(bp) *
2987 BNX2X_NUM_Q_STATS; 3000 BNX2X_NUM_Q_STATS;
2988 } else 3001 } else
2989 num_stats = 0; 3002 num_strings = 0;
2990 if (IS_MF_MODE_STAT(bp)) { 3003 if (IS_MF_MODE_STAT(bp)) {
2991 for (i = 0; i < BNX2X_NUM_STATS; i++) 3004 for (i = 0; i < BNX2X_NUM_STATS; i++)
2992 if (IS_FUNC_STAT(i)) 3005 if (IS_FUNC_STAT(i))
2993 num_stats++; 3006 num_strings++;
2994 } else 3007 } else
2995 num_stats += BNX2X_NUM_STATS; 3008 num_strings += BNX2X_NUM_STATS;
2996 3009
2997 return num_stats; 3010 return num_strings;
2998 3011
2999 case ETH_SS_TEST: 3012 case ETH_SS_TEST:
3000 return BNX2X_NUM_TESTS(bp); 3013 return BNX2X_NUM_TESTS(bp);
3001 3014
3015 case ETH_SS_PRIV_FLAGS:
3016 return BNX2X_PRI_FLAG_LEN;
3017
3002 default: 3018 default:
3003 return -EINVAL; 3019 return -EINVAL;
3004 } 3020 }
3005} 3021}
3006 3022
3023static u32 bnx2x_get_private_flags(struct net_device *dev)
3024{
3025 struct bnx2x *bp = netdev_priv(dev);
3026 u32 flags = 0;
3027
3028 flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
3029 flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
3030 flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
3031
3032 return flags;
3033}
3034
3007static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 3035static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3008{ 3036{
3009 struct bnx2x *bp = netdev_priv(dev); 3037 struct bnx2x *bp = netdev_priv(dev);
@@ -3045,6 +3073,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3045 start = 4; 3073 start = 4;
3046 memcpy(buf, bnx2x_tests_str_arr + start, 3074 memcpy(buf, bnx2x_tests_str_arr + start,
3047 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); 3075 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
3076 break;
3077
3078 case ETH_SS_PRIV_FLAGS:
3079 memcpy(buf, bnx2x_private_arr,
3080 ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
3081 break;
3048 } 3082 }
3049} 3083}
3050 3084
@@ -3112,11 +3146,6 @@ static int bnx2x_set_phys_id(struct net_device *dev,
3112 return -EAGAIN; 3146 return -EAGAIN;
3113 } 3147 }
3114 3148
3115 if (!bp->port.pmf) {
3116 DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
3117 return -EOPNOTSUPP;
3118 }
3119
3120 switch (state) { 3149 switch (state) {
3121 case ETHTOOL_ID_ACTIVE: 3150 case ETHTOOL_ID_ACTIVE:
3122 return 1; /* cycle on/off once per second */ 3151 return 1; /* cycle on/off once per second */
@@ -3445,6 +3474,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
3445 .set_pauseparam = bnx2x_set_pauseparam, 3474 .set_pauseparam = bnx2x_set_pauseparam,
3446 .self_test = bnx2x_self_test, 3475 .self_test = bnx2x_self_test,
3447 .get_sset_count = bnx2x_get_sset_count, 3476 .get_sset_count = bnx2x_get_sset_count,
3477 .get_priv_flags = bnx2x_get_private_flags,
3448 .get_strings = bnx2x_get_strings, 3478 .get_strings = bnx2x_get_strings,
3449 .set_phys_id = bnx2x_set_phys_id, 3479 .set_phys_id = bnx2x_set_phys_id,
3450 .get_ethtool_stats = bnx2x_get_ethtool_stats, 3480 .get_ethtool_stats = bnx2x_get_ethtool_stats,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 12f00a40cdf0..5ef3f964e544 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1323,6 +1323,8 @@ struct drv_func_mb {
1323 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 1323 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
1324 1324
1325 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a 1325 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
1326 #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
1327
1326 u32 fw_mb_header; 1328 u32 fw_mb_header;
1327 #define FW_MSG_CODE_MASK 0xffff0000 1329 #define FW_MSG_CODE_MASK 0xffff0000
1328 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 1330 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b4c9dea93a53..7ed9cdfa115e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9780,6 +9780,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9780 return rc; 9780 return rc;
9781} 9781}
9782 9782
9783bool bnx2x_port_after_undi(struct bnx2x *bp)
9784{
9785 struct bnx2x_prev_path_list *entry;
9786 bool val;
9787
9788 down(&bnx2x_prev_sem);
9789
9790 entry = bnx2x_prev_path_get_entry(bp);
9791 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9792
9793 up(&bnx2x_prev_sem);
9794
9795 return val;
9796}
9797
9783static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 9798static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9784{ 9799{
9785 struct bnx2x_prev_path_list *tmp_list; 9800 struct bnx2x_prev_path_list *tmp_list;
@@ -10036,7 +10051,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10036{ 10051{
10037 int time_counter = 10; 10052 int time_counter = 10;
10038 u32 rc, fw, hw_lock_reg, hw_lock_val; 10053 u32 rc, fw, hw_lock_reg, hw_lock_val;
10039 struct bnx2x_prev_path_list *prev_list;
10040 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10054 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10041 10055
10042 /* clear hw from errors which may have resulted from an interrupted 10056 /* clear hw from errors which may have resulted from an interrupted
@@ -10107,8 +10121,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10107 } 10121 }
10108 10122
10109 /* Mark function if its port was used to boot from SAN */ 10123 /* Mark function if its port was used to boot from SAN */
10110 prev_list = bnx2x_prev_path_get_entry(bp); 10124 if (bnx2x_port_after_undi(bp))
10111 if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
10112 bp->link_params.feature_config_flags |= 10125 bp->link_params.feature_config_flags |=
10113 FEATURE_CONFIG_BOOT_FROM_SAN; 10126 FEATURE_CONFIG_BOOT_FROM_SAN;
10114 10127
@@ -12747,19 +12760,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12747 return 0; 12760 return 0;
12748} 12761}
12749 12762
12750static void bnx2x_eeh_recover(struct bnx2x *bp)
12751{
12752 u32 val;
12753
12754 mutex_init(&bp->port.phy_mutex);
12755
12756
12757 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12758 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12759 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12760 BNX2X_ERR("BAD MCP validity signature\n");
12761}
12762
12763/** 12763/**
12764 * bnx2x_io_error_detected - called when PCI error is detected 12764 * bnx2x_io_error_detected - called when PCI error is detected
12765 * @pdev: Pointer to PCI device 12765 * @pdev: Pointer to PCI device
@@ -12828,6 +12828,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12828 12828
12829 if (netif_running(dev)) { 12829 if (netif_running(dev)) {
12830 BNX2X_ERR("IO slot reset --> driver unload\n"); 12830 BNX2X_ERR("IO slot reset --> driver unload\n");
12831
12832 /* MCP should have been reset; Need to wait for validity */
12833 bnx2x_init_shmem(bp);
12834
12831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 12835 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
12832 u32 v; 12836 u32 v;
12833 12837
@@ -12886,8 +12890,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12886 12890
12887 rtnl_lock(); 12891 rtnl_lock();
12888 12892
12889 bnx2x_eeh_recover(bp);
12890
12891 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 12893 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12892 DRV_MSG_SEQ_NUMBER_MASK; 12894 DRV_MSG_SEQ_NUMBER_MASK;
12893 12895
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1f2dd928888a..c15a92d012ef 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2320,6 +2320,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
2320 tg3_phy_toggle_auxctl_smdsp(tp, false); 2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2321} 2321}
2322 2322
2323static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2324{
2325 u32 val;
2326 struct ethtool_eee *dest = &tp->eee;
2327
2328 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2329 return;
2330
2331 if (eee)
2332 dest = eee;
2333
2334 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2335 return;
2336
2337 /* Pull eee_active */
2338 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2339 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2340 dest->eee_active = 1;
2341 } else
2342 dest->eee_active = 0;
2343
2344 /* Pull lp advertised settings */
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2346 return;
2347 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2348
2349 /* Pull advertised and eee_enabled settings */
2350 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2351 return;
2352 dest->eee_enabled = !!val;
2353 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2354
2355 /* Pull tx_lpi_enabled */
2356 val = tr32(TG3_CPMU_EEE_MODE);
2357 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2358
2359 /* Pull lpi timer value */
2360 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2361}
2362
2323static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2363static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2324{ 2364{
2325 u32 val; 2365 u32 val;
@@ -2343,11 +2383,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2343 2383
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2384 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2345 2385
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 2386 tg3_eee_pull_config(tp, NULL);
2347 TG3_CL45_D7_EEERES_STAT, &val); 2387 if (tp->eee.eee_active)
2348
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 tp->setlpicnt = 2; 2388 tp->setlpicnt = 2;
2352 } 2389 }
2353 2390
@@ -4269,6 +4306,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4269 /* Advertise 1000-BaseT EEE ability */ 4306 /* Advertise 1000-BaseT EEE ability */
4270 if (advertise & ADVERTISED_1000baseT_Full) 4307 if (advertise & ADVERTISED_1000baseT_Full)
4271 val |= MDIO_AN_EEE_ADV_1000T; 4308 val |= MDIO_AN_EEE_ADV_1000T;
4309
4310 if (!tp->eee.eee_enabled) {
4311 val = 0;
4312 tp->eee.advertised = 0;
4313 } else {
4314 tp->eee.advertised = advertise &
4315 (ADVERTISED_100baseT_Full |
4316 ADVERTISED_1000baseT_Full);
4317 }
4318
4272 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4319 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4273 if (err) 4320 if (err)
4274 val = 0; 4321 val = 0;
@@ -4513,26 +4560,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
4513 4560
4514static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4561static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4515{ 4562{
4516 u32 val; 4563 struct ethtool_eee eee;
4517 u32 tgtadv = 0;
4518 u32 advertising = tp->link_config.advertising;
4519 4564
4520 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4565 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4521 return true; 4566 return true;
4522 4567
4523 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 4568 tg3_eee_pull_config(tp, &eee);
4524 return false;
4525
4526 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4527
4528
4529 if (advertising & ADVERTISED_100baseT_Full)
4530 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4531 if (advertising & ADVERTISED_1000baseT_Full)
4532 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4533 4569
4534 if (val != tgtadv) 4570 if (tp->eee.eee_enabled) {
4535 return false; 4571 if (tp->eee.advertised != eee.advertised ||
4572 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4573 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4574 return false;
4575 } else {
4576 /* EEE is disabled but we're advertising */
4577 if (eee.advertised)
4578 return false;
4579 }
4536 4580
4537 return true; 4581 return true;
4538} 4582}
@@ -4633,6 +4677,42 @@ static void tg3_clear_mac_status(struct tg3 *tp)
4633 udelay(40); 4677 udelay(40);
4634} 4678}
4635 4679
4680static void tg3_setup_eee(struct tg3 *tp)
4681{
4682 u32 val;
4683
4684 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4685 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4686 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4687 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4688
4689 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4690
4691 tw32_f(TG3_CPMU_EEE_CTRL,
4692 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4693
4694 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4695 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4696 TG3_CPMU_EEEMD_LPI_IN_RX |
4697 TG3_CPMU_EEEMD_EEE_ENABLE;
4698
4699 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4700 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4701
4702 if (tg3_flag(tp, ENABLE_APE))
4703 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4704
4705 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4706
4707 tw32_f(TG3_CPMU_EEE_DBTMR1,
4708 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4709 (tp->eee.tx_lpi_timer & 0xffff));
4710
4711 tw32_f(TG3_CPMU_EEE_DBTMR2,
4712 TG3_CPMU_DBTMR2_APE_TX_2047US |
4713 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4714}
4715
4636static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4716static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4637{ 4717{
4638 bool current_link_up; 4718 bool current_link_up;
@@ -4799,8 +4879,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4799 */ 4879 */
4800 if (!eee_config_ok && 4880 if (!eee_config_ok &&
4801 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4881 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4802 !force_reset) 4882 !force_reset) {
4883 tg3_setup_eee(tp);
4803 tg3_phy_reset(tp); 4884 tg3_phy_reset(tp);
4885 }
4804 } else { 4886 } else {
4805 if (!(bmcr & BMCR_ANENABLE) && 4887 if (!(bmcr & BMCR_ANENABLE) &&
4806 tp->link_config.speed == current_speed && 4888 tp->link_config.speed == current_speed &&
@@ -9484,46 +9566,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9484 if (tg3_flag(tp, INIT_COMPLETE)) 9566 if (tg3_flag(tp, INIT_COMPLETE))
9485 tg3_abort_hw(tp, 1); 9567 tg3_abort_hw(tp, 1);
9486 9568
9487 /* Enable MAC control of LPI */
9488 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9489 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9490 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9491 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9492 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9493
9494 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9495
9496 tw32_f(TG3_CPMU_EEE_CTRL,
9497 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9498
9499 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9500 TG3_CPMU_EEEMD_LPI_IN_TX |
9501 TG3_CPMU_EEEMD_LPI_IN_RX |
9502 TG3_CPMU_EEEMD_EEE_ENABLE;
9503
9504 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9505 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9506
9507 if (tg3_flag(tp, ENABLE_APE))
9508 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9509
9510 tw32_f(TG3_CPMU_EEE_MODE, val);
9511
9512 tw32_f(TG3_CPMU_EEE_DBTMR1,
9513 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9514 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9515
9516 tw32_f(TG3_CPMU_EEE_DBTMR2,
9517 TG3_CPMU_DBTMR2_APE_TX_2047US |
9518 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9519 }
9520
9521 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9569 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9522 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9570 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9523 tg3_phy_pull_config(tp); 9571 tg3_phy_pull_config(tp);
9572 tg3_eee_pull_config(tp, NULL);
9524 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9573 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9525 } 9574 }
9526 9575
9576 /* Enable MAC control of LPI */
9577 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9578 tg3_setup_eee(tp);
9579
9527 if (reset_phy) 9580 if (reset_phy)
9528 tg3_phy_reset(tp); 9581 tg3_phy_reset(tp);
9529 9582
@@ -13602,6 +13655,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13602 return 0; 13655 return 0;
13603} 13656}
13604 13657
13658static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13659{
13660 struct tg3 *tp = netdev_priv(dev);
13661
13662 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13663 netdev_warn(tp->dev, "Board does not support EEE!\n");
13664 return -EOPNOTSUPP;
13665 }
13666
13667 if (edata->advertised != tp->eee.advertised) {
13668 netdev_warn(tp->dev,
13669 "Direct manipulation of EEE advertisement is not supported\n");
13670 return -EINVAL;
13671 }
13672
13673 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13674 netdev_warn(tp->dev,
13675 "Maximal Tx Lpi timer supported is %#x(u)\n",
13676 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13677 return -EINVAL;
13678 }
13679
13680 tp->eee = *edata;
13681
13682 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13683 tg3_warn_mgmt_link_flap(tp);
13684
13685 if (netif_running(tp->dev)) {
13686 tg3_full_lock(tp, 0);
13687 tg3_setup_eee(tp);
13688 tg3_phy_reset(tp);
13689 tg3_full_unlock(tp);
13690 }
13691
13692 return 0;
13693}
13694
13695static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13696{
13697 struct tg3 *tp = netdev_priv(dev);
13698
13699 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13700 netdev_warn(tp->dev,
13701 "Board does not support EEE!\n");
13702 return -EOPNOTSUPP;
13703 }
13704
13705 *edata = tp->eee;
13706 return 0;
13707}
13708
13605static const struct ethtool_ops tg3_ethtool_ops = { 13709static const struct ethtool_ops tg3_ethtool_ops = {
13606 .get_settings = tg3_get_settings, 13710 .get_settings = tg3_get_settings,
13607 .set_settings = tg3_set_settings, 13711 .set_settings = tg3_set_settings,
@@ -13635,6 +13739,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
13635 .get_channels = tg3_get_channels, 13739 .get_channels = tg3_get_channels,
13636 .set_channels = tg3_set_channels, 13740 .set_channels = tg3_set_channels,
13637 .get_ts_info = tg3_get_ts_info, 13741 .get_ts_info = tg3_get_ts_info,
13742 .get_eee = tg3_get_eee,
13743 .set_eee = tg3_set_eee,
13638}; 13744};
13639 13745
13640static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 13746static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
@@ -14983,9 +15089,18 @@ static int tg3_phy_probe(struct tg3 *tp)
14983 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15089 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14984 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15090 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14985 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15091 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14986 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) 15092 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
14987 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15093 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14988 15094
15095 tp->eee.supported = SUPPORTED_100baseT_Full |
15096 SUPPORTED_1000baseT_Full;
15097 tp->eee.advertised = ADVERTISED_100baseT_Full |
15098 ADVERTISED_1000baseT_Full;
15099 tp->eee.eee_enabled = 1;
15100 tp->eee.tx_lpi_enabled = 1;
15101 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15102 }
15103
14989 tg3_phy_init_link_config(tp); 15104 tg3_phy_init_link_config(tp);
14990 15105
14991 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15106 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
@@ -17726,15 +17841,4 @@ static struct pci_driver tg3_driver = {
17726 .driver.pm = &tg3_pm_ops, 17841 .driver.pm = &tg3_pm_ops,
17727}; 17842};
17728 17843
17729static int __init tg3_init(void) 17844module_pci_driver(tg3_driver);
17730{
17731 return pci_register_driver(&tg3_driver);
17732}
17733
17734static void __exit tg3_cleanup(void)
17735{
17736 pci_unregister_driver(&tg3_driver);
17737}
17738
17739module_init(tg3_init);
17740module_exit(tg3_cleanup);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 9b2d3ac2474a..2530c20dd823 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1175,6 +1175,7 @@
1175#define TG3_CPMU_EEE_DBTMR1 0x000036b4 1175#define TG3_CPMU_EEE_DBTMR1 0x000036b4
1176#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1176#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1177#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff 1177#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff
1178#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff
1178#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1179#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1179#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 1180#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1180#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff 1181#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff
@@ -3371,6 +3372,7 @@ struct tg3 {
3371 unsigned int irq_cnt; 3372 unsigned int irq_cnt;
3372 3373
3373 struct ethtool_coalesce coal; 3374 struct ethtool_coalesce coal;
3375 struct ethtool_eee eee;
3374 3376
3375 /* firmware info */ 3377 /* firmware info */
3376 const char *fw_needed; 3378 const char *fw_needed;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index e423f82da490..b7d8127c198f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -164,7 +164,8 @@ struct bfa_ioc_attr {
164 u8 port_mode; /*!< enum bfa_mode */ 164 u8 port_mode; /*!< enum bfa_mode */
165 u8 cap_bm; /*!< capability */ 165 u8 cap_bm; /*!< capability */
166 u8 port_mode_cfg; /*!< enum bfa_mode */ 166 u8 port_mode_cfg; /*!< enum bfa_mode */
167 u8 rsvd[4]; /*!< 64bit align */ 167 u8 def_fn; /*!< 1 if default fn */
168 u8 rsvd[3]; /*!< 64bit align */
168}; 169};
169 170
170/* Adapter capability mask definition */ 171/* Adapter capability mask definition */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index f2b73ffa9122..6f3cac060f29 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2371,7 +2371,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2371 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); 2371 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2372 2372
2373 ioc_attr->state = bfa_ioc_get_state(ioc); 2373 ioc_attr->state = bfa_ioc_get_state(ioc);
2374 ioc_attr->port_id = ioc->port_id; 2374 ioc_attr->port_id = bfa_ioc_portid(ioc);
2375 ioc_attr->port_mode = ioc->port_mode; 2375 ioc_attr->port_mode = ioc->port_mode;
2376 2376
2377 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; 2377 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
@@ -2381,8 +2381,9 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2381 2381
2382 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 2382 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2383 2383
2384 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; 2384 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2385 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; 2385 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2386 ioc_attr->def_fn = bfa_ioc_is_default(ioc);
2386 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2387 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2387} 2388}
2388 2389
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 63a85e555df8..f04e0aab25b4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -222,6 +222,8 @@ struct bfa_ioc_hwif {
222#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) 222#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
223#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 223#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
224#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) 224#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
225#define bfa_ioc_is_default(__ioc) \
226 (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
225#define bfa_ioc_fetch_stats(__ioc, __stats) \ 227#define bfa_ioc_fetch_stats(__ioc, __stats) \
226 (((__stats)->drv_stats) = (__ioc)->stats) 228 (((__stats)->drv_stats) = (__ioc)->stats)
227#define bfa_ioc_clr_stats(__ioc) \ 229#define bfa_ioc_clr_stats(__ioc) \
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 25dae757e9c4..f1eafc409bbd 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -455,6 +455,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
455void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); 455void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
456void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, 456void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
457 struct bfi_msgq_mhdr *msghdr); 457 struct bfi_msgq_mhdr *msghdr);
458void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
459 struct bfi_msgq_mhdr *msghdr);
458 460
459/* APIs for BNA */ 461/* APIs for BNA */
460void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, 462void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index db14f69d63bc..3ca77fad4851 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
298 case BFI_ENET_I2H_RSS_ENABLE_RSP: 298 case BFI_ENET_I2H_RSS_ENABLE_RSP:
299 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: 299 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
300 case BFI_ENET_I2H_RX_DEFAULT_RSP: 300 case BFI_ENET_I2H_RX_DEFAULT_RSP:
301 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
302 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: 301 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
303 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: 302 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
304 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: 303 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
@@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
311 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); 310 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
312 break; 311 break;
313 312
313 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
314 bna_rx_from_rid(bna, msghdr->enet_id, rx);
315 if (rx)
316 bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
317 break;
318
314 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: 319 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
315 bna_rx_from_rid(bna, msghdr->enet_id, rx); 320 bna_rx_from_rid(bna, msghdr->enet_id, rx);
316 if (rx) 321 if (rx)
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index ea6f4a036401..57cd1bff59f1 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -711,6 +711,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
711} 711}
712 712
713void 713void
714bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr)
716{
717 struct bfi_enet_rsp *rsp =
718 (struct bfi_enet_rsp *)msghdr;
719
720 if (rsp->error) {
721 /* Clear ucast from cache */
722 rxf->ucast_active_set = 0;
723 }
724
725 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
726}
727
728void
714bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, 729bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr) 730 struct bfi_msgq_mhdr *msghdr)
716{ 731{
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 07f7ef05c3f2..b78e69e0e52a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2624,6 +2624,9 @@ bnad_stop(struct net_device *netdev)
2624 bnad_destroy_tx(bnad, 0); 2624 bnad_destroy_tx(bnad, 0);
2625 bnad_destroy_rx(bnad, 0); 2625 bnad_destroy_rx(bnad, 0);
2626 2626
2627 /* These config flags are cleared in the hardware */
2628 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
2629
2627 /* Synchronize mailbox IRQ */ 2630 /* Synchronize mailbox IRQ */
2628 bnad_mbox_irq_sync(bnad); 2631 bnad_mbox_irq_sync(bnad);
2629 2632
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index c1d0bc059bfd..aefee77523f2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.1.2.1" 74#define BNAD_VERSION "3.2.21.1"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 14ca9317c915..c37f706d9992 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
37 37
38extern char bfa_version[]; 38extern char bfa_version[];
39 39
40#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin" 40#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
41#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" 41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ 42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
43 43
44#pragma pack(1) 44#pragma pack(1)
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 9624cfe7df57..d7048db9863d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1351,22 +1351,11 @@ static void remove_one(struct pci_dev *pdev)
1351 t1_sw_reset(pdev); 1351 t1_sw_reset(pdev);
1352} 1352}
1353 1353
1354static struct pci_driver driver = { 1354static struct pci_driver cxgb_pci_driver = {
1355 .name = DRV_NAME, 1355 .name = DRV_NAME,
1356 .id_table = t1_pci_tbl, 1356 .id_table = t1_pci_tbl,
1357 .probe = init_one, 1357 .probe = init_one,
1358 .remove = remove_one, 1358 .remove = remove_one,
1359}; 1359};
1360 1360
1361static int __init t1_init_module(void) 1361module_pci_driver(cxgb_pci_driver);
1362{
1363 return pci_register_driver(&driver);
1364}
1365
1366static void __exit t1_cleanup_module(void)
1367{
1368 pci_unregister_driver(&driver);
1369}
1370
1371module_init(t1_init_module);
1372module_exit(t1_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 0c96e5fe99cc..4058b856eb71 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1246,6 +1246,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1246 struct tid_range stid_range, tid_range; 1246 struct tid_range stid_range, tid_range;
1247 struct mtutab mtutab; 1247 struct mtutab mtutab;
1248 unsigned int l2t_capacity; 1248 unsigned int l2t_capacity;
1249 struct l2t_data *l2td;
1249 1250
1250 t = kzalloc(sizeof(*t), GFP_KERNEL); 1251 t = kzalloc(sizeof(*t), GFP_KERNEL);
1251 if (!t) 1252 if (!t)
@@ -1261,8 +1262,8 @@ int cxgb3_offload_activate(struct adapter *adapter)
1261 goto out_free; 1262 goto out_free;
1262 1263
1263 err = -ENOMEM; 1264 err = -ENOMEM;
1264 RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); 1265 l2td = t3_init_l2t(l2t_capacity);
1265 if (!L2DATA(dev)) 1266 if (!l2td)
1266 goto out_free; 1267 goto out_free;
1267 1268
1268 natids = min(tid_range.num / 2, MAX_ATIDS); 1269 natids = min(tid_range.num / 2, MAX_ATIDS);
@@ -1279,6 +1280,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1279 INIT_LIST_HEAD(&t->list_node); 1280 INIT_LIST_HEAD(&t->list_node);
1280 t->dev = dev; 1281 t->dev = dev;
1281 1282
1283 RCU_INIT_POINTER(dev->l2opt, l2td);
1282 T3C_DATA(dev) = t; 1284 T3C_DATA(dev) = t;
1283 dev->recv = process_rx; 1285 dev->recv = process_rx;
1284 dev->neigh_update = t3_l2t_update; 1286 dev->neigh_update = t3_l2t_update;
@@ -1294,8 +1296,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1294 return 0; 1296 return 0;
1295 1297
1296out_free_l2t: 1298out_free_l2t:
1297 t3_free_l2t(L2DATA(dev)); 1299 t3_free_l2t(l2td);
1298 RCU_INIT_POINTER(dev->l2opt, NULL);
1299out_free: 1300out_free:
1300 kfree(t); 1301 kfree(t);
1301 return err; 1302 return err;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index f12e6b85a653..2fd773e267dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
455 q->pg_chunk.offset = 0; 455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); 457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
459 __free_pages(q->pg_chunk.page, order);
460 q->pg_chunk.page = NULL;
461 return -EIO;
462 }
458 q->pg_chunk.mapping = mapping; 463 q->pg_chunk.mapping = mapping;
459 } 464 }
460 sd->pg_chunk = q->pg_chunk; 465 sd->pg_chunk = q->pg_chunk;
@@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
949 return flits_to_desc(flits); 954 return flits_to_desc(flits);
950} 955}
951 956
957
958/* map_skb - map a packet main body and its page fragments
959 * @pdev: the PCI device
960 * @skb: the packet
961 * @addr: placeholder to save the mapped addresses
962 *
963 * map the main body of an sk_buff and its page fragments, if any.
964 */
965static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
966 dma_addr_t *addr)
967{
968 const skb_frag_t *fp, *end;
969 const struct skb_shared_info *si;
970
971 *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
972 PCI_DMA_TODEVICE);
973 if (pci_dma_mapping_error(pdev, *addr))
974 goto out_err;
975
976 si = skb_shinfo(skb);
977 end = &si->frags[si->nr_frags];
978
979 for (fp = si->frags; fp < end; fp++) {
980 *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
981 DMA_TO_DEVICE);
982 if (pci_dma_mapping_error(pdev, *addr))
983 goto unwind;
984 }
985 return 0;
986
987unwind:
988 while (fp-- > si->frags)
989 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
990 DMA_TO_DEVICE);
991
992 pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
993out_err:
994 return -ENOMEM;
995}
996
952/** 997/**
953 * make_sgl - populate a scatter/gather list for a packet 998 * write_sgl - populate a scatter/gather list for a packet
954 * @skb: the packet 999 * @skb: the packet
955 * @sgp: the SGL to populate 1000 * @sgp: the SGL to populate
956 * @start: start address of skb main body data to include in the SGL 1001 * @start: start address of skb main body data to include in the SGL
957 * @len: length of skb main body data to include in the SGL 1002 * @len: length of skb main body data to include in the SGL
958 * @pdev: the PCI device 1003 * @addr: the list of the mapped addresses
959 * 1004 *
960 * Generates a scatter/gather list for the buffers that make up a packet 1005 * Copies the scatter/gather list for the buffers that make up a packet
961 * and returns the SGL size in 8-byte words. The caller must size the SGL 1006 * and returns the SGL size in 8-byte words. The caller must size the SGL
962 * appropriately. 1007 * appropriately.
963 */ 1008 */
964static inline unsigned int make_sgl(const struct sk_buff *skb, 1009static inline unsigned int write_sgl(const struct sk_buff *skb,
965 struct sg_ent *sgp, unsigned char *start, 1010 struct sg_ent *sgp, unsigned char *start,
966 unsigned int len, struct pci_dev *pdev) 1011 unsigned int len, const dma_addr_t *addr)
967{ 1012{
968 dma_addr_t mapping; 1013 unsigned int i, j = 0, k = 0, nfrags;
969 unsigned int i, j = 0, nfrags;
970 1014
971 if (len) { 1015 if (len) {
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
973 sgp->len[0] = cpu_to_be32(len); 1016 sgp->len[0] = cpu_to_be32(len);
974 sgp->addr[0] = cpu_to_be64(mapping); 1017 sgp->addr[j++] = cpu_to_be64(addr[k++]);
975 j = 1;
976 } 1018 }
977 1019
978 nfrags = skb_shinfo(skb)->nr_frags; 1020 nfrags = skb_shinfo(skb)->nr_frags;
979 for (i = 0; i < nfrags; i++) { 1021 for (i = 0; i < nfrags; i++) {
980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1022 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
981 1023
982 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
983 DMA_TO_DEVICE);
984 sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); 1024 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
985 sgp->addr[j] = cpu_to_be64(mapping); 1025 sgp->addr[j] = cpu_to_be64(addr[k++]);
986 j ^= 1; 1026 j ^= 1;
987 if (j == 0) 1027 if (j == 0)
988 ++sgp; 1028 ++sgp;
@@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1138 const struct port_info *pi, 1178 const struct port_info *pi,
1139 unsigned int pidx, unsigned int gen, 1179 unsigned int pidx, unsigned int gen,
1140 struct sge_txq *q, unsigned int ndesc, 1180 struct sge_txq *q, unsigned int ndesc,
1141 unsigned int compl) 1181 unsigned int compl, const dma_addr_t *addr)
1142{ 1182{
1143 unsigned int flits, sgl_flits, cntrl, tso_info; 1183 unsigned int flits, sgl_flits, cntrl, tso_info;
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; 1184 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1196 } 1236 }
1197 1237
1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1238 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); 1239 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1200 1240
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, 1241 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), 1242 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1227 struct netdev_queue *txq; 1267 struct netdev_queue *txq;
1228 struct sge_qset *qs; 1268 struct sge_qset *qs;
1229 struct sge_txq *q; 1269 struct sge_txq *q;
1270 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1230 1271
1231 /* 1272 /*
1232 * The chip min packet length is 9 octets but play safe and reject 1273 * The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1255 return NETDEV_TX_BUSY; 1296 return NETDEV_TX_BUSY;
1256 } 1297 }
1257 1298
1299 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1300 dev_kfree_skb(skb);
1301 return NETDEV_TX_OK;
1302 }
1303
1258 q->in_use += ndesc; 1304 q->in_use += ndesc;
1259 if (unlikely(credits - ndesc < q->stop_thres)) { 1305 if (unlikely(credits - ndesc < q->stop_thres)) {
1260 t3_stop_tx_queue(txq, qs, q); 1306 t3_stop_tx_queue(txq, qs, q);
@@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1312 if (likely(!skb_shared(skb))) 1358 if (likely(!skb_shared(skb)))
1313 skb_orphan(skb); 1359 skb_orphan(skb);
1314 1360
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); 1361 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1316 check_ring_tx_db(adap, q); 1362 check_ring_tx_db(adap, q);
1317 return NETDEV_TX_OK; 1363 return NETDEV_TX_OK;
1318} 1364}
@@ -1578,7 +1624,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1578 */ 1624 */
1579static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, 1625static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1580 struct sge_txq *q, unsigned int pidx, 1626 struct sge_txq *q, unsigned int pidx,
1581 unsigned int gen, unsigned int ndesc) 1627 unsigned int gen, unsigned int ndesc,
1628 const dma_addr_t *addr)
1582{ 1629{
1583 unsigned int sgl_flits, flits; 1630 unsigned int sgl_flits, flits;
1584 struct work_request_hdr *from; 1631 struct work_request_hdr *from;
@@ -1599,9 +1646,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1599 1646
1600 flits = skb_transport_offset(skb) / 8; 1647 flits = skb_transport_offset(skb) / 8;
1601 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1648 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1602 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1649 sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1603 skb->tail - skb->transport_header, 1650 skb->tail - skb->transport_header,
1604 adap->pdev); 1651 addr);
1605 if (need_skb_unmap()) { 1652 if (need_skb_unmap()) {
1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1653 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1607 skb->destructor = deferred_unmap_destructor; 1654 skb->destructor = deferred_unmap_destructor;
@@ -1659,6 +1706,11 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1659 goto again; 1706 goto again;
1660 } 1707 }
1661 1708
1709 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1710 spin_unlock(&q->lock);
1711 return NET_XMIT_SUCCESS;
1712 }
1713
1662 gen = q->gen; 1714 gen = q->gen;
1663 q->in_use += ndesc; 1715 q->in_use += ndesc;
1664 pidx = q->pidx; 1716 pidx = q->pidx;
@@ -1669,7 +1721,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1669 } 1721 }
1670 spin_unlock(&q->lock); 1722 spin_unlock(&q->lock);
1671 1723
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1724 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1673 check_ring_tx_db(adap, q); 1725 check_ring_tx_db(adap, q);
1674 return NET_XMIT_SUCCESS; 1726 return NET_XMIT_SUCCESS;
1675} 1727}
@@ -1687,6 +1739,7 @@ static void restart_offloadq(unsigned long data)
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1739 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1688 const struct port_info *pi = netdev_priv(qs->netdev); 1740 const struct port_info *pi = netdev_priv(qs->netdev);
1689 struct adapter *adap = pi->adapter; 1741 struct adapter *adap = pi->adapter;
1742 unsigned int written = 0;
1690 1743
1691 spin_lock(&q->lock); 1744 spin_lock(&q->lock);
1692again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1745again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,10 +1759,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1706 break; 1759 break;
1707 } 1760 }
1708 1761
1762 if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1763 break;
1764
1709 gen = q->gen; 1765 gen = q->gen;
1710 q->in_use += ndesc; 1766 q->in_use += ndesc;
1711 pidx = q->pidx; 1767 pidx = q->pidx;
1712 q->pidx += ndesc; 1768 q->pidx += ndesc;
1769 written += ndesc;
1713 if (q->pidx >= q->size) { 1770 if (q->pidx >= q->size) {
1714 q->pidx -= q->size; 1771 q->pidx -= q->size;
1715 q->gen ^= 1; 1772 q->gen ^= 1;
@@ -1717,7 +1774,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1717 __skb_unlink(skb, &q->sendq); 1774 __skb_unlink(skb, &q->sendq);
1718 spin_unlock(&q->lock); 1775 spin_unlock(&q->lock);
1719 1776
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1777 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1778 (dma_addr_t *)skb->head);
1721 spin_lock(&q->lock); 1779 spin_lock(&q->lock);
1722 } 1780 }
1723 spin_unlock(&q->lock); 1781 spin_unlock(&q->lock);
@@ -1727,8 +1785,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1727 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1785 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1728#endif 1786#endif
1729 wmb(); 1787 wmb();
1730 t3_write_reg(adap, A_SG_KDOORBELL, 1788 if (likely(written))
1731 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1789 t3_write_reg(adap, A_SG_KDOORBELL,
1790 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1732} 1791}
1733 1792
1734/** 1793/**
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9105465b2a1a..dd243a1b03e0 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -29,6 +29,8 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/crc32.h> 30#include <linux/crc32.h>
31#include <linux/mii.h> 31#include <linux/mii.h>
32#include <linux/of.h>
33#include <linux/of_net.h>
32#include <linux/ethtool.h> 34#include <linux/ethtool.h>
33#include <linux/dm9000.h> 35#include <linux/dm9000.h>
34#include <linux/delay.h> 36#include <linux/delay.h>
@@ -827,7 +829,7 @@ dm9000_hash_table_unlocked(struct net_device *dev)
827 struct netdev_hw_addr *ha; 829 struct netdev_hw_addr *ha;
828 int i, oft; 830 int i, oft;
829 u32 hash_val; 831 u32 hash_val;
830 u16 hash_table[4]; 832 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
831 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; 833 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
832 834
833 dm9000_dbg(db, 1, "entering %s\n", __func__); 835 dm9000_dbg(db, 1, "entering %s\n", __func__);
@@ -835,13 +837,6 @@ dm9000_hash_table_unlocked(struct net_device *dev)
835 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) 837 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
836 iow(db, oft, dev->dev_addr[i]); 838 iow(db, oft, dev->dev_addr[i]);
837 839
838 /* Clear Hash Table */
839 for (i = 0; i < 4; i++)
840 hash_table[i] = 0x0;
841
842 /* broadcast address */
843 hash_table[3] = 0x8000;
844
845 if (dev->flags & IFF_PROMISC) 840 if (dev->flags & IFF_PROMISC)
846 rcr |= RCR_PRMSC; 841 rcr |= RCR_PRMSC;
847 842
@@ -1358,6 +1353,31 @@ static const struct net_device_ops dm9000_netdev_ops = {
1358#endif 1353#endif
1359}; 1354};
1360 1355
1356static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1357{
1358 struct dm9000_plat_data *pdata;
1359 struct device_node *np = dev->of_node;
1360 const void *mac_addr;
1361
1362 if (!IS_ENABLED(CONFIG_OF) || !np)
1363 return NULL;
1364
1365 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1366 if (!pdata)
1367 return ERR_PTR(-ENOMEM);
1368
1369 if (of_find_property(np, "davicom,ext-phy", NULL))
1370 pdata->flags |= DM9000_PLATF_EXT_PHY;
1371 if (of_find_property(np, "davicom,no-eeprom", NULL))
1372 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1373
1374 mac_addr = of_get_mac_address(np);
1375 if (mac_addr)
1376 memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
1377
1378 return pdata;
1379}
1380
1361/* 1381/*
1362 * Search DM9000 board, allocate space and register it 1382 * Search DM9000 board, allocate space and register it
1363 */ 1383 */
@@ -1373,6 +1393,12 @@ dm9000_probe(struct platform_device *pdev)
1373 int i; 1393 int i;
1374 u32 id_val; 1394 u32 id_val;
1375 1395
1396 if (!pdata) {
1397 pdata = dm9000_parse_dt(&pdev->dev);
1398 if (IS_ERR(pdata))
1399 return PTR_ERR(pdata);
1400 }
1401
1376 /* Init network device */ 1402 /* Init network device */
1377 ndev = alloc_etherdev(sizeof(struct board_info)); 1403 ndev = alloc_etherdev(sizeof(struct board_info));
1378 if (!ndev) 1404 if (!ndev)
@@ -1683,11 +1709,20 @@ dm9000_drv_remove(struct platform_device *pdev)
1683 return 0; 1709 return 0;
1684} 1710}
1685 1711
1712#ifdef CONFIG_OF
1713static const struct of_device_id dm9000_of_matches[] = {
1714 { .compatible = "davicom,dm9000", },
1715 { /* sentinel */ }
1716};
1717MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1718#endif
1719
1686static struct platform_driver dm9000_driver = { 1720static struct platform_driver dm9000_driver = {
1687 .driver = { 1721 .driver = {
1688 .name = "dm9000", 1722 .name = "dm9000",
1689 .owner = THIS_MODULE, 1723 .owner = THIS_MODULE,
1690 .pm = &dm9000_drv_pm_ops, 1724 .pm = &dm9000_drv_pm_ops,
1725 .of_match_table = of_match_ptr(dm9000_of_matches),
1691 }, 1726 },
1692 .probe = dm9000_probe, 1727 .probe = dm9000_probe,
1693 .remove = dm9000_drv_remove, 1728 .remove = dm9000_drv_remove,
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index cdbcd1643141..9b84cb04fe5f 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1171,16 +1171,4 @@ investigate_write_descriptor(struct net_device *dev,
1171 } 1171 }
1172} 1172}
1173 1173
1174static int __init xircom_init(void) 1174module_pci_driver(xircom_ops);
1175{
1176 return pci_register_driver(&xircom_ops);
1177}
1178
1179static void __exit xircom_exit(void)
1180{
1181 pci_unregister_driver(&xircom_ops);
1182}
1183
1184module_init(xircom_init)
1185module_exit(xircom_exit)
1186
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ca2967b0f18b..3d5e1a8929ae 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -834,32 +834,27 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835} 835}
836 836
837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) 837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
838{ 839{
839 return BE3_chip(adapter) && 840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
840 be_ipv6_exthdr_check(skb);
841} 841}
842 842
843static netdev_tx_t be_xmit(struct sk_buff *skb, 843static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
844 struct net_device *netdev) 844 struct sk_buff *skb,
845 bool *skip_hw_vlan)
845{ 846{
846 struct be_adapter *adapter = netdev_priv(netdev);
847 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
848 struct be_queue_info *txq = &txo->q;
849 struct iphdr *ip = NULL;
850 u32 wrb_cnt = 0, copied = 0;
851 u32 start = txq->head, eth_hdr_len;
852 bool dummy_wrb, stopped = false;
853 bool skip_hw_vlan = false;
854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 847 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
855 848 unsigned int eth_hdr_len;
856 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 849 struct iphdr *ip;
857 VLAN_ETH_HLEN : ETH_HLEN;
858 850
859 /* For padded packets, BE HW modifies tot_len field in IP header 851 /* For padded packets, BE HW modifies tot_len field in IP header
860 * incorrecly when VLAN tag is inserted by HW. 852 * incorrecly when VLAN tag is inserted by HW.
861 */ 853 */
862 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { 854 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
855 VLAN_ETH_HLEN : ETH_HLEN;
856 if (skb->len <= 60 && vlan_tx_tag_present(skb) &&
857 is_ipv4_pkt(skb)) {
863 ip = (struct iphdr *)ip_hdr(skb); 858 ip = (struct iphdr *)ip_hdr(skb);
864 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 859 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
865 } 860 }
@@ -869,15 +864,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
869 */ 864 */
870 if ((adapter->function_mode & UMC_ENABLED) && 865 if ((adapter->function_mode & UMC_ENABLED) &&
871 veh->h_vlan_proto == htons(ETH_P_8021Q)) 866 veh->h_vlan_proto == htons(ETH_P_8021Q))
872 skip_hw_vlan = true; 867 *skip_hw_vlan = true;
873 868
874 /* HW has a bug wherein it will calculate CSUM for VLAN 869 /* HW has a bug wherein it will calculate CSUM for VLAN
875 * pkts even though it is disabled. 870 * pkts even though it is disabled.
876 * Manually insert VLAN in pkt. 871 * Manually insert VLAN in pkt.
877 */ 872 */
878 if (skb->ip_summed != CHECKSUM_PARTIAL && 873 if (skb->ip_summed != CHECKSUM_PARTIAL &&
879 vlan_tx_tag_present(skb)) { 874 vlan_tx_tag_present(skb)) {
880 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); 875 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
881 if (unlikely(!skb)) 876 if (unlikely(!skb))
882 goto tx_drop; 877 goto tx_drop;
883 } 878 }
@@ -887,8 +882,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
887 * skip HW tagging is not enabled by FW. 882 * skip HW tagging is not enabled by FW.
888 */ 883 */
889 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && 884 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
890 (adapter->pvid || adapter->qnq_vid) && 885 (adapter->pvid || adapter->qnq_vid) &&
891 !qnq_async_evt_rcvd(adapter))) 886 !qnq_async_evt_rcvd(adapter)))
892 goto tx_drop; 887 goto tx_drop;
893 888
894 /* Manual VLAN tag insertion to prevent: 889 /* Manual VLAN tag insertion to prevent:
@@ -899,11 +894,31 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
899 */ 894 */
900 if (be_ipv6_tx_stall_chk(adapter, skb) && 895 if (be_ipv6_tx_stall_chk(adapter, skb) &&
901 be_vlan_tag_tx_chk(adapter, skb)) { 896 be_vlan_tag_tx_chk(adapter, skb)) {
902 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); 897 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
903 if (unlikely(!skb)) 898 if (unlikely(!skb))
904 goto tx_drop; 899 goto tx_drop;
905 } 900 }
906 901
902 return skb;
903tx_drop:
904 dev_kfree_skb_any(skb);
905 return NULL;
906}
907
908static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
909{
910 struct be_adapter *adapter = netdev_priv(netdev);
911 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
912 struct be_queue_info *txq = &txo->q;
913 bool dummy_wrb, stopped = false;
914 u32 wrb_cnt = 0, copied = 0;
915 bool skip_hw_vlan = false;
916 u32 start = txq->head;
917
918 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
919 if (!skb)
920 return NETDEV_TX_OK;
921
907 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 922 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
908 923
909 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, 924 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
@@ -933,7 +948,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
933 txq->head = start; 948 txq->head = start;
934 dev_kfree_skb_any(skb); 949 dev_kfree_skb_any(skb);
935 } 950 }
936tx_drop:
937 return NETDEV_TX_OK; 951 return NETDEV_TX_OK;
938} 952}
939 953
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 85a06037b242..0936b26b5e1e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -53,7 +53,6 @@
53#include <linux/of_device.h> 53#include <linux/of_device.h>
54#include <linux/of_gpio.h> 54#include <linux/of_gpio.h>
55#include <linux/of_net.h> 55#include <linux/of_net.h>
56#include <linux/pinctrl/consumer.h>
57#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
58 57
59#include <asm/cacheflush.h> 58#include <asm/cacheflush.h>
@@ -243,7 +242,7 @@ static void *swap_buffer(void *bufaddr, int len)
243 int i; 242 int i;
244 unsigned int *buf = bufaddr; 243 unsigned int *buf = bufaddr;
245 244
246 for (i = 0; i < (len + 3) / 4; i++, buf++) 245 for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
247 *buf = cpu_to_be32(*buf); 246 *buf = cpu_to_be32(*buf);
248 247
249 return bufaddr; 248 return bufaddr;
@@ -1841,7 +1840,6 @@ fec_probe(struct platform_device *pdev)
1841 struct resource *r; 1840 struct resource *r;
1842 const struct of_device_id *of_id; 1841 const struct of_device_id *of_id;
1843 static int dev_id; 1842 static int dev_id;
1844 struct pinctrl *pinctrl;
1845 struct regulator *reg_phy; 1843 struct regulator *reg_phy;
1846 1844
1847 of_id = of_match_device(fec_dt_ids, &pdev->dev); 1845 of_id = of_match_device(fec_dt_ids, &pdev->dev);
@@ -1891,12 +1889,6 @@ fec_probe(struct platform_device *pdev)
1891 fep->phy_interface = ret; 1889 fep->phy_interface = ret;
1892 } 1890 }
1893 1891
1894 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1895 if (IS_ERR(pinctrl)) {
1896 ret = PTR_ERR(pinctrl);
1897 goto failed_pin;
1898 }
1899
1900 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1892 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1901 if (IS_ERR(fep->clk_ipg)) { 1893 if (IS_ERR(fep->clk_ipg)) {
1902 ret = PTR_ERR(fep->clk_ipg); 1894 ret = PTR_ERR(fep->clk_ipg);
@@ -1996,7 +1988,6 @@ failed_regulator:
1996 clk_disable_unprepare(fep->clk_ipg); 1988 clk_disable_unprepare(fep->clk_ipg);
1997 clk_disable_unprepare(fep->clk_enet_out); 1989 clk_disable_unprepare(fep->clk_enet_out);
1998 clk_disable_unprepare(fep->clk_ptp); 1990 clk_disable_unprepare(fep->clk_ptp);
1999failed_pin:
2000failed_clk: 1991failed_clk:
2001failed_ioremap: 1992failed_ioremap:
2002 free_netdev(ndev); 1993 free_netdev(ndev);
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 068d78151658..1fde90b96685 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2298,15 +2298,4 @@ static struct pci_driver ipg_pci_driver = {
2298 .remove = ipg_remove, 2298 .remove = ipg_remove,
2299}; 2299};
2300 2300
2301static int __init ipg_init_module(void) 2301module_pci_driver(ipg_pci_driver);
2302{
2303 return pci_register_driver(&ipg_pci_driver);
2304}
2305
2306static void __exit ipg_exit_module(void)
2307{
2308 pci_unregister_driver(&ipg_pci_driver);
2309}
2310
2311module_init(ipg_init_module);
2312module_exit(ipg_exit_module);
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index b71c8502a2b3..895450e9bb3c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -66,17 +66,17 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
66 s32 ret_val; 66 s32 ret_val;
67 67
68 if (hw->phy.media_type != e1000_media_type_copper) { 68 if (hw->phy.media_type != e1000_media_type_copper) {
69 phy->type = e1000_phy_none; 69 phy->type = e1000_phy_none;
70 return 0; 70 return 0;
71 } else { 71 } else {
72 phy->ops.power_up = e1000_power_up_phy_copper; 72 phy->ops.power_up = e1000_power_up_phy_copper;
73 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; 73 phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
74 } 74 }
75 75
76 phy->addr = 1; 76 phy->addr = 1;
77 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 77 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
78 phy->reset_delay_us = 100; 78 phy->reset_delay_us = 100;
79 phy->type = e1000_phy_gg82563; 79 phy->type = e1000_phy_gg82563;
80 80
81 /* This can only be done after all function pointers are setup. */ 81 /* This can only be done after all function pointers are setup. */
82 ret_val = e1000e_get_phy_id(hw); 82 ret_val = e1000e_get_phy_id(hw);
@@ -98,19 +98,19 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
98 u32 eecd = er32(EECD); 98 u32 eecd = er32(EECD);
99 u16 size; 99 u16 size;
100 100
101 nvm->opcode_bits = 8; 101 nvm->opcode_bits = 8;
102 nvm->delay_usec = 1; 102 nvm->delay_usec = 1;
103 switch (nvm->override) { 103 switch (nvm->override) {
104 case e1000_nvm_override_spi_large: 104 case e1000_nvm_override_spi_large:
105 nvm->page_size = 32; 105 nvm->page_size = 32;
106 nvm->address_bits = 16; 106 nvm->address_bits = 16;
107 break; 107 break;
108 case e1000_nvm_override_spi_small: 108 case e1000_nvm_override_spi_small:
109 nvm->page_size = 8; 109 nvm->page_size = 8;
110 nvm->address_bits = 8; 110 nvm->address_bits = 8;
111 break; 111 break;
112 default: 112 default:
113 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 113 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
114 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; 114 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
115 break; 115 break;
116 } 116 }
@@ -128,7 +128,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
128 /* EEPROM access above 16k is unsupported */ 128 /* EEPROM access above 16k is unsupported */
129 if (size > 14) 129 if (size > 14)
130 size = 14; 130 size = 14;
131 nvm->word_size = 1 << size; 131 nvm->word_size = 1 << size;
132 132
133 return 0; 133 return 0;
134} 134}
@@ -859,7 +859,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
859 859
860 /* Transmit Arbitration Control 0 */ 860 /* Transmit Arbitration Control 0 */
861 reg = er32(TARC(0)); 861 reg = er32(TARC(0));
862 reg &= ~(0xF << 27); /* 30:27 */ 862 reg &= ~(0xF << 27); /* 30:27 */
863 if (hw->phy.media_type != e1000_media_type_copper) 863 if (hw->phy.media_type != e1000_media_type_copper)
864 reg &= ~(1 << 20); 864 reg &= ~(1 << 20);
865 ew32(TARC(0), reg); 865 ew32(TARC(0), reg);
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7380442a3829..4c303e2a7cb3 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -77,24 +77,24 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
77 return 0; 77 return 0;
78 } 78 }
79 79
80 phy->addr = 1; 80 phy->addr = 1;
81 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 81 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
82 phy->reset_delay_us = 100; 82 phy->reset_delay_us = 100;
83 83
84 phy->ops.power_up = e1000_power_up_phy_copper; 84 phy->ops.power_up = e1000_power_up_phy_copper;
85 phy->ops.power_down = e1000_power_down_phy_copper_82571; 85 phy->ops.power_down = e1000_power_down_phy_copper_82571;
86 86
87 switch (hw->mac.type) { 87 switch (hw->mac.type) {
88 case e1000_82571: 88 case e1000_82571:
89 case e1000_82572: 89 case e1000_82572:
90 phy->type = e1000_phy_igp_2; 90 phy->type = e1000_phy_igp_2;
91 break; 91 break;
92 case e1000_82573: 92 case e1000_82573:
93 phy->type = e1000_phy_m88; 93 phy->type = e1000_phy_m88;
94 break; 94 break;
95 case e1000_82574: 95 case e1000_82574:
96 case e1000_82583: 96 case e1000_82583:
97 phy->type = e1000_phy_bm; 97 phy->type = e1000_phy_bm;
98 phy->ops.acquire = e1000_get_hw_semaphore_82574; 98 phy->ops.acquire = e1000_get_hw_semaphore_82574;
99 phy->ops.release = e1000_put_hw_semaphore_82574; 99 phy->ops.release = e1000_put_hw_semaphore_82574;
100 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; 100 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
@@ -193,7 +193,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
193 /* EEPROM access above 16k is unsupported */ 193 /* EEPROM access above 16k is unsupported */
194 if (size > 14) 194 if (size > 14)
195 size = 14; 195 size = 14;
196 nvm->word_size = 1 << size; 196 nvm->word_size = 1 << size;
197 break; 197 break;
198 } 198 }
199 199
@@ -339,7 +339,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
339static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) 339static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
340{ 340{
341 struct e1000_hw *hw = &adapter->hw; 341 struct e1000_hw *hw = &adapter->hw;
342 static int global_quad_port_a; /* global port a indication */ 342 static int global_quad_port_a; /* global port a indication */
343 struct pci_dev *pdev = adapter->pdev; 343 struct pci_dev *pdev = adapter->pdev;
344 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; 344 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
345 s32 rc; 345 s32 rc;
@@ -1003,8 +1003,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1003 default: 1003 default:
1004 break; 1004 break;
1005 } 1005 }
1006 if (ret_val)
1007 e_dbg("Cannot acquire MDIO ownership\n");
1008 1006
1009 ctrl = er32(CTRL); 1007 ctrl = er32(CTRL);
1010 1008
@@ -1015,7 +1013,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1015 switch (hw->mac.type) { 1013 switch (hw->mac.type) {
1016 case e1000_82574: 1014 case e1000_82574:
1017 case e1000_82583: 1015 case e1000_82583:
1018 e1000_put_hw_semaphore_82574(hw); 1016 /* Release mutex only if the hw semaphore is acquired */
1017 if (!ret_val)
1018 e1000_put_hw_semaphore_82574(hw);
1019 break; 1019 break;
1020 default: 1020 default:
1021 break; 1021 break;
@@ -1178,7 +1178,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1178 1178
1179 /* Transmit Arbitration Control 0 */ 1179 /* Transmit Arbitration Control 0 */
1180 reg = er32(TARC(0)); 1180 reg = er32(TARC(0));
1181 reg &= ~(0xF << 27); /* 30:27 */ 1181 reg &= ~(0xF << 27); /* 30:27 */
1182 switch (hw->mac.type) { 1182 switch (hw->mac.type) {
1183 case e1000_82571: 1183 case e1000_82571:
1184 case e1000_82572: 1184 case e1000_82572:
@@ -1390,7 +1390,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
1390 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); 1390 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
1391 if (ret_val) 1391 if (ret_val)
1392 return false; 1392 return false;
1393 if (receive_errors == E1000_RECEIVE_ERROR_MAX) { 1393 if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
1394 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); 1394 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
1395 if (ret_val) 1395 if (ret_val)
1396 return false; 1396 return false;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 7c8ca658d553..59c22bf18701 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -244,7 +244,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
244 mac->autoneg = 1; 244 mac->autoneg = 1;
245 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 245 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
246 break; 246 break;
247 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 247 case SPEED_1000 + DUPLEX_HALF: /* not supported */
248 default: 248 default:
249 goto err_inval; 249 goto err_inval;
250 } 250 }
@@ -416,7 +416,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
416 416
417static int e1000_get_regs_len(struct net_device __always_unused *netdev) 417static int e1000_get_regs_len(struct net_device __always_unused *netdev)
418{ 418{
419#define E1000_REGS_LEN 32 /* overestimate */ 419#define E1000_REGS_LEN 32 /* overestimate */
420 return E1000_REGS_LEN * sizeof(u32); 420 return E1000_REGS_LEN * sizeof(u32);
421} 421}
422 422
@@ -433,22 +433,22 @@ static void e1000_get_regs(struct net_device *netdev,
433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
434 adapter->pdev->device; 434 adapter->pdev->device;
435 435
436 regs_buff[0] = er32(CTRL); 436 regs_buff[0] = er32(CTRL);
437 regs_buff[1] = er32(STATUS); 437 regs_buff[1] = er32(STATUS);
438 438
439 regs_buff[2] = er32(RCTL); 439 regs_buff[2] = er32(RCTL);
440 regs_buff[3] = er32(RDLEN(0)); 440 regs_buff[3] = er32(RDLEN(0));
441 regs_buff[4] = er32(RDH(0)); 441 regs_buff[4] = er32(RDH(0));
442 regs_buff[5] = er32(RDT(0)); 442 regs_buff[5] = er32(RDT(0));
443 regs_buff[6] = er32(RDTR); 443 regs_buff[6] = er32(RDTR);
444 444
445 regs_buff[7] = er32(TCTL); 445 regs_buff[7] = er32(TCTL);
446 regs_buff[8] = er32(TDLEN(0)); 446 regs_buff[8] = er32(TDLEN(0));
447 regs_buff[9] = er32(TDH(0)); 447 regs_buff[9] = er32(TDH(0));
448 regs_buff[10] = er32(TDT(0)); 448 regs_buff[10] = er32(TDT(0));
449 regs_buff[11] = er32(TIDV); 449 regs_buff[11] = er32(TIDV);
450 450
451 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 451 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
452 452
453 /* ethtool doesn't use anything past this point, so all this 453 /* ethtool doesn't use anything past this point, so all this
454 * code is likely legacy junk for apps that may or may not exist 454 * code is likely legacy junk for apps that may or may not exist
@@ -1379,7 +1379,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1379 1379
1380 if (hw->phy.media_type == e1000_media_type_copper && 1380 if (hw->phy.media_type == e1000_media_type_copper &&
1381 hw->phy.type == e1000_phy_m88) { 1381 hw->phy.type == e1000_phy_m88) {
1382 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1382 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1383 } else { 1383 } else {
1384 /* Set the ILOS bit on the fiber Nic if half duplex link is 1384 /* Set the ILOS bit on the fiber Nic if half duplex link is
1385 * detected. 1385 * detected.
@@ -1613,7 +1613,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1613 ew32(TDT(0), k); 1613 ew32(TDT(0), k);
1614 e1e_flush(); 1614 e1e_flush();
1615 msleep(200); 1615 msleep(200);
1616 time = jiffies; /* set the start time for the receive */ 1616 time = jiffies; /* set the start time for the receive */
1617 good_cnt = 0; 1617 good_cnt = 0;
1618 /* receive the sent packets */ 1618 /* receive the sent packets */
1619 do { 1619 do {
@@ -1636,11 +1636,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1636 */ 1636 */
1637 } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); 1637 } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
1638 if (good_cnt != 64) { 1638 if (good_cnt != 64) {
1639 ret_val = 13; /* ret_val is the same as mis-compare */ 1639 ret_val = 13; /* ret_val is the same as mis-compare */
1640 break; 1640 break;
1641 } 1641 }
1642 if (jiffies >= (time + 20)) { 1642 if (jiffies >= (time + 20)) {
1643 ret_val = 14; /* error code for time out error */ 1643 ret_val = 14; /* error code for time out error */
1644 break; 1644 break;
1645 } 1645 }
1646 } 1646 }
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 84850f7a23e4..a6f903a9b773 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -402,13 +402,13 @@ struct e1000_phy_stats {
402 402
403struct e1000_host_mng_dhcp_cookie { 403struct e1000_host_mng_dhcp_cookie {
404 u32 signature; 404 u32 signature;
405 u8 status; 405 u8 status;
406 u8 reserved0; 406 u8 reserved0;
407 u16 vlan_id; 407 u16 vlan_id;
408 u32 reserved1; 408 u32 reserved1;
409 u16 reserved2; 409 u16 reserved2;
410 u8 reserved3; 410 u8 reserved3;
411 u8 checksum; 411 u8 checksum;
412}; 412};
413 413
414/* Host Interface "Rev 1" */ 414/* Host Interface "Rev 1" */
@@ -427,8 +427,8 @@ struct e1000_host_command_info {
427 427
428/* Host Interface "Rev 2" */ 428/* Host Interface "Rev 2" */
429struct e1000_host_mng_command_header { 429struct e1000_host_mng_command_header {
430 u8 command_id; 430 u8 command_id;
431 u8 checksum; 431 u8 checksum;
432 u16 reserved1; 432 u16 reserved1;
433 u16 reserved2; 433 u16 reserved2;
434 u16 command_length; 434 u16 command_length;
@@ -549,7 +549,7 @@ struct e1000_mac_info {
549 u32 mta_shadow[MAX_MTA_REG]; 549 u32 mta_shadow[MAX_MTA_REG];
550 u16 rar_entry_count; 550 u16 rar_entry_count;
551 551
552 u8 forced_speed_duplex; 552 u8 forced_speed_duplex;
553 553
554 bool adaptive_ifs; 554 bool adaptive_ifs;
555 bool has_fwsm; 555 bool has_fwsm;
@@ -577,7 +577,7 @@ struct e1000_phy_info {
577 577
578 u32 addr; 578 u32 addr;
579 u32 id; 579 u32 id;
580 u32 reset_delay_us; /* in usec */ 580 u32 reset_delay_us; /* in usec */
581 u32 revision; 581 u32 revision;
582 582
583 enum e1000_media_type media_type; 583 enum e1000_media_type media_type;
@@ -636,11 +636,11 @@ struct e1000_dev_spec_82571 {
636}; 636};
637 637
638struct e1000_dev_spec_80003es2lan { 638struct e1000_dev_spec_80003es2lan {
639 bool mdic_wa_enable; 639 bool mdic_wa_enable;
640}; 640};
641 641
642struct e1000_shadow_ram { 642struct e1000_shadow_ram {
643 u16 value; 643 u16 value;
644 bool modified; 644 bool modified;
645}; 645};
646 646
@@ -660,17 +660,17 @@ struct e1000_hw {
660 void __iomem *hw_addr; 660 void __iomem *hw_addr;
661 void __iomem *flash_address; 661 void __iomem *flash_address;
662 662
663 struct e1000_mac_info mac; 663 struct e1000_mac_info mac;
664 struct e1000_fc_info fc; 664 struct e1000_fc_info fc;
665 struct e1000_phy_info phy; 665 struct e1000_phy_info phy;
666 struct e1000_nvm_info nvm; 666 struct e1000_nvm_info nvm;
667 struct e1000_bus_info bus; 667 struct e1000_bus_info bus;
668 struct e1000_host_mng_dhcp_cookie mng_cookie; 668 struct e1000_host_mng_dhcp_cookie mng_cookie;
669 669
670 union { 670 union {
671 struct e1000_dev_spec_82571 e82571; 671 struct e1000_dev_spec_82571 e82571;
672 struct e1000_dev_spec_80003es2lan e80003es2lan; 672 struct e1000_dev_spec_80003es2lan e80003es2lan;
673 struct e1000_dev_spec_ich8lan ich8lan; 673 struct e1000_dev_spec_ich8lan ich8lan;
674 } dev_spec; 674 } dev_spec;
675}; 675};
676 676
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ad9d8f2dd868..9dde390f7e71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -101,12 +101,12 @@ union ich8_hws_flash_regacc {
101/* ICH Flash Protected Region */ 101/* ICH Flash Protected Region */
102union ich8_flash_protected_range { 102union ich8_flash_protected_range {
103 struct ich8_pr { 103 struct ich8_pr {
104 u32 base:13; /* 0:12 Protected Range Base */ 104 u32 base:13; /* 0:12 Protected Range Base */
105 u32 reserved1:2; /* 13:14 Reserved */ 105 u32 reserved1:2; /* 13:14 Reserved */
106 u32 rpe:1; /* 15 Read Protection Enable */ 106 u32 rpe:1; /* 15 Read Protection Enable */
107 u32 limit:13; /* 16:28 Protected Range Limit */ 107 u32 limit:13; /* 16:28 Protected Range Limit */
108 u32 reserved2:2; /* 29:30 Reserved */ 108 u32 reserved2:2; /* 29:30 Reserved */
109 u32 wpe:1; /* 31 Write Protection Enable */ 109 u32 wpe:1; /* 31 Write Protection Enable */
110 } range; 110 } range;
111 u32 regval; 111 u32 regval;
112}; 112};
@@ -362,21 +362,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
362 struct e1000_phy_info *phy = &hw->phy; 362 struct e1000_phy_info *phy = &hw->phy;
363 s32 ret_val; 363 s32 ret_val;
364 364
365 phy->addr = 1; 365 phy->addr = 1;
366 phy->reset_delay_us = 100; 366 phy->reset_delay_us = 100;
367 367
368 phy->ops.set_page = e1000_set_page_igp; 368 phy->ops.set_page = e1000_set_page_igp;
369 phy->ops.read_reg = e1000_read_phy_reg_hv; 369 phy->ops.read_reg = e1000_read_phy_reg_hv;
370 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; 370 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
371 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; 371 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
372 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; 372 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
373 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; 373 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
374 phy->ops.write_reg = e1000_write_phy_reg_hv; 374 phy->ops.write_reg = e1000_write_phy_reg_hv;
375 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; 375 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
376 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; 376 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
377 phy->ops.power_up = e1000_power_up_phy_copper; 377 phy->ops.power_up = e1000_power_up_phy_copper;
378 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 378 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
379 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 379 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
380 380
381 phy->id = e1000_phy_unknown; 381 phy->id = e1000_phy_unknown;
382 382
@@ -445,11 +445,11 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
445 s32 ret_val; 445 s32 ret_val;
446 u16 i = 0; 446 u16 i = 0;
447 447
448 phy->addr = 1; 448 phy->addr = 1;
449 phy->reset_delay_us = 100; 449 phy->reset_delay_us = 100;
450 450
451 phy->ops.power_up = e1000_power_up_phy_copper; 451 phy->ops.power_up = e1000_power_up_phy_copper;
452 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 452 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
453 453
454 /* We may need to do this twice - once for IGP and if that fails, 454 /* We may need to do this twice - once for IGP and if that fails,
455 * we'll set BM func pointers and try again 455 * we'll set BM func pointers and try again
@@ -457,7 +457,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
457 ret_val = e1000e_determine_phy_address(hw); 457 ret_val = e1000e_determine_phy_address(hw);
458 if (ret_val) { 458 if (ret_val) {
459 phy->ops.write_reg = e1000e_write_phy_reg_bm; 459 phy->ops.write_reg = e1000e_write_phy_reg_bm;
460 phy->ops.read_reg = e1000e_read_phy_reg_bm; 460 phy->ops.read_reg = e1000e_read_phy_reg_bm;
461 ret_val = e1000e_determine_phy_address(hw); 461 ret_val = e1000e_determine_phy_address(hw);
462 if (ret_val) { 462 if (ret_val) {
463 e_dbg("Cannot determine PHY addr. Erroring out\n"); 463 e_dbg("Cannot determine PHY addr. Erroring out\n");
@@ -560,7 +560,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
560 /* Clear shadow ram */ 560 /* Clear shadow ram */
561 for (i = 0; i < nvm->word_size; i++) { 561 for (i = 0; i < nvm->word_size; i++) {
562 dev_spec->shadow_ram[i].modified = false; 562 dev_spec->shadow_ram[i].modified = false;
563 dev_spec->shadow_ram[i].value = 0xFFFF; 563 dev_spec->shadow_ram[i].value = 0xFFFF;
564 } 564 }
565 565
566 return 0; 566 return 0;
@@ -1012,7 +1012,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1012 hw->dev_spec.ich8lan.eee_lp_ability = 0; 1012 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1013 1013
1014 if (!link) 1014 if (!link)
1015 return 0; /* No link detected */ 1015 return 0; /* No link detected */
1016 1016
1017 mac->get_link_status = false; 1017 mac->get_link_status = false;
1018 1018
@@ -2816,7 +2816,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2816 s32 ret_val = -E1000_ERR_NVM; 2816 s32 ret_val = -E1000_ERR_NVM;
2817 u8 count = 0; 2817 u8 count = 0;
2818 2818
2819 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 2819 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2820 return -E1000_ERR_NVM; 2820 return -E1000_ERR_NVM;
2821 2821
2822 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + 2822 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
@@ -2939,7 +2939,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2939 * write to bank 0 etc. We also need to erase the segment that 2939 * write to bank 0 etc. We also need to erase the segment that
2940 * is going to be written 2940 * is going to be written
2941 */ 2941 */
2942 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 2942 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2943 if (ret_val) { 2943 if (ret_val) {
2944 e_dbg("Could not detect valid bank, assuming bank 0\n"); 2944 e_dbg("Could not detect valid bank, assuming bank 0\n");
2945 bank = 0; 2945 bank = 0;
@@ -4073,7 +4073,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4073{ 4073{
4074 u32 reg; 4074 u32 reg;
4075 u16 data; 4075 u16 data;
4076 u8 retry = 0; 4076 u8 retry = 0;
4077 4077
4078 if (hw->phy.type != e1000_phy_igp_3) 4078 if (hw->phy.type != e1000_phy_igp_3)
4079 return; 4079 return;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a27e3bcc3249..77f81cbb601a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1196,7 +1196,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1196 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1196 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1197 (count < tx_ring->count)) { 1197 (count < tx_ring->count)) {
1198 bool cleaned = false; 1198 bool cleaned = false;
1199 rmb(); /* read buffer_info after eop_desc */ 1199 rmb(); /* read buffer_info after eop_desc */
1200 for (; !cleaned; count++) { 1200 for (; !cleaned; count++) {
1201 tx_desc = E1000_TX_DESC(*tx_ring, i); 1201 tx_desc = E1000_TX_DESC(*tx_ring, i);
1202 buffer_info = &tx_ring->buffer_info[i]; 1202 buffer_info = &tx_ring->buffer_info[i];
@@ -1385,7 +1385,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1385 1385
1386 skb_put(skb, l1); 1386 skb_put(skb, l1);
1387 goto copydone; 1387 goto copydone;
1388 } /* if */ 1388 } /* if */
1389 } 1389 }
1390 1390
1391 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1391 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -1800,7 +1800,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1800 u32 rctl, icr = er32(ICR); 1800 u32 rctl, icr = er32(ICR);
1801 1801
1802 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1802 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1803 return IRQ_NONE; /* Not our interrupt */ 1803 return IRQ_NONE; /* Not our interrupt */
1804 1804
1805 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 1805 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1806 * not set, then the adapter didn't send an interrupt 1806 * not set, then the adapter didn't send an interrupt
@@ -2487,7 +2487,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2487 else if ((packets < 5) && (bytes > 512)) 2487 else if ((packets < 5) && (bytes > 512))
2488 retval = low_latency; 2488 retval = low_latency;
2489 break; 2489 break;
2490 case low_latency: /* 50 usec aka 20000 ints/s */ 2490 case low_latency: /* 50 usec aka 20000 ints/s */
2491 if (bytes > 10000) { 2491 if (bytes > 10000) {
2492 /* this if handles the TSO accounting */ 2492 /* this if handles the TSO accounting */
2493 if (bytes / packets > 8000) 2493 if (bytes / packets > 8000)
@@ -2502,7 +2502,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2502 retval = lowest_latency; 2502 retval = lowest_latency;
2503 } 2503 }
2504 break; 2504 break;
2505 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2505 case bulk_latency: /* 250 usec aka 4000 ints/s */
2506 if (bytes > 25000) { 2506 if (bytes > 25000) {
2507 if (packets > 35) 2507 if (packets > 35)
2508 retval = low_latency; 2508 retval = low_latency;
@@ -2554,7 +2554,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2554 new_itr = 70000; 2554 new_itr = 70000;
2555 break; 2555 break;
2556 case low_latency: 2556 case low_latency:
2557 new_itr = 20000; /* aka hwitr = ~200 */ 2557 new_itr = 20000; /* aka hwitr = ~200 */
2558 break; 2558 break;
2559 case bulk_latency: 2559 case bulk_latency:
2560 new_itr = 4000; 2560 new_itr = 4000;
@@ -2673,7 +2673,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2673} 2673}
2674 2674
2675static int e1000_vlan_rx_add_vid(struct net_device *netdev, 2675static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2676 __be16 proto, u16 vid) 2676 __always_unused __be16 proto, u16 vid)
2677{ 2677{
2678 struct e1000_adapter *adapter = netdev_priv(netdev); 2678 struct e1000_adapter *adapter = netdev_priv(netdev);
2679 struct e1000_hw *hw = &adapter->hw; 2679 struct e1000_hw *hw = &adapter->hw;
@@ -2699,7 +2699,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2699} 2699}
2700 2700
2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2702 __be16 proto, u16 vid) 2702 __always_unused __be16 proto, u16 vid)
2703{ 2703{
2704 struct e1000_adapter *adapter = netdev_priv(netdev); 2704 struct e1000_adapter *adapter = netdev_priv(netdev);
2705 struct e1000_hw *hw = &adapter->hw; 2705 struct e1000_hw *hw = &adapter->hw;
@@ -3104,13 +3104,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3104 /* UPE and MPE will be handled by normal PROMISC logic 3104 /* UPE and MPE will be handled by normal PROMISC logic
3105 * in e1000e_set_rx_mode 3105 * in e1000e_set_rx_mode
3106 */ 3106 */
3107 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3107 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3108 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3108 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3109 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3109 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3110 3110
3111 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3111 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3112 E1000_RCTL_DPF | /* Allow filtered pause */ 3112 E1000_RCTL_DPF | /* Allow filtered pause */
3113 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3113 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3114 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3114 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3115 * and that breaks VLANs. 3115 * and that breaks VLANs.
3116 */ 3116 */
@@ -3799,7 +3799,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3799 hwm = min(((pba << 10) * 9 / 10), 3799 hwm = min(((pba << 10) * 9 / 10),
3800 ((pba << 10) - adapter->max_frame_size)); 3800 ((pba << 10) - adapter->max_frame_size));
3801 3801
3802 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3802 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3803 fc->low_water = fc->high_water - 8; 3803 fc->low_water = fc->high_water - 8;
3804 break; 3804 break;
3805 case e1000_pchlan: 3805 case e1000_pchlan:
@@ -3808,10 +3808,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
3808 */ 3808 */
3809 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3809 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3810 fc->high_water = 0x3500; 3810 fc->high_water = 0x3500;
3811 fc->low_water = 0x1500; 3811 fc->low_water = 0x1500;
3812 } else { 3812 } else {
3813 fc->high_water = 0x5000; 3813 fc->high_water = 0x5000;
3814 fc->low_water = 0x3000; 3814 fc->low_water = 0x3000;
3815 } 3815 }
3816 fc->refresh_time = 0x1000; 3816 fc->refresh_time = 0x1000;
3817 break; 3817 break;
@@ -4581,7 +4581,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4581 adapter->stats.crcerrs += er32(CRCERRS); 4581 adapter->stats.crcerrs += er32(CRCERRS);
4582 adapter->stats.gprc += er32(GPRC); 4582 adapter->stats.gprc += er32(GPRC);
4583 adapter->stats.gorc += er32(GORCL); 4583 adapter->stats.gorc += er32(GORCL);
4584 er32(GORCH); /* Clear gorc */ 4584 er32(GORCH); /* Clear gorc */
4585 adapter->stats.bprc += er32(BPRC); 4585 adapter->stats.bprc += er32(BPRC);
4586 adapter->stats.mprc += er32(MPRC); 4586 adapter->stats.mprc += er32(MPRC);
4587 adapter->stats.roc += er32(ROC); 4587 adapter->stats.roc += er32(ROC);
@@ -4614,7 +4614,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4614 adapter->stats.xofftxc += er32(XOFFTXC); 4614 adapter->stats.xofftxc += er32(XOFFTXC);
4615 adapter->stats.gptc += er32(GPTC); 4615 adapter->stats.gptc += er32(GPTC);
4616 adapter->stats.gotc += er32(GOTCL); 4616 adapter->stats.gotc += er32(GOTCL);
4617 er32(GOTCH); /* Clear gotc */ 4617 er32(GOTCH); /* Clear gotc */
4618 adapter->stats.rnbc += er32(RNBC); 4618 adapter->stats.rnbc += er32(RNBC);
4619 adapter->stats.ruc += er32(RUC); 4619 adapter->stats.ruc += er32(RUC);
4620 4620
@@ -5106,13 +5106,13 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5107 buffer_info = &tx_ring->buffer_info[i]; 5107 buffer_info = &tx_ring->buffer_info[i];
5108 5108
5109 context_desc->lower_setup.ip_fields.ipcss = ipcss; 5109 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5110 context_desc->lower_setup.ip_fields.ipcso = ipcso; 5110 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5111 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 5111 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5112 context_desc->upper_setup.tcp_fields.tucss = tucss; 5112 context_desc->upper_setup.tcp_fields.tucss = tucss;
5113 context_desc->upper_setup.tcp_fields.tucso = tucso; 5113 context_desc->upper_setup.tcp_fields.tucso = tucso;
5114 context_desc->upper_setup.tcp_fields.tucse = 0; 5114 context_desc->upper_setup.tcp_fields.tucse = 0;
5115 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 5115 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5116 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 5116 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5117 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 5117 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5118 5118
@@ -5363,7 +5363,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5363static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 5363static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5364 struct sk_buff *skb) 5364 struct sk_buff *skb)
5365{ 5365{
5366 struct e1000_hw *hw = &adapter->hw; 5366 struct e1000_hw *hw = &adapter->hw;
5367 u16 length, offset; 5367 u16 length, offset;
5368 5368
5369 if (vlan_tx_tag_present(skb) && 5369 if (vlan_tx_tag_present(skb) &&
@@ -6259,7 +6259,7 @@ static void e1000_netpoll(struct net_device *netdev)
6259 e1000_intr_msi(adapter->pdev->irq, netdev); 6259 e1000_intr_msi(adapter->pdev->irq, netdev);
6260 enable_irq(adapter->pdev->irq); 6260 enable_irq(adapter->pdev->irq);
6261 break; 6261 break;
6262 default: /* E1000E_INT_MODE_LEGACY */ 6262 default: /* E1000E_INT_MODE_LEGACY */
6263 disable_irq(adapter->pdev->irq); 6263 disable_irq(adapter->pdev->irq);
6264 e1000_intr(adapter->pdev->irq, netdev); 6264 e1000_intr(adapter->pdev->irq, netdev);
6265 enable_irq(adapter->pdev->irq); 6265 enable_irq(adapter->pdev->irq);
@@ -6589,9 +6589,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; 6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6590 6590
6591 /* construct the net_device struct */ 6591 /* construct the net_device struct */
6592 netdev->netdev_ops = &e1000e_netdev_ops; 6592 netdev->netdev_ops = &e1000e_netdev_ops;
6593 e1000e_set_ethtool_ops(netdev); 6593 e1000e_set_ethtool_ops(netdev);
6594 netdev->watchdog_timeo = 5 * HZ; 6594 netdev->watchdog_timeo = 5 * HZ;
6595 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); 6595 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6596 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6596 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6597 6597
@@ -7034,7 +7034,6 @@ static void __exit e1000_exit_module(void)
7034} 7034}
7035module_exit(e1000_exit_module); 7035module_exit(e1000_exit_module);
7036 7036
7037
7038MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 7037MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7039MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 7038MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7040MODULE_LICENSE("GPL"); 7039MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 44ddc0a0ee0e..d70a03906ac0 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -117,7 +117,6 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
117 u16 data; 117 u16 data;
118 118
119 eecd = er32(EECD); 119 eecd = er32(EECD);
120
121 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); 120 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
122 data = 0; 121 data = 0;
123 122
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 59c76a6815a0..da2be59505c0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1583,13 +1583,13 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1583 case e1000_phy_gg82563: 1583 case e1000_phy_gg82563:
1584 case e1000_phy_bm: 1584 case e1000_phy_bm:
1585 case e1000_phy_82578: 1585 case e1000_phy_82578:
1586 offset = M88E1000_PHY_SPEC_STATUS; 1586 offset = M88E1000_PHY_SPEC_STATUS;
1587 mask = M88E1000_PSSR_DOWNSHIFT; 1587 mask = M88E1000_PSSR_DOWNSHIFT;
1588 break; 1588 break;
1589 case e1000_phy_igp_2: 1589 case e1000_phy_igp_2:
1590 case e1000_phy_igp_3: 1590 case e1000_phy_igp_3:
1591 offset = IGP01E1000_PHY_LINK_HEALTH; 1591 offset = IGP01E1000_PHY_LINK_HEALTH;
1592 mask = IGP01E1000_PLHR_SS_DOWNGRADE; 1592 mask = IGP01E1000_PLHR_SS_DOWNGRADE;
1593 break; 1593 break;
1594 default: 1594 default:
1595 /* speed downshift not supported */ 1595 /* speed downshift not supported */
@@ -1653,14 +1653,14 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1653 1653
1654 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 1654 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1655 IGP01E1000_PSSR_SPEED_1000MBPS) { 1655 IGP01E1000_PSSR_SPEED_1000MBPS) {
1656 offset = IGP01E1000_PHY_PCS_INIT_REG; 1656 offset = IGP01E1000_PHY_PCS_INIT_REG;
1657 mask = IGP01E1000_PHY_POLARITY_MASK; 1657 mask = IGP01E1000_PHY_POLARITY_MASK;
1658 } else { 1658 } else {
1659 /* This really only applies to 10Mbps since 1659 /* This really only applies to 10Mbps since
1660 * there is no polarity for 100Mbps (always 0). 1660 * there is no polarity for 100Mbps (always 0).
1661 */ 1661 */
1662 offset = IGP01E1000_PHY_PORT_STATUS; 1662 offset = IGP01E1000_PHY_PORT_STATUS;
1663 mask = IGP01E1000_PSSR_POLARITY_REVERSED; 1663 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1664 } 1664 }
1665 1665
1666 ret_val = e1e_rphy(hw, offset, &data); 1666 ret_val = e1e_rphy(hw, offset, &data);
@@ -1900,7 +1900,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1900s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) 1900s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1901{ 1901{
1902 struct e1000_phy_info *phy = &hw->phy; 1902 struct e1000_phy_info *phy = &hw->phy;
1903 s32 ret_val; 1903 s32 ret_val;
1904 u16 phy_data; 1904 u16 phy_data;
1905 bool link; 1905 bool link;
1906 1906
@@ -2253,7 +2253,7 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2253 case M88E1011_I_PHY_ID: 2253 case M88E1011_I_PHY_ID:
2254 phy_type = e1000_phy_m88; 2254 phy_type = e1000_phy_m88;
2255 break; 2255 break;
2256 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ 2256 case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
2257 phy_type = e1000_phy_igp_2; 2257 phy_type = e1000_phy_igp_2;
2258 break; 2258 break;
2259 case GG82563_E_PHY_ID: 2259 case GG82563_E_PHY_ID:
@@ -2317,7 +2317,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
2317 /* If phy_type is valid, break - we found our 2317 /* If phy_type is valid, break - we found our
2318 * PHY address 2318 * PHY address
2319 */ 2319 */
2320 if (phy_type != e1000_phy_unknown) 2320 if (phy_type != e1000_phy_unknown)
2321 return 0; 2321 return 0;
2322 2322
2323 usleep_range(1000, 2000); 2323 usleep_range(1000, 2000);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ff6a17cb1362..f21a91a299a2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -401,12 +401,82 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
401 return 0; 401 return 0;
402} 402}
403 403
404/**
405 * igb_set_sfp_media_type_82575 - derives SFP module media type.
406 * @hw: pointer to the HW structure
407 *
408 * The media type is chosen based on SFP module.
409 * compatibility flags retrieved from SFP ID EEPROM.
410 **/
411static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
412{
413 s32 ret_val = E1000_ERR_CONFIG;
414 u32 ctrl_ext = 0;
415 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
416 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
417 u8 tranceiver_type = 0;
418 s32 timeout = 3;
419
420 /* Turn I2C interface ON and power on sfp cage */
421 ctrl_ext = rd32(E1000_CTRL_EXT);
422 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
423 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
424
425 wrfl();
426
427 /* Read SFP module data */
428 while (timeout) {
429 ret_val = igb_read_sfp_data_byte(hw,
430 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
431 &tranceiver_type);
432 if (ret_val == 0)
433 break;
434 msleep(100);
435 timeout--;
436 }
437 if (ret_val != 0)
438 goto out;
439
440 ret_val = igb_read_sfp_data_byte(hw,
441 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
442 (u8 *)eth_flags);
443 if (ret_val != 0)
444 goto out;
445
446 /* Check if there is some SFP module plugged and powered */
447 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
448 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
449 dev_spec->module_plugged = true;
450 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
451 hw->phy.media_type = e1000_media_type_internal_serdes;
452 } else if (eth_flags->e100_base_fx) {
453 dev_spec->sgmii_active = true;
454 hw->phy.media_type = e1000_media_type_internal_serdes;
455 } else if (eth_flags->e1000_base_t) {
456 dev_spec->sgmii_active = true;
457 hw->phy.media_type = e1000_media_type_copper;
458 } else {
459 hw->phy.media_type = e1000_media_type_unknown;
460 hw_dbg("PHY module has not been recognized\n");
461 goto out;
462 }
463 } else {
464 hw->phy.media_type = e1000_media_type_unknown;
465 }
466 ret_val = 0;
467out:
468 /* Restore I2C interface setting */
469 wr32(E1000_CTRL_EXT, ctrl_ext);
470 return ret_val;
471}
472
404static s32 igb_get_invariants_82575(struct e1000_hw *hw) 473static s32 igb_get_invariants_82575(struct e1000_hw *hw)
405{ 474{
406 struct e1000_mac_info *mac = &hw->mac; 475 struct e1000_mac_info *mac = &hw->mac;
407 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 476 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
408 s32 ret_val; 477 s32 ret_val;
409 u32 ctrl_ext = 0; 478 u32 ctrl_ext = 0;
479 u32 link_mode = 0;
410 480
411 switch (hw->device_id) { 481 switch (hw->device_id) {
412 case E1000_DEV_ID_82575EB_COPPER: 482 case E1000_DEV_ID_82575EB_COPPER:
@@ -470,16 +540,56 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
470 */ 540 */
471 hw->phy.media_type = e1000_media_type_copper; 541 hw->phy.media_type = e1000_media_type_copper;
472 dev_spec->sgmii_active = false; 542 dev_spec->sgmii_active = false;
543 dev_spec->module_plugged = false;
473 544
474 ctrl_ext = rd32(E1000_CTRL_EXT); 545 ctrl_ext = rd32(E1000_CTRL_EXT);
475 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 546
476 case E1000_CTRL_EXT_LINK_MODE_SGMII: 547 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
477 dev_spec->sgmii_active = true; 548 switch (link_mode) {
478 break;
479 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 549 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
480 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
481 hw->phy.media_type = e1000_media_type_internal_serdes; 550 hw->phy.media_type = e1000_media_type_internal_serdes;
482 break; 551 break;
552 case E1000_CTRL_EXT_LINK_MODE_SGMII:
553 /* Get phy control interface type set (MDIO vs. I2C)*/
554 if (igb_sgmii_uses_mdio_82575(hw)) {
555 hw->phy.media_type = e1000_media_type_copper;
556 dev_spec->sgmii_active = true;
557 break;
558 }
559 /* fall through for I2C based SGMII */
560 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
561 /* read media type from SFP EEPROM */
562 ret_val = igb_set_sfp_media_type_82575(hw);
563 if ((ret_val != 0) ||
564 (hw->phy.media_type == e1000_media_type_unknown)) {
565 /* If media type was not identified then return media
566 * type defined by the CTRL_EXT settings.
567 */
568 hw->phy.media_type = e1000_media_type_internal_serdes;
569
570 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
571 hw->phy.media_type = e1000_media_type_copper;
572 dev_spec->sgmii_active = true;
573 }
574
575 break;
576 }
577
578 /* do not change link mode for 100BaseFX */
579 if (dev_spec->eth_flags.e100_base_fx)
580 break;
581
582 /* change current link mode setting */
583 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
584
585 if (hw->phy.media_type == e1000_media_type_copper)
586 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
587 else
588 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
589
590 wr32(E1000_CTRL_EXT, ctrl_ext);
591
592 break;
483 default: 593 default:
484 break; 594 break;
485 } 595 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 31a0f82cc650..aa201abb8ad2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -61,20 +61,22 @@
61/* Clear Interrupt timers after IMS clear */ 61/* Clear Interrupt timers after IMS clear */
62/* packet buffer parity error detection enabled */ 62/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 63/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 65#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 67#define E1000_I2CCMD_OPCODE_READ 0x08000000
68#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 68#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
69#define E1000_I2CCMD_READY 0x20000000 69#define E1000_I2CCMD_READY 0x20000000
70#define E1000_I2CCMD_ERROR 0x80000000 70#define E1000_I2CCMD_ERROR 0x80000000
71#define E1000_MAX_SGMII_PHY_REG_ADDR 255 71#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
72#define E1000_I2CCMD_PHY_TIMEOUT 200 72#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
73#define E1000_IVAR_VALID 0x80 73#define E1000_MAX_SGMII_PHY_REG_ADDR 255
74#define E1000_GPIE_NSICR 0x00000001 74#define E1000_I2CCMD_PHY_TIMEOUT 200
75#define E1000_GPIE_MSIX_MODE 0x00000010 75#define E1000_IVAR_VALID 0x80
76#define E1000_GPIE_EIAME 0x40000000 76#define E1000_GPIE_NSICR 0x00000001
77#define E1000_GPIE_PBA 0x80000000 77#define E1000_GPIE_MSIX_MODE 0x00000010
78#define E1000_GPIE_EIAME 0x40000000
79#define E1000_GPIE_PBA 0x80000000
78 80
79/* Receive Descriptor bit definitions */ 81/* Receive Descriptor bit definitions */
80#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 82#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -270,8 +272,10 @@
270#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX 272#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
271 273
272/* LED Control */ 274/* LED Control */
273#define E1000_LEDCTL_LED0_MODE_SHIFT 0 275#define E1000_LEDCTL_LED0_MODE_SHIFT 0
274#define E1000_LEDCTL_LED0_BLINK 0x00000080 276#define E1000_LEDCTL_LED0_BLINK 0x00000080
277#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
278#define E1000_LEDCTL_LED0_IVRT 0x00000040
275 279
276#define E1000_LEDCTL_MODE_LED_ON 0xE 280#define E1000_LEDCTL_MODE_LED_ON 0xE
277#define E1000_LEDCTL_MODE_LED_OFF 0xF 281#define E1000_LEDCTL_MODE_LED_OFF 0xF
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 488abb24a54f..94d7866b9c20 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -528,6 +528,8 @@ struct e1000_dev_spec_82575 {
528 bool global_device_reset; 528 bool global_device_reset;
529 bool eee_disable; 529 bool eee_disable;
530 bool clear_semaphore_once; 530 bool clear_semaphore_once;
531 struct e1000_sfp_flags eth_flags;
532 bool module_plugged;
531}; 533};
532 534
533struct e1000_hw { 535struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index bfc08e05c907..5caa332e7556 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -82,11 +82,11 @@ enum E1000_INVM_STRUCTURE_TYPE {
82#define E1000_INVM_MAJOR_SHIFT 4 82#define E1000_INVM_MAJOR_SHIFT 4
83 83
84#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ 84#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
85 (ID_LED_OFF1_OFF2 << 4) | \ 85 (ID_LED_DEF1_DEF2 << 4) | \
86 (ID_LED_DEF1_DEF2)) 86 (ID_LED_OFF1_OFF2))
87#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ 87#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
88 (ID_LED_DEF1_DEF2 << 4) | \ 88 (ID_LED_DEF1_DEF2 << 4) | \
89 (ID_LED_DEF1_DEF2)) 89 (ID_LED_OFF1_ON2))
90 90
91/* NVM offset defaults for i211 device */ 91/* NVM offset defaults for i211 device */
92#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 92#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 2559d70a2321..bab556a47fcc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1332,7 +1332,13 @@ s32 igb_id_led_init(struct e1000_hw *hw)
1332 u16 data, i, temp; 1332 u16 data, i, temp;
1333 const u16 led_mask = 0x0F; 1333 const u16 led_mask = 0x0F;
1334 1334
1335 ret_val = igb_valid_led_default(hw, &data); 1335 /* i210 and i211 devices have different LED mechanism */
1336 if ((hw->mac.type == e1000_i210) ||
1337 (hw->mac.type == e1000_i211))
1338 ret_val = igb_valid_led_default_i210(hw, &data);
1339 else
1340 ret_val = igb_valid_led_default(hw, &data);
1341
1336 if (ret_val) 1342 if (ret_val)
1337 goto out; 1343 goto out;
1338 1344
@@ -1406,15 +1412,34 @@ s32 igb_blink_led(struct e1000_hw *hw)
1406 u32 ledctl_blink = 0; 1412 u32 ledctl_blink = 0;
1407 u32 i; 1413 u32 i;
1408 1414
1409 /* set the blink bit for each LED that's "on" (0x0E) 1415 if (hw->phy.media_type == e1000_media_type_fiber) {
1410 * in ledctl_mode2 1416 /* always blink LED0 for PCI-E fiber */
1411 */ 1417 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1412 ledctl_blink = hw->mac.ledctl_mode2; 1418 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1413 for (i = 0; i < 4; i++) 1419 } else {
1414 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1420 /* Set the blink bit for each LED that's "on" (0x0E)
1415 E1000_LEDCTL_MODE_LED_ON) 1421 * (or "off" if inverted) in ledctl_mode2. The blink
1416 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << 1422 * logic in hardware only works when mode is set to "on"
1417 (i * 8)); 1423 * so it must be changed accordingly when the mode is
1424 * "off" and inverted.
1425 */
1426 ledctl_blink = hw->mac.ledctl_mode2;
1427 for (i = 0; i < 32; i += 8) {
1428 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1429 E1000_LEDCTL_LED0_MODE_MASK;
1430 u32 led_default = hw->mac.ledctl_default >> i;
1431
1432 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1433 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1434 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1435 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1436 ledctl_blink &=
1437 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1438 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1439 E1000_LEDCTL_MODE_LED_ON) << i;
1440 }
1441 }
1442 }
1418 1443
1419 wr32(E1000_LEDCTL, ledctl_blink); 1444 wr32(E1000_LEDCTL, ledctl_blink);
1420 1445
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 115b0da6e013..1d6a401cc5d4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -341,6 +341,130 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
341} 341}
342 342
343/** 343/**
344 * igb_read_sfp_data_byte - Reads SFP module data.
345 * @hw: pointer to the HW structure
346 * @offset: byte location offset to be read
347 * @data: read data buffer pointer
348 *
349 * Reads one byte from SFP module data stored
350 * in SFP resided EEPROM memory or SFP diagnostic area.
351 * Function should be called with
352 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
353 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
354 * access
355 **/
356s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
357{
358 u32 i = 0;
359 u32 i2ccmd = 0;
360 u32 data_local = 0;
361
362 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
363 hw_dbg("I2CCMD command address exceeds upper limit\n");
364 return -E1000_ERR_PHY;
365 }
366
367 /* Set up Op-code, EEPROM Address,in the I2CCMD
368 * register. The MAC will take care of interfacing with the
369 * EEPROM to retrieve the desired data.
370 */
371 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
372 E1000_I2CCMD_OPCODE_READ);
373
374 wr32(E1000_I2CCMD, i2ccmd);
375
376 /* Poll the ready bit to see if the I2C read completed */
377 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
378 udelay(50);
379 data_local = rd32(E1000_I2CCMD);
380 if (data_local & E1000_I2CCMD_READY)
381 break;
382 }
383 if (!(data_local & E1000_I2CCMD_READY)) {
384 hw_dbg("I2CCMD Read did not complete\n");
385 return -E1000_ERR_PHY;
386 }
387 if (data_local & E1000_I2CCMD_ERROR) {
388 hw_dbg("I2CCMD Error bit set\n");
389 return -E1000_ERR_PHY;
390 }
391 *data = (u8) data_local & 0xFF;
392
393 return 0;
394}
395
396/**
397 * e1000_write_sfp_data_byte - Writes SFP module data.
398 * @hw: pointer to the HW structure
399 * @offset: byte location offset to write to
400 * @data: data to write
401 *
402 * Writes one byte to SFP module data stored
403 * in SFP resided EEPROM memory or SFP diagnostic area.
404 * Function should be called with
405 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
406 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
407 * access
408 **/
409s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
410{
411 u32 i = 0;
412 u32 i2ccmd = 0;
413 u32 data_local = 0;
414
415 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
416 hw_dbg("I2CCMD command address exceeds upper limit\n");
417 return -E1000_ERR_PHY;
418 }
419 /* The programming interface is 16 bits wide
420 * so we need to read the whole word first
421 * then update appropriate byte lane and write
422 * the updated word back.
423 */
424 /* Set up Op-code, EEPROM Address,in the I2CCMD
425 * register. The MAC will take care of interfacing
426 * with an EEPROM to write the data given.
427 */
428 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
429 E1000_I2CCMD_OPCODE_READ);
430 /* Set a command to read single word */
431 wr32(E1000_I2CCMD, i2ccmd);
432 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
433 udelay(50);
434 /* Poll the ready bit to see if lastly
435 * launched I2C operation completed
436 */
437 i2ccmd = rd32(E1000_I2CCMD);
438 if (i2ccmd & E1000_I2CCMD_READY) {
439 /* Check if this is READ or WRITE phase */
440 if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
441 E1000_I2CCMD_OPCODE_READ) {
442 /* Write the selected byte
443 * lane and update whole word
444 */
445 data_local = i2ccmd & 0xFF00;
446 data_local |= data;
447 i2ccmd = ((offset <<
448 E1000_I2CCMD_REG_ADDR_SHIFT) |
449 E1000_I2CCMD_OPCODE_WRITE | data_local);
450 wr32(E1000_I2CCMD, i2ccmd);
451 } else {
452 break;
453 }
454 }
455 }
456 if (!(i2ccmd & E1000_I2CCMD_READY)) {
457 hw_dbg("I2CCMD Write did not complete\n");
458 return -E1000_ERR_PHY;
459 }
460 if (i2ccmd & E1000_I2CCMD_ERROR) {
461 hw_dbg("I2CCMD Error bit set\n");
462 return -E1000_ERR_PHY;
463 }
464 return 0;
465}
466
467/**
344 * igb_read_phy_reg_igp - Read igp PHY register 468 * igb_read_phy_reg_igp - Read igp PHY register
345 * @hw: pointer to the HW structure 469 * @hw: pointer to the HW structure
346 * @offset: register offset to be read 470 * @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 784fd1c40989..6a0873f2095a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -69,6 +69,8 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
69s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 69s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); 70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); 71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
72s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
73s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
72s32 igb_copper_link_setup_82580(struct e1000_hw *hw); 74s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
73s32 igb_get_phy_info_82580(struct e1000_hw *hw); 75s32 igb_get_phy_info_82580(struct e1000_hw *hw);
74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); 76s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
@@ -157,4 +159,22 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
157#define GS40G_CS_POWER_DOWN 0x0002 159#define GS40G_CS_POWER_DOWN 0x0002
158#define GS40G_LINE_LB 0x4000 160#define GS40G_LINE_LB 0x4000
159 161
162/* SFP modules ID memory locations */
163#define E1000_SFF_IDENTIFIER_OFFSET 0x00
164#define E1000_SFF_IDENTIFIER_SFF 0x02
165#define E1000_SFF_IDENTIFIER_SFP 0x03
166
167#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
168/* Flags for SFP modules compatible with ETH up to 1Gb */
169struct e1000_sfp_flags {
170 u8 e1000_base_sx:1;
171 u8 e1000_base_lx:1;
172 u8 e1000_base_cx:1;
173 u8 e1000_base_t:1;
174 u8 e100_base_lx:1;
175 u8 e100_base_fx:1;
176 u8 e10_base_bx10:1;
177 u8 e10_base_px:1;
178};
179
160#endif 180#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9d6c075e232d..15ea8dc9dad3 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -322,11 +322,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
322 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 322 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
323} 323}
324 324
325struct igb_i2c_client_list {
326 struct i2c_client *client;
327 struct igb_i2c_client_list *next;
328};
329
330#ifdef CONFIG_IGB_HWMON 325#ifdef CONFIG_IGB_HWMON
331 326
332#define IGB_HWMON_TYPE_LOC 0 327#define IGB_HWMON_TYPE_LOC 0
@@ -514,13 +509,18 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
514extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 509extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
515 unsigned char *va, 510 unsigned char *va,
516 struct sk_buff *skb); 511 struct sk_buff *skb);
517static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 512static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
518 union e1000_adv_rx_desc *rx_desc, 513 union e1000_adv_rx_desc *rx_desc,
519 struct sk_buff *skb) 514 struct sk_buff *skb)
520{ 515{
521 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 516 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
522 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 517 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
523 igb_ptp_rx_rgtstamp(q_vector, skb); 518 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
519
520 /* Update the last_rx_timestamp timer in order to enable watchdog check
521 * for error case of latched timestamp on a dropped packet.
522 */
523 rx_ring->last_rx_timestamp = jiffies;
524} 524}
525 525
526extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 526extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7876240fa74e..85fe7b52f435 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -142,6 +142,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
142{ 142{
143 struct igb_adapter *adapter = netdev_priv(netdev); 143 struct igb_adapter *adapter = netdev_priv(netdev);
144 struct e1000_hw *hw = &adapter->hw; 144 struct e1000_hw *hw = &adapter->hw;
145 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
146 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
145 u32 status; 147 u32 status;
146 148
147 if (hw->phy.media_type == e1000_media_type_copper) { 149 if (hw->phy.media_type == e1000_media_type_copper) {
@@ -162,49 +164,26 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
162 ecmd->advertising |= hw->phy.autoneg_advertised; 164 ecmd->advertising |= hw->phy.autoneg_advertised;
163 } 165 }
164 166
165 if (hw->mac.autoneg != 1)
166 ecmd->advertising &= ~(ADVERTISED_Pause |
167 ADVERTISED_Asym_Pause);
168
169 if (hw->fc.requested_mode == e1000_fc_full)
170 ecmd->advertising |= ADVERTISED_Pause;
171 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
172 ecmd->advertising |= (ADVERTISED_Pause |
173 ADVERTISED_Asym_Pause);
174 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
175 ecmd->advertising |= ADVERTISED_Asym_Pause;
176 else
177 ecmd->advertising &= ~(ADVERTISED_Pause |
178 ADVERTISED_Asym_Pause);
179
180 ecmd->port = PORT_TP; 167 ecmd->port = PORT_TP;
181 ecmd->phy_address = hw->phy.addr; 168 ecmd->phy_address = hw->phy.addr;
182 ecmd->transceiver = XCVR_INTERNAL; 169 ecmd->transceiver = XCVR_INTERNAL;
183 } else { 170 } else {
184 ecmd->supported = (SUPPORTED_1000baseT_Full | 171 ecmd->supported = (SUPPORTED_FIBRE |
185 SUPPORTED_100baseT_Full |
186 SUPPORTED_FIBRE |
187 SUPPORTED_Autoneg | 172 SUPPORTED_Autoneg |
188 SUPPORTED_Pause); 173 SUPPORTED_Pause);
189 if (hw->mac.type == e1000_i354)
190 ecmd->supported |= SUPPORTED_2500baseX_Full;
191
192 ecmd->advertising = ADVERTISED_FIBRE; 174 ecmd->advertising = ADVERTISED_FIBRE;
193 175 if (hw->mac.type == e1000_i354) {
194 switch (adapter->link_speed) { 176 ecmd->supported |= SUPPORTED_2500baseX_Full;
195 case SPEED_2500: 177 ecmd->advertising |= ADVERTISED_2500baseX_Full;
196 ecmd->advertising = ADVERTISED_2500baseX_Full; 178 }
197 break; 179 if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
198 case SPEED_1000: 180 ecmd->supported |= SUPPORTED_1000baseT_Full;
199 ecmd->advertising = ADVERTISED_1000baseT_Full; 181 ecmd->advertising |= ADVERTISED_1000baseT_Full;
200 break; 182 }
201 case SPEED_100: 183 if (eth_flags->e100_base_fx) {
202 ecmd->advertising = ADVERTISED_100baseT_Full; 184 ecmd->supported |= SUPPORTED_100baseT_Full;
203 break; 185 ecmd->advertising |= ADVERTISED_100baseT_Full;
204 default:
205 break;
206 } 186 }
207
208 if (hw->mac.autoneg == 1) 187 if (hw->mac.autoneg == 1)
209 ecmd->advertising |= ADVERTISED_Autoneg; 188 ecmd->advertising |= ADVERTISED_Autoneg;
210 189
@@ -212,6 +191,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
212 ecmd->transceiver = XCVR_EXTERNAL; 191 ecmd->transceiver = XCVR_EXTERNAL;
213 } 192 }
214 193
194 if (hw->mac.autoneg != 1)
195 ecmd->advertising &= ~(ADVERTISED_Pause |
196 ADVERTISED_Asym_Pause);
197
198 if (hw->fc.requested_mode == e1000_fc_full)
199 ecmd->advertising |= ADVERTISED_Pause;
200 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
201 ecmd->advertising |= (ADVERTISED_Pause |
202 ADVERTISED_Asym_Pause);
203 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
204 ecmd->advertising |= ADVERTISED_Asym_Pause;
205 else
206 ecmd->advertising &= ~(ADVERTISED_Pause |
207 ADVERTISED_Asym_Pause);
208
215 status = rd32(E1000_STATUS); 209 status = rd32(E1000_STATUS);
216 210
217 if (status & E1000_STATUS_LU) { 211 if (status & E1000_STATUS_LU) {
@@ -392,6 +386,10 @@ static int igb_set_pauseparam(struct net_device *netdev,
392 struct e1000_hw *hw = &adapter->hw; 386 struct e1000_hw *hw = &adapter->hw;
393 int retval = 0; 387 int retval = 0;
394 388
389 /* 100basefx does not support setting link flow control */
390 if (hw->dev_spec._82575.eth_flags.e100_base_fx)
391 return -EINVAL;
392
395 adapter->fc_autoneg = pause->autoneg; 393 adapter->fc_autoneg = pause->autoneg;
396 394
397 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 395 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
@@ -813,10 +811,8 @@ static int igb_set_eeprom(struct net_device *netdev,
813 ret_val = hw->nvm.ops.write(hw, first_word, 811 ret_val = hw->nvm.ops.write(hw, first_word,
814 last_word - first_word + 1, eeprom_buff); 812 last_word - first_word + 1, eeprom_buff);
815 813
816 /* Update the checksum over the first part of the EEPROM if needed 814 /* Update the checksum if nvm write succeeded */
817 * and flush shadow RAM for 82573 controllers 815 if (ret_val == 0)
818 */
819 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
820 hw->nvm.ops.update(hw); 816 hw->nvm.ops.update(hw);
821 817
822 igb_set_fw_version(adapter); 818 igb_set_fw_version(adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 64cbe0dfe043..6a0c1b66ce54 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1667,10 +1667,13 @@ void igb_down(struct igb_adapter *adapter)
1667 wrfl(); 1667 wrfl();
1668 msleep(10); 1668 msleep(10);
1669 1669
1670 for (i = 0; i < adapter->num_q_vectors; i++) 1670 igb_irq_disable(adapter);
1671
1672 for (i = 0; i < adapter->num_q_vectors; i++) {
1673 napi_synchronize(&(adapter->q_vector[i]->napi));
1671 napi_disable(&(adapter->q_vector[i]->napi)); 1674 napi_disable(&(adapter->q_vector[i]->napi));
1675 }
1672 1676
1673 igb_irq_disable(adapter);
1674 1677
1675 del_timer_sync(&adapter->watchdog_timer); 1678 del_timer_sync(&adapter->watchdog_timer);
1676 del_timer_sync(&adapter->phy_info_timer); 1679 del_timer_sync(&adapter->phy_info_timer);
@@ -6622,7 +6625,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6622 6625
6623 igb_rx_checksum(rx_ring, rx_desc, skb); 6626 igb_rx_checksum(rx_ring, rx_desc, skb);
6624 6627
6625 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); 6628 igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
6626 6629
6627 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6630 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6628 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6631 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 1f2c805684dd..e055e000131b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
380 } 380 }
381 return 0; 381 return 0;
382} 382}
383
384static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
385{
386 u32 reg, i;
387
388 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
389 for (i = 0; i < MAX_USER_PRIORITY; i++)
390 map[i] = IXGBE_RTRUP2TC_UP_MASK &
391 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
392 return;
393}
394
395void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
396{
397 switch (hw->mac.type) {
398 case ixgbe_mac_82599EB:
399 case ixgbe_mac_X540:
400 ixgbe_dcb_read_rtrup2tc_82599(hw, map);
401 break;
402 default:
403 break;
404 }
405}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1634de8b627f..fc0a2dd52499 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
159s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); 159s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
160s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 160s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
161 161
162void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
163
162/* DCB definitions for credit calculation */ 164/* DCB definitions for credit calculation */
163#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ 165#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
164#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ 166#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index a4ef07631d1e..d71d9ce3e394 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -45,6 +45,7 @@
45 45
46/* Receive UP2TC mapping */ 46/* Receive UP2TC mapping */
47#define IXGBE_RTRUP2TC_UP_SHIFT 3 47#define IXGBE_RTRUP2TC_UP_SHIFT 3
48#define IXGBE_RTRUP2TC_UP_MASK 7
48/* Transmit UP2TC mapping */ 49/* Transmit UP2TC mapping */
49#define IXGBE_RTTUP2TC_UP_SHIFT 3 50#define IXGBE_RTTUP2TC_UP_SHIFT 3
50 51
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f3d68f9696ba..edd89a1ef27f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
554 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 554 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
555 adapter->ixgbe_ieee_ets->prio_tc[i] = 555 adapter->ixgbe_ieee_ets->prio_tc[i] =
556 IEEE_8021QAZ_MAX_TCS; 556 IEEE_8021QAZ_MAX_TCS;
557 /* if possible update UP2TC mappings from HW */
558 ixgbe_dcb_read_rtrup2tc(&adapter->hw,
559 adapter->ixgbe_ieee_ets->prio_tc);
557 } 560 }
558 561
559 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 562 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 5409fe876a44..0b5708520121 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -495,12 +495,9 @@ static void korina_multicast_list(struct net_device *dev)
495 495
496 /* Build the hash table */ 496 /* Build the hash table */
497 if (netdev_mc_count(dev) > 4) { 497 if (netdev_mc_count(dev) > 4) {
498 u16 hash_table[4]; 498 u16 hash_table[4] = { 0 };
499 u32 crc; 499 u32 crc;
500 500
501 for (i = 0; i < 4; i++)
502 hash_table[i] = 0;
503
504 netdev_for_each_mc_addr(ha, dev) { 501 netdev_for_each_mc_addr(ha, dev) {
505 crc = ether_crc_le(6, ha->addr); 502 crc = ether_crc_le(6, ha->addr);
506 crc >>= 26; 503 crc >>= 26;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b003fe53c8e2..098b96dad66f 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6340,7 +6340,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6340 {0,}, 6340 {0,},
6341}; 6341};
6342 6342
6343static struct pci_driver driver = { 6343static struct pci_driver forcedeth_pci_driver = {
6344 .name = DRV_NAME, 6344 .name = DRV_NAME,
6345 .id_table = pci_tbl, 6345 .id_table = pci_tbl,
6346 .probe = nv_probe, 6346 .probe = nv_probe,
@@ -6349,16 +6349,6 @@ static struct pci_driver driver = {
6349 .driver.pm = NV_PM_OPS, 6349 .driver.pm = NV_PM_OPS,
6350}; 6350};
6351 6351
6352static int __init init_nic(void)
6353{
6354 return pci_register_driver(&driver);
6355}
6356
6357static void __exit exit_nic(void)
6358{
6359 pci_unregister_driver(&driver);
6360}
6361
6362module_param(max_interrupt_work, int, 0); 6352module_param(max_interrupt_work, int, 0);
6363MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); 6353MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6364module_param(optimization_mode, int, 0); 6354module_param(optimization_mode, int, 0);
@@ -6379,11 +6369,8 @@ module_param(debug_tx_timeout, bool, 0);
6379MODULE_PARM_DESC(debug_tx_timeout, 6369MODULE_PARM_DESC(debug_tx_timeout,
6380 "Dump tx related registers and ring when tx_timeout happens"); 6370 "Dump tx related registers and ring when tx_timeout happens");
6381 6371
6372module_pci_driver(forcedeth_pci_driver);
6382MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6373MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6383MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6374MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6384MODULE_LICENSE("GPL"); 6375MODULE_LICENSE("GPL");
6385
6386MODULE_DEVICE_TABLE(pci, pci_tbl); 6376MODULE_DEVICE_TABLE(pci, pci_tbl);
6387
6388module_init(init_nic);
6389module_exit(exit_nic);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 50235d201592..e04d471acb10 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4945,15 +4945,4 @@ static struct pci_driver qlge_driver = {
4945 .err_handler = &qlge_err_handler 4945 .err_handler = &qlge_err_handler
4946}; 4946};
4947 4947
4948static int __init qlge_init_module(void) 4948module_pci_driver(qlge_driver);
4949{
4950 return pci_register_driver(&qlge_driver);
4951}
4952
4953static void __exit qlge_exit(void)
4954{
4955 pci_unregister_driver(&qlge_driver);
4956}
4957
4958module_init(qlge_init_module);
4959module_exit(qlge_exit);
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7ed08c32a9c5..ffa78432164d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1398,16 +1398,6 @@ static struct pci_driver ioc3_driver = {
1398 .remove = ioc3_remove_one, 1398 .remove = ioc3_remove_one,
1399}; 1399};
1400 1400
1401static int __init ioc3_init_module(void)
1402{
1403 return pci_register_driver(&ioc3_driver);
1404}
1405
1406static void __exit ioc3_cleanup_module(void)
1407{
1408 pci_unregister_driver(&ioc3_driver);
1409}
1410
1411static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) 1401static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1412{ 1402{
1413 unsigned long data; 1403 unsigned long data;
@@ -1677,9 +1667,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1677 netif_wake_queue(dev); /* Let us get going again. */ 1667 netif_wake_queue(dev); /* Let us get going again. */
1678} 1668}
1679 1669
1670module_pci_driver(ioc3_driver);
1680MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); 1671MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
1681MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); 1672MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1682MODULE_LICENSE("GPL"); 1673MODULE_LICENSE("GPL");
1683
1684module_init(ioc3_init_module);
1685module_exit(ioc3_cleanup_module);
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 28f7268f1b88..5eb933c97bba 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1578,19 +1578,7 @@ static struct pci_driver sc92031_pci_driver = {
1578 .resume = sc92031_resume, 1578 .resume = sc92031_resume,
1579}; 1579};
1580 1580
1581static int __init sc92031_init(void) 1581module_pci_driver(sc92031_pci_driver);
1582{
1583 return pci_register_driver(&sc92031_pci_driver);
1584}
1585
1586static void __exit sc92031_exit(void)
1587{
1588 pci_unregister_driver(&sc92031_pci_driver);
1589}
1590
1591module_init(sc92031_init);
1592module_exit(sc92031_exit);
1593
1594MODULE_LICENSE("GPL"); 1582MODULE_LICENSE("GPL");
1595MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); 1583MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1596MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); 1584MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 9a9c379420d1..02df0894690d 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1934,15 +1934,4 @@ static struct pci_driver sis190_pci_driver = {
1934 .remove = sis190_remove_one, 1934 .remove = sis190_remove_one,
1935}; 1935};
1936 1936
1937static int __init sis190_init_module(void) 1937module_pci_driver(sis190_pci_driver);
1938{
1939 return pci_register_driver(&sis190_pci_driver);
1940}
1941
1942static void __exit sis190_cleanup_module(void)
1943{
1944 pci_unregister_driver(&sis190_pci_driver);
1945}
1946
1947module_init(sis190_init_module);
1948module_exit(sis190_cleanup_module);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index bb4c1674ff99..ff9e99474039 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -97,7 +97,7 @@ config SMC911X
97 97
98config SMSC911X 98config SMSC911X
99 tristate "SMSC LAN911x/LAN921x families embedded ethernet support" 99 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
100 depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) 100 depends on HAS_IOMEM
101 select CRC32 101 select CRC32
102 select NET_CORE 102 select NET_CORE
103 select MII 103 select MII
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 054975939a18..09b4f8c0b199 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1017,10 +1017,7 @@ static void bigmac_set_multicast(struct net_device *dev)
1017 tmp |= BIGMAC_RXCFG_PMISC; 1017 tmp |= BIGMAC_RXCFG_PMISC;
1018 sbus_writel(tmp, bregs + BMAC_RXCFG); 1018 sbus_writel(tmp, bregs + BMAC_RXCFG);
1019 } else { 1019 } else {
1020 u16 hash_table[4]; 1020 u16 hash_table[4] = { 0 };
1021
1022 for (i = 0; i < 4; i++)
1023 hash_table[i] = 0;
1024 1021
1025 netdev_for_each_mc_addr(ha, dev) { 1022 netdev_for_each_mc_addr(ha, dev) {
1026 crc = ether_crc_le(6, ha->addr); 1023 crc = ether_crc_le(6, ha->addr);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5f3f9d52757d..e62df2b81302 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -3028,15 +3028,4 @@ static struct pci_driver gem_driver = {
3028#endif /* CONFIG_PM */ 3028#endif /* CONFIG_PM */
3029}; 3029};
3030 3030
3031static int __init gem_init(void) 3031module_pci_driver(gem_driver);
3032{
3033 return pci_register_driver(&gem_driver);
3034}
3035
3036static void __exit gem_cleanup(void)
3037{
3038 pci_unregister_driver(&gem_driver);
3039}
3040
3041module_init(gem_init);
3042module_exit(gem_cleanup);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 60c400f6d01f..59abfbcd0d55 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -533,7 +533,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
533 /* This is a hack. We need to know which board structure 533 /* This is a hack. We need to know which board structure
534 * is suited for this adapter */ 534 * is suited for this adapter */
535 device_id = inw(ioaddr + EISA_ID2); 535 device_id = inw(ioaddr + EISA_ID2);
536 priv->is_eisa = 1;
537 if (device_id == 0x20F1) { 536 if (device_id == 0x20F1) {
538 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 537 priv->adapter = &board_info[13]; /* NetFlex-3/E */
539 priv->adapter_rev = 23; /* TLAN 2.3 */ 538 priv->adapter_rev = 23; /* TLAN 2.3 */
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
index 5fc98a8e4889..2eb33a250788 100644
--- a/drivers/net/ethernet/ti/tlan.h
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -207,7 +207,6 @@ struct tlan_priv {
207 u8 tlan_full_duplex; 207 u8 tlan_full_duplex;
208 spinlock_t lock; 208 spinlock_t lock;
209 u8 link; 209 u8 link;
210 u8 is_eisa;
211 struct work_struct tlan_tqueue; 210 struct work_struct tlan_tqueue;
212 u8 neg_be_verbose; 211 u8 neg_be_verbose;
213}; 212};
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index fe256094db35..a971b9cca564 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -2209,18 +2209,6 @@ MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2209module_param_named(duplex, options.duplex, int, 0); 2209module_param_named(duplex, options.duplex, int, 0);
2210MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2210MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2211 2211
2212static int __init tc35815_init_module(void) 2212module_pci_driver(tc35815_pci_driver);
2213{
2214 return pci_register_driver(&tc35815_pci_driver);
2215}
2216
2217static void __exit tc35815_cleanup_module(void)
2218{
2219 pci_unregister_driver(&tc35815_pci_driver);
2220}
2221
2222module_init(tc35815_init_module);
2223module_exit(tc35815_cleanup_module);
2224
2225MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); 2213MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
2226MODULE_LICENSE("GPL"); 2214MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 68a9ba66feba..6a87097d88c0 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -5,7 +5,6 @@
5config NET_VENDOR_VIA 5config NET_VENDOR_VIA
6 bool "VIA devices" 6 bool "VIA devices"
7 default y 7 default y
8 depends on PCI
9 ---help--- 8 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 9 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 10 and read the Ethernet-HOWTO, available from
@@ -45,7 +44,7 @@ config VIA_RHINE_MMIO
45 44
46config VIA_VELOCITY 45config VIA_VELOCITY
47 tristate "VIA Velocity support" 46 tristate "VIA Velocity support"
48 depends on PCI 47 depends on (PCI || USE_OF)
49 select CRC32 48 select CRC32
50 select CRC_CCITT 49 select CRC_CCITT
51 select NET_CORE 50 select NET_CORE
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index fb6248956ee2..76919948b4ee 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -46,6 +46,7 @@
46#include <linux/types.h> 46#include <linux/types.h>
47#include <linux/bitops.h> 47#include <linux/bitops.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/dma-mapping.h>
49#include <linux/mm.h> 50#include <linux/mm.h>
50#include <linux/errno.h> 51#include <linux/errno.h>
51#include <linux/ioport.h> 52#include <linux/ioport.h>
@@ -64,7 +65,11 @@
64#include <linux/if.h> 65#include <linux/if.h>
65#include <linux/uaccess.h> 66#include <linux/uaccess.h>
66#include <linux/proc_fs.h> 67#include <linux/proc_fs.h>
68#include <linux/of_address.h>
69#include <linux/of_device.h>
70#include <linux/of_irq.h>
67#include <linux/inetdevice.h> 71#include <linux/inetdevice.h>
72#include <linux/platform_device.h>
68#include <linux/reboot.h> 73#include <linux/reboot.h>
69#include <linux/ethtool.h> 74#include <linux/ethtool.h>
70#include <linux/mii.h> 75#include <linux/mii.h>
@@ -79,10 +84,24 @@
79 84
80#include "via-velocity.h" 85#include "via-velocity.h"
81 86
87enum velocity_bus_type {
88 BUS_PCI,
89 BUS_PLATFORM,
90};
82 91
83static int velocity_nics; 92static int velocity_nics;
84static int msglevel = MSG_LEVEL_INFO; 93static int msglevel = MSG_LEVEL_INFO;
85 94
95static void velocity_set_power_state(struct velocity_info *vptr, char state)
96{
97 void *addr = vptr->mac_regs;
98
99 if (vptr->pdev)
100 pci_set_power_state(vptr->pdev, state);
101 else
102 writeb(state, addr + 0x154);
103}
104
86/** 105/**
87 * mac_get_cam_mask - Read a CAM mask 106 * mac_get_cam_mask - Read a CAM mask
88 * @regs: register block for this velocity 107 * @regs: register block for this velocity
@@ -361,12 +380,23 @@ static struct velocity_info_tbl chip_info_table[] = {
361 * Describe the PCI device identifiers that we support in this 380 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading. 381 * device driver. Used for hotplug autoloading.
363 */ 382 */
364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = { 383
384static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = {
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, 385 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { } 386 { }
367}; 387};
368 388
369MODULE_DEVICE_TABLE(pci, velocity_id_table); 389MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
390
391/**
392 * Describe the OF device identifiers that we support in this
393 * device driver. Used for devicetree nodes.
394 */
395static struct of_device_id velocity_of_ids[] = {
396 { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
397 { /* Sentinel */ },
398};
399MODULE_DEVICE_TABLE(of, velocity_of_ids);
370 400
371/** 401/**
372 * get_chip_name - identifier to name 402 * get_chip_name - identifier to name
@@ -385,29 +415,6 @@ static const char *get_chip_name(enum chip_type chip_id)
385} 415}
386 416
387/** 417/**
388 * velocity_remove1 - device unplug
389 * @pdev: PCI device being removed
390 *
391 * Device unload callback. Called on an unplug or on module
392 * unload for each active device that is present. Disconnects
393 * the device from the network layer and frees all the resources
394 */
395static void velocity_remove1(struct pci_dev *pdev)
396{
397 struct net_device *dev = pci_get_drvdata(pdev);
398 struct velocity_info *vptr = netdev_priv(dev);
399
400 unregister_netdev(dev);
401 iounmap(vptr->mac_regs);
402 pci_release_regions(pdev);
403 pci_disable_device(pdev);
404 pci_set_drvdata(pdev, NULL);
405 free_netdev(dev);
406
407 velocity_nics--;
408}
409
410/**
411 * velocity_set_int_opt - parser for integer options 418 * velocity_set_int_opt - parser for integer options
412 * @opt: pointer to option value 419 * @opt: pointer to option value
413 * @val: value the user requested (or -1 for default) 420 * @val: value the user requested (or -1 for default)
@@ -998,9 +1005,9 @@ static void velocity_print_link_status(struct velocity_info *vptr)
998{ 1005{
999 1006
1000 if (vptr->mii_status & VELOCITY_LINK_FAIL) { 1007 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1001 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); 1008 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
1002 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { 1009 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1003 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); 1010 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);
1004 1011
1005 if (vptr->mii_status & VELOCITY_SPEED_1000) 1012 if (vptr->mii_status & VELOCITY_SPEED_1000)
1006 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); 1013 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
@@ -1014,7 +1021,7 @@ static void velocity_print_link_status(struct velocity_info *vptr)
1014 else 1021 else
1015 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); 1022 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1016 } else { 1023 } else {
1017 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); 1024 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
1018 switch (vptr->options.spd_dpx) { 1025 switch (vptr->options.spd_dpx) {
1019 case SPD_DPX_1000_FULL: 1026 case SPD_DPX_1000_FULL:
1020 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); 1027 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
@@ -1180,6 +1187,17 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1180 u16 BMCR; 1187 u16 BMCR;
1181 1188
1182 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { 1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1190 case PHYID_ICPLUS_IP101A:
1191 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1192 MII_ADVERTISE, vptr->mac_regs);
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1194 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1195 vptr->mac_regs);
1196 else
1197 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1198 vptr->mac_regs);
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200 break;
1183 case PHYID_CICADA_CS8201: 1201 case PHYID_CICADA_CS8201:
1184 /* 1202 /*
1185 * Reset to hardware default 1203 * Reset to hardware default
@@ -1311,6 +1329,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1311 enum velocity_init_type type) 1329 enum velocity_init_type type)
1312{ 1330{
1313 struct mac_regs __iomem *regs = vptr->mac_regs; 1331 struct mac_regs __iomem *regs = vptr->mac_regs;
1332 struct net_device *netdev = vptr->netdev;
1314 int i, mii_status; 1333 int i, mii_status;
1315 1334
1316 mac_wol_reset(regs); 1335 mac_wol_reset(regs);
@@ -1319,7 +1338,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1319 case VELOCITY_INIT_RESET: 1338 case VELOCITY_INIT_RESET:
1320 case VELOCITY_INIT_WOL: 1339 case VELOCITY_INIT_WOL:
1321 1340
1322 netif_stop_queue(vptr->dev); 1341 netif_stop_queue(netdev);
1323 1342
1324 /* 1343 /*
1325 * Reset RX to prevent RX pointer not on the 4X location 1344 * Reset RX to prevent RX pointer not on the 4X location
@@ -1332,7 +1351,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1332 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1333 velocity_print_link_status(vptr); 1352 velocity_print_link_status(vptr);
1334 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1335 netif_wake_queue(vptr->dev); 1354 netif_wake_queue(netdev);
1336 } 1355 }
1337 1356
1338 enable_flow_control_ability(vptr); 1357 enable_flow_control_ability(vptr);
@@ -1352,9 +1371,11 @@ static void velocity_init_registers(struct velocity_info *vptr,
1352 velocity_soft_reset(vptr); 1371 velocity_soft_reset(vptr);
1353 mdelay(5); 1372 mdelay(5);
1354 1373
1355 mac_eeprom_reload(regs); 1374 if (!vptr->no_eeprom) {
1356 for (i = 0; i < 6; i++) 1375 mac_eeprom_reload(regs);
1357 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); 1376 for (i = 0; i < 6; i++)
1377 writeb(netdev->dev_addr[i], regs->PAR + i);
1378 }
1358 1379
1359 /* 1380 /*
1360 * clear Pre_ACPI bit. 1381 * clear Pre_ACPI bit.
@@ -1377,7 +1398,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1377 /* 1398 /*
1378 * Set packet filter: Receive directed and broadcast address 1399 * Set packet filter: Receive directed and broadcast address
1379 */ 1400 */
1380 velocity_set_multi(vptr->dev); 1401 velocity_set_multi(netdev);
1381 1402
1382 /* 1403 /*
1383 * Enable MII auto-polling 1404 * Enable MII auto-polling
@@ -1404,14 +1425,14 @@ static void velocity_init_registers(struct velocity_info *vptr,
1404 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); 1425 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1405 1426
1406 mii_status = velocity_get_opt_media_mode(vptr); 1427 mii_status = velocity_get_opt_media_mode(vptr);
1407 netif_stop_queue(vptr->dev); 1428 netif_stop_queue(netdev);
1408 1429
1409 mii_init(vptr, mii_status); 1430 mii_init(vptr, mii_status);
1410 1431
1411 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1412 velocity_print_link_status(vptr); 1433 velocity_print_link_status(vptr);
1413 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1414 netif_wake_queue(vptr->dev); 1435 netif_wake_queue(netdev);
1415 } 1436 }
1416 1437
1417 enable_flow_control_ability(vptr); 1438 enable_flow_control_ability(vptr);
@@ -1459,7 +1480,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
1459 struct velocity_opt *opt = &vptr->options; 1480 struct velocity_opt *opt = &vptr->options;
1460 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1481 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1461 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); 1482 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1462 struct pci_dev *pdev = vptr->pdev;
1463 dma_addr_t pool_dma; 1483 dma_addr_t pool_dma;
1464 void *pool; 1484 void *pool;
1465 unsigned int i; 1485 unsigned int i;
@@ -1467,14 +1487,14 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
1467 /* 1487 /*
1468 * Allocate all RD/TD rings a single pool. 1488 * Allocate all RD/TD rings a single pool.
1469 * 1489 *
1470 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1490 * dma_alloc_coherent() fulfills the requirement for 64 bytes
1471 * alignment 1491 * alignment
1472 */ 1492 */
1473 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + 1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1474 rx_ring_size, &pool_dma); 1494 rx_ring_size, &pool_dma, GFP_ATOMIC);
1475 if (!pool) { 1495 if (!pool) {
1476 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1477 vptr->dev->name); 1497 vptr->netdev->name);
1478 return -ENOMEM; 1498 return -ENOMEM;
1479 } 1499 }
1480 1500
@@ -1514,7 +1534,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1514 struct rx_desc *rd = &(vptr->rx.ring[idx]); 1534 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1516 1536
1517 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64); 1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1518 if (rd_info->skb == NULL) 1538 if (rd_info->skb == NULL)
1519 return -ENOMEM; 1539 return -ENOMEM;
1520 1540
@@ -1524,8 +1544,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1524 */ 1544 */
1525 skb_reserve(rd_info->skb, 1545 skb_reserve(rd_info->skb,
1526 64 - ((unsigned long) rd_info->skb->data & 63)); 1546 64 - ((unsigned long) rd_info->skb->data & 63));
1527 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, 1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1528 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1548 vptr->rx.buf_sz, DMA_FROM_DEVICE);
1529 1549
1530 /* 1550 /*
1531 * Fill in the descriptor to match 1551 * Fill in the descriptor to match
@@ -1588,8 +1608,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1588 1608
1589 if (!rd_info->skb) 1609 if (!rd_info->skb)
1590 continue; 1610 continue;
1591 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1592 PCI_DMA_FROMDEVICE); 1612 DMA_FROM_DEVICE);
1593 rd_info->skb_dma = 0; 1613 rd_info->skb_dma = 0;
1594 1614
1595 dev_kfree_skb(rd_info->skb); 1615 dev_kfree_skb(rd_info->skb);
@@ -1620,7 +1640,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
1620 1640
1621 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1622 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1642 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1623 "%s: failed to allocate RX buffer.\n", vptr->dev->name); 1643 "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
1624 velocity_free_rd_ring(vptr); 1644 velocity_free_rd_ring(vptr);
1625 goto out; 1645 goto out;
1626 } 1646 }
@@ -1670,7 +1690,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
1670 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1690 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1671 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; 1691 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1672 1692
1673 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); 1693 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1674} 1694}
1675 1695
1676static int velocity_init_rings(struct velocity_info *vptr, int mtu) 1696static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1727,8 +1747,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
1727 pktlen = max_t(size_t, pktlen, 1747 pktlen = max_t(size_t, pktlen,
1728 td->td_buf[i].size & ~TD_QUEUE); 1748 td->td_buf[i].size & ~TD_QUEUE);
1729 1749
1730 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], 1750 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1731 le16_to_cpu(pktlen), PCI_DMA_TODEVICE); 1751 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1732 } 1752 }
1733 } 1753 }
1734 dev_kfree_skb_irq(skb); 1754 dev_kfree_skb_irq(skb);
@@ -1750,8 +1770,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1750 if (td_info->skb) { 1770 if (td_info->skb) {
1751 for (i = 0; i < td_info->nskb_dma; i++) { 1771 for (i = 0; i < td_info->nskb_dma; i++) {
1752 if (td_info->skb_dma[i]) { 1772 if (td_info->skb_dma[i]) {
1753 pci_unmap_single(vptr->pdev, td_info->skb_dma[i], 1773 dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1754 td_info->skb->len, PCI_DMA_TODEVICE); 1774 td_info->skb->len, DMA_TO_DEVICE);
1755 td_info->skb_dma[i] = 0; 1775 td_info->skb_dma[i] = 0;
1756 } 1776 }
1757 } 1777 }
@@ -1809,7 +1829,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
1809 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); 1829 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1810 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); 1830 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1811 writew(TRDCSR_RUN, &regs->TDCSRClr); 1831 writew(TRDCSR_RUN, &regs->TDCSRClr);
1812 netif_stop_queue(vptr->dev); 1832 netif_stop_queue(vptr->netdev);
1813 1833
1814 /* FIXME: port over the pci_device_failed code and use it 1834 /* FIXME: port over the pci_device_failed code and use it
1815 here */ 1835 here */
@@ -1850,10 +1870,10 @@ static void velocity_error(struct velocity_info *vptr, int status)
1850 1870
1851 if (linked) { 1871 if (linked) {
1852 vptr->mii_status &= ~VELOCITY_LINK_FAIL; 1872 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1853 netif_carrier_on(vptr->dev); 1873 netif_carrier_on(vptr->netdev);
1854 } else { 1874 } else {
1855 vptr->mii_status |= VELOCITY_LINK_FAIL; 1875 vptr->mii_status |= VELOCITY_LINK_FAIL;
1856 netif_carrier_off(vptr->dev); 1876 netif_carrier_off(vptr->netdev);
1857 } 1877 }
1858 1878
1859 velocity_print_link_status(vptr); 1879 velocity_print_link_status(vptr);
@@ -1867,9 +1887,9 @@ static void velocity_error(struct velocity_info *vptr, int status)
1867 enable_mii_autopoll(regs); 1887 enable_mii_autopoll(regs);
1868 1888
1869 if (vptr->mii_status & VELOCITY_LINK_FAIL) 1889 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1870 netif_stop_queue(vptr->dev); 1890 netif_stop_queue(vptr->netdev);
1871 else 1891 else
1872 netif_wake_queue(vptr->dev); 1892 netif_wake_queue(vptr->netdev);
1873 1893
1874 } 1894 }
1875 if (status & ISR_MIBFI) 1895 if (status & ISR_MIBFI)
@@ -1894,7 +1914,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1894 int idx; 1914 int idx;
1895 int works = 0; 1915 int works = 0;
1896 struct velocity_td_info *tdinfo; 1916 struct velocity_td_info *tdinfo;
1897 struct net_device_stats *stats = &vptr->dev->stats; 1917 struct net_device_stats *stats = &vptr->netdev->stats;
1898 1918
1899 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 1919 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1900 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; 1920 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
@@ -1939,9 +1959,9 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1939 * Look to see if we should kick the transmit network 1959 * Look to see if we should kick the transmit network
1940 * layer for more work. 1960 * layer for more work.
1941 */ 1961 */
1942 if (netif_queue_stopped(vptr->dev) && (full == 0) && 1962 if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1943 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { 1963 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1944 netif_wake_queue(vptr->dev); 1964 netif_wake_queue(vptr->netdev);
1945 } 1965 }
1946 return works; 1966 return works;
1947} 1967}
@@ -1989,7 +2009,7 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1989 if (pkt_size < rx_copybreak) { 2009 if (pkt_size < rx_copybreak) {
1990 struct sk_buff *new_skb; 2010 struct sk_buff *new_skb;
1991 2011
1992 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); 2012 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
1993 if (new_skb) { 2013 if (new_skb) {
1994 new_skb->ip_summed = rx_skb[0]->ip_summed; 2014 new_skb->ip_summed = rx_skb[0]->ip_summed;
1995 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); 2015 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
@@ -2029,15 +2049,14 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
2029 */ 2049 */
2030static int velocity_receive_frame(struct velocity_info *vptr, int idx) 2050static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2031{ 2051{
2032 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 2052 struct net_device_stats *stats = &vptr->netdev->stats;
2033 struct net_device_stats *stats = &vptr->dev->stats;
2034 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 2053 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2035 struct rx_desc *rd = &(vptr->rx.ring[idx]); 2054 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2036 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2055 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2037 struct sk_buff *skb; 2056 struct sk_buff *skb;
2038 2057
2039 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2040 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); 2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name);
2041 stats->rx_length_errors++; 2060 stats->rx_length_errors++;
2042 return -EINVAL; 2061 return -EINVAL;
2043 } 2062 }
@@ -2047,8 +2066,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2047 2066
2048 skb = rd_info->skb; 2067 skb = rd_info->skb;
2049 2068
2050 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2051 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 2070 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2052 2071
2053 /* 2072 /*
2054 * Drop frame not meeting IEEE 802.3 2073 * Drop frame not meeting IEEE 802.3
@@ -2061,21 +2080,20 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2061 } 2080 }
2062 } 2081 }
2063 2082
2064 pci_action = pci_dma_sync_single_for_device;
2065
2066 velocity_rx_csum(rd, skb); 2083 velocity_rx_csum(rd, skb);
2067 2084
2068 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2069 velocity_iph_realign(vptr, skb, pkt_len); 2086 velocity_iph_realign(vptr, skb, pkt_len);
2070 pci_action = pci_unmap_single;
2071 rd_info->skb = NULL; 2087 rd_info->skb = NULL;
2088 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2089 DMA_FROM_DEVICE);
2090 } else {
2091 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2092 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2072 } 2093 }
2073 2094
2074 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2075 PCI_DMA_FROMDEVICE);
2076
2077 skb_put(skb, pkt_len - 4); 2095 skb_put(skb, pkt_len - 4);
2078 skb->protocol = eth_type_trans(skb, vptr->dev); 2096 skb->protocol = eth_type_trans(skb, vptr->netdev);
2079 2097
2080 if (rd->rdesc0.RSR & RSR_DETAG) { 2098 if (rd->rdesc0.RSR & RSR_DETAG) {
2081 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); 2099 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
@@ -2100,7 +2118,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2100 */ 2118 */
2101static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) 2119static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2102{ 2120{
2103 struct net_device_stats *stats = &vptr->dev->stats; 2121 struct net_device_stats *stats = &vptr->netdev->stats;
2104 int rd_curr = vptr->rx.curr; 2122 int rd_curr = vptr->rx.curr;
2105 int works = 0; 2123 int works = 0;
2106 2124
@@ -2235,15 +2253,15 @@ static int velocity_open(struct net_device *dev)
2235 goto out; 2253 goto out;
2236 2254
2237 /* Ensure chip is running */ 2255 /* Ensure chip is running */
2238 pci_set_power_state(vptr->pdev, PCI_D0); 2256 velocity_set_power_state(vptr, PCI_D0);
2239 2257
2240 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2258 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2241 2259
2242 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, 2260 ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2243 dev->name, dev); 2261 dev->name, dev);
2244 if (ret < 0) { 2262 if (ret < 0) {
2245 /* Power down the chip */ 2263 /* Power down the chip */
2246 pci_set_power_state(vptr->pdev, PCI_D3hot); 2264 velocity_set_power_state(vptr, PCI_D3hot);
2247 velocity_free_rings(vptr); 2265 velocity_free_rings(vptr);
2248 goto out; 2266 goto out;
2249 } 2267 }
@@ -2292,7 +2310,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292 2310
2293 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 2311 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
2294 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 2312 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2295 vptr->dev->name); 2313 vptr->netdev->name);
2296 ret = -EINVAL; 2314 ret = -EINVAL;
2297 goto out_0; 2315 goto out_0;
2298 } 2316 }
@@ -2314,8 +2332,9 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2314 goto out_0; 2332 goto out_0;
2315 } 2333 }
2316 2334
2317 tmp_vptr->dev = dev; 2335 tmp_vptr->netdev = dev;
2318 tmp_vptr->pdev = vptr->pdev; 2336 tmp_vptr->pdev = vptr->pdev;
2337 tmp_vptr->dev = vptr->dev;
2319 tmp_vptr->options = vptr->options; 2338 tmp_vptr->options = vptr->options;
2320 tmp_vptr->tx.numq = vptr->tx.numq; 2339 tmp_vptr->tx.numq = vptr->tx.numq;
2321 2340
@@ -2415,7 +2434,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2415 saving then we need to bring the device back up to talk to it */ 2434 saving then we need to bring the device back up to talk to it */
2416 2435
2417 if (!netif_running(dev)) 2436 if (!netif_running(dev))
2418 pci_set_power_state(vptr->pdev, PCI_D0); 2437 velocity_set_power_state(vptr, PCI_D0);
2419 2438
2420 switch (cmd) { 2439 switch (cmd) {
2421 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2440 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -2428,7 +2447,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428 ret = -EOPNOTSUPP; 2447 ret = -EOPNOTSUPP;
2429 } 2448 }
2430 if (!netif_running(dev)) 2449 if (!netif_running(dev))
2431 pci_set_power_state(vptr->pdev, PCI_D3hot); 2450 velocity_set_power_state(vptr, PCI_D3hot);
2432 2451
2433 2452
2434 return ret; 2453 return ret;
@@ -2494,7 +2513,7 @@ static int velocity_close(struct net_device *dev)
2494 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) 2513 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2495 velocity_get_ip(vptr); 2514 velocity_get_ip(vptr);
2496 2515
2497 free_irq(vptr->pdev->irq, dev); 2516 free_irq(dev->irq, dev);
2498 2517
2499 velocity_free_rings(vptr); 2518 velocity_free_rings(vptr);
2500 2519
@@ -2550,7 +2569,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2550 * add it to the transmit ring. 2569 * add it to the transmit ring.
2551 */ 2570 */
2552 tdinfo->skb = skb; 2571 tdinfo->skb = skb;
2553 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); 2572 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2573 DMA_TO_DEVICE);
2554 td_ptr->tdesc0.len = cpu_to_le16(pktlen); 2574 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2555 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2575 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2556 td_ptr->td_buf[0].pa_high = 0; 2576 td_ptr->td_buf[0].pa_high = 0;
@@ -2560,7 +2580,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2581 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562 2582
2563 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, 2583 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2564 frag, 0, 2584 frag, 0,
2565 skb_frag_size(frag), 2585 skb_frag_size(frag),
2566 DMA_TO_DEVICE); 2586 DMA_TO_DEVICE);
@@ -2632,12 +2652,9 @@ static const struct net_device_ops velocity_netdev_ops = {
2632 * Set up the initial velocity_info struct for the device that has been 2652 * Set up the initial velocity_info struct for the device that has been
2633 * discovered. 2653 * discovered.
2634 */ 2654 */
2635static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, 2655static void velocity_init_info(struct velocity_info *vptr,
2636 const struct velocity_info_tbl *info) 2656 const struct velocity_info_tbl *info)
2637{ 2657{
2638 memset(vptr, 0, sizeof(struct velocity_info));
2639
2640 vptr->pdev = pdev;
2641 vptr->chip_id = info->chip_id; 2658 vptr->chip_id = info->chip_id;
2642 vptr->tx.numq = info->txqueue; 2659 vptr->tx.numq = info->txqueue;
2643 vptr->multicast_limit = MCAM_SIZE; 2660 vptr->multicast_limit = MCAM_SIZE;
@@ -2652,10 +2669,9 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
2652 * Retrieve the PCI configuration space data that interests us from 2669 * Retrieve the PCI configuration space data that interests us from
2653 * the kernel PCI layer 2670 * the kernel PCI layer
2654 */ 2671 */
2655static int velocity_get_pci_info(struct velocity_info *vptr, 2672static int velocity_get_pci_info(struct velocity_info *vptr)
2656 struct pci_dev *pdev)
2657{ 2673{
2658 vptr->rev_id = pdev->revision; 2674 struct pci_dev *pdev = vptr->pdev;
2659 2675
2660 pci_set_master(pdev); 2676 pci_set_master(pdev);
2661 2677
@@ -2678,7 +2694,37 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
2678 dev_err(&pdev->dev, "region #1 is too small.\n"); 2694 dev_err(&pdev->dev, "region #1 is too small.\n");
2679 return -EINVAL; 2695 return -EINVAL;
2680 } 2696 }
2681 vptr->pdev = pdev; 2697
2698 return 0;
2699}
2700
2701/**
2702 * velocity_get_platform_info - retrieve platform info for device
2703 * @vptr: velocity device
2704 * @pdev: platform device it matches
2705 *
2706 * Retrieve the Platform configuration data that interests us
2707 */
2708static int velocity_get_platform_info(struct velocity_info *vptr)
2709{
2710 struct resource res;
2711 int ret;
2712
2713 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2714 vptr->no_eeprom = 1;
2715
2716 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2717 if (ret) {
2718 dev_err(vptr->dev, "unable to find memory address\n");
2719 return ret;
2720 }
2721
2722 vptr->memaddr = res.start;
2723
2724 if (resource_size(&res) < VELOCITY_IO_SIZE) {
2725 dev_err(vptr->dev, "memory region is too small.\n");
2726 return -EINVAL;
2727 }
2682 2728
2683 return 0; 2729 return 0;
2684} 2730}
@@ -2692,7 +2738,7 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
2692 */ 2738 */
2693static void velocity_print_info(struct velocity_info *vptr) 2739static void velocity_print_info(struct velocity_info *vptr)
2694{ 2740{
2695 struct net_device *dev = vptr->dev; 2741 struct net_device *dev = vptr->netdev;
2696 2742
2697 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2743 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2698 printk(KERN_INFO "%s: Ethernet Address: %pM\n", 2744 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
@@ -2707,21 +2753,22 @@ static u32 velocity_get_link(struct net_device *dev)
2707} 2753}
2708 2754
2709/** 2755/**
2710 * velocity_found1 - set up discovered velocity card 2756 * velocity_probe - set up discovered velocity device
2711 * @pdev: PCI device 2757 * @pdev: PCI device
2712 * @ent: PCI device table entry that matched 2758 * @ent: PCI device table entry that matched
2759 * @bustype: bus that device is connected to
2713 * 2760 *
2714 * Configure a discovered adapter from scratch. Return a negative 2761 * Configure a discovered adapter from scratch. Return a negative
2715 * errno error code on failure paths. 2762 * errno error code on failure paths.
2716 */ 2763 */
2717static int velocity_found1(struct pci_dev *pdev, 2764static int velocity_probe(struct device *dev, int irq,
2718 const struct pci_device_id *ent) 2765 const struct velocity_info_tbl *info,
2766 enum velocity_bus_type bustype)
2719{ 2767{
2720 static int first = 1; 2768 static int first = 1;
2721 struct net_device *dev; 2769 struct net_device *netdev;
2722 int i; 2770 int i;
2723 const char *drv_string; 2771 const char *drv_string;
2724 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2725 struct velocity_info *vptr; 2772 struct velocity_info *vptr;
2726 struct mac_regs __iomem *regs; 2773 struct mac_regs __iomem *regs;
2727 int ret = -ENOMEM; 2774 int ret = -ENOMEM;
@@ -2730,20 +2777,18 @@ static int velocity_found1(struct pci_dev *pdev,
2730 * can support more than MAX_UNITS. 2777 * can support more than MAX_UNITS.
2731 */ 2778 */
2732 if (velocity_nics >= MAX_UNITS) { 2779 if (velocity_nics >= MAX_UNITS) {
2733 dev_notice(&pdev->dev, "already found %d NICs.\n", 2780 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2734 velocity_nics);
2735 return -ENODEV; 2781 return -ENODEV;
2736 } 2782 }
2737 2783
2738 dev = alloc_etherdev(sizeof(struct velocity_info)); 2784 netdev = alloc_etherdev(sizeof(struct velocity_info));
2739 if (!dev) 2785 if (!netdev)
2740 goto out; 2786 goto out;
2741 2787
2742 /* Chain it all together */ 2788 /* Chain it all together */
2743 2789
2744 SET_NETDEV_DEV(dev, &pdev->dev); 2790 SET_NETDEV_DEV(netdev, dev);
2745 vptr = netdev_priv(dev); 2791 vptr = netdev_priv(netdev);
2746
2747 2792
2748 if (first) { 2793 if (first) {
2749 printk(KERN_INFO "%s Ver. %s\n", 2794 printk(KERN_INFO "%s Ver. %s\n",
@@ -2753,41 +2798,41 @@ static int velocity_found1(struct pci_dev *pdev,
2753 first = 0; 2798 first = 0;
2754 } 2799 }
2755 2800
2756 velocity_init_info(pdev, vptr, info); 2801 netdev->irq = irq;
2757 2802 vptr->netdev = netdev;
2758 vptr->dev = dev; 2803 vptr->dev = dev;
2759 2804
2760 ret = pci_enable_device(pdev); 2805 velocity_init_info(vptr, info);
2761 if (ret < 0)
2762 goto err_free_dev;
2763 2806
2764 ret = velocity_get_pci_info(vptr, pdev); 2807 if (bustype == BUS_PCI) {
2765 if (ret < 0) { 2808 vptr->pdev = to_pci_dev(dev);
2766 /* error message already printed */
2767 goto err_disable;
2768 }
2769 2809
2770 ret = pci_request_regions(pdev, VELOCITY_NAME); 2810 ret = velocity_get_pci_info(vptr);
2771 if (ret < 0) { 2811 if (ret < 0)
2772 dev_err(&pdev->dev, "No PCI resources.\n"); 2812 goto err_free_dev;
2773 goto err_disable; 2813 } else {
2814 vptr->pdev = NULL;
2815 ret = velocity_get_platform_info(vptr);
2816 if (ret < 0)
2817 goto err_free_dev;
2774 } 2818 }
2775 2819
2776 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); 2820 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2777 if (regs == NULL) { 2821 if (regs == NULL) {
2778 ret = -EIO; 2822 ret = -EIO;
2779 goto err_release_res; 2823 goto err_free_dev;
2780 } 2824 }
2781 2825
2782 vptr->mac_regs = regs; 2826 vptr->mac_regs = regs;
2827 vptr->rev_id = readb(&regs->rev_id);
2783 2828
2784 mac_wol_reset(regs); 2829 mac_wol_reset(regs);
2785 2830
2786 for (i = 0; i < 6; i++) 2831 for (i = 0; i < 6; i++)
2787 dev->dev_addr[i] = readb(&regs->PAR[i]); 2832 netdev->dev_addr[i] = readb(&regs->PAR[i]);
2788 2833
2789 2834
2790 drv_string = dev_driver_string(&pdev->dev); 2835 drv_string = dev_driver_string(dev);
2791 2836
2792 velocity_get_options(&vptr->options, velocity_nics, drv_string); 2837 velocity_get_options(&vptr->options, velocity_nics, drv_string);
2793 2838
@@ -2808,46 +2853,125 @@ static int velocity_found1(struct pci_dev *pdev,
2808 2853
2809 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 2854 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2810 2855
2811 dev->netdev_ops = &velocity_netdev_ops; 2856 netdev->netdev_ops = &velocity_netdev_ops;
2812 dev->ethtool_ops = &velocity_ethtool_ops; 2857 netdev->ethtool_ops = &velocity_ethtool_ops;
2813 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2858 netif_napi_add(netdev, &vptr->napi, velocity_poll,
2859 VELOCITY_NAPI_WEIGHT);
2814 2860
2815 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 2861 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2816 NETIF_F_HW_VLAN_CTAG_TX; 2862 NETIF_F_HW_VLAN_CTAG_TX;
2817 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER | 2863 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2818 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM; 2864 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2865 NETIF_F_IP_CSUM;
2819 2866
2820 ret = register_netdev(dev); 2867 ret = register_netdev(netdev);
2821 if (ret < 0) 2868 if (ret < 0)
2822 goto err_iounmap; 2869 goto err_iounmap;
2823 2870
2824 if (!velocity_get_link(dev)) { 2871 if (!velocity_get_link(netdev)) {
2825 netif_carrier_off(dev); 2872 netif_carrier_off(netdev);
2826 vptr->mii_status |= VELOCITY_LINK_FAIL; 2873 vptr->mii_status |= VELOCITY_LINK_FAIL;
2827 } 2874 }
2828 2875
2829 velocity_print_info(vptr); 2876 velocity_print_info(vptr);
2830 pci_set_drvdata(pdev, dev); 2877 dev_set_drvdata(vptr->dev, netdev);
2831 2878
2832 /* and leave the chip powered down */ 2879 /* and leave the chip powered down */
2833 2880
2834 pci_set_power_state(pdev, PCI_D3hot); 2881 velocity_set_power_state(vptr, PCI_D3hot);
2835 velocity_nics++; 2882 velocity_nics++;
2836out: 2883out:
2837 return ret; 2884 return ret;
2838 2885
2839err_iounmap: 2886err_iounmap:
2840 iounmap(regs); 2887 iounmap(regs);
2841err_release_res:
2842 pci_release_regions(pdev);
2843err_disable:
2844 pci_disable_device(pdev);
2845err_free_dev: 2888err_free_dev:
2846 free_netdev(dev); 2889 free_netdev(netdev);
2847 goto out; 2890 goto out;
2848} 2891}
2849 2892
2850#ifdef CONFIG_PM 2893/**
2894 * velocity_remove - device unplug
2895 * @dev: device being removed
2896 *
2897 * Device unload callback. Called on an unplug or on module
2898 * unload for each active device that is present. Disconnects
2899 * the device from the network layer and frees all the resources
2900 */
2901static int velocity_remove(struct device *dev)
2902{
2903 struct net_device *netdev = dev_get_drvdata(dev);
2904 struct velocity_info *vptr = netdev_priv(netdev);
2905
2906 unregister_netdev(netdev);
2907 iounmap(vptr->mac_regs);
2908 free_netdev(netdev);
2909 velocity_nics--;
2910
2911 return 0;
2912}
2913
2914static int velocity_pci_probe(struct pci_dev *pdev,
2915 const struct pci_device_id *ent)
2916{
2917 const struct velocity_info_tbl *info =
2918 &chip_info_table[ent->driver_data];
2919 int ret;
2920
2921 ret = pci_enable_device(pdev);
2922 if (ret < 0)
2923 return ret;
2924
2925 ret = pci_request_regions(pdev, VELOCITY_NAME);
2926 if (ret < 0) {
2927 dev_err(&pdev->dev, "No PCI resources.\n");
2928 goto fail1;
2929 }
2930
2931 ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2932 if (ret == 0)
2933 return 0;
2934
2935 pci_release_regions(pdev);
2936fail1:
2937 pci_disable_device(pdev);
2938 return ret;
2939}
2940
2941static void velocity_pci_remove(struct pci_dev *pdev)
2942{
2943 velocity_remove(&pdev->dev);
2944
2945 pci_release_regions(pdev);
2946 pci_disable_device(pdev);
2947}
2948
2949static int velocity_platform_probe(struct platform_device *pdev)
2950{
2951 const struct of_device_id *of_id;
2952 const struct velocity_info_tbl *info;
2953 int irq;
2954
2955 of_id = of_match_device(velocity_of_ids, &pdev->dev);
2956 if (!of_id)
2957 return -EINVAL;
2958 info = of_id->data;
2959
2960 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2961 if (!irq)
2962 return -EINVAL;
2963
2964 return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2965}
2966
2967static int velocity_platform_remove(struct platform_device *pdev)
2968{
2969 velocity_remove(&pdev->dev);
2970
2971 return 0;
2972}
2973
2974#ifdef CONFIG_PM_SLEEP
2851/** 2975/**
2852 * wol_calc_crc - WOL CRC 2976 * wol_calc_crc - WOL CRC
2853 * @pattern: data pattern 2977 * @pattern: data pattern
@@ -3004,32 +3128,35 @@ static void velocity_save_context(struct velocity_info *vptr, struct velocity_co
3004 3128
3005} 3129}
3006 3130
3007static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) 3131static int velocity_suspend(struct device *dev)
3008{ 3132{
3009 struct net_device *dev = pci_get_drvdata(pdev); 3133 struct net_device *netdev = dev_get_drvdata(dev);
3010 struct velocity_info *vptr = netdev_priv(dev); 3134 struct velocity_info *vptr = netdev_priv(netdev);
3011 unsigned long flags; 3135 unsigned long flags;
3012 3136
3013 if (!netif_running(vptr->dev)) 3137 if (!netif_running(vptr->netdev))
3014 return 0; 3138 return 0;
3015 3139
3016 netif_device_detach(vptr->dev); 3140 netif_device_detach(vptr->netdev);
3017 3141
3018 spin_lock_irqsave(&vptr->lock, flags); 3142 spin_lock_irqsave(&vptr->lock, flags);
3019 pci_save_state(pdev); 3143 if (vptr->pdev)
3144 pci_save_state(vptr->pdev);
3020 3145
3021 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { 3146 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3022 velocity_get_ip(vptr); 3147 velocity_get_ip(vptr);
3023 velocity_save_context(vptr, &vptr->context); 3148 velocity_save_context(vptr, &vptr->context);
3024 velocity_shutdown(vptr); 3149 velocity_shutdown(vptr);
3025 velocity_set_wol(vptr); 3150 velocity_set_wol(vptr);
3026 pci_enable_wake(pdev, PCI_D3hot, 1); 3151 if (vptr->pdev)
3027 pci_set_power_state(pdev, PCI_D3hot); 3152 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3153 velocity_set_power_state(vptr, PCI_D3hot);
3028 } else { 3154 } else {
3029 velocity_save_context(vptr, &vptr->context); 3155 velocity_save_context(vptr, &vptr->context);
3030 velocity_shutdown(vptr); 3156 velocity_shutdown(vptr);
3031 pci_disable_device(pdev); 3157 if (vptr->pdev)
3032 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3158 pci_disable_device(vptr->pdev);
3159 velocity_set_power_state(vptr, PCI_D3hot);
3033 } 3160 }
3034 3161
3035 spin_unlock_irqrestore(&vptr->lock, flags); 3162 spin_unlock_irqrestore(&vptr->lock, flags);
@@ -3071,19 +3198,22 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity
3071 writeb(*((u8 *) (context->mac_reg + i)), ptr + i); 3198 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3072} 3199}
3073 3200
3074static int velocity_resume(struct pci_dev *pdev) 3201static int velocity_resume(struct device *dev)
3075{ 3202{
3076 struct net_device *dev = pci_get_drvdata(pdev); 3203 struct net_device *netdev = dev_get_drvdata(dev);
3077 struct velocity_info *vptr = netdev_priv(dev); 3204 struct velocity_info *vptr = netdev_priv(netdev);
3078 unsigned long flags; 3205 unsigned long flags;
3079 int i; 3206 int i;
3080 3207
3081 if (!netif_running(vptr->dev)) 3208 if (!netif_running(vptr->netdev))
3082 return 0; 3209 return 0;
3083 3210
3084 pci_set_power_state(pdev, PCI_D0); 3211 velocity_set_power_state(vptr, PCI_D0);
3085 pci_enable_wake(pdev, 0, 0); 3212
3086 pci_restore_state(pdev); 3213 if (vptr->pdev) {
3214 pci_enable_wake(vptr->pdev, 0, 0);
3215 pci_restore_state(vptr->pdev);
3216 }
3087 3217
3088 mac_wol_reset(vptr->mac_regs); 3218 mac_wol_reset(vptr->mac_regs);
3089 3219
@@ -3101,27 +3231,38 @@ static int velocity_resume(struct pci_dev *pdev)
3101 3231
3102 mac_enable_int(vptr->mac_regs); 3232 mac_enable_int(vptr->mac_regs);
3103 spin_unlock_irqrestore(&vptr->lock, flags); 3233 spin_unlock_irqrestore(&vptr->lock, flags);
3104 netif_device_attach(vptr->dev); 3234 netif_device_attach(vptr->netdev);
3105 3235
3106 return 0; 3236 return 0;
3107} 3237}
3108#endif 3238#endif /* CONFIG_PM_SLEEP */
3239
3240static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3109 3241
3110/* 3242/*
3111 * Definition for our device driver. The PCI layer interface 3243 * Definition for our device driver. The PCI layer interface
3112 * uses this to handle all our card discover and plugging 3244 * uses this to handle all our card discover and plugging
3113 */ 3245 */
3114static struct pci_driver velocity_driver = { 3246static struct pci_driver velocity_pci_driver = {
3115 .name = VELOCITY_NAME, 3247 .name = VELOCITY_NAME,
3116 .id_table = velocity_id_table, 3248 .id_table = velocity_pci_id_table,
3117 .probe = velocity_found1, 3249 .probe = velocity_pci_probe,
3118 .remove = velocity_remove1, 3250 .remove = velocity_pci_remove,
3119#ifdef CONFIG_PM 3251 .driver = {
3120 .suspend = velocity_suspend, 3252 .pm = &velocity_pm_ops,
3121 .resume = velocity_resume, 3253 },
3122#endif
3123}; 3254};
3124 3255
3256static struct platform_driver velocity_platform_driver = {
3257 .probe = velocity_platform_probe,
3258 .remove = velocity_platform_remove,
3259 .driver = {
3260 .name = "via-velocity",
3261 .owner = THIS_MODULE,
3262 .of_match_table = velocity_of_ids,
3263 .pm = &velocity_pm_ops,
3264 },
3265};
3125 3266
3126/** 3267/**
3127 * velocity_ethtool_up - pre hook for ethtool 3268 * velocity_ethtool_up - pre hook for ethtool
@@ -3134,7 +3275,7 @@ static int velocity_ethtool_up(struct net_device *dev)
3134{ 3275{
3135 struct velocity_info *vptr = netdev_priv(dev); 3276 struct velocity_info *vptr = netdev_priv(dev);
3136 if (!netif_running(dev)) 3277 if (!netif_running(dev))
3137 pci_set_power_state(vptr->pdev, PCI_D0); 3278 velocity_set_power_state(vptr, PCI_D0);
3138 return 0; 3279 return 0;
3139} 3280}
3140 3281
@@ -3149,7 +3290,7 @@ static void velocity_ethtool_down(struct net_device *dev)
3149{ 3290{
3150 struct velocity_info *vptr = netdev_priv(dev); 3291 struct velocity_info *vptr = netdev_priv(dev);
3151 if (!netif_running(dev)) 3292 if (!netif_running(dev))
3152 pci_set_power_state(vptr->pdev, PCI_D3hot); 3293 velocity_set_power_state(vptr, PCI_D3hot);
3153} 3294}
3154 3295
3155static int velocity_get_settings(struct net_device *dev, 3296static int velocity_get_settings(struct net_device *dev,
@@ -3269,9 +3410,14 @@ static int velocity_set_settings(struct net_device *dev,
3269static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3410static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3270{ 3411{
3271 struct velocity_info *vptr = netdev_priv(dev); 3412 struct velocity_info *vptr = netdev_priv(dev);
3413
3272 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); 3414 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3273 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); 3415 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3274 strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); 3416 if (vptr->pdev)
3417 strlcpy(info->bus_info, pci_name(vptr->pdev),
3418 sizeof(info->bus_info));
3419 else
3420 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3275} 3421}
3276 3422
3277static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3423static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3561,13 +3707,20 @@ static void velocity_unregister_notifier(void)
3561 */ 3707 */
3562static int __init velocity_init_module(void) 3708static int __init velocity_init_module(void)
3563{ 3709{
3564 int ret; 3710 int ret_pci, ret_platform;
3565 3711
3566 velocity_register_notifier(); 3712 velocity_register_notifier();
3567 ret = pci_register_driver(&velocity_driver); 3713
3568 if (ret < 0) 3714 ret_pci = pci_register_driver(&velocity_pci_driver);
3715 ret_platform = platform_driver_register(&velocity_platform_driver);
3716
3717 /* if both_registers failed, remove the notifier */
3718 if ((ret_pci < 0) && (ret_platform < 0)) {
3569 velocity_unregister_notifier(); 3719 velocity_unregister_notifier();
3570 return ret; 3720 return ret_pci;
3721 }
3722
3723 return 0;
3571} 3724}
3572 3725
3573/** 3726/**
@@ -3581,7 +3734,9 @@ static int __init velocity_init_module(void)
3581static void __exit velocity_cleanup_module(void) 3734static void __exit velocity_cleanup_module(void)
3582{ 3735{
3583 velocity_unregister_notifier(); 3736 velocity_unregister_notifier();
3584 pci_unregister_driver(&velocity_driver); 3737
3738 pci_unregister_driver(&velocity_pci_driver);
3739 platform_driver_unregister(&velocity_platform_driver);
3585} 3740}
3586 3741
3587module_init(velocity_init_module); 3742module_init(velocity_init_module);
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index 4cb9f13485e9..9453bfa9324a 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1265,7 +1265,7 @@ struct velocity_context {
1265#define PHYID_VT3216_64BIT 0x000FC600UL 1265#define PHYID_VT3216_64BIT 0x000FC600UL
1266#define PHYID_MARVELL_1000 0x01410C50UL 1266#define PHYID_MARVELL_1000 0x01410C50UL
1267#define PHYID_MARVELL_1000S 0x01410C40UL 1267#define PHYID_MARVELL_1000S 0x01410C40UL
1268 1268#define PHYID_ICPLUS_IP101A 0x02430C54UL
1269#define PHYID_REV_ID_MASK 0x0000000FUL 1269#define PHYID_REV_ID_MASK 0x0000000FUL
1270 1270
1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) 1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
@@ -1434,8 +1434,10 @@ struct velocity_opt {
1434#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1434#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1435 1435
1436struct velocity_info { 1436struct velocity_info {
1437 struct device *dev;
1437 struct pci_dev *pdev; 1438 struct pci_dev *pdev;
1438 struct net_device *dev; 1439 struct net_device *netdev;
1440 int no_eeprom;
1439 1441
1440 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 1442 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
1441 u8 ip_addr[4]; 1443 u8 ip_addr[4];
@@ -1514,7 +1516,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr)
1514 int res = -ENOENT; 1516 int res = -ENOENT;
1515 1517
1516 rcu_read_lock(); 1518 rcu_read_lock();
1517 in_dev = __in_dev_get_rcu(vptr->dev); 1519 in_dev = __in_dev_get_rcu(vptr->netdev);
1518 if (in_dev != NULL) { 1520 if (in_dev != NULL) {
1519 ifa = (struct in_ifaddr *) in_dev->ifa_list; 1521 ifa = (struct in_ifaddr *) in_dev->ifa_list;
1520 if (ifa != NULL) { 1522 if (ifa != NULL) {
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index d5bd563ac131..f5d7305a5784 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -2246,15 +2246,4 @@ static struct pci_driver skfddi_pci_driver = {
2246 .remove = skfp_remove_one, 2246 .remove = skfp_remove_one,
2247}; 2247};
2248 2248
2249static int __init skfd_init(void) 2249module_pci_driver(skfddi_pci_driver);
2250{
2251 return pci_register_driver(&skfddi_pci_driver);
2252}
2253
2254static void __exit skfd_exit(void)
2255{
2256 pci_unregister_driver(&skfddi_pci_driver);
2257}
2258
2259module_init(skfd_init);
2260module_exit(skfd_exit);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 3c4d6274bb9b..00ed75155ce8 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1686,15 +1686,4 @@ static struct pci_driver rr_driver = {
1686 .remove = rr_remove_one, 1686 .remove = rr_remove_one,
1687}; 1687};
1688 1688
1689static int __init rr_init_module(void) 1689module_pci_driver(rr_driver);
1690{
1691 return pci_register_driver(&rr_driver);
1692}
1693
1694static void __exit rr_cleanup_module(void)
1695{
1696 pci_unregister_driver(&rr_driver);
1697}
1698
1699module_init(rr_init_module);
1700module_exit(rr_cleanup_module);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c14f14741b3f..2d28a0ef4572 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,8 +419,6 @@ out_unlock:
419EXPORT_SYMBOL(phy_start_aneg); 419EXPORT_SYMBOL(phy_start_aneg);
420 420
421 421
422static void phy_change(struct work_struct *work);
423
424/** 422/**
425 * phy_start_machine - start PHY state machine tracking 423 * phy_start_machine - start PHY state machine tracking
426 * @phydev: the phy_device struct 424 * @phydev: the phy_device struct
@@ -565,8 +563,6 @@ int phy_start_interrupts(struct phy_device *phydev)
565{ 563{
566 int err = 0; 564 int err = 0;
567 565
568 INIT_WORK(&phydev->phy_queue, phy_change);
569
570 atomic_set(&phydev->irq_disable, 0); 566 atomic_set(&phydev->irq_disable, 0);
571 if (request_irq(phydev->irq, phy_interrupt, 567 if (request_irq(phydev->irq, phy_interrupt,
572 IRQF_SHARED, 568 IRQF_SHARED,
@@ -623,7 +619,7 @@ EXPORT_SYMBOL(phy_stop_interrupts);
623 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes 619 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
624 * @work: work_struct that describes the work to be done 620 * @work: work_struct that describes the work to be done
625 */ 621 */
626static void phy_change(struct work_struct *work) 622void phy_change(struct work_struct *work)
627{ 623{
628 int err; 624 int err;
629 struct phy_device *phydev = 625 struct phy_device *phydev =
@@ -682,7 +678,7 @@ void phy_stop(struct phy_device *phydev)
682 if (PHY_HALTED == phydev->state) 678 if (PHY_HALTED == phydev->state)
683 goto out_unlock; 679 goto out_unlock;
684 680
685 if (phydev->irq != PHY_POLL) { 681 if (phy_interrupt_is_valid(phydev)) {
686 /* Disable PHY Interrupts */ 682 /* Disable PHY Interrupts */
687 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); 683 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
688 684
@@ -828,8 +824,9 @@ void phy_state_machine(struct work_struct *work)
828 break; 824 break;
829 case PHY_RUNNING: 825 case PHY_RUNNING:
830 /* Only register a CHANGE if we are 826 /* Only register a CHANGE if we are
831 * polling */ 827 * polling or ignoring interrupts
832 if (PHY_POLL == phydev->irq) 828 */
829 if (!phy_interrupt_is_valid(phydev))
833 phydev->state = PHY_CHANGELINK; 830 phydev->state = PHY_CHANGELINK;
834 break; 831 break;
835 case PHY_CHANGELINK: 832 case PHY_CHANGELINK:
@@ -848,7 +845,7 @@ void phy_state_machine(struct work_struct *work)
848 845
849 phydev->adjust_link(phydev->attached_dev); 846 phydev->adjust_link(phydev->attached_dev);
850 847
851 if (PHY_POLL != phydev->irq) 848 if (phy_interrupt_is_valid(phydev))
852 err = phy_config_interrupt(phydev, 849 err = phy_config_interrupt(phydev,
853 PHY_INTERRUPT_ENABLED); 850 PHY_INTERRUPT_ENABLED);
854 break; 851 break;
@@ -921,6 +918,14 @@ void phy_state_machine(struct work_struct *work)
921 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); 918 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
922} 919}
923 920
921void phy_mac_interrupt(struct phy_device *phydev, int new_link)
922{
923 cancel_work_sync(&phydev->phy_queue);
924 phydev->link = new_link;
925 schedule_work(&phydev->phy_queue);
926}
927EXPORT_SYMBOL(phy_mac_interrupt);
928
924static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, 929static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
925 int addr) 930 int addr)
926{ 931{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3657b4a29124..b55aa33a5b8b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -189,6 +189,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
189 189
190 mutex_init(&dev->lock); 190 mutex_init(&dev->lock);
191 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); 191 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
192 INIT_WORK(&dev->phy_queue, phy_change);
192 193
193 /* Request the appropriate module unconditionally; don't 194 /* Request the appropriate module unconditionally; don't
194 bother trying to do so only if it isn't already loaded, 195 bother trying to do so only if it isn't already loaded,
@@ -1009,8 +1010,11 @@ static int phy_probe(struct device *dev)
1009 phydrv = to_phy_driver(drv); 1010 phydrv = to_phy_driver(drv);
1010 phydev->drv = phydrv; 1011 phydev->drv = phydrv;
1011 1012
1012 /* Disable the interrupt if the PHY doesn't support it */ 1013 /* Disable the interrupt if the PHY doesn't support it
1013 if (!(phydrv->flags & PHY_HAS_INTERRUPT)) 1014 * but the interrupt is still a valid one
1015 */
1016 if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
1017 phy_interrupt_is_valid(phydev))
1014 phydev->irq = PHY_POLL; 1018 phydev->irq = PHY_POLL;
1015 1019
1016 mutex_lock(&phydev->lock); 1020 mutex_lock(&phydev->lock);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 14e519888631..d02bac82fc57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -11,7 +11,6 @@
11#include <linux/signal.h> 11#include <linux/signal.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/version.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/mii.h> 16#include <linux/mii.h>
@@ -1749,18 +1748,7 @@ static struct usb_driver rtl8152_driver = {
1749 .resume = rtl8152_resume 1748 .resume = rtl8152_resume
1750}; 1749};
1751 1750
1752static int __init usb_rtl8152_init(void) 1751module_usb_driver(rtl8152_driver);
1753{
1754 return usb_register(&rtl8152_driver);
1755}
1756
1757static void __exit usb_rtl8152_exit(void)
1758{
1759 usb_deregister(&rtl8152_driver);
1760}
1761
1762module_init(usb_rtl8152_init);
1763module_exit(usb_rtl8152_exit);
1764 1752
1765MODULE_AUTHOR(DRIVER_AUTHOR); 1753MODULE_AUTHOR(DRIVER_AUTHOR);
1766MODULE_DESCRIPTION(DRIVER_DESC); 1754MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b1d2ee7156b..5ed64d496f83 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -44,6 +44,8 @@
44 44
45#define VXLAN_VERSION "0.1" 45#define VXLAN_VERSION "0.1"
46 46
47#define PORT_HASH_BITS 8
48#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
47#define VNI_HASH_BITS 10 49#define VNI_HASH_BITS 10
48#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) 50#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
49#define FDB_HASH_BITS 8 51#define FDB_HASH_BITS 8
@@ -76,13 +78,24 @@ static bool log_ecn_error = true;
76module_param(log_ecn_error, bool, 0644); 78module_param(log_ecn_error, bool, 0644);
77MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 79MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
78 80
79/* per-net private data for this module */
80static unsigned int vxlan_net_id; 81static unsigned int vxlan_net_id;
81struct vxlan_net { 82
82 struct socket *sock; /* UDP encap socket */ 83/* per UDP socket information */
84struct vxlan_sock {
85 struct hlist_node hlist;
86 struct rcu_head rcu;
87 struct work_struct del_work;
88 unsigned int refcnt;
89 struct socket *sock;
83 struct hlist_head vni_list[VNI_HASH_SIZE]; 90 struct hlist_head vni_list[VNI_HASH_SIZE];
84}; 91};
85 92
93/* per-network namespace private data for this module */
94struct vxlan_net {
95 struct list_head vxlan_list;
96 struct hlist_head sock_list[PORT_HASH_SIZE];
97};
98
86struct vxlan_rdst { 99struct vxlan_rdst {
87 struct rcu_head rcu; 100 struct rcu_head rcu;
88 __be32 remote_ip; 101 __be32 remote_ip;
@@ -106,7 +119,9 @@ struct vxlan_fdb {
106 119
107/* Pseudo network device */ 120/* Pseudo network device */
108struct vxlan_dev { 121struct vxlan_dev {
109 struct hlist_node hlist; 122 struct hlist_node hlist; /* vni hash table */
123 struct list_head next; /* vxlan's per namespace list */
124 struct vxlan_sock *vn_sock; /* listening socket */
110 struct net_device *dev; 125 struct net_device *dev;
111 struct vxlan_rdst default_dst; /* default destination */ 126 struct vxlan_rdst default_dst; /* default destination */
112 __be32 saddr; /* source address */ 127 __be32 saddr; /* source address */
@@ -135,19 +150,43 @@ struct vxlan_dev {
135/* salt for hash table */ 150/* salt for hash table */
136static u32 vxlan_salt __read_mostly; 151static u32 vxlan_salt __read_mostly;
137 152
138static inline struct hlist_head *vni_head(struct net *net, u32 id) 153/* Virtual Network hash table head */
154static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
155{
156 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
157}
158
159/* Socket hash table head */
160static inline struct hlist_head *vs_head(struct net *net, __be16 port)
139{ 161{
140 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 162 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
141 163
142 return &vn->vni_list[hash_32(id, VNI_HASH_BITS)]; 164 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
165}
166
167/* Find VXLAN socket based on network namespace and UDP port */
168static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
169{
170 struct vxlan_sock *vs;
171
172 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
173 if (inet_sk(vs->sock->sk)->inet_sport == port)
174 return vs;
175 }
176 return NULL;
143} 177}
144 178
145/* Look up VNI in a per net namespace table */ 179/* Look up VNI in a per net namespace table */
146static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id) 180static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
147{ 181{
182 struct vxlan_sock *vs;
148 struct vxlan_dev *vxlan; 183 struct vxlan_dev *vxlan;
149 184
150 hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) { 185 vs = vxlan_find_port(net, port);
186 if (!vs)
187 return NULL;
188
189 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
151 if (vxlan->default_dst.remote_vni == id) 190 if (vxlan->default_dst.remote_vni == id)
152 return vxlan; 191 return vxlan;
153 } 192 }
@@ -603,20 +642,18 @@ static void vxlan_snoop(struct net_device *dev,
603static bool vxlan_group_used(struct vxlan_net *vn, 642static bool vxlan_group_used(struct vxlan_net *vn,
604 const struct vxlan_dev *this) 643 const struct vxlan_dev *this)
605{ 644{
606 const struct vxlan_dev *vxlan; 645 struct vxlan_dev *vxlan;
607 unsigned h;
608 646
609 for (h = 0; h < VNI_HASH_SIZE; ++h) 647 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
610 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) { 648 if (vxlan == this)
611 if (vxlan == this) 649 continue;
612 continue;
613 650
614 if (!netif_running(vxlan->dev)) 651 if (!netif_running(vxlan->dev))
615 continue; 652 continue;
616 653
617 if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip) 654 if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip)
618 return true; 655 return true;
619 } 656 }
620 657
621 return false; 658 return false;
622} 659}
@@ -626,7 +663,7 @@ static int vxlan_join_group(struct net_device *dev)
626{ 663{
627 struct vxlan_dev *vxlan = netdev_priv(dev); 664 struct vxlan_dev *vxlan = netdev_priv(dev);
628 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 665 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
629 struct sock *sk = vn->sock->sk; 666 struct sock *sk = vxlan->vn_sock->sock->sk;
630 struct ip_mreqn mreq = { 667 struct ip_mreqn mreq = {
631 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, 668 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
632 .imr_ifindex = vxlan->default_dst.remote_ifindex, 669 .imr_ifindex = vxlan->default_dst.remote_ifindex,
@@ -654,7 +691,7 @@ static int vxlan_leave_group(struct net_device *dev)
654 struct vxlan_dev *vxlan = netdev_priv(dev); 691 struct vxlan_dev *vxlan = netdev_priv(dev);
655 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 692 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
656 int err = 0; 693 int err = 0;
657 struct sock *sk = vn->sock->sk; 694 struct sock *sk = vxlan->vn_sock->sock->sk;
658 struct ip_mreqn mreq = { 695 struct ip_mreqn mreq = {
659 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, 696 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
660 .imr_ifindex = vxlan->default_dst.remote_ifindex, 697 .imr_ifindex = vxlan->default_dst.remote_ifindex,
@@ -681,6 +718,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
681 struct vxlanhdr *vxh; 718 struct vxlanhdr *vxh;
682 struct vxlan_dev *vxlan; 719 struct vxlan_dev *vxlan;
683 struct pcpu_tstats *stats; 720 struct pcpu_tstats *stats;
721 __be16 port;
684 __u32 vni; 722 __u32 vni;
685 int err; 723 int err;
686 724
@@ -704,9 +742,11 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
704 742
705 /* Is this VNI defined? */ 743 /* Is this VNI defined? */
706 vni = ntohl(vxh->vx_vni) >> 8; 744 vni = ntohl(vxh->vx_vni) >> 8;
707 vxlan = vxlan_find_vni(sock_net(sk), vni); 745 port = inet_sk(sk)->inet_sport;
746 vxlan = vxlan_find_vni(sock_net(sk), vni, port);
708 if (!vxlan) { 747 if (!vxlan) {
709 netdev_dbg(skb->dev, "unknown vni %d\n", vni); 748 netdev_dbg(skb->dev, "unknown vni %d port %u\n",
749 vni, ntohs(port));
710 goto drop; 750 goto drop;
711 } 751 }
712 752
@@ -886,7 +926,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
886 return false; 926 return false;
887} 927}
888 928
889static void vxlan_sock_free(struct sk_buff *skb) 929static void vxlan_sock_put(struct sk_buff *skb)
890{ 930{
891 sock_put(skb->sk); 931 sock_put(skb->sk);
892} 932}
@@ -894,13 +934,13 @@ static void vxlan_sock_free(struct sk_buff *skb)
894/* On transmit, associate with the tunnel socket */ 934/* On transmit, associate with the tunnel socket */
895static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) 935static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
896{ 936{
897 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 937 struct vxlan_dev *vxlan = netdev_priv(dev);
898 struct sock *sk = vn->sock->sk; 938 struct sock *sk = vxlan->vn_sock->sock->sk;
899 939
900 skb_orphan(skb); 940 skb_orphan(skb);
901 sock_hold(sk); 941 sock_hold(sk);
902 skb->sk = sk; 942 skb->sk = sk;
903 skb->destructor = vxlan_sock_free; 943 skb->destructor = vxlan_sock_put;
904} 944}
905 945
906/* Compute source port for outgoing packet 946/* Compute source port for outgoing packet
@@ -1042,7 +1082,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1042 struct vxlan_dev *dst_vxlan; 1082 struct vxlan_dev *dst_vxlan;
1043 1083
1044 ip_rt_put(rt); 1084 ip_rt_put(rt);
1045 dst_vxlan = vxlan_find_vni(dev_net(dev), vni); 1085 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
1046 if (!dst_vxlan) 1086 if (!dst_vxlan)
1047 goto tx_error; 1087 goto tx_error;
1048 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1088 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1317,6 +1357,7 @@ static void vxlan_setup(struct net_device *dev)
1317 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1357 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1318 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1358 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1319 1359
1360 INIT_LIST_HEAD(&vxlan->next);
1320 spin_lock_init(&vxlan->hash_lock); 1361 spin_lock_init(&vxlan->hash_lock);
1321 1362
1322 init_timer_deferrable(&vxlan->age_timer); 1363 init_timer_deferrable(&vxlan->age_timer);
@@ -1401,11 +1442,78 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
1401 .get_link = ethtool_op_get_link, 1442 .get_link = ethtool_op_get_link,
1402}; 1443};
1403 1444
1445static void vxlan_del_work(struct work_struct *work)
1446{
1447 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
1448
1449 sk_release_kernel(vs->sock->sk);
1450 kfree_rcu(vs, rcu);
1451}
1452
1453/* Create new listen socket if needed */
1454static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
1455{
1456 struct vxlan_sock *vs;
1457 struct sock *sk;
1458 struct sockaddr_in vxlan_addr = {
1459 .sin_family = AF_INET,
1460 .sin_addr.s_addr = htonl(INADDR_ANY),
1461 };
1462 int rc;
1463 unsigned h;
1464
1465 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
1466 if (!vs)
1467 return ERR_PTR(-ENOMEM);
1468
1469 for (h = 0; h < VNI_HASH_SIZE; ++h)
1470 INIT_HLIST_HEAD(&vs->vni_list[h]);
1471
1472 INIT_WORK(&vs->del_work, vxlan_del_work);
1473
1474 /* Create UDP socket for encapsulation receive. */
1475 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
1476 if (rc < 0) {
1477 pr_debug("UDP socket create failed\n");
1478 kfree(vs);
1479 return ERR_PTR(rc);
1480 }
1481
1482 /* Put in proper namespace */
1483 sk = vs->sock->sk;
1484 sk_change_net(sk, net);
1485
1486 vxlan_addr.sin_port = port;
1487
1488 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
1489 sizeof(vxlan_addr));
1490 if (rc < 0) {
1491 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1492 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1493 sk_release_kernel(sk);
1494 kfree(vs);
1495 return ERR_PTR(rc);
1496 }
1497
1498 /* Disable multicast loopback */
1499 inet_sk(sk)->mc_loop = 0;
1500
1501 /* Mark socket as an encapsulation socket. */
1502 udp_sk(sk)->encap_type = 1;
1503 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1504 udp_encap_enable();
1505
1506 vs->refcnt = 1;
1507 return vs;
1508}
1509
1404static int vxlan_newlink(struct net *net, struct net_device *dev, 1510static int vxlan_newlink(struct net *net, struct net_device *dev,
1405 struct nlattr *tb[], struct nlattr *data[]) 1511 struct nlattr *tb[], struct nlattr *data[])
1406{ 1512{
1513 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1407 struct vxlan_dev *vxlan = netdev_priv(dev); 1514 struct vxlan_dev *vxlan = netdev_priv(dev);
1408 struct vxlan_rdst *dst = &vxlan->default_dst; 1515 struct vxlan_rdst *dst = &vxlan->default_dst;
1516 struct vxlan_sock *vs;
1409 __u32 vni; 1517 __u32 vni;
1410 int err; 1518 int err;
1411 1519
@@ -1413,10 +1521,6 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1413 return -EINVAL; 1521 return -EINVAL;
1414 1522
1415 vni = nla_get_u32(data[IFLA_VXLAN_ID]); 1523 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1416 if (vxlan_find_vni(net, vni)) {
1417 pr_info("duplicate VNI %u\n", vni);
1418 return -EEXIST;
1419 }
1420 dst->remote_vni = vni; 1524 dst->remote_vni = vni;
1421 1525
1422 if (data[IFLA_VXLAN_GROUP]) 1526 if (data[IFLA_VXLAN_GROUP])
@@ -1482,22 +1586,58 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1482 if (data[IFLA_VXLAN_PORT]) 1586 if (data[IFLA_VXLAN_PORT])
1483 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 1587 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
1484 1588
1589 if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
1590 pr_info("duplicate VNI %u\n", vni);
1591 return -EEXIST;
1592 }
1593
1594 vs = vxlan_find_port(net, vxlan->dst_port);
1595 if (vs)
1596 ++vs->refcnt;
1597 else {
1598 /* Drop lock because socket create acquires RTNL lock */
1599 rtnl_unlock();
1600 vs = vxlan_socket_create(net, vxlan->dst_port);
1601 rtnl_lock();
1602 if (IS_ERR(vs))
1603 return PTR_ERR(vs);
1604
1605 hlist_add_head_rcu(&vs->hlist, vs_head(net, vxlan->dst_port));
1606 }
1607 vxlan->vn_sock = vs;
1608
1485 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 1609 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
1486 1610
1487 err = register_netdevice(dev); 1611 err = register_netdevice(dev);
1488 if (!err) 1612 if (err) {
1489 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni)); 1613 if (--vs->refcnt == 0) {
1614 rtnl_unlock();
1615 sk_release_kernel(vs->sock->sk);
1616 kfree(vs);
1617 rtnl_lock();
1618 }
1619 return err;
1620 }
1490 1621
1491 return err; 1622 list_add(&vxlan->next, &vn->vxlan_list);
1623 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1624
1625 return 0;
1492} 1626}
1493 1627
1494static void vxlan_dellink(struct net_device *dev, struct list_head *head) 1628static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1495{ 1629{
1496 struct vxlan_dev *vxlan = netdev_priv(dev); 1630 struct vxlan_dev *vxlan = netdev_priv(dev);
1631 struct vxlan_sock *vs = vxlan->vn_sock;
1497 1632
1498 hlist_del_rcu(&vxlan->hlist); 1633 hlist_del_rcu(&vxlan->hlist);
1499 1634 list_del(&vxlan->next);
1500 unregister_netdevice_queue(dev, head); 1635 unregister_netdevice_queue(dev, head);
1636
1637 if (--vs->refcnt == 0) {
1638 hlist_del_rcu(&vs->hlist);
1639 schedule_work(&vs->del_work);
1640 }
1501} 1641}
1502 1642
1503static size_t vxlan_get_size(const struct net_device *dev) 1643static size_t vxlan_get_size(const struct net_device *dev)
@@ -1583,46 +1723,12 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
1583static __net_init int vxlan_init_net(struct net *net) 1723static __net_init int vxlan_init_net(struct net *net)
1584{ 1724{
1585 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1725 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1586 struct sock *sk;
1587 struct sockaddr_in vxlan_addr = {
1588 .sin_family = AF_INET,
1589 .sin_addr.s_addr = htonl(INADDR_ANY),
1590 };
1591 int rc;
1592 unsigned h; 1726 unsigned h;
1593 1727
1594 /* Create UDP socket for encapsulation receive. */ 1728 INIT_LIST_HEAD(&vn->vxlan_list);
1595 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
1596 if (rc < 0) {
1597 pr_debug("UDP socket create failed\n");
1598 return rc;
1599 }
1600 /* Put in proper namespace */
1601 sk = vn->sock->sk;
1602 sk_change_net(sk, net);
1603
1604 vxlan_addr.sin_port = htons(vxlan_port);
1605
1606 rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
1607 sizeof(vxlan_addr));
1608 if (rc < 0) {
1609 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1610 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1611 sk_release_kernel(sk);
1612 vn->sock = NULL;
1613 return rc;
1614 }
1615
1616 /* Disable multicast loopback */
1617 inet_sk(sk)->mc_loop = 0;
1618 1729
1619 /* Mark socket as an encapsulation socket. */ 1730 for (h = 0; h < PORT_HASH_SIZE; ++h)
1620 udp_sk(sk)->encap_type = 1; 1731 INIT_HLIST_HEAD(&vn->sock_list[h]);
1621 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1622 udp_encap_enable();
1623
1624 for (h = 0; h < VNI_HASH_SIZE; ++h)
1625 INIT_HLIST_HEAD(&vn->vni_list[h]);
1626 1732
1627 return 0; 1733 return 0;
1628} 1734}
@@ -1631,18 +1737,11 @@ static __net_exit void vxlan_exit_net(struct net *net)
1631{ 1737{
1632 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1738 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1633 struct vxlan_dev *vxlan; 1739 struct vxlan_dev *vxlan;
1634 unsigned h;
1635 1740
1636 rtnl_lock(); 1741 rtnl_lock();
1637 for (h = 0; h < VNI_HASH_SIZE; ++h) 1742 list_for_each_entry(vxlan, &vn->vxlan_list, next)
1638 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) 1743 dev_close(vxlan->dev);
1639 dev_close(vxlan->dev);
1640 rtnl_unlock(); 1744 rtnl_unlock();
1641
1642 if (vn->sock) {
1643 sk_release_kernel(vn->sock->sk);
1644 vn->sock = NULL;
1645 }
1646} 1745}
1647 1746
1648static struct pernet_operations vxlan_net_ops = { 1747static struct pernet_operations vxlan_net_ops = {
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 9d7f1723dd8f..8a4d77ee9c5b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -57,8 +57,12 @@ struct xenvif {
57 57
58 u8 fe_dev_addr[6]; 58 u8 fe_dev_addr[6];
59 59
60 /* Physical parameters of the comms window. */ 60 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
61 unsigned int irq; 61 unsigned int tx_irq;
62 unsigned int rx_irq;
63 /* Only used when feature-split-event-channels = 1 */
64 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
65 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
62 66
63 /* List of frontends to notify after a batch of frames sent. */ 67 /* List of frontends to notify after a batch of frames sent. */
64 struct list_head notify_list; 68 struct list_head notify_list;
@@ -113,13 +117,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
113 unsigned int handle); 117 unsigned int handle);
114 118
115int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 119int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
116 unsigned long rx_ring_ref, unsigned int evtchn); 120 unsigned long rx_ring_ref, unsigned int tx_evtchn,
121 unsigned int rx_evtchn);
117void xenvif_disconnect(struct xenvif *vif); 122void xenvif_disconnect(struct xenvif *vif);
118 123
119void xenvif_get(struct xenvif *vif); 124void xenvif_get(struct xenvif *vif);
120void xenvif_put(struct xenvif *vif); 125void xenvif_put(struct xenvif *vif);
121 126
122int xenvif_xenbus_init(void); 127int xenvif_xenbus_init(void);
128void xenvif_xenbus_fini(void);
123 129
124int xenvif_schedulable(struct xenvif *vif); 130int xenvif_schedulable(struct xenvif *vif);
125 131
@@ -157,4 +163,6 @@ void xenvif_carrier_off(struct xenvif *vif);
157/* Returns number of ring slots required to send an skb to the frontend */ 163/* Returns number of ring slots required to send an skb to the frontend */
158unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); 164unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
159 165
166extern bool separate_tx_rx_irq;
167
160#endif /* __XEN_NETBACK__COMMON_H__ */ 168#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d98414168485..087d2db0389d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif)
60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif); 60 return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
61} 61}
62 62
63static irqreturn_t xenvif_interrupt(int irq, void *dev_id) 63static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
64{ 64{
65 struct xenvif *vif = dev_id; 65 struct xenvif *vif = dev_id;
66 66
67 if (vif->netbk == NULL) 67 if (vif->netbk == NULL)
68 return IRQ_NONE; 68 return IRQ_HANDLED;
69 69
70 xen_netbk_schedule_xenvif(vif); 70 xen_netbk_schedule_xenvif(vif);
71 71
72 return IRQ_HANDLED;
73}
74
75static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
76{
77 struct xenvif *vif = dev_id;
78
79 if (vif->netbk == NULL)
80 return IRQ_HANDLED;
81
72 if (xenvif_rx_schedulable(vif)) 82 if (xenvif_rx_schedulable(vif))
73 netif_wake_queue(vif->dev); 83 netif_wake_queue(vif->dev);
74 84
75 return IRQ_HANDLED; 85 return IRQ_HANDLED;
76} 86}
77 87
88static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
89{
90 xenvif_tx_interrupt(irq, dev_id);
91 xenvif_rx_interrupt(irq, dev_id);
92
93 return IRQ_HANDLED;
94}
95
78static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 96static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
79{ 97{
80 struct xenvif *vif = netdev_priv(dev); 98 struct xenvif *vif = netdev_priv(dev);
@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
125static void xenvif_up(struct xenvif *vif) 143static void xenvif_up(struct xenvif *vif)
126{ 144{
127 xen_netbk_add_xenvif(vif); 145 xen_netbk_add_xenvif(vif);
128 enable_irq(vif->irq); 146 enable_irq(vif->tx_irq);
147 if (vif->tx_irq != vif->rx_irq)
148 enable_irq(vif->rx_irq);
129 xen_netbk_check_rx_xenvif(vif); 149 xen_netbk_check_rx_xenvif(vif);
130} 150}
131 151
132static void xenvif_down(struct xenvif *vif) 152static void xenvif_down(struct xenvif *vif)
133{ 153{
134 disable_irq(vif->irq); 154 disable_irq(vif->tx_irq);
155 if (vif->tx_irq != vif->rx_irq)
156 disable_irq(vif->rx_irq);
135 del_timer_sync(&vif->credit_timeout); 157 del_timer_sync(&vif->credit_timeout);
136 xen_netbk_deschedule_xenvif(vif); 158 xen_netbk_deschedule_xenvif(vif);
137 xen_netbk_remove_xenvif(vif); 159 xen_netbk_remove_xenvif(vif);
@@ -308,25 +330,52 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
308} 330}
309 331
310int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 332int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
311 unsigned long rx_ring_ref, unsigned int evtchn) 333 unsigned long rx_ring_ref, unsigned int tx_evtchn,
334 unsigned int rx_evtchn)
312{ 335{
313 int err = -ENOMEM; 336 int err = -ENOMEM;
314 337
315 /* Already connected through? */ 338 /* Already connected through? */
316 if (vif->irq) 339 if (vif->tx_irq)
317 return 0; 340 return 0;
318 341
342 __module_get(THIS_MODULE);
343
319 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 344 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
320 if (err < 0) 345 if (err < 0)
321 goto err; 346 goto err;
322 347
323 err = bind_interdomain_evtchn_to_irqhandler( 348 if (tx_evtchn == rx_evtchn) {
324 vif->domid, evtchn, xenvif_interrupt, 0, 349 /* feature-split-event-channels == 0 */
325 vif->dev->name, vif); 350 err = bind_interdomain_evtchn_to_irqhandler(
326 if (err < 0) 351 vif->domid, tx_evtchn, xenvif_interrupt, 0,
327 goto err_unmap; 352 vif->dev->name, vif);
328 vif->irq = err; 353 if (err < 0)
329 disable_irq(vif->irq); 354 goto err_unmap;
355 vif->tx_irq = vif->rx_irq = err;
356 disable_irq(vif->tx_irq);
357 } else {
358 /* feature-split-event-channels == 1 */
359 snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
360 "%s-tx", vif->dev->name);
361 err = bind_interdomain_evtchn_to_irqhandler(
362 vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
363 vif->tx_irq_name, vif);
364 if (err < 0)
365 goto err_unmap;
366 vif->tx_irq = err;
367 disable_irq(vif->tx_irq);
368
369 snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
370 "%s-rx", vif->dev->name);
371 err = bind_interdomain_evtchn_to_irqhandler(
372 vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
373 vif->rx_irq_name, vif);
374 if (err < 0)
375 goto err_tx_unbind;
376 vif->rx_irq = err;
377 disable_irq(vif->rx_irq);
378 }
330 379
331 xenvif_get(vif); 380 xenvif_get(vif);
332 381
@@ -340,9 +389,13 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
340 rtnl_unlock(); 389 rtnl_unlock();
341 390
342 return 0; 391 return 0;
392err_tx_unbind:
393 unbind_from_irqhandler(vif->tx_irq, vif);
394 vif->tx_irq = 0;
343err_unmap: 395err_unmap:
344 xen_netbk_unmap_frontend_rings(vif); 396 xen_netbk_unmap_frontend_rings(vif);
345err: 397err:
398 module_put(THIS_MODULE);
346 return err; 399 return err;
347} 400}
348 401
@@ -360,18 +413,37 @@ void xenvif_carrier_off(struct xenvif *vif)
360 413
361void xenvif_disconnect(struct xenvif *vif) 414void xenvif_disconnect(struct xenvif *vif)
362{ 415{
 416 /* Disconnect function might get called by generic framework
417 * even before vif connects, so we need to check if we really
418 * need to do a module_put.
419 */
420 int need_module_put = 0;
421
363 if (netif_carrier_ok(vif->dev)) 422 if (netif_carrier_ok(vif->dev))
364 xenvif_carrier_off(vif); 423 xenvif_carrier_off(vif);
365 424
366 atomic_dec(&vif->refcnt); 425 atomic_dec(&vif->refcnt);
367 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); 426 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
368 427
369 if (vif->irq) 428 if (vif->tx_irq) {
370 unbind_from_irqhandler(vif->irq, vif); 429 if (vif->tx_irq == vif->rx_irq)
430 unbind_from_irqhandler(vif->tx_irq, vif);
431 else {
432 unbind_from_irqhandler(vif->tx_irq, vif);
433 unbind_from_irqhandler(vif->rx_irq, vif);
434 }
 435 /* vif->tx_irq is valid, we had a module_get in
436 * xenvif_connect.
437 */
438 need_module_put = 1;
439 }
371 440
372 unregister_netdev(vif->dev); 441 unregister_netdev(vif->dev);
373 442
374 xen_netbk_unmap_frontend_rings(vif); 443 xen_netbk_unmap_frontend_rings(vif);
375 444
376 free_netdev(vif->dev); 445 free_netdev(vif->dev);
446
447 if (need_module_put)
448 module_put(THIS_MODULE);
377} 449}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 37984e6d4e99..82576fffb452 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,6 +47,13 @@
47#include <asm/xen/hypercall.h> 47#include <asm/xen/hypercall.h>
48#include <asm/xen/page.h> 48#include <asm/xen/page.h>
49 49
50/* Provide an option to disable split event channels at load time as
51 * event channels are limited resource. Split event channels are
52 * enabled by default.
53 */
54bool separate_tx_rx_irq = 1;
55module_param(separate_tx_rx_irq, bool, 0644);
56
50/* 57/*
51 * This is the maximum slots a skb can have. If a guest sends a skb 58 * This is the maximum slots a skb can have. If a guest sends a skb
52 * which exceeds this limit it is considered malicious. 59 * which exceeds this limit it is considered malicious.
@@ -662,7 +669,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
662{ 669{
663 struct xenvif *vif = NULL, *tmp; 670 struct xenvif *vif = NULL, *tmp;
664 s8 status; 671 s8 status;
665 u16 irq, flags; 672 u16 flags;
666 struct xen_netif_rx_response *resp; 673 struct xen_netif_rx_response *resp;
667 struct sk_buff_head rxq; 674 struct sk_buff_head rxq;
668 struct sk_buff *skb; 675 struct sk_buff *skb;
@@ -771,7 +778,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
771 sco->meta_slots_used); 778 sco->meta_slots_used);
772 779
773 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 780 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
774 irq = vif->irq;
775 if (ret && list_empty(&vif->notify_list)) 781 if (ret && list_empty(&vif->notify_list))
776 list_add_tail(&vif->notify_list, &notify); 782 list_add_tail(&vif->notify_list, &notify);
777 783
@@ -783,7 +789,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
783 } 789 }
784 790
785 list_for_each_entry_safe(vif, tmp, &notify, notify_list) { 791 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
786 notify_remote_via_irq(vif->irq); 792 notify_remote_via_irq(vif->rx_irq);
787 list_del_init(&vif->notify_list); 793 list_del_init(&vif->notify_list);
788 } 794 }
789 795
@@ -1762,7 +1768,7 @@ static void make_tx_response(struct xenvif *vif,
1762 vif->tx.rsp_prod_pvt = ++i; 1768 vif->tx.rsp_prod_pvt = ++i;
1763 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); 1769 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1764 if (notify) 1770 if (notify)
1765 notify_remote_via_irq(vif->irq); 1771 notify_remote_via_irq(vif->tx_irq);
1766} 1772}
1767 1773
1768static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 1774static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
@@ -1939,10 +1945,6 @@ static int __init netback_init(void)
1939failed_init: 1945failed_init:
1940 while (--group >= 0) { 1946 while (--group >= 0) {
1941 struct xen_netbk *netbk = &xen_netbk[group]; 1947 struct xen_netbk *netbk = &xen_netbk[group];
1942 for (i = 0; i < MAX_PENDING_REQS; i++) {
1943 if (netbk->mmap_pages[i])
1944 __free_page(netbk->mmap_pages[i]);
1945 }
1946 del_timer(&netbk->net_timer); 1948 del_timer(&netbk->net_timer);
1947 kthread_stop(netbk->task); 1949 kthread_stop(netbk->task);
1948 } 1950 }
@@ -1953,5 +1955,25 @@ failed_init:
1953 1955
1954module_init(netback_init); 1956module_init(netback_init);
1955 1957
1958static void __exit netback_fini(void)
1959{
1960 int i, j;
1961
1962 xenvif_xenbus_fini();
1963
1964 for (i = 0; i < xen_netbk_group_nr; i++) {
1965 struct xen_netbk *netbk = &xen_netbk[i];
1966 del_timer_sync(&netbk->net_timer);
1967 kthread_stop(netbk->task);
1968 for (j = 0; j < MAX_PENDING_REQS; j++) {
1969 if (netbk->mmap_pages[i])
1970 __free_page(netbk->mmap_pages[i]);
1971 }
1972 }
1973
1974 vfree(xen_netbk);
1975}
1976module_exit(netback_fini);
1977
1956MODULE_LICENSE("Dual BSD/GPL"); 1978MODULE_LICENSE("Dual BSD/GPL");
1957MODULE_ALIAS("xen-backend:vif"); 1979MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 410018c4c528..04bd860d16a9 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -122,6 +122,16 @@ static int netback_probe(struct xenbus_device *dev,
122 goto fail; 122 goto fail;
123 } 123 }
124 124
125 /*
126 * Split event channels support, this is optional so it is not
127 * put inside the above loop.
128 */
129 err = xenbus_printf(XBT_NIL, dev->nodename,
130 "feature-split-event-channels",
131 "%u", separate_tx_rx_irq);
132 if (err)
133 pr_debug("Error writing feature-split-event-channels");
134
125 err = xenbus_switch_state(dev, XenbusStateInitWait); 135 err = xenbus_switch_state(dev, XenbusStateInitWait);
126 if (err) 136 if (err)
127 goto fail; 137 goto fail;
@@ -393,21 +403,36 @@ static int connect_rings(struct backend_info *be)
393 struct xenvif *vif = be->vif; 403 struct xenvif *vif = be->vif;
394 struct xenbus_device *dev = be->dev; 404 struct xenbus_device *dev = be->dev;
395 unsigned long tx_ring_ref, rx_ring_ref; 405 unsigned long tx_ring_ref, rx_ring_ref;
396 unsigned int evtchn, rx_copy; 406 unsigned int tx_evtchn, rx_evtchn, rx_copy;
397 int err; 407 int err;
398 int val; 408 int val;
399 409
400 err = xenbus_gather(XBT_NIL, dev->otherend, 410 err = xenbus_gather(XBT_NIL, dev->otherend,
401 "tx-ring-ref", "%lu", &tx_ring_ref, 411 "tx-ring-ref", "%lu", &tx_ring_ref,
402 "rx-ring-ref", "%lu", &rx_ring_ref, 412 "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
403 "event-channel", "%u", &evtchn, NULL);
404 if (err) { 413 if (err) {
405 xenbus_dev_fatal(dev, err, 414 xenbus_dev_fatal(dev, err,
406 "reading %s/ring-ref and event-channel", 415 "reading %s/ring-ref",
407 dev->otherend); 416 dev->otherend);
408 return err; 417 return err;
409 } 418 }
410 419
420 /* Try split event channels first, then single event channel. */
421 err = xenbus_gather(XBT_NIL, dev->otherend,
422 "event-channel-tx", "%u", &tx_evtchn,
423 "event-channel-rx", "%u", &rx_evtchn, NULL);
424 if (err < 0) {
425 err = xenbus_scanf(XBT_NIL, dev->otherend,
426 "event-channel", "%u", &tx_evtchn);
427 if (err < 0) {
428 xenbus_dev_fatal(dev, err,
429 "reading %s/event-channel(-tx/rx)",
430 dev->otherend);
431 return err;
432 }
433 rx_evtchn = tx_evtchn;
434 }
435
411 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 436 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
412 &rx_copy); 437 &rx_copy);
413 if (err == -ENOENT) { 438 if (err == -ENOENT) {
@@ -454,11 +479,13 @@ static int connect_rings(struct backend_info *be)
454 vif->csum = !val; 479 vif->csum = !val;
455 480
456 /* Map the shared frame, irq etc. */ 481 /* Map the shared frame, irq etc. */
457 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn); 482 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
483 tx_evtchn, rx_evtchn);
458 if (err) { 484 if (err) {
459 xenbus_dev_fatal(dev, err, 485 xenbus_dev_fatal(dev, err,
460 "mapping shared-frames %lu/%lu port %u", 486 "mapping shared-frames %lu/%lu port tx %u rx %u",
461 tx_ring_ref, rx_ring_ref, evtchn); 487 tx_ring_ref, rx_ring_ref,
488 tx_evtchn, rx_evtchn);
462 return err; 489 return err;
463 } 490 }
464 return 0; 491 return 0;
@@ -485,3 +512,8 @@ int xenvif_xenbus_init(void)
485{ 512{
486 return xenbus_register_backend(&netback_driver); 513 return xenbus_register_backend(&netback_driver);
487} 514}
515
516void xenvif_xenbus_fini(void)
517{
518 return xenbus_unregister_driver(&netback_driver);
519}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db101415069..62238a08cb51 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -85,7 +85,15 @@ struct netfront_info {
85 85
86 struct napi_struct napi; 86 struct napi_struct napi;
87 87
88 unsigned int evtchn; 88 /* Split event channels support, tx_* == rx_* when using
89 * single event channel.
90 */
91 unsigned int tx_evtchn, rx_evtchn;
92 unsigned int tx_irq, rx_irq;
93 /* Only used when split event channels support is enabled */
94 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
95 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
96
89 struct xenbus_device *xbdev; 97 struct xenbus_device *xbdev;
90 98
91 spinlock_t tx_lock; 99 spinlock_t tx_lock;
@@ -330,7 +338,7 @@ no_skb:
330 push: 338 push:
331 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); 339 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
332 if (notify) 340 if (notify)
333 notify_remote_via_irq(np->netdev->irq); 341 notify_remote_via_irq(np->rx_irq);
334} 342}
335 343
336static int xennet_open(struct net_device *dev) 344static int xennet_open(struct net_device *dev)
@@ -623,7 +631,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
623 631
624 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); 632 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
625 if (notify) 633 if (notify)
626 notify_remote_via_irq(np->netdev->irq); 634 notify_remote_via_irq(np->tx_irq);
627 635
628 u64_stats_update_begin(&stats->syncp); 636 u64_stats_update_begin(&stats->syncp);
629 stats->tx_bytes += skb->len; 637 stats->tx_bytes += skb->len;
@@ -1254,23 +1262,35 @@ static int xennet_set_features(struct net_device *dev,
1254 return 0; 1262 return 0;
1255} 1263}
1256 1264
1257static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1265static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1258{ 1266{
1259 struct net_device *dev = dev_id; 1267 struct netfront_info *np = dev_id;
1260 struct netfront_info *np = netdev_priv(dev); 1268 struct net_device *dev = np->netdev;
1261 unsigned long flags; 1269 unsigned long flags;
1262 1270
1263 spin_lock_irqsave(&np->tx_lock, flags); 1271 spin_lock_irqsave(&np->tx_lock, flags);
1272 xennet_tx_buf_gc(dev);
1273 spin_unlock_irqrestore(&np->tx_lock, flags);
1264 1274
1265 if (likely(netif_carrier_ok(dev))) { 1275 return IRQ_HANDLED;
1266 xennet_tx_buf_gc(dev); 1276}
1267 /* Under tx_lock: protects access to rx shared-ring indexes. */ 1277
1268 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 1278static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1279{
1280 struct netfront_info *np = dev_id;
1281 struct net_device *dev = np->netdev;
1282
1283 if (likely(netif_carrier_ok(dev) &&
1284 RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1269 napi_schedule(&np->napi); 1285 napi_schedule(&np->napi);
1270 }
1271 1286
1272 spin_unlock_irqrestore(&np->tx_lock, flags); 1287 return IRQ_HANDLED;
1288}
1273 1289
1290static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1291{
1292 xennet_tx_interrupt(irq, dev_id);
1293 xennet_rx_interrupt(irq, dev_id);
1274 return IRQ_HANDLED; 1294 return IRQ_HANDLED;
1275} 1295}
1276 1296
@@ -1451,9 +1471,14 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1451 spin_unlock_irq(&info->tx_lock); 1471 spin_unlock_irq(&info->tx_lock);
1452 spin_unlock_bh(&info->rx_lock); 1472 spin_unlock_bh(&info->rx_lock);
1453 1473
1454 if (info->netdev->irq) 1474 if (info->tx_irq && (info->tx_irq == info->rx_irq))
1455 unbind_from_irqhandler(info->netdev->irq, info->netdev); 1475 unbind_from_irqhandler(info->tx_irq, info);
1456 info->evtchn = info->netdev->irq = 0; 1476 if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1477 unbind_from_irqhandler(info->tx_irq, info);
1478 unbind_from_irqhandler(info->rx_irq, info);
1479 }
1480 info->tx_evtchn = info->rx_evtchn = 0;
1481 info->tx_irq = info->rx_irq = 0;
1457 1482
1458 /* End access and free the pages */ 1483 /* End access and free the pages */
1459 xennet_end_access(info->tx_ring_ref, info->tx.sring); 1484 xennet_end_access(info->tx_ring_ref, info->tx.sring);
@@ -1503,12 +1528,82 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1503 return 0; 1528 return 0;
1504} 1529}
1505 1530
1531static int setup_netfront_single(struct netfront_info *info)
1532{
1533 int err;
1534
1535 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1536 if (err < 0)
1537 goto fail;
1538
1539 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1540 xennet_interrupt,
1541 0, info->netdev->name, info);
1542 if (err < 0)
1543 goto bind_fail;
1544 info->rx_evtchn = info->tx_evtchn;
1545 info->rx_irq = info->tx_irq = err;
1546
1547 return 0;
1548
1549bind_fail:
1550 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1551 info->tx_evtchn = 0;
1552fail:
1553 return err;
1554}
1555
1556static int setup_netfront_split(struct netfront_info *info)
1557{
1558 int err;
1559
1560 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1561 if (err < 0)
1562 goto fail;
1563 err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1564 if (err < 0)
1565 goto alloc_rx_evtchn_fail;
1566
1567 snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1568 "%s-tx", info->netdev->name);
1569 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1570 xennet_tx_interrupt,
1571 0, info->tx_irq_name, info);
1572 if (err < 0)
1573 goto bind_tx_fail;
1574 info->tx_irq = err;
1575
1576 snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1577 "%s-rx", info->netdev->name);
1578 err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1579 xennet_rx_interrupt,
1580 0, info->rx_irq_name, info);
1581 if (err < 0)
1582 goto bind_rx_fail;
1583 info->rx_irq = err;
1584
1585 return 0;
1586
1587bind_rx_fail:
1588 unbind_from_irqhandler(info->tx_irq, info);
1589 info->tx_irq = 0;
1590bind_tx_fail:
1591 xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1592 info->rx_evtchn = 0;
1593alloc_rx_evtchn_fail:
1594 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1595 info->tx_evtchn = 0;
1596fail:
1597 return err;
1598}
1599
1506static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) 1600static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1507{ 1601{
1508 struct xen_netif_tx_sring *txs; 1602 struct xen_netif_tx_sring *txs;
1509 struct xen_netif_rx_sring *rxs; 1603 struct xen_netif_rx_sring *rxs;
1510 int err; 1604 int err;
1511 struct net_device *netdev = info->netdev; 1605 struct net_device *netdev = info->netdev;
1606 unsigned int feature_split_evtchn;
1512 1607
1513 info->tx_ring_ref = GRANT_INVALID_REF; 1608 info->tx_ring_ref = GRANT_INVALID_REF;
1514 info->rx_ring_ref = GRANT_INVALID_REF; 1609 info->rx_ring_ref = GRANT_INVALID_REF;
@@ -1516,6 +1611,12 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1516 info->tx.sring = NULL; 1611 info->tx.sring = NULL;
1517 netdev->irq = 0; 1612 netdev->irq = 0;
1518 1613
1614 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1615 "feature-split-event-channels", "%u",
1616 &feature_split_evtchn);
1617 if (err < 0)
1618 feature_split_evtchn = 0;
1619
1519 err = xen_net_read_mac(dev, netdev->dev_addr); 1620 err = xen_net_read_mac(dev, netdev->dev_addr);
1520 if (err) { 1621 if (err) {
1521 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1622 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
@@ -1532,40 +1633,50 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1532 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 1633 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1533 1634
1534 err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 1635 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1535 if (err < 0) { 1636 if (err < 0)
1536 free_page((unsigned long)txs); 1637 goto grant_tx_ring_fail;
1537 goto fail;
1538 }
1539 1638
1540 info->tx_ring_ref = err; 1639 info->tx_ring_ref = err;
1541 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1640 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1542 if (!rxs) { 1641 if (!rxs) {
1543 err = -ENOMEM; 1642 err = -ENOMEM;
1544 xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1643 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1545 goto fail; 1644 goto alloc_rx_ring_fail;
1546 } 1645 }
1547 SHARED_RING_INIT(rxs); 1646 SHARED_RING_INIT(rxs);
1548 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 1647 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1549 1648
1550 err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 1649 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1551 if (err < 0) { 1650 if (err < 0)
1552 free_page((unsigned long)rxs); 1651 goto grant_rx_ring_fail;
1553 goto fail;
1554 }
1555 info->rx_ring_ref = err; 1652 info->rx_ring_ref = err;
1556 1653
1557 err = xenbus_alloc_evtchn(dev, &info->evtchn); 1654 if (feature_split_evtchn)
1655 err = setup_netfront_split(info);
1656 /* setup single event channel if
1657 * a) feature-split-event-channels == 0
1658 * b) feature-split-event-channels == 1 but failed to setup
1659 */
1660 if (!feature_split_evtchn || (feature_split_evtchn && err))
1661 err = setup_netfront_single(info);
1662
1558 if (err) 1663 if (err)
1559 goto fail; 1664 goto alloc_evtchn_fail;
1560 1665
1561 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1562 0, netdev->name, netdev);
1563 if (err < 0)
1564 goto fail;
1565 netdev->irq = err;
1566 return 0; 1666 return 0;
1567 1667
1568 fail: 1668 /* If we fail to setup netfront, it is safe to just revoke access to
1669 * granted pages because backend is not accessing it at this point.
1670 */
1671alloc_evtchn_fail:
1672 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1673grant_rx_ring_fail:
1674 free_page((unsigned long)rxs);
1675alloc_rx_ring_fail:
1676 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1677grant_tx_ring_fail:
1678 free_page((unsigned long)txs);
1679fail:
1569 return err; 1680 return err;
1570} 1681}
1571 1682
@@ -1601,11 +1712,27 @@ again:
1601 message = "writing rx ring-ref"; 1712 message = "writing rx ring-ref";
1602 goto abort_transaction; 1713 goto abort_transaction;
1603 } 1714 }
1604 err = xenbus_printf(xbt, dev->nodename, 1715
1605 "event-channel", "%u", info->evtchn); 1716 if (info->tx_evtchn == info->rx_evtchn) {
1606 if (err) { 1717 err = xenbus_printf(xbt, dev->nodename,
1607 message = "writing event-channel"; 1718 "event-channel", "%u", info->tx_evtchn);
1608 goto abort_transaction; 1719 if (err) {
1720 message = "writing event-channel";
1721 goto abort_transaction;
1722 }
1723 } else {
1724 err = xenbus_printf(xbt, dev->nodename,
1725 "event-channel-tx", "%u", info->tx_evtchn);
1726 if (err) {
1727 message = "writing event-channel-tx";
1728 goto abort_transaction;
1729 }
1730 err = xenbus_printf(xbt, dev->nodename,
1731 "event-channel-rx", "%u", info->rx_evtchn);
1732 if (err) {
1733 message = "writing event-channel-rx";
1734 goto abort_transaction;
1735 }
1609 } 1736 }
1610 1737
1611 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1738 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
@@ -1718,7 +1845,9 @@ static int xennet_connect(struct net_device *dev)
1718 * packets. 1845 * packets.
1719 */ 1846 */
1720 netif_carrier_on(np->netdev); 1847 netif_carrier_on(np->netdev);
1721 notify_remote_via_irq(np->netdev->irq); 1848 notify_remote_via_irq(np->tx_irq);
1849 if (np->tx_irq != np->rx_irq)
1850 notify_remote_via_irq(np->rx_irq);
1722 xennet_tx_buf_gc(dev); 1851 xennet_tx_buf_gc(dev);
1723 xennet_alloc_rx_buffers(dev); 1852 xennet_alloc_rx_buffers(dev);
1724 1853
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c050dcc322a4..56a6b7fbb3c6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -58,10 +58,10 @@ extern void bpf_jit_free(struct sk_filter *fp);
58static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, 58static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
59 u32 pass, void *image) 59 u32 pass, void *image)
60{ 60{
61 pr_err("flen=%u proglen=%u pass=%u image=%p\n", 61 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
62 flen, proglen, pass, image); 62 flen, proglen, pass, image);
63 if (image) 63 if (image)
64 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, 64 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
65 16, 1, image, proglen, false); 65 16, 1, image, proglen, false);
66} 66}
67#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) 67#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 60584b185a0c..0ebd63ae2cc8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1778,6 +1778,19 @@ static inline int unregister_gifconf(unsigned int family)
1778 return register_gifconf(family, NULL); 1778 return register_gifconf(family, NULL);
1779} 1779}
1780 1780
1781#ifdef CONFIG_NET_FLOW_LIMIT
1782#define FLOW_LIMIT_HISTORY (1 << 8) /* must be ^2 */
1783struct sd_flow_limit {
1784 u64 count;
1785 unsigned int num_buckets;
1786 unsigned int history_head;
1787 u16 history[FLOW_LIMIT_HISTORY];
1788 u8 buckets[];
1789};
1790
1791extern int netdev_flow_limit_table_len;
1792#endif /* CONFIG_NET_FLOW_LIMIT */
1793
1781/* 1794/*
1782 * Incoming packets are placed on per-cpu queues 1795 * Incoming packets are placed on per-cpu queues
1783 */ 1796 */
@@ -1807,6 +1820,10 @@ struct softnet_data {
1807 unsigned int dropped; 1820 unsigned int dropped;
1808 struct sk_buff_head input_pkt_queue; 1821 struct sk_buff_head input_pkt_queue;
1809 struct napi_struct backlog; 1822 struct napi_struct backlog;
1823
1824#ifdef CONFIG_NET_FLOW_LIMIT
1825 struct sd_flow_limit *flow_limit;
1826#endif
1810}; 1827};
1811 1828
1812static inline void input_queue_head_incr(struct softnet_data *sd) 1829static inline void input_queue_head_incr(struct softnet_data *sd)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9e11039dd7a3..fdfa11542974 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -508,6 +508,18 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
508 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 508 return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
509} 509}
510 510
511/**
512 * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
513 * @phydev: the phy_device struct
514 *
515 * NOTE: must be kept in sync with addition/removal of PHY_POLL and
516 * PHY_IGNORE_INTERRUPT
517 */
518static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
519{
520 return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
521}
522
511struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, 523struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
512 bool is_c45, struct phy_c45_device_ids *c45_ids); 524 bool is_c45, struct phy_c45_device_ids *c45_ids);
513struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); 525struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
@@ -545,6 +557,8 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
545int phy_driver_register(struct phy_driver *new_driver); 557int phy_driver_register(struct phy_driver *new_driver);
546int phy_drivers_register(struct phy_driver *new_driver, int n); 558int phy_drivers_register(struct phy_driver *new_driver, int n);
547void phy_state_machine(struct work_struct *work); 559void phy_state_machine(struct work_struct *work);
560void phy_change(struct work_struct *work);
561void phy_mac_interrupt(struct phy_device *phydev, int new_link);
548void phy_start_machine(struct phy_device *phydev, 562void phy_start_machine(struct phy_device *phydev,
549 void (*handler)(struct net_device *)); 563 void (*handler)(struct net_device *));
550void phy_stop_machine(struct phy_device *phydev); 564void phy_stop_machine(struct phy_device *phydev);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 5adbc33d1ab3..472120b4fac5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -246,7 +246,6 @@ struct tcp_sock {
246 246
247 /* from STCP, retrans queue hinting */ 247 /* from STCP, retrans queue hinting */
248 struct sk_buff* lost_skb_hint; 248 struct sk_buff* lost_skb_hint;
249 struct sk_buff *scoreboard_skb_hint;
250 struct sk_buff *retransmit_skb_hint; 249 struct sk_buff *retransmit_skb_hint;
251 250
252 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ 251 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 100fb8cec17c..e07feb456d19 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -74,6 +74,7 @@ struct inet6_ifaddr {
74 bool tokenized; 74 bool tokenized;
75 75
76 struct rcu_head rcu; 76 struct rcu_head rcu;
77 struct in6_addr peer_addr;
77}; 78};
78 79
79struct ip6_sf_socklist { 80struct ip6_sf_socklist {
@@ -192,7 +193,6 @@ struct inet6_dev {
192 struct in6_addr token; 193 struct in6_addr token;
193 194
194 struct neigh_parms *nd_parms; 195 struct neigh_parms *nd_parms;
195 struct inet6_dev *next;
196 struct ipv6_devconf cnf; 196 struct ipv6_devconf cnf;
197 struct ipv6_devstat stats; 197 struct ipv6_devstat stats;
198 unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ 198 unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5bba80fbd1d9..bf1cc3dced5e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1193,7 +1193,6 @@ static inline void tcp_mib_init(struct net *net)
1193static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) 1193static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1194{ 1194{
1195 tp->lost_skb_hint = NULL; 1195 tp->lost_skb_hint = NULL;
1196 tp->scoreboard_skb_hint = NULL;
1197} 1196}
1198 1197
1199static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1198static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
@@ -1284,11 +1283,13 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1284#define tcp_twsk_md5_key(twsk) NULL 1283#define tcp_twsk_md5_key(twsk) NULL
1285#endif 1284#endif
1286 1285
1287extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *); 1286extern bool tcp_alloc_md5sig_pool(void);
1288extern void tcp_free_md5sig_pool(void);
1289 1287
1290extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); 1288extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1291extern void tcp_put_md5sig_pool(void); 1289static inline void tcp_put_md5sig_pool(void)
1290{
1291 local_bh_enable();
1292}
1292 1293
1293extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); 1294extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1294extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 1295extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 3ef3fe05ee99..eb262e3324d2 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -38,6 +38,18 @@
38 * that it cannot safely queue packets (as it may not be kicked to send them). 38 * that it cannot safely queue packets (as it may not be kicked to send them).
39 */ 39 */
40 40
41 /*
42 * "feature-split-event-channels" is introduced to separate guest TX
 43 * and RX notification. Backend either doesn't support this feature or
 44 * advertises it via xenstore as 0 (disabled) or 1 (enabled).
45 *
46 * To make use of this feature, frontend should allocate two event
47 * channels for TX and RX, advertise them to backend as
48 * "event-channel-tx" and "event-channel-rx" respectively. If frontend
49 * doesn't want to use this feature, it just writes "event-channel"
50 * node as before.
51 */
52
41/* 53/*
42 * This is the 'wire' format for packets: 54 * This is the 'wire' format for packets:
43 * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) 55 * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
diff --git a/net/Kconfig b/net/Kconfig
index 2ddc9046868e..08de901415ee 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -259,6 +259,18 @@ config BPF_JIT
259 packet sniffing (libpcap/tcpdump). Note : Admin should enable 259 packet sniffing (libpcap/tcpdump). Note : Admin should enable
260 this feature changing /proc/sys/net/core/bpf_jit_enable 260 this feature changing /proc/sys/net/core/bpf_jit_enable
261 261
262config NET_FLOW_LIMIT
263 boolean
264 depends on RPS
265 default y
266 ---help---
267 The network stack has to drop packets when a receive processing CPU's
268 backlog reaches netdev_max_backlog. If a few out of many active flows
269 generate the vast majority of load, drop their traffic earlier to
270 maintain capacity for the other flows. This feature provides servers
271 with many clients some protection against DoS by a single (spoofed)
272 flow that greatly exceeds average workload.
273
262menu "Network testing" 274menu "Network testing"
263 275
264config NET_PKTGEN 276config NET_PKTGEN
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 967312803e41..75f3239130f8 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -22,6 +22,9 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include "br_private.h" 23#include "br_private.h"
24 24
25#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
26 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
27
25/* net device transmit always called with BH disabled */ 28/* net device transmit always called with BH disabled */
26netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 29netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
27{ 30{
@@ -346,12 +349,10 @@ void br_dev_setup(struct net_device *dev)
346 dev->tx_queue_len = 0; 349 dev->tx_queue_len = 0;
347 dev->priv_flags = IFF_EBRIDGE; 350 dev->priv_flags = IFF_EBRIDGE;
348 351
349 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 352 dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
350 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX | 353 NETIF_F_HW_VLAN_CTAG_TX;
351 NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX; 354 dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
352 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 355 dev->vlan_features = COMMON_FEATURES;
353 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
354 NETIF_F_HW_VLAN_CTAG_TX;
355 356
356 br->dev = dev; 357 br->dev = dev;
357 spin_lock_init(&br->lock); 358 spin_lock_init(&br->lock);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 81f2389f78eb..37a467697967 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -23,6 +23,7 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/inetdevice.h>
26#include <net/ip.h> 27#include <net/ip.h>
27#if IS_ENABLED(CONFIG_IPV6) 28#if IS_ENABLED(CONFIG_IPV6)
28#include <net/ipv6.h> 29#include <net/ipv6.h>
@@ -381,7 +382,8 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
381 iph->frag_off = htons(IP_DF); 382 iph->frag_off = htons(IP_DF);
382 iph->ttl = 1; 383 iph->ttl = 1;
383 iph->protocol = IPPROTO_IGMP; 384 iph->protocol = IPPROTO_IGMP;
384 iph->saddr = 0; 385 iph->saddr = br->multicast_query_use_ifaddr ?
386 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
385 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 387 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
386 ((u8 *)&iph[1])[0] = IPOPT_RA; 388 ((u8 *)&iph[1])[0] = IPOPT_RA;
387 ((u8 *)&iph[1])[1] = 4; 389 ((u8 *)&iph[1])[1] = 4;
@@ -615,8 +617,6 @@ rehash:
615 617
616 mp->br = br; 618 mp->br = br;
617 mp->addr = *group; 619 mp->addr = *group;
618 setup_timer(&mp->timer, br_multicast_group_expired,
619 (unsigned long)mp);
620 620
621 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 621 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
622 mdb->size++; 622 mdb->size++;
@@ -654,7 +654,6 @@ static int br_multicast_add_group(struct net_bridge *br,
654 struct net_bridge_mdb_entry *mp; 654 struct net_bridge_mdb_entry *mp;
655 struct net_bridge_port_group *p; 655 struct net_bridge_port_group *p;
656 struct net_bridge_port_group __rcu **pp; 656 struct net_bridge_port_group __rcu **pp;
657 unsigned long now = jiffies;
658 int err; 657 int err;
659 658
660 spin_lock(&br->multicast_lock); 659 spin_lock(&br->multicast_lock);
@@ -669,7 +668,6 @@ static int br_multicast_add_group(struct net_bridge *br,
669 668
670 if (!port) { 669 if (!port) {
671 mp->mglist = true; 670 mp->mglist = true;
672 mod_timer(&mp->timer, now + br->multicast_membership_interval);
673 goto out; 671 goto out;
674 } 672 }
675 673
@@ -677,7 +675,7 @@ static int br_multicast_add_group(struct net_bridge *br,
677 (p = mlock_dereference(*pp, br)) != NULL; 675 (p = mlock_dereference(*pp, br)) != NULL;
678 pp = &p->next) { 676 pp = &p->next) {
679 if (p->port == port) 677 if (p->port == port)
680 goto found; 678 goto out;
681 if ((unsigned long)p->port < (unsigned long)port) 679 if ((unsigned long)p->port < (unsigned long)port)
682 break; 680 break;
683 } 681 }
@@ -688,8 +686,6 @@ static int br_multicast_add_group(struct net_bridge *br,
688 rcu_assign_pointer(*pp, p); 686 rcu_assign_pointer(*pp, p);
689 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 687 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
690 688
691found:
692 mod_timer(&p->timer, now + br->multicast_membership_interval);
693out: 689out:
694 err = 0; 690 err = 0;
695 691
@@ -1129,6 +1125,10 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1129 if (!mp) 1125 if (!mp)
1130 goto out; 1126 goto out;
1131 1127
1128 setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
1129 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1130 mp->timer_armed = true;
1131
1132 max_delay *= br->multicast_last_member_count; 1132 max_delay *= br->multicast_last_member_count;
1133 1133
1134 if (mp->mglist && 1134 if (mp->mglist &&
@@ -1203,6 +1203,10 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1203 if (!mp) 1203 if (!mp)
1204 goto out; 1204 goto out;
1205 1205
1206 setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
1207 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1208 mp->timer_armed = true;
1209
1206 max_delay *= br->multicast_last_member_count; 1210 max_delay *= br->multicast_last_member_count;
1207 if (mp->mglist && 1211 if (mp->mglist &&
1208 (timer_pending(&mp->timer) ? 1212 (timer_pending(&mp->timer) ?
@@ -1246,6 +1250,32 @@ static void br_multicast_leave_group(struct net_bridge *br,
1246 if (!mp) 1250 if (!mp)
1247 goto out; 1251 goto out;
1248 1252
1253 if (br->multicast_querier &&
1254 !timer_pending(&br->multicast_querier_timer)) {
1255 __br_multicast_send_query(br, port, &mp->addr);
1256
1257 time = jiffies + br->multicast_last_member_count *
1258 br->multicast_last_member_interval;
1259 mod_timer(port ? &port->multicast_query_timer :
1260 &br->multicast_query_timer, time);
1261
1262 for (p = mlock_dereference(mp->ports, br);
1263 p != NULL;
1264 p = mlock_dereference(p->next, br)) {
1265 if (p->port != port)
1266 continue;
1267
1268 if (!hlist_unhashed(&p->mglist) &&
1269 (timer_pending(&p->timer) ?
1270 time_after(p->timer.expires, time) :
1271 try_to_del_timer_sync(&p->timer) >= 0)) {
1272 mod_timer(&p->timer, time);
1273 }
1274
1275 break;
1276 }
1277 }
1278
1249 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { 1279 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1250 struct net_bridge_port_group __rcu **pp; 1280 struct net_bridge_port_group __rcu **pp;
1251 1281
@@ -1261,7 +1291,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1261 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1291 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1262 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1292 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1263 1293
1264 if (!mp->ports && !mp->mglist && 1294 if (!mp->ports && !mp->mglist && mp->timer_armed &&
1265 netif_running(br->dev)) 1295 netif_running(br->dev))
1266 mod_timer(&mp->timer, jiffies); 1296 mod_timer(&mp->timer, jiffies);
1267 } 1297 }
@@ -1273,30 +1303,12 @@ static void br_multicast_leave_group(struct net_bridge *br,
1273 br->multicast_last_member_interval; 1303 br->multicast_last_member_interval;
1274 1304
1275 if (!port) { 1305 if (!port) {
1276 if (mp->mglist && 1306 if (mp->mglist && mp->timer_armed &&
1277 (timer_pending(&mp->timer) ? 1307 (timer_pending(&mp->timer) ?
1278 time_after(mp->timer.expires, time) : 1308 time_after(mp->timer.expires, time) :
1279 try_to_del_timer_sync(&mp->timer) >= 0)) { 1309 try_to_del_timer_sync(&mp->timer) >= 0)) {
1280 mod_timer(&mp->timer, time); 1310 mod_timer(&mp->timer, time);
1281 } 1311 }
1282
1283 goto out;
1284 }
1285
1286 for (p = mlock_dereference(mp->ports, br);
1287 p != NULL;
1288 p = mlock_dereference(p->next, br)) {
1289 if (p->port != port)
1290 continue;
1291
1292 if (!hlist_unhashed(&p->mglist) &&
1293 (timer_pending(&p->timer) ?
1294 time_after(p->timer.expires, time) :
1295 try_to_del_timer_sync(&p->timer) >= 0)) {
1296 mod_timer(&p->timer, time);
1297 }
1298
1299 break;
1300 } 1312 }
1301 1313
1302out: 1314out:
@@ -1618,6 +1630,7 @@ void br_multicast_init(struct net_bridge *br)
1618 1630
1619 br->multicast_router = 1; 1631 br->multicast_router = 1;
1620 br->multicast_querier = 0; 1632 br->multicast_querier = 0;
1633 br->multicast_query_use_ifaddr = 0;
1621 br->multicast_last_member_count = 2; 1634 br->multicast_last_member_count = 2;
1622 br->multicast_startup_query_count = 2; 1635 br->multicast_startup_query_count = 2;
1623 1636
@@ -1671,6 +1684,7 @@ void br_multicast_stop(struct net_bridge *br)
1671 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1684 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1672 hlist[ver]) { 1685 hlist[ver]) {
1673 del_timer(&mp->timer); 1686 del_timer(&mp->timer);
1687 mp->timer_armed = false;
1674 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1688 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1675 } 1689 }
1676 } 1690 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d2c043a857b6..1b0ac95a5c37 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -112,6 +112,7 @@ struct net_bridge_mdb_entry
112 struct timer_list timer; 112 struct timer_list timer;
113 struct br_ip addr; 113 struct br_ip addr;
114 bool mglist; 114 bool mglist;
115 bool timer_armed;
115}; 116};
116 117
117struct net_bridge_mdb_htable 118struct net_bridge_mdb_htable
@@ -249,6 +250,7 @@ struct net_bridge
249 250
250 u8 multicast_disabled:1; 251 u8 multicast_disabled:1;
251 u8 multicast_querier:1; 252 u8 multicast_querier:1;
253 u8 multicast_query_use_ifaddr:1;
252 254
253 u32 hash_elasticity; 255 u32 hash_elasticity;
254 u32 hash_max; 256 u32 hash_max;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 8baa9c08e1a4..394bb96b6087 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -375,6 +375,31 @@ static ssize_t store_multicast_snooping(struct device *d,
375static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, 375static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
376 show_multicast_snooping, store_multicast_snooping); 376 show_multicast_snooping, store_multicast_snooping);
377 377
378static ssize_t show_multicast_query_use_ifaddr(struct device *d,
379 struct device_attribute *attr,
380 char *buf)
381{
382 struct net_bridge *br = to_bridge(d);
383 return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr);
384}
385
386static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val)
387{
388 br->multicast_query_use_ifaddr = !!val;
389 return 0;
390}
391
392static ssize_t
393store_multicast_query_use_ifaddr(struct device *d,
394 struct device_attribute *attr,
395 const char *buf, size_t len)
396{
397 return store_bridge_parm(d, buf, len, set_query_use_ifaddr);
398}
399static DEVICE_ATTR(multicast_query_use_ifaddr, S_IRUGO | S_IWUSR,
400 show_multicast_query_use_ifaddr,
401 store_multicast_query_use_ifaddr);
402
378static ssize_t show_multicast_querier(struct device *d, 403static ssize_t show_multicast_querier(struct device *d,
379 struct device_attribute *attr, 404 struct device_attribute *attr,
380 char *buf) 405 char *buf)
@@ -734,6 +759,7 @@ static struct attribute *bridge_attrs[] = {
734 &dev_attr_multicast_router.attr, 759 &dev_attr_multicast_router.attr,
735 &dev_attr_multicast_snooping.attr, 760 &dev_attr_multicast_snooping.attr,
736 &dev_attr_multicast_querier.attr, 761 &dev_attr_multicast_querier.attr,
762 &dev_attr_multicast_query_use_ifaddr.attr,
737 &dev_attr_hash_elasticity.attr, 763 &dev_attr_hash_elasticity.attr,
738 &dev_attr_hash_max.attr, 764 &dev_attr_hash_max.attr,
739 &dev_attr_multicast_last_member_count.attr, 765 &dev_attr_multicast_last_member_count.attr,
diff --git a/net/core/dev.c b/net/core/dev.c
index fc1e289397f5..7229bc30e509 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1629,7 +1629,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1629 return NET_RX_DROP; 1629 return NET_RX_DROP;
1630 } 1630 }
1631 skb->skb_iif = 0; 1631 skb->skb_iif = 0;
1632 skb->dev = dev;
1633 skb_dst_drop(skb); 1632 skb_dst_drop(skb);
1634 skb->tstamp.tv64 = 0; 1633 skb->tstamp.tv64 = 0;
1635 skb->pkt_type = PACKET_HOST; 1634 skb->pkt_type = PACKET_HOST;
@@ -3065,6 +3064,46 @@ static int rps_ipi_queued(struct softnet_data *sd)
3065 return 0; 3064 return 0;
3066} 3065}
3067 3066
3067#ifdef CONFIG_NET_FLOW_LIMIT
3068int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3069#endif
3070
3071static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3072{
3073#ifdef CONFIG_NET_FLOW_LIMIT
3074 struct sd_flow_limit *fl;
3075 struct softnet_data *sd;
3076 unsigned int old_flow, new_flow;
3077
3078 if (qlen < (netdev_max_backlog >> 1))
3079 return false;
3080
3081 sd = &__get_cpu_var(softnet_data);
3082
3083 rcu_read_lock();
3084 fl = rcu_dereference(sd->flow_limit);
3085 if (fl) {
3086 new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
3087 old_flow = fl->history[fl->history_head];
3088 fl->history[fl->history_head] = new_flow;
3089
3090 fl->history_head++;
3091 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3092
3093 if (likely(fl->buckets[old_flow]))
3094 fl->buckets[old_flow]--;
3095
3096 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3097 fl->count++;
3098 rcu_read_unlock();
3099 return true;
3100 }
3101 }
3102 rcu_read_unlock();
3103#endif
3104 return false;
3105}
3106
3068/* 3107/*
3069 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 3108 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3070 * queue (may be a remote CPU queue). 3109 * queue (may be a remote CPU queue).
@@ -3074,13 +3113,15 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3074{ 3113{
3075 struct softnet_data *sd; 3114 struct softnet_data *sd;
3076 unsigned long flags; 3115 unsigned long flags;
3116 unsigned int qlen;
3077 3117
3078 sd = &per_cpu(softnet_data, cpu); 3118 sd = &per_cpu(softnet_data, cpu);
3079 3119
3080 local_irq_save(flags); 3120 local_irq_save(flags);
3081 3121
3082 rps_lock(sd); 3122 rps_lock(sd);
3083 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { 3123 qlen = skb_queue_len(&sd->input_pkt_queue);
3124 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3084 if (skb_queue_len(&sd->input_pkt_queue)) { 3125 if (skb_queue_len(&sd->input_pkt_queue)) {
3085enqueue: 3126enqueue:
3086 __skb_queue_tail(&sd->input_pkt_queue, skb); 3127 __skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -6270,6 +6311,10 @@ static int __init net_dev_init(void)
6270 sd->backlog.weight = weight_p; 6311 sd->backlog.weight = weight_p;
6271 sd->backlog.gro_list = NULL; 6312 sd->backlog.gro_list = NULL;
6272 sd->backlog.gro_count = 0; 6313 sd->backlog.gro_count = 0;
6314
6315#ifdef CONFIG_NET_FLOW_LIMIT
6316 sd->flow_limit = NULL;
6317#endif
6273 } 6318 }
6274 6319
6275 dev_boot_phase = 0; 6320 dev_boot_phase = 0;
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 569d355fec3e..2bf83299600a 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -146,11 +146,23 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
146static int softnet_seq_show(struct seq_file *seq, void *v) 146static int softnet_seq_show(struct seq_file *seq, void *v)
147{ 147{
148 struct softnet_data *sd = v; 148 struct softnet_data *sd = v;
149 unsigned int flow_limit_count = 0;
149 150
150 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 151#ifdef CONFIG_NET_FLOW_LIMIT
152 struct sd_flow_limit *fl;
153
154 rcu_read_lock();
155 fl = rcu_dereference(sd->flow_limit);
156 if (fl)
157 flow_limit_count = fl->count;
158 rcu_read_unlock();
159#endif
160
161 seq_printf(seq,
162 "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
151 sd->processed, sd->dropped, sd->time_squeeze, 0, 163 sd->processed, sd->dropped, sd->time_squeeze, 0,
152 0, 0, 0, 0, /* was fastroute */ 164 0, 0, 0, 0, /* was fastroute */
153 sd->cpu_collision, sd->received_rps); 165 sd->cpu_collision, sd->received_rps, flow_limit_count);
154 return 0; 166 return 0;
155} 167}
156 168
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index af9185d0be6a..d6298914f4e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2853,7 +2853,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2853 doffset + tnl_hlen); 2853 doffset + tnl_hlen);
2854 2854
2855 if (fskb != skb_shinfo(skb)->frag_list) 2855 if (fskb != skb_shinfo(skb)->frag_list)
2856 continue; 2856 goto perform_csum_check;
2857 2857
2858 if (!sg) { 2858 if (!sg) {
2859 nskb->ip_summed = CHECKSUM_NONE; 2859 nskb->ip_summed = CHECKSUM_NONE;
@@ -2917,6 +2917,7 @@ skip_fraglist:
2917 nskb->len += nskb->data_len; 2917 nskb->len += nskb->data_len;
2918 nskb->truesize += nskb->data_len; 2918 nskb->truesize += nskb->data_len;
2919 2919
2920perform_csum_check:
2920 if (!csum) { 2921 if (!csum) {
2921 nskb->csum = skb_checksum(nskb, doffset, 2922 nskb->csum = skb_checksum(nskb, doffset,
2922 nskb->len - doffset, 0); 2923 nskb->len - doffset, 0);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cfdb46ab3a7f..741db5fc7806 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -87,6 +87,96 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
87} 87}
88#endif /* CONFIG_RPS */ 88#endif /* CONFIG_RPS */
89 89
90#ifdef CONFIG_NET_FLOW_LIMIT
91static DEFINE_MUTEX(flow_limit_update_mutex);
92
93static int flow_limit_cpu_sysctl(ctl_table *table, int write,
94 void __user *buffer, size_t *lenp,
95 loff_t *ppos)
96{
97 struct sd_flow_limit *cur;
98 struct softnet_data *sd;
99 cpumask_var_t mask;
100 int i, len, ret = 0;
101
102 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
103 return -ENOMEM;
104
105 if (write) {
106 ret = cpumask_parse_user(buffer, *lenp, mask);
107 if (ret)
108 goto done;
109
110 mutex_lock(&flow_limit_update_mutex);
111 len = sizeof(*cur) + netdev_flow_limit_table_len;
112 for_each_possible_cpu(i) {
113 sd = &per_cpu(softnet_data, i);
114 cur = rcu_dereference_protected(sd->flow_limit,
115 lockdep_is_held(&flow_limit_update_mutex));
116 if (cur && !cpumask_test_cpu(i, mask)) {
117 RCU_INIT_POINTER(sd->flow_limit, NULL);
118 synchronize_rcu();
119 kfree(cur);
120 } else if (!cur && cpumask_test_cpu(i, mask)) {
121 cur = kzalloc(len, GFP_KERNEL);
122 if (!cur) {
123 /* not unwinding previous changes */
124 ret = -ENOMEM;
125 goto write_unlock;
126 }
127 cur->num_buckets = netdev_flow_limit_table_len;
128 rcu_assign_pointer(sd->flow_limit, cur);
129 }
130 }
131write_unlock:
132 mutex_unlock(&flow_limit_update_mutex);
133 } else {
134 if (*ppos || !*lenp) {
135 *lenp = 0;
136 goto done;
137 }
138
139 cpumask_clear(mask);
140 rcu_read_lock();
141 for_each_possible_cpu(i) {
142 sd = &per_cpu(softnet_data, i);
143 if (rcu_dereference(sd->flow_limit))
144 cpumask_set_cpu(i, mask);
145 }
146 rcu_read_unlock();
147
148 len = cpumask_scnprintf(buffer, *lenp, mask);
149 *lenp = len + 1;
150 *ppos += len + 1;
151 }
152
153done:
154 free_cpumask_var(mask);
155 return ret;
156}
157
158static int flow_limit_table_len_sysctl(ctl_table *table, int write,
159 void __user *buffer, size_t *lenp,
160 loff_t *ppos)
161{
162 unsigned int old, *ptr;
163 int ret;
164
165 mutex_lock(&flow_limit_update_mutex);
166
167 ptr = table->data;
168 old = *ptr;
169 ret = proc_dointvec(table, write, buffer, lenp, ppos);
170 if (!ret && write && !is_power_of_2(*ptr)) {
171 *ptr = old;
172 ret = -EINVAL;
173 }
174
175 mutex_unlock(&flow_limit_update_mutex);
176 return ret;
177}
178#endif /* CONFIG_NET_FLOW_LIMIT */
179
90static struct ctl_table net_core_table[] = { 180static struct ctl_table net_core_table[] = {
91#ifdef CONFIG_NET 181#ifdef CONFIG_NET
92 { 182 {
@@ -180,6 +270,20 @@ static struct ctl_table net_core_table[] = {
180 .proc_handler = rps_sock_flow_sysctl 270 .proc_handler = rps_sock_flow_sysctl
181 }, 271 },
182#endif 272#endif
273#ifdef CONFIG_NET_FLOW_LIMIT
274 {
275 .procname = "flow_limit_cpu_bitmap",
276 .mode = 0644,
277 .proc_handler = flow_limit_cpu_sysctl
278 },
279 {
280 .procname = "flow_limit_table_len",
281 .data = &netdev_flow_limit_table_len,
282 .maxlen = sizeof(int),
283 .mode = 0644,
284 .proc_handler = flow_limit_table_len_sysctl
285 },
286#endif /* CONFIG_NET_FLOW_LIMIT */
183#endif /* CONFIG_NET */ 287#endif /* CONFIG_NET */
184 { 288 {
185 .procname = "netdev_budget", 289 .procname = "netdev_budget",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ab450c099aa4..d87ce72ca8aa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3115,9 +3115,8 @@ int tcp_gro_complete(struct sk_buff *skb)
3115EXPORT_SYMBOL(tcp_gro_complete); 3115EXPORT_SYMBOL(tcp_gro_complete);
3116 3116
3117#ifdef CONFIG_TCP_MD5SIG 3117#ifdef CONFIG_TCP_MD5SIG
3118static unsigned long tcp_md5sig_users; 3118static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
3119static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool; 3119static DEFINE_MUTEX(tcp_md5sig_mutex);
3120static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
3121 3120
3122static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) 3121static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
3123{ 3122{
@@ -3132,30 +3131,14 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
3132 free_percpu(pool); 3131 free_percpu(pool);
3133} 3132}
3134 3133
3135void tcp_free_md5sig_pool(void) 3134static void __tcp_alloc_md5sig_pool(void)
3136{
3137 struct tcp_md5sig_pool __percpu *pool = NULL;
3138
3139 spin_lock_bh(&tcp_md5sig_pool_lock);
3140 if (--tcp_md5sig_users == 0) {
3141 pool = tcp_md5sig_pool;
3142 tcp_md5sig_pool = NULL;
3143 }
3144 spin_unlock_bh(&tcp_md5sig_pool_lock);
3145 if (pool)
3146 __tcp_free_md5sig_pool(pool);
3147}
3148EXPORT_SYMBOL(tcp_free_md5sig_pool);
3149
3150static struct tcp_md5sig_pool __percpu *
3151__tcp_alloc_md5sig_pool(struct sock *sk)
3152{ 3135{
3153 int cpu; 3136 int cpu;
3154 struct tcp_md5sig_pool __percpu *pool; 3137 struct tcp_md5sig_pool __percpu *pool;
3155 3138
3156 pool = alloc_percpu(struct tcp_md5sig_pool); 3139 pool = alloc_percpu(struct tcp_md5sig_pool);
3157 if (!pool) 3140 if (!pool)
3158 return NULL; 3141 return;
3159 3142
3160 for_each_possible_cpu(cpu) { 3143 for_each_possible_cpu(cpu) {
3161 struct crypto_hash *hash; 3144 struct crypto_hash *hash;
@@ -3166,53 +3149,27 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
3166 3149
3167 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; 3150 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
3168 } 3151 }
3169 return pool; 3152 /* before setting tcp_md5sig_pool, we must commit all writes
3153 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
3154 */
3155 smp_wmb();
3156 tcp_md5sig_pool = pool;
3157 return;
3170out_free: 3158out_free:
3171 __tcp_free_md5sig_pool(pool); 3159 __tcp_free_md5sig_pool(pool);
3172 return NULL;
3173} 3160}
3174 3161
3175struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 3162bool tcp_alloc_md5sig_pool(void)
3176{ 3163{
3177 struct tcp_md5sig_pool __percpu *pool; 3164 if (unlikely(!tcp_md5sig_pool)) {
3178 bool alloc = false; 3165 mutex_lock(&tcp_md5sig_mutex);
3179 3166
3180retry: 3167 if (!tcp_md5sig_pool)
3181 spin_lock_bh(&tcp_md5sig_pool_lock); 3168 __tcp_alloc_md5sig_pool();
3182 pool = tcp_md5sig_pool; 3169
3183 if (tcp_md5sig_users++ == 0) { 3170 mutex_unlock(&tcp_md5sig_mutex);
3184 alloc = true;
3185 spin_unlock_bh(&tcp_md5sig_pool_lock);
3186 } else if (!pool) {
3187 tcp_md5sig_users--;
3188 spin_unlock_bh(&tcp_md5sig_pool_lock);
3189 cpu_relax();
3190 goto retry;
3191 } else
3192 spin_unlock_bh(&tcp_md5sig_pool_lock);
3193
3194 if (alloc) {
3195 /* we cannot hold spinlock here because this may sleep. */
3196 struct tcp_md5sig_pool __percpu *p;
3197
3198 p = __tcp_alloc_md5sig_pool(sk);
3199 spin_lock_bh(&tcp_md5sig_pool_lock);
3200 if (!p) {
3201 tcp_md5sig_users--;
3202 spin_unlock_bh(&tcp_md5sig_pool_lock);
3203 return NULL;
3204 }
3205 pool = tcp_md5sig_pool;
3206 if (pool) {
3207 /* oops, it has already been assigned. */
3208 spin_unlock_bh(&tcp_md5sig_pool_lock);
3209 __tcp_free_md5sig_pool(p);
3210 } else {
3211 tcp_md5sig_pool = pool = p;
3212 spin_unlock_bh(&tcp_md5sig_pool_lock);
3213 }
3214 } 3171 }
3215 return pool; 3172 return tcp_md5sig_pool != NULL;
3216} 3173}
3217EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 3174EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3218 3175
@@ -3229,28 +3186,15 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3229 struct tcp_md5sig_pool __percpu *p; 3186 struct tcp_md5sig_pool __percpu *p;
3230 3187
3231 local_bh_disable(); 3188 local_bh_disable();
3232 3189 p = ACCESS_ONCE(tcp_md5sig_pool);
3233 spin_lock(&tcp_md5sig_pool_lock);
3234 p = tcp_md5sig_pool;
3235 if (p)
3236 tcp_md5sig_users++;
3237 spin_unlock(&tcp_md5sig_pool_lock);
3238
3239 if (p) 3190 if (p)
3240 return this_cpu_ptr(p); 3191 return __this_cpu_ptr(p);
3241 3192
3242 local_bh_enable(); 3193 local_bh_enable();
3243 return NULL; 3194 return NULL;
3244} 3195}
3245EXPORT_SYMBOL(tcp_get_md5sig_pool); 3196EXPORT_SYMBOL(tcp_get_md5sig_pool);
3246 3197
3247void tcp_put_md5sig_pool(void)
3248{
3249 local_bh_enable();
3250 tcp_free_md5sig_pool();
3251}
3252EXPORT_SYMBOL(tcp_put_md5sig_pool);
3253
3254int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, 3198int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3255 const struct tcphdr *th) 3199 const struct tcphdr *th)
3256{ 3200{
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9c6225780bd5..8230cd6243aa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -360,9 +360,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
360 if (mss > 1460) 360 if (mss > 1460)
361 icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); 361 icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
362 362
363 rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER); 363 rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER);
364 while (tcp_win_from_space(rcvmem) < mss)
365 rcvmem += 128;
366 364
367 rcvmem *= icwnd; 365 rcvmem *= icwnd;
368 366
@@ -1257,8 +1255,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1257 1255
1258 if (skb == tp->retransmit_skb_hint) 1256 if (skb == tp->retransmit_skb_hint)
1259 tp->retransmit_skb_hint = prev; 1257 tp->retransmit_skb_hint = prev;
1260 if (skb == tp->scoreboard_skb_hint)
1261 tp->scoreboard_skb_hint = prev;
1262 if (skb == tp->lost_skb_hint) { 1258 if (skb == tp->lost_skb_hint) {
1263 tp->lost_skb_hint = prev; 1259 tp->lost_skb_hint = prev;
1264 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1260 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
@@ -1966,20 +1962,6 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
1966 return true; 1962 return true;
1967} 1963}
1968 1964
1969static inline int tcp_skb_timedout(const struct sock *sk,
1970 const struct sk_buff *skb)
1971{
1972 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
1973}
1974
1975static inline int tcp_head_timedout(const struct sock *sk)
1976{
1977 const struct tcp_sock *tp = tcp_sk(sk);
1978
1979 return tp->packets_out &&
1980 tcp_skb_timedout(sk, tcp_write_queue_head(sk));
1981}
1982
1983/* Linux NewReno/SACK/FACK/ECN state machine. 1965/* Linux NewReno/SACK/FACK/ECN state machine.
1984 * -------------------------------------- 1966 * --------------------------------------
1985 * 1967 *
@@ -2086,12 +2068,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
2086 if (tcp_dupack_heuristics(tp) > tp->reordering) 2068 if (tcp_dupack_heuristics(tp) > tp->reordering)
2087 return true; 2069 return true;
2088 2070
2089 /* Trick#3 : when we use RFC2988 timer restart, fast
2090 * retransmit can be triggered by timeout of queue head.
2091 */
2092 if (tcp_is_fack(tp) && tcp_head_timedout(sk))
2093 return true;
2094
2095 /* Trick#4: It is still not OK... But will it be useful to delay 2071 /* Trick#4: It is still not OK... But will it be useful to delay
2096 * recovery more? 2072 * recovery more?
2097 */ 2073 */
@@ -2128,44 +2104,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
2128 return false; 2104 return false;
2129} 2105}
2130 2106
2131/* New heuristics: it is possible only after we switched to restart timer
2132 * each time when something is ACKed. Hence, we can detect timed out packets
2133 * during fast retransmit without falling to slow start.
2134 *
2135 * Usefulness of this as is very questionable, since we should know which of
2136 * the segments is the next to timeout which is relatively expensive to find
2137 * in general case unless we add some data structure just for that. The
2138 * current approach certainly won't find the right one too often and when it
2139 * finally does find _something_ it usually marks large part of the window
2140 * right away (because a retransmission with a larger timestamp blocks the
2141 * loop from advancing). -ij
2142 */
2143static void tcp_timeout_skbs(struct sock *sk)
2144{
2145 struct tcp_sock *tp = tcp_sk(sk);
2146 struct sk_buff *skb;
2147
2148 if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
2149 return;
2150
2151 skb = tp->scoreboard_skb_hint;
2152 if (tp->scoreboard_skb_hint == NULL)
2153 skb = tcp_write_queue_head(sk);
2154
2155 tcp_for_write_queue_from(skb, sk) {
2156 if (skb == tcp_send_head(sk))
2157 break;
2158 if (!tcp_skb_timedout(sk, skb))
2159 break;
2160
2161 tcp_skb_mark_lost(tp, skb);
2162 }
2163
2164 tp->scoreboard_skb_hint = skb;
2165
2166 tcp_verify_left_out(tp);
2167}
2168
2169/* Detect loss in event "A" above by marking head of queue up as lost. 2107/* Detect loss in event "A" above by marking head of queue up as lost.
2170 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments 2108 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
2171 * are considered lost. For RFC3517 SACK, a segment is considered lost if it 2109 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
@@ -2251,8 +2189,6 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2251 else if (fast_rexmit) 2189 else if (fast_rexmit)
2252 tcp_mark_head_lost(sk, 1, 1); 2190 tcp_mark_head_lost(sk, 1, 1);
2253 } 2191 }
2254
2255 tcp_timeout_skbs(sk);
2256} 2192}
2257 2193
2258/* CWND moderation, preventing bursts due to too big ACKs 2194/* CWND moderation, preventing bursts due to too big ACKs
@@ -2846,7 +2782,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2846 fast_rexmit = 1; 2782 fast_rexmit = 1;
2847 } 2783 }
2848 2784
2849 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 2785 if (do_lost)
2850 tcp_update_scoreboard(sk, fast_rexmit); 2786 tcp_update_scoreboard(sk, fast_rexmit);
2851 tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); 2787 tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit);
2852 tcp_xmit_retransmit_queue(sk); 2788 tcp_xmit_retransmit_queue(sk);
@@ -3079,7 +3015,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3079 3015
3080 tcp_unlink_write_queue(skb, sk); 3016 tcp_unlink_write_queue(skb, sk);
3081 sk_wmem_free_skb(sk, skb); 3017 sk_wmem_free_skb(sk, skb);
3082 tp->scoreboard_skb_hint = NULL;
3083 if (skb == tp->retransmit_skb_hint) 3018 if (skb == tp->retransmit_skb_hint)
3084 tp->retransmit_skb_hint = NULL; 3019 tp->retransmit_skb_hint = NULL;
3085 if (skb == tp->lost_skb_hint) 3020 if (skb == tp->lost_skb_hint)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 719652305a29..d20ede0c9593 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1026,7 +1026,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1026 key = sock_kmalloc(sk, sizeof(*key), gfp); 1026 key = sock_kmalloc(sk, sizeof(*key), gfp);
1027 if (!key) 1027 if (!key)
1028 return -ENOMEM; 1028 return -ENOMEM;
1029 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) { 1029 if (!tcp_alloc_md5sig_pool()) {
1030 sock_kfree_s(sk, key, sizeof(*key)); 1030 sock_kfree_s(sk, key, sizeof(*key));
1031 return -ENOMEM; 1031 return -ENOMEM;
1032 } 1032 }
@@ -1044,9 +1044,7 @@ EXPORT_SYMBOL(tcp_md5_do_add);
1044 1044
1045int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) 1045int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1046{ 1046{
1047 struct tcp_sock *tp = tcp_sk(sk);
1048 struct tcp_md5sig_key *key; 1047 struct tcp_md5sig_key *key;
1049 struct tcp_md5sig_info *md5sig;
1050 1048
1051 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1049 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1052 if (!key) 1050 if (!key)
@@ -1054,10 +1052,6 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1054 hlist_del_rcu(&key->node); 1052 hlist_del_rcu(&key->node);
1055 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); 1053 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1056 kfree_rcu(key, rcu); 1054 kfree_rcu(key, rcu);
1057 md5sig = rcu_dereference_protected(tp->md5sig_info,
1058 sock_owned_by_user(sk));
1059 if (hlist_empty(&md5sig->head))
1060 tcp_free_md5sig_pool();
1061 return 0; 1055 return 0;
1062} 1056}
1063EXPORT_SYMBOL(tcp_md5_do_del); 1057EXPORT_SYMBOL(tcp_md5_do_del);
@@ -1071,8 +1065,6 @@ static void tcp_clear_md5_list(struct sock *sk)
1071 1065
1072 md5sig = rcu_dereference_protected(tp->md5sig_info, 1); 1066 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1073 1067
1074 if (!hlist_empty(&md5sig->head))
1075 tcp_free_md5sig_pool();
1076 hlist_for_each_entry_safe(key, n, &md5sig->head, node) { 1068 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1077 hlist_del_rcu(&key->node); 1069 hlist_del_rcu(&key->node);
1078 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); 1070 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0f0178827259..ab1c08658528 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -317,7 +317,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
317 key = tp->af_specific->md5_lookup(sk, sk); 317 key = tp->af_specific->md5_lookup(sk, sk);
318 if (key != NULL) { 318 if (key != NULL) {
319 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); 319 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
320 if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL) 320 if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
321 BUG(); 321 BUG();
322 } 322 }
323 } while (0); 323 } while (0);
@@ -358,10 +358,8 @@ void tcp_twsk_destructor(struct sock *sk)
358#ifdef CONFIG_TCP_MD5SIG 358#ifdef CONFIG_TCP_MD5SIG
359 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 359 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
360 360
361 if (twsk->tw_md5_key) { 361 if (twsk->tw_md5_key)
362 tcp_free_md5sig_pool();
363 kfree_rcu(twsk->tw_md5_key, rcu); 362 kfree_rcu(twsk->tw_md5_key, rcu);
364 }
365#endif 363#endif
366} 364}
367EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 365EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d1ab6ab29a55..432e084b6b62 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1126,8 +1126,7 @@ retry:
1126 1126
1127 ift = !max_addresses || 1127 ift = !max_addresses ||
1128 ipv6_count_addresses(idev) < max_addresses ? 1128 ipv6_count_addresses(idev) < max_addresses ?
1129 ipv6_add_addr(idev, &addr, tmp_plen, 1129 ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
1130 ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
1131 addr_flags) : NULL; 1130 addr_flags) : NULL;
1132 if (IS_ERR_OR_NULL(ift)) { 1131 if (IS_ERR_OR_NULL(ift)) {
1133 in6_ifa_put(ifp); 1132 in6_ifa_put(ifp);
@@ -2402,6 +2401,7 @@ err_exit:
2402 * Manual configuration of address on an interface 2401 * Manual configuration of address on an interface
2403 */ 2402 */
2404static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx, 2403static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
2404 const struct in6_addr *peer_pfx,
2405 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, 2405 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2406 __u32 valid_lft) 2406 __u32 valid_lft)
2407{ 2407{
@@ -2457,6 +2457,8 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
2457 ifp->valid_lft = valid_lft; 2457 ifp->valid_lft = valid_lft;
2458 ifp->prefered_lft = prefered_lft; 2458 ifp->prefered_lft = prefered_lft;
2459 ifp->tstamp = jiffies; 2459 ifp->tstamp = jiffies;
2460 if (peer_pfx)
2461 ifp->peer_addr = *peer_pfx;
2460 spin_unlock_bh(&ifp->lock); 2462 spin_unlock_bh(&ifp->lock);
2461 2463
2462 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev, 2464 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
@@ -2526,7 +2528,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
2526 return -EFAULT; 2528 return -EFAULT;
2527 2529
2528 rtnl_lock(); 2530 rtnl_lock();
2529 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, 2531 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2530 ireq.ifr6_prefixlen, IFA_F_PERMANENT, 2532 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2531 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); 2533 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2532 rtnl_unlock(); 2534 rtnl_unlock();
@@ -3610,18 +3612,20 @@ restart:
3610 rcu_read_unlock_bh(); 3612 rcu_read_unlock_bh();
3611} 3613}
3612 3614
3613static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) 3615static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
3616 struct in6_addr **peer_pfx)
3614{ 3617{
3615 struct in6_addr *pfx = NULL; 3618 struct in6_addr *pfx = NULL;
3616 3619
3620 *peer_pfx = NULL;
3621
3617 if (addr) 3622 if (addr)
3618 pfx = nla_data(addr); 3623 pfx = nla_data(addr);
3619 3624
3620 if (local) { 3625 if (local) {
3621 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx))) 3626 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
3622 pfx = NULL; 3627 *peer_pfx = pfx;
3623 else 3628 pfx = nla_data(local);
3624 pfx = nla_data(local);
3625 } 3629 }
3626 3630
3627 return pfx; 3631 return pfx;
@@ -3639,7 +3643,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3639 struct net *net = sock_net(skb->sk); 3643 struct net *net = sock_net(skb->sk);
3640 struct ifaddrmsg *ifm; 3644 struct ifaddrmsg *ifm;
3641 struct nlattr *tb[IFA_MAX+1]; 3645 struct nlattr *tb[IFA_MAX+1];
3642 struct in6_addr *pfx; 3646 struct in6_addr *pfx, *peer_pfx;
3643 int err; 3647 int err;
3644 3648
3645 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3649 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3647,7 +3651,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3647 return err; 3651 return err;
3648 3652
3649 ifm = nlmsg_data(nlh); 3653 ifm = nlmsg_data(nlh);
3650 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); 3654 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
3651 if (pfx == NULL) 3655 if (pfx == NULL)
3652 return -EINVAL; 3656 return -EINVAL;
3653 3657
@@ -3705,7 +3709,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
3705 struct net *net = sock_net(skb->sk); 3709 struct net *net = sock_net(skb->sk);
3706 struct ifaddrmsg *ifm; 3710 struct ifaddrmsg *ifm;
3707 struct nlattr *tb[IFA_MAX+1]; 3711 struct nlattr *tb[IFA_MAX+1];
3708 struct in6_addr *pfx; 3712 struct in6_addr *pfx, *peer_pfx;
3709 struct inet6_ifaddr *ifa; 3713 struct inet6_ifaddr *ifa;
3710 struct net_device *dev; 3714 struct net_device *dev;
3711 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME; 3715 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
@@ -3717,7 +3721,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
3717 return err; 3721 return err;
3718 3722
3719 ifm = nlmsg_data(nlh); 3723 ifm = nlmsg_data(nlh);
3720 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); 3724 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
3721 if (pfx == NULL) 3725 if (pfx == NULL)
3722 return -EINVAL; 3726 return -EINVAL;
3723 3727
@@ -3745,7 +3749,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
3745 * It would be best to check for !NLM_F_CREATE here but 3749 * It would be best to check for !NLM_F_CREATE here but
3746 * userspace alreay relies on not having to provide this. 3750 * userspace alreay relies on not having to provide this.
3747 */ 3751 */
3748 return inet6_addr_add(net, ifm->ifa_index, pfx, 3752 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
3749 ifm->ifa_prefixlen, ifa_flags, 3753 ifm->ifa_prefixlen, ifa_flags,
3750 preferred_lft, valid_lft); 3754 preferred_lft, valid_lft);
3751 } 3755 }
@@ -3802,6 +3806,7 @@ static inline int rt_scope(int ifa_scope)
3802static inline int inet6_ifaddr_msgsize(void) 3806static inline int inet6_ifaddr_msgsize(void)
3803{ 3807{
3804 return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) 3808 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
3809 + nla_total_size(16) /* IFA_LOCAL */
3805 + nla_total_size(16) /* IFA_ADDRESS */ 3810 + nla_total_size(16) /* IFA_ADDRESS */
3806 + nla_total_size(sizeof(struct ifa_cacheinfo)); 3811 + nla_total_size(sizeof(struct ifa_cacheinfo));
3807} 3812}
@@ -3840,13 +3845,22 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3840 valid = INFINITY_LIFE_TIME; 3845 valid = INFINITY_LIFE_TIME;
3841 } 3846 }
3842 3847
3843 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 || 3848 if (!ipv6_addr_any(&ifa->peer_addr)) {
3844 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) { 3849 if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
3845 nlmsg_cancel(skb, nlh); 3850 nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
3846 return -EMSGSIZE; 3851 goto error;
3847 } 3852 } else
3853 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
3854 goto error;
3855
3856 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
3857 goto error;
3848 3858
3849 return nlmsg_end(skb, nlh); 3859 return nlmsg_end(skb, nlh);
3860
3861error:
3862 nlmsg_cancel(skb, nlh);
3863 return -EMSGSIZE;
3850} 3864}
3851 3865
3852static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, 3866static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
@@ -4046,7 +4060,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
4046 struct net *net = sock_net(in_skb->sk); 4060 struct net *net = sock_net(in_skb->sk);
4047 struct ifaddrmsg *ifm; 4061 struct ifaddrmsg *ifm;
4048 struct nlattr *tb[IFA_MAX+1]; 4062 struct nlattr *tb[IFA_MAX+1];
4049 struct in6_addr *addr = NULL; 4063 struct in6_addr *addr = NULL, *peer;
4050 struct net_device *dev = NULL; 4064 struct net_device *dev = NULL;
4051 struct inet6_ifaddr *ifa; 4065 struct inet6_ifaddr *ifa;
4052 struct sk_buff *skb; 4066 struct sk_buff *skb;
@@ -4056,7 +4070,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
4056 if (err < 0) 4070 if (err < 0)
4057 goto errout; 4071 goto errout;
4058 4072
4059 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); 4073 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4060 if (addr == NULL) { 4074 if (addr == NULL) {
4061 err = -EINVAL; 4075 err = -EINVAL;
4062 goto errout; 4076 goto errout;
@@ -4564,11 +4578,26 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4564 ip6_ins_rt(ifp->rt); 4578 ip6_ins_rt(ifp->rt);
4565 if (ifp->idev->cnf.forwarding) 4579 if (ifp->idev->cnf.forwarding)
4566 addrconf_join_anycast(ifp); 4580 addrconf_join_anycast(ifp);
4581 if (!ipv6_addr_any(&ifp->peer_addr))
4582 addrconf_prefix_route(&ifp->peer_addr, 128,
4583 ifp->idev->dev, 0, 0);
4567 break; 4584 break;
4568 case RTM_DELADDR: 4585 case RTM_DELADDR:
4569 if (ifp->idev->cnf.forwarding) 4586 if (ifp->idev->cnf.forwarding)
4570 addrconf_leave_anycast(ifp); 4587 addrconf_leave_anycast(ifp);
4571 addrconf_leave_solict(ifp->idev, &ifp->addr); 4588 addrconf_leave_solict(ifp->idev, &ifp->addr);
4589 if (!ipv6_addr_any(&ifp->peer_addr)) {
4590 struct rt6_info *rt;
4591 struct net_device *dev = ifp->idev->dev;
4592
4593 rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
4594 dev->ifindex, 1);
4595 if (rt) {
4596 dst_hold(&rt->dst);
4597 if (ip6_del_rt(rt))
4598 dst_free(&rt->dst);
4599 }
4600 }
4572 dst_hold(&ifp->rt->dst); 4601 dst_hold(&ifp->rt->dst);
4573 4602
4574 if (ip6_del_rt(ifp->rt)) 4603 if (ip6_del_rt(ifp->rt))
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c8388f3c3426..38008b0980d9 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -116,14 +116,57 @@ struct tbf_sched_data {
116 struct qdisc_watchdog watchdog; /* Watchdog timer */ 116 struct qdisc_watchdog watchdog; /* Watchdog timer */
117}; 117};
118 118
119
120/* GSO packet is too big, segment it so that tbf can transmit
121 * each segment in time
122 */
123static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
124{
125 struct tbf_sched_data *q = qdisc_priv(sch);
126 struct sk_buff *segs, *nskb;
127 netdev_features_t features = netif_skb_features(skb);
128 int ret, nb;
129
130 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
131
132 if (IS_ERR_OR_NULL(segs))
133 return qdisc_reshape_fail(skb, sch);
134
135 nb = 0;
136 while (segs) {
137 nskb = segs->next;
138 segs->next = NULL;
139 if (likely(segs->len <= q->max_size)) {
140 qdisc_skb_cb(segs)->pkt_len = segs->len;
141 ret = qdisc_enqueue(segs, q->qdisc);
142 } else {
143 ret = qdisc_reshape_fail(skb, sch);
144 }
145 if (ret != NET_XMIT_SUCCESS) {
146 if (net_xmit_drop_count(ret))
147 sch->qstats.drops++;
148 } else {
149 nb++;
150 }
151 segs = nskb;
152 }
153 sch->q.qlen += nb;
154 if (nb > 1)
155 qdisc_tree_decrease_qlen(sch, 1 - nb);
156 consume_skb(skb);
157 return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
158}
159
119static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) 160static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
120{ 161{
121 struct tbf_sched_data *q = qdisc_priv(sch); 162 struct tbf_sched_data *q = qdisc_priv(sch);
122 int ret; 163 int ret;
123 164
124 if (qdisc_pkt_len(skb) > q->max_size) 165 if (qdisc_pkt_len(skb) > q->max_size) {
166 if (skb_is_gso(skb))
167 return tbf_segment(skb, sch);
125 return qdisc_reshape_fail(skb, sch); 168 return qdisc_reshape_fail(skb, sch);
126 169 }
127 ret = qdisc_enqueue(skb, q->qdisc); 170 ret = qdisc_enqueue(skb, q->qdisc);
128 if (ret != NET_XMIT_SUCCESS) { 171 if (ret != NET_XMIT_SUCCESS) {
129 if (net_xmit_drop_count(ret)) 172 if (net_xmit_drop_count(ret))