aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-18 12:31:37 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-18 12:31:37 -0400
commit2e923b0251932ad4a82cc87ec1443a1f1d17073e (patch)
treed12032bc9bcfbb8a57659275d1b9b582f23f2ecc
parentffd8221bc348f8c282d1271883dbe629ea8ae289 (diff)
parentf2d9da1a8375cbe53df5b415d059429013a3a79f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Include fixes for netrom and dsa (Fabian Frederick and Florian Fainelli) 2) Fix FIXED_PHY support in stmmac, from Giuseppe CAVALLARO. 3) Several SKB use after free fixes (vxlan, openvswitch, vxlan, ip_tunnel, fou), from Li RongQing. 4) fec driver PTP support fixes from Luwei Zhou and Nimrod Andy. 5) Use after free in virtio_net, from Michael S Tsirkin. 6) Fix flow mask handling for megaflows in openvswitch, from Pravin B Shelar. 7) ISDN gigaset and capi bug fixes from Tilman Schmidt. 8) Fix route leak in ip_send_unicast_reply(), from Vasily Averin. 9) Fix two eBPF JIT bugs on x86, from Alexei Starovoitov. 10) TCP_SKB_CB() reorganization caused a few regressions, fixed by Cong Wang and Eric Dumazet. 11) Don't overwrite end of SKB when parsing malformed sctp ASCONF chunks, from Daniel Borkmann. 12) Don't call sock_kfree_s() with NULL pointers, this function also has the side effect of adjusting the socket memory usage. From Cong Wang. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (90 commits) bna: fix skb->truesize underestimation net: dsa: add includes for ethtool and phy_fixed definitions openvswitch: Set flow-key members. netrom: use linux/uaccess.h dsa: Fix conversion from host device to mii bus tipc: fix bug in bundled buffer reception ipv6: introduce tcp_v6_iif() sfc: add support for skb->xmit_more r8152: return -EBUSY for runtime suspend ipv4: fix a potential use after free in fou.c ipv4: fix a potential use after free in ip_tunnel_core.c hyperv: Add handling of IP header with option field in netvsc_set_hash() openvswitch: Create right mask with disabled megaflows vxlan: fix a free after use openvswitch: fix a use after free ipv4: dst_entry leak in ip_send_unicast_reply() ipv4: clean up cookie_v4_check() ipv4: share tcp_v4_save_options() with cookie_v4_check() ipv4: call __ip_options_echo() in cookie_v4_check() atm: simplify lanai.c by using module_pci_driver ...
-rw-r--r--Documentation/devicetree/bindings/net/sti-dwmac.txt91
-rw-r--r--MAINTAINERS2
-rw-r--r--arch/arm64/boot/dts/apm-mustang.dts4
-rw-r--r--arch/arm64/boot/dts/apm-storm.dtsi24
-rw-r--r--arch/x86/net/bpf_jit_comp.c25
-rw-r--r--drivers/atm/lanai.c22
-rw-r--r--drivers/isdn/capi/capidrv.c24
-rw-r--r--drivers/isdn/capi/capiutil.c41
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/gigaset/capi.c155
-rw-r--r--drivers/isdn/gigaset/ev-layer.c116
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c4
-rw-r--r--drivers/net/dsa/mv88e6060.c16
-rw-r--r--drivers/net/dsa/mv88e6171.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c14
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c25
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c389
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h41
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c6
-rw-r--r--drivers/net/ethernet/freescale/fec.h60
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c69
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c277
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c65
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c6
-rw-r--r--drivers/net/ethernet/sfc/nic.h29
-rw-r--r--drivers/net/ethernet/sfc/tx.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c20
-rw-r--r--drivers/net/ethernet/ti/cpsw.c10
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c5
-rw-r--r--drivers/net/hyperv/netvsc_drv.c26
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c148
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--include/linux/genl_magic_func.h4
-rw-r--r--include/linux/kernelcapi.h2
-rw-r--r--include/linux/netdevice.h12
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/net/dsa.h1
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet6_hashtables.h5
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/sctp/sctp.h5
-rw-r--r--include/net/sctp/sm.h6
-rw-r--r--include/net/tcp.h32
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/bpf.h1
-rw-r--r--include/uapi/linux/bpf_common.h55
-rw-r--r--include/uapi/linux/filter.h56
-rw-r--r--net/caif/caif_usb.c7
-rw-r--r--net/caif/cfmuxl.c4
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dsa/slave.c1
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fou.c3
-rw-r--r--net/ipv4/ip_output.c12
-rw-r--r--net/ipv4/ip_tunnel_core.c3
-rw-r--r--net/ipv4/syncookies.c16
-rw-r--r--net/ipv4/tcp_input.c36
-rw-r--r--net/ipv4/tcp_ipv4.c22
-rw-r--r--net/ipv4/tcp_output.c34
-rw-r--r--net/ipv6/anycast.c1
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c26
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/nr_in.c2
-rw-r--r--net/netrom/nr_out.c2
-rw-r--r--net/netrom/nr_route.c2
-rw-r--r--net/netrom/nr_subr.c2
-rw-r--r--net/netrom/nr_timer.c2
-rw-r--r--net/openvswitch/flow.c9
-rw-r--r--net/openvswitch/flow_netlink.c93
-rw-r--r--net/openvswitch/vport-geneve.c2
-rw-r--r--net/openvswitch/vport.c4
-rw-r--r--net/rds/rdma.c7
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/inqueue.c33
-rw-r--r--net/sctp/sm_make_chunk.c99
-rw-r--r--net/sctp/sm_statefuns.c21
-rw-r--r--net/tipc/link.c7
106 files changed, 2010 insertions, 957 deletions
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
index 3dd3d0bf112f..6762a6b5da7e 100644
--- a/Documentation/devicetree/bindings/net/sti-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -1,58 +1,65 @@
1STMicroelectronics SoC DWMAC glue layer controller 1STMicroelectronics SoC DWMAC glue layer controller
2 2
3This file documents differences between the core properties in
4Documentation/devicetree/bindings/net/stmmac.txt
5and what is needed on STi platforms to program the stmmac glue logic.
6
3The device node has following properties. 7The device node has following properties.
4 8
5Required properties: 9Required properties:
6 - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or 10 - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac",
7 "st,stid127-dwmac". 11 "st,stih407-dwmac", "st,stid127-dwmac".
8 - reg : Offset of the glue configuration register map in system 12 - reg : Offset of the glue configuration register map in system
9 configuration regmap pointed by st,syscon property and size. 13 configuration regmap pointed by st,syscon property and size.
10 14 - st,syscon : Should be phandle to system configuration node which
11 - reg-names : Should be "sti-ethconf".
12
13 - st,syscon : Should be phandle to system configuration node which
14 encompases this glue registers. 15 encompases this glue registers.
16 - st,gmac_en: this is to enable the gmac into a dedicated sysctl control
17 register available on STiH407 SoC.
18 - sti-ethconf: this is the gmac glue logic register to enable the GMAC,
19 select among the different modes and program the clk retiming.
20 - pinctrl-0: pin-control for all the MII mode supported.
15 21
16 - st,tx-retime-src: On STi Parts for Giga bit speeds, 125Mhz clocks can be 22Optional properties:
17 wired up in from different sources. One via TXCLK pin and other via CLK_125 23 - resets : phandle pointing to the system reset controller with correct
18 pin. This wiring is totally board dependent. However the retiming glue 24 reset line index for ethernet reset.
19 logic should be configured accordingly. Possible values for this property 25 - st,ext-phyclk: valid only for RMII where PHY can generate 50MHz clock or
20 26 MAC can generate it.
21 "txclk" - if 125Mhz clock is wired up via txclk line. 27 - st,tx-retime-src: This specifies which clk is wired up to the mac for
22 "clk_125" - if 125Mhz clock is wired up via clk_125 line. 28 retimeing tx lines. This is totally board dependent and can take one of the
23 29 posssible values from "txclk", "clk_125" or "clkgen".
24 This property is only valid for Giga bit setup( GMII, RGMII), and it is 30 If not passed, the internal clock will be used by default.
25 un-used for non-giga bit (MII and RMII) setups. Also note that internal 31 - sti-ethclk: this is the phy clock.
26 clockgen can not generate stable 125Mhz clock. 32 - sti-clkconf: this is an extra sysconfig register, available in new SoCs,
27 33 to program the clk retiming.
28 - st,ext-phyclk: This boolean property indicates who is generating the clock 34 - st,gmac_en: to enable the GMAC, this only is present in some SoCs; e.g.
29 for tx and rx. This property is only valid for RMII case where the clock can 35 STiH407.
30 be generated from the MAC or PHY.
31
32 - clock-names: should be "sti-ethclk".
33 - clocks: Should point to ethernet clockgen which can generate phyclk.
34
35 36
36Example: 37Example:
37 38
38ethernet0: dwmac@fe810000 { 39ethernet0: dwmac@9630000 {
39 device_type = "network"; 40 device_type = "network";
40 compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710"; 41 status = "disabled";
41 reg = <0xfe810000 0x8000>, <0x8bc 0x4>; 42 compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710";
42 reg-names = "stmmaceth", "sti-ethconf"; 43 reg = <0x9630000 0x8000>, <0x80 0x4>;
43 interrupts = <0 133 0>, <0 134 0>, <0 135 0>; 44 reg-names = "stmmaceth", "sti-ethconf";
44 interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
45 phy-mode = "mii";
46 45
47 st,syscon = <&syscfg_rear>; 46 st,syscon = <&syscfg_sbc_reg>;
47 st,gmac_en;
48 resets = <&softreset STIH407_ETH1_SOFTRESET>;
49 reset-names = "stmmaceth";
48 50
49 snps,pbl = <32>; 51 interrupts = <GIC_SPI 98 IRQ_TYPE_NONE>,
52 <GIC_SPI 99 IRQ_TYPE_NONE>,
53 <GIC_SPI 100 IRQ_TYPE_NONE>;
54 interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
55
56 snps,pbl = <32>;
50 snps,mixed-burst; 57 snps,mixed-burst;
51 58
52 resets = <&softreset STIH416_ETH0_SOFTRESET>; 59 pinctrl-names = "default";
53 reset-names = "stmmaceth"; 60 pinctrl-0 = <&pinctrl_rgmii1>;
54 pinctrl-0 = <&pinctrl_mii0>; 61
55 pinctrl-names = "default"; 62 clock-names = "stmmaceth", "sti-ethclk";
56 clocks = <&CLK_S_GMAC0_PHY>; 63 clocks = <&CLK_S_C0_FLEXGEN CLK_EXT2F_A9>,
57 clock-names = "stmmaceth"; 64 <&CLK_S_C0_FLEXGEN CLK_ETH_PHY>;
58}; 65};
diff --git a/MAINTAINERS b/MAINTAINERS
index b0f17d59078e..d61b727fbfa8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -564,7 +564,7 @@ L: linux-alpha@vger.kernel.org
564F: arch/alpha/ 564F: arch/alpha/
565 565
566ALTERA TRIPLE SPEED ETHERNET DRIVER 566ALTERA TRIPLE SPEED ETHERNET DRIVER
567M: Vince Bridgers <vbridgers2013@gmail.com> 567M: Vince Bridgers <vbridger@opensource.altera.com>
568L: netdev@vger.kernel.org 568L: netdev@vger.kernel.org
569L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) 569L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
570S: Maintained 570S: Maintained
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index 8eb6d94c7851..2e25de0800b9 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -41,6 +41,10 @@
41 status = "ok"; 41 status = "ok";
42}; 42};
43 43
44&sgenet0 {
45 status = "ok";
46};
47
44&xgenet { 48&xgenet {
45 status = "ok"; 49 status = "ok";
46}; 50};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 87d3205e98d5..295c72d52a1f 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -176,6 +176,16 @@
176 clock-output-names = "menetclk"; 176 clock-output-names = "menetclk";
177 }; 177 };
178 178
179 sge0clk: sge0clk@1f21c000 {
180 compatible = "apm,xgene-device-clock";
181 #clock-cells = <1>;
182 clocks = <&socplldiv2 0>;
183 reg = <0x0 0x1f21c000 0x0 0x1000>;
184 reg-names = "csr-reg";
185 csr-mask = <0x3>;
186 clock-output-names = "sge0clk";
187 };
188
179 xge0clk: xge0clk@1f61c000 { 189 xge0clk: xge0clk@1f61c000 {
180 compatible = "apm,xgene-device-clock"; 190 compatible = "apm,xgene-device-clock";
181 #clock-cells = <1>; 191 #clock-cells = <1>;
@@ -611,6 +621,20 @@
611 }; 621 };
612 }; 622 };
613 623
624 sgenet0: ethernet@1f210000 {
625 compatible = "apm,xgene-enet";
626 status = "disabled";
627 reg = <0x0 0x1f210000 0x0 0x10000>,
628 <0x0 0x1f200000 0x0 0X10000>,
629 <0x0 0x1B000000 0x0 0X20000>;
630 reg-names = "enet_csr", "ring_csr", "ring_cmd";
631 interrupts = <0x0 0xA0 0x4>;
632 dma-coherent;
633 clocks = <&sge0clk 0>;
634 local-mac-address = [00 00 00 00 00 00];
635 phy-connection-type = "sgmii";
636 };
637
614 xgenet: ethernet@1f610000 { 638 xgenet: ethernet@1f610000 {
615 compatible = "apm,xgene-enet"; 639 compatible = "apm,xgene-enet";
616 status = "disabled"; 640 status = "disabled";
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d56cd1f515bd..3f627345d51c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -182,12 +182,17 @@ struct jit_context {
182 bool seen_ld_abs; 182 bool seen_ld_abs;
183}; 183};
184 184
185/* maximum number of bytes emitted while JITing one eBPF insn */
186#define BPF_MAX_INSN_SIZE 128
187#define BPF_INSN_SAFETY 64
188
185static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 189static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
186 int oldproglen, struct jit_context *ctx) 190 int oldproglen, struct jit_context *ctx)
187{ 191{
188 struct bpf_insn *insn = bpf_prog->insnsi; 192 struct bpf_insn *insn = bpf_prog->insnsi;
189 int insn_cnt = bpf_prog->len; 193 int insn_cnt = bpf_prog->len;
190 u8 temp[64]; 194 bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
195 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
191 int i; 196 int i;
192 int proglen = 0; 197 int proglen = 0;
193 u8 *prog = temp; 198 u8 *prog = temp;
@@ -225,7 +230,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
225 EMIT2(0x31, 0xc0); /* xor eax, eax */ 230 EMIT2(0x31, 0xc0); /* xor eax, eax */
226 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ 231 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
227 232
228 if (ctx->seen_ld_abs) { 233 if (seen_ld_abs) {
229 /* r9d : skb->len - skb->data_len (headlen) 234 /* r9d : skb->len - skb->data_len (headlen)
230 * r10 : skb->data 235 * r10 : skb->data
231 */ 236 */
@@ -685,7 +690,7 @@ xadd: if (is_imm8(insn->off))
685 case BPF_JMP | BPF_CALL: 690 case BPF_JMP | BPF_CALL:
686 func = (u8 *) __bpf_call_base + imm32; 691 func = (u8 *) __bpf_call_base + imm32;
687 jmp_offset = func - (image + addrs[i]); 692 jmp_offset = func - (image + addrs[i]);
688 if (ctx->seen_ld_abs) { 693 if (seen_ld_abs) {
689 EMIT2(0x41, 0x52); /* push %r10 */ 694 EMIT2(0x41, 0x52); /* push %r10 */
690 EMIT2(0x41, 0x51); /* push %r9 */ 695 EMIT2(0x41, 0x51); /* push %r9 */
691 /* need to adjust jmp offset, since 696 /* need to adjust jmp offset, since
@@ -699,7 +704,7 @@ xadd: if (is_imm8(insn->off))
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 EMIT1_off32(0xE8, jmp_offset); 706 EMIT1_off32(0xE8, jmp_offset);
702 if (ctx->seen_ld_abs) { 707 if (seen_ld_abs) {
703 EMIT2(0x41, 0x59); /* pop %r9 */ 708 EMIT2(0x41, 0x59); /* pop %r9 */
704 EMIT2(0x41, 0x5A); /* pop %r10 */ 709 EMIT2(0x41, 0x5A); /* pop %r10 */
705 } 710 }
@@ -804,7 +809,8 @@ emit_jmp:
804 goto common_load; 809 goto common_load;
805 case BPF_LD | BPF_ABS | BPF_W: 810 case BPF_LD | BPF_ABS | BPF_W:
806 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); 811 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
807common_load: ctx->seen_ld_abs = true; 812common_load:
813 ctx->seen_ld_abs = seen_ld_abs = true;
808 jmp_offset = func - (image + addrs[i]); 814 jmp_offset = func - (image + addrs[i]);
809 if (!func || !is_simm32(jmp_offset)) { 815 if (!func || !is_simm32(jmp_offset)) {
810 pr_err("unsupported bpf func %d addr %p image %p\n", 816 pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -878,6 +884,11 @@ common_load: ctx->seen_ld_abs = true;
878 } 884 }
879 885
880 ilen = prog - temp; 886 ilen = prog - temp;
887 if (ilen > BPF_MAX_INSN_SIZE) {
888 pr_err("bpf_jit_compile fatal insn size error\n");
889 return -EFAULT;
890 }
891
881 if (image) { 892 if (image) {
882 if (unlikely(proglen + ilen > oldproglen)) { 893 if (unlikely(proglen + ilen > oldproglen)) {
883 pr_err("bpf_jit_compile fatal error\n"); 894 pr_err("bpf_jit_compile fatal error\n");
@@ -934,9 +945,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
934 goto out; 945 goto out;
935 } 946 }
936 if (image) { 947 if (image) {
937 if (proglen != oldproglen) 948 if (proglen != oldproglen) {
938 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 949 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
939 proglen, oldproglen); 950 proglen, oldproglen);
951 goto out;
952 }
940 break; 953 break;
941 } 954 }
942 if (proglen == oldproglen) { 955 if (proglen == oldproglen) {
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index fa7d701933ba..93eaf8d94492 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2614,27 +2614,7 @@ static struct pci_driver lanai_driver = {
2614 .probe = lanai_init_one, 2614 .probe = lanai_init_one,
2615}; 2615};
2616 2616
2617static int __init lanai_module_init(void) 2617module_pci_driver(lanai_driver);
2618{
2619 int x;
2620
2621 x = pci_register_driver(&lanai_driver);
2622 if (x != 0)
2623 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
2624 return x;
2625}
2626
2627static void __exit lanai_module_exit(void)
2628{
2629 /* We'll only get called when all the interfaces are already
2630 * gone, so there isn't much to do
2631 */
2632 DPRINTK("cleanup_module()\n");
2633 pci_unregister_driver(&lanai_driver);
2634}
2635
2636module_init(lanai_module_init);
2637module_exit(lanai_module_exit);
2638 2618
2639MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>"); 2619MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
2640MODULE_DESCRIPTION("Efficient Networks Speedstream 3010 driver"); 2620MODULE_DESCRIPTION("Efficient Networks Speedstream 3010 driver");
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index fd6d28f3fc36..1cc6ca8bfbda 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -506,7 +506,10 @@ static void send_message(capidrv_contr *card, _cmsg *cmsg)
506 struct sk_buff *skb; 506 struct sk_buff *skb;
507 size_t len; 507 size_t len;
508 508
509 capi_cmsg2message(cmsg, cmsg->buf); 509 if (capi_cmsg2message(cmsg, cmsg->buf)) {
510 printk(KERN_ERR "capidrv::send_message: parser failure\n");
511 return;
512 }
510 len = CAPIMSG_LEN(cmsg->buf); 513 len = CAPIMSG_LEN(cmsg->buf);
511 skb = alloc_skb(len, GFP_ATOMIC); 514 skb = alloc_skb(len, GFP_ATOMIC);
512 if (!skb) { 515 if (!skb) {
@@ -1578,7 +1581,12 @@ static _cmsg s_cmsg;
1578 1581
1579static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb) 1582static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
1580{ 1583{
1581 capi_message2cmsg(&s_cmsg, skb->data); 1584 if (capi_message2cmsg(&s_cmsg, skb->data)) {
1585 printk(KERN_ERR "capidrv: applid=%d: received invalid message\n",
1586 ap->applid);
1587 kfree_skb(skb);
1588 return;
1589 }
1582 if (debugmode > 3) { 1590 if (debugmode > 3) {
1583 _cdebbuf *cdb = capi_cmsg2str(&s_cmsg); 1591 _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
1584 1592
@@ -1903,7 +1911,11 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
1903 NULL, /* Useruserdata */ 1911 NULL, /* Useruserdata */
1904 NULL /* Facilitydataarray */ 1912 NULL /* Facilitydataarray */
1905 ); 1913 );
1906 capi_cmsg2message(&cmdcmsg, cmdcmsg.buf); 1914 if (capi_cmsg2message(&cmdcmsg, cmdcmsg.buf)) {
1915 printk(KERN_ERR "capidrv-%d: capidrv_command: parser failure\n",
1916 card->contrnr);
1917 return -EINVAL;
1918 }
1907 plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP); 1919 plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP);
1908 send_message(card, &cmdcmsg); 1920 send_message(card, &cmdcmsg);
1909 return 0; 1921 return 0;
@@ -2090,7 +2102,11 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
2090 if (capidrv_add_ack(nccip, datahandle, doack ? (int)skb->len : -1) < 0) 2102 if (capidrv_add_ack(nccip, datahandle, doack ? (int)skb->len : -1) < 0)
2091 return 0; 2103 return 0;
2092 2104
2093 capi_cmsg2message(&sendcmsg, sendcmsg.buf); 2105 if (capi_cmsg2message(&sendcmsg, sendcmsg.buf)) {
2106 printk(KERN_ERR "capidrv-%d: if_sendbuf: parser failure\n",
2107 card->contrnr);
2108 return -EINVAL;
2109 }
2094 msglen = CAPIMSG_LEN(sendcmsg.buf); 2110 msglen = CAPIMSG_LEN(sendcmsg.buf);
2095 if (skb_headroom(skb) < msglen) { 2111 if (skb_headroom(skb) < msglen) {
2096 struct sk_buff *nskb = skb_realloc_headroom(skb, msglen); 2112 struct sk_buff *nskb = skb_realloc_headroom(skb, msglen);
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 4073d1684d07..36c1b37cea0a 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -207,9 +207,24 @@ static unsigned command_2_index(unsigned c, unsigned sc)
207 c = 0x9 + (c & 0x0f); 207 c = 0x9 + (c & 0x0f);
208 else if (c == 0x41) 208 else if (c == 0x41)
209 c = 0x9 + 0x1; 209 c = 0x9 + 0x1;
210 if (c > 0x18)
211 c = 0x00;
210 return (sc & 3) * (0x9 + 0x9) + c; 212 return (sc & 3) * (0x9 + 0x9) + c;
211} 213}
212 214
215/**
216 * capi_cmd2par() - find parameter string for CAPI 2.0 command/subcommand
217 * @cmd: command number
218 * @subcmd: subcommand number
219 *
220 * Return value: static string, NULL if command/subcommand unknown
221 */
222
223static unsigned char *capi_cmd2par(u8 cmd, u8 subcmd)
224{
225 return cpars[command_2_index(cmd, subcmd)];
226}
227
213/*-------------------------------------------------------*/ 228/*-------------------------------------------------------*/
214#define TYP (cdef[cmsg->par[cmsg->p]].typ) 229#define TYP (cdef[cmsg->par[cmsg->p]].typ)
215#define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off) 230#define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off)
@@ -302,7 +317,9 @@ unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)
302 cmsg->m = msg; 317 cmsg->m = msg;
303 cmsg->l = 8; 318 cmsg->l = 8;
304 cmsg->p = 0; 319 cmsg->p = 0;
305 cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; 320 cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
321 if (!cmsg->par)
322 return 1; /* invalid command/subcommand */
306 323
307 pars_2_message(cmsg); 324 pars_2_message(cmsg);
308 325
@@ -375,7 +392,9 @@ unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)
375 cmsg->p = 0; 392 cmsg->p = 0;
376 byteTRcpy(cmsg->m + 4, &cmsg->Command); 393 byteTRcpy(cmsg->m + 4, &cmsg->Command);
377 byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); 394 byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
378 cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; 395 cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
396 if (!cmsg->par)
397 return 1; /* invalid command/subcommand */
379 398
380 message_2_pars(cmsg); 399 message_2_pars(cmsg);
381 400
@@ -470,12 +489,17 @@ static char *mnames[] =
470 * @cmd: command number 489 * @cmd: command number
471 * @subcmd: subcommand number 490 * @subcmd: subcommand number
472 * 491 *
473 * Return value: static string, NULL if command/subcommand unknown 492 * Return value: static string
474 */ 493 */
475 494
476char *capi_cmd2str(u8 cmd, u8 subcmd) 495char *capi_cmd2str(u8 cmd, u8 subcmd)
477{ 496{
478 return mnames[command_2_index(cmd, subcmd)]; 497 char *result;
498
499 result = mnames[command_2_index(cmd, subcmd)];
500 if (result == NULL)
501 result = "INVALID_COMMAND";
502 return result;
479} 503}
480 504
481 505
@@ -625,6 +649,9 @@ static _cdebbuf *printstruct(_cdebbuf *cdb, u8 *m)
625 649
626static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level) 650static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level)
627{ 651{
652 if (!cmsg->par)
653 return NULL; /* invalid command/subcommand */
654
628 for (; TYP != _CEND; cmsg->p++) { 655 for (; TYP != _CEND; cmsg->p++) {
629 int slen = 29 + 3 - level; 656 int slen = 29 + 3 - level;
630 int i; 657 int i;
@@ -759,10 +786,10 @@ _cdebbuf *capi_message2str(u8 *msg)
759 cmsg->p = 0; 786 cmsg->p = 0;
760 byteTRcpy(cmsg->m + 4, &cmsg->Command); 787 byteTRcpy(cmsg->m + 4, &cmsg->Command);
761 byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); 788 byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
762 cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; 789 cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
763 790
764 cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n", 791 cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n",
765 mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], 792 capi_cmd2str(cmsg->Command, cmsg->Subcommand),
766 ((unsigned short *) msg)[1], 793 ((unsigned short *) msg)[1],
767 ((unsigned short *) msg)[3], 794 ((unsigned short *) msg)[3],
768 ((unsigned short *) msg)[0]); 795 ((unsigned short *) msg)[0]);
@@ -796,7 +823,7 @@ _cdebbuf *capi_cmsg2str(_cmsg *cmsg)
796 cmsg->l = 8; 823 cmsg->l = 8;
797 cmsg->p = 0; 824 cmsg->p = 0;
798 cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n", 825 cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
799 mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], 826 capi_cmd2str(cmsg->Command, cmsg->Subcommand),
800 ((u16 *) cmsg->m)[1], 827 ((u16 *) cmsg->m)[1],
801 ((u16 *) cmsg->m)[3], 828 ((u16 *) cmsg->m)[3],
802 ((u16 *) cmsg->m)[0]); 829 ((u16 *) cmsg->m)[0]);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index c123709acf82..823f6985b260 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1184,7 +1184,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
1184 * Return value: CAPI result code 1184 * Return value: CAPI result code
1185 */ 1185 */
1186 1186
1187int capi20_manufacturer(unsigned int cmd, void __user *data) 1187int capi20_manufacturer(unsigned long cmd, void __user *data)
1188{ 1188{
1189 struct capi_ctr *ctr; 1189 struct capi_ctr *ctr;
1190 int retval; 1190 int retval;
@@ -1259,7 +1259,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
1259 } 1259 }
1260 1260
1261 default: 1261 default:
1262 printk(KERN_ERR "kcapi: manufacturer command %d unknown.\n", 1262 printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n",
1263 cmd); 1263 cmd);
1264 break; 1264 break;
1265 1265
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 3286903a95d2..ccec7778cad2 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -250,6 +250,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
250 l -= 12; 250 l -= 12;
251 if (l <= 0) 251 if (l <= 0)
252 return; 252 return;
253 if (l > 64)
254 l = 64; /* arbitrary limit */
253 dbgline = kmalloc(3 * l, GFP_ATOMIC); 255 dbgline = kmalloc(3 * l, GFP_ATOMIC);
254 if (!dbgline) 256 if (!dbgline)
255 return; 257 return;
@@ -645,7 +647,13 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
645 __func__); 647 __func__);
646 break; 648 break;
647 } 649 }
648 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); 650 if (capi_cmsg2message(&iif->hcmsg,
651 __skb_put(skb, msgsize))) {
652 dev_err(cs->dev, "%s: message parser failure\n",
653 __func__);
654 dev_kfree_skb_any(skb);
655 break;
656 }
649 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); 657 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
650 658
651 /* add to listeners on this B channel, update state */ 659 /* add to listeners on this B channel, update state */
@@ -691,7 +699,12 @@ static void send_disconnect_ind(struct bc_state *bcs,
691 dev_err(cs->dev, "%s: out of memory\n", __func__); 699 dev_err(cs->dev, "%s: out of memory\n", __func__);
692 return; 700 return;
693 } 701 }
694 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN)); 702 if (capi_cmsg2message(&iif->hcmsg,
703 __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
704 dev_err(cs->dev, "%s: message parser failure\n", __func__);
705 dev_kfree_skb_any(skb);
706 return;
707 }
695 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); 708 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
696 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 709 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
697} 710}
@@ -721,8 +734,12 @@ static void send_disconnect_b3_ind(struct bc_state *bcs,
721 dev_err(cs->dev, "%s: out of memory\n", __func__); 734 dev_err(cs->dev, "%s: out of memory\n", __func__);
722 return; 735 return;
723 } 736 }
724 capi_cmsg2message(&iif->hcmsg, 737 if (capi_cmsg2message(&iif->hcmsg,
725 __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN)); 738 __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
739 dev_err(cs->dev, "%s: message parser failure\n", __func__);
740 dev_kfree_skb_any(skb);
741 return;
742 }
726 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); 743 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
727 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 744 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
728} 745}
@@ -787,7 +804,11 @@ void gigaset_isdn_connD(struct bc_state *bcs)
787 dev_err(cs->dev, "%s: out of memory\n", __func__); 804 dev_err(cs->dev, "%s: out of memory\n", __func__);
788 return; 805 return;
789 } 806 }
790 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); 807 if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
808 dev_err(cs->dev, "%s: message parser failure\n", __func__);
809 dev_kfree_skb_any(skb);
810 return;
811 }
791 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); 812 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
792 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 813 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
793} 814}
@@ -887,7 +908,11 @@ void gigaset_isdn_connB(struct bc_state *bcs)
887 dev_err(cs->dev, "%s: out of memory\n", __func__); 908 dev_err(cs->dev, "%s: out of memory\n", __func__);
888 return; 909 return;
889 } 910 }
890 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); 911 if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
912 dev_err(cs->dev, "%s: message parser failure\n", __func__);
913 dev_kfree_skb_any(skb);
914 return;
915 }
891 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); 916 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
892 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 917 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
893} 918}
@@ -1094,13 +1119,19 @@ static void send_conf(struct gigaset_capi_ctr *iif,
1094 struct sk_buff *skb, 1119 struct sk_buff *skb,
1095 u16 info) 1120 u16 info)
1096{ 1121{
1122 struct cardstate *cs = iif->ctr.driverdata;
1123
1097 /* 1124 /*
1098 * _CONF replies always only have NCCI and Info parameters 1125 * _CONF replies always only have NCCI and Info parameters
1099 * so they'll fit into the _REQ message skb 1126 * so they'll fit into the _REQ message skb
1100 */ 1127 */
1101 capi_cmsg_answer(&iif->acmsg); 1128 capi_cmsg_answer(&iif->acmsg);
1102 iif->acmsg.Info = info; 1129 iif->acmsg.Info = info;
1103 capi_cmsg2message(&iif->acmsg, skb->data); 1130 if (capi_cmsg2message(&iif->acmsg, skb->data)) {
1131 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1132 dev_kfree_skb_any(skb);
1133 return;
1134 }
1104 __skb_trim(skb, CAPI_STDCONF_LEN); 1135 __skb_trim(skb, CAPI_STDCONF_LEN);
1105 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1136 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1106 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 1137 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
@@ -1122,7 +1153,11 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1122 static u8 confparam[10]; /* max. 9 octets + length byte */ 1153 static u8 confparam[10]; /* max. 9 octets + length byte */
1123 1154
1124 /* decode message */ 1155 /* decode message */
1125 capi_message2cmsg(cmsg, skb->data); 1156 if (capi_message2cmsg(cmsg, skb->data)) {
1157 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1158 dev_kfree_skb_any(skb);
1159 return;
1160 }
1126 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1161 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1127 1162
1128 /* 1163 /*
@@ -1180,6 +1215,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1180 confparam[3] = 2; /* length */ 1215 confparam[3] = 2; /* length */
1181 capimsg_setu16(confparam, 4, 1216 capimsg_setu16(confparam, 4,
1182 CapiSupplementaryServiceNotSupported); 1217 CapiSupplementaryServiceNotSupported);
1218 break;
1183 } 1219 }
1184 info = CapiSuccess; 1220 info = CapiSuccess;
1185 confparam[3] = 2; /* length */ 1221 confparam[3] = 2; /* length */
@@ -1220,6 +1256,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1220 } 1256 }
1221 1257
1222 /* send FACILITY_CONF with given Info and confirmation parameter */ 1258 /* send FACILITY_CONF with given Info and confirmation parameter */
1259 dev_kfree_skb_any(skb);
1223 capi_cmsg_answer(cmsg); 1260 capi_cmsg_answer(cmsg);
1224 cmsg->Info = info; 1261 cmsg->Info = info;
1225 cmsg->FacilityConfirmationParameter = confparam; 1262 cmsg->FacilityConfirmationParameter = confparam;
@@ -1229,7 +1266,11 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1229 dev_err(cs->dev, "%s: out of memory\n", __func__); 1266 dev_err(cs->dev, "%s: out of memory\n", __func__);
1230 return; 1267 return;
1231 } 1268 }
1232 capi_cmsg2message(cmsg, __skb_put(cskb, msgsize)); 1269 if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) {
1270 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1271 dev_kfree_skb_any(cskb);
1272 return;
1273 }
1233 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1274 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1234 capi_ctr_handle_message(&iif->ctr, ap->id, cskb); 1275 capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
1235} 1276}
@@ -1243,8 +1284,14 @@ static void do_listen_req(struct gigaset_capi_ctr *iif,
1243 struct gigaset_capi_appl *ap, 1284 struct gigaset_capi_appl *ap,
1244 struct sk_buff *skb) 1285 struct sk_buff *skb)
1245{ 1286{
1287 struct cardstate *cs = iif->ctr.driverdata;
1288
1246 /* decode message */ 1289 /* decode message */
1247 capi_message2cmsg(&iif->acmsg, skb->data); 1290 if (capi_message2cmsg(&iif->acmsg, skb->data)) {
1291 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1292 dev_kfree_skb_any(skb);
1293 return;
1294 }
1248 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1295 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1249 1296
1250 /* store listening parameters */ 1297 /* store listening parameters */
@@ -1261,8 +1308,14 @@ static void do_alert_req(struct gigaset_capi_ctr *iif,
1261 struct gigaset_capi_appl *ap, 1308 struct gigaset_capi_appl *ap,
1262 struct sk_buff *skb) 1309 struct sk_buff *skb)
1263{ 1310{
1311 struct cardstate *cs = iif->ctr.driverdata;
1312
1264 /* decode message */ 1313 /* decode message */
1265 capi_message2cmsg(&iif->acmsg, skb->data); 1314 if (capi_message2cmsg(&iif->acmsg, skb->data)) {
1315 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1316 dev_kfree_skb_any(skb);
1317 return;
1318 }
1266 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 1319 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1267 send_conf(iif, ap, skb, CapiAlertAlreadySent); 1320 send_conf(iif, ap, skb, CapiAlertAlreadySent);
1268} 1321}
@@ -1287,7 +1340,11 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1287 u16 info; 1340 u16 info;
1288 1341
1289 /* decode message */ 1342 /* decode message */
1290 capi_message2cmsg(cmsg, skb->data); 1343 if (capi_message2cmsg(cmsg, skb->data)) {
1344 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1345 dev_kfree_skb_any(skb);
1346 return;
1347 }
1291 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1348 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1292 1349
1293 /* get free B channel & construct PLCI */ 1350 /* get free B channel & construct PLCI */
@@ -1574,7 +1631,11 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
1574 int channel; 1631 int channel;
1575 1632
1576 /* decode message */ 1633 /* decode message */
1577 capi_message2cmsg(cmsg, skb->data); 1634 if (capi_message2cmsg(cmsg, skb->data)) {
1635 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1636 dev_kfree_skb_any(skb);
1637 return;
1638 }
1578 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1639 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1579 dev_kfree_skb_any(skb); 1640 dev_kfree_skb_any(skb);
1580 1641
@@ -1740,7 +1801,11 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1740 int channel; 1801 int channel;
1741 1802
1742 /* decode message */ 1803 /* decode message */
1743 capi_message2cmsg(cmsg, skb->data); 1804 if (capi_message2cmsg(cmsg, skb->data)) {
1805 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1806 dev_kfree_skb_any(skb);
1807 return;
1808 }
1744 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1809 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1745 1810
1746 /* extract and check channel number from PLCI */ 1811 /* extract and check channel number from PLCI */
@@ -1785,7 +1850,11 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1785 u8 command; 1850 u8 command;
1786 1851
1787 /* decode message */ 1852 /* decode message */
1788 capi_message2cmsg(cmsg, skb->data); 1853 if (capi_message2cmsg(cmsg, skb->data)) {
1854 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1855 dev_kfree_skb_any(skb);
1856 return;
1857 }
1789 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1858 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1790 1859
1791 /* extract and check channel number and NCCI */ 1860 /* extract and check channel number and NCCI */
@@ -1825,7 +1894,11 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1825 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND, 1894 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
1826 ap->nextMessageNumber++, cmsg->adr.adrNCCI); 1895 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
1827 __skb_trim(skb, msgsize); 1896 __skb_trim(skb, msgsize);
1828 capi_cmsg2message(cmsg, skb->data); 1897 if (capi_cmsg2message(cmsg, skb->data)) {
1898 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1899 dev_kfree_skb_any(skb);
1900 return;
1901 }
1829 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1902 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1830 capi_ctr_handle_message(&iif->ctr, ap->id, skb); 1903 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1831} 1904}
@@ -1847,7 +1920,11 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1847 int channel; 1920 int channel;
1848 1921
1849 /* decode message */ 1922 /* decode message */
1850 capi_message2cmsg(cmsg, skb->data); 1923 if (capi_message2cmsg(cmsg, skb->data)) {
1924 dev_err(cs->dev, "%s: message parser failure\n", __func__);
1925 dev_kfree_skb_any(skb);
1926 return;
1927 }
1851 dump_cmsg(DEBUG_CMD, __func__, cmsg); 1928 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1852 1929
1853 /* extract and check channel number from PLCI */ 1930 /* extract and check channel number from PLCI */
@@ -1903,8 +1980,14 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1903 kfree(b3cmsg); 1980 kfree(b3cmsg);
1904 return; 1981 return;
1905 } 1982 }
1906 capi_cmsg2message(b3cmsg, 1983 if (capi_cmsg2message(b3cmsg,
1907 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); 1984 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
1985 dev_err(cs->dev, "%s: message parser failure\n",
1986 __func__);
1987 kfree(b3cmsg);
1988 dev_kfree_skb_any(b3skb);
1989 return;
1990 }
1908 dump_cmsg(DEBUG_CMD, __func__, b3cmsg); 1991 dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
1909 kfree(b3cmsg); 1992 kfree(b3cmsg);
1910 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); 1993 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
@@ -1935,7 +2018,11 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1935 int channel; 2018 int channel;
1936 2019
1937 /* decode message */ 2020 /* decode message */
1938 capi_message2cmsg(cmsg, skb->data); 2021 if (capi_message2cmsg(cmsg, skb->data)) {
2022 dev_err(cs->dev, "%s: message parser failure\n", __func__);
2023 dev_kfree_skb_any(skb);
2024 return;
2025 }
1939 dump_cmsg(DEBUG_CMD, __func__, cmsg); 2026 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1940 2027
1941 /* extract and check channel number and NCCI */ 2028 /* extract and check channel number and NCCI */
@@ -2052,8 +2139,14 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
2052 struct gigaset_capi_appl *ap, 2139 struct gigaset_capi_appl *ap,
2053 struct sk_buff *skb) 2140 struct sk_buff *skb)
2054{ 2141{
2142 struct cardstate *cs = iif->ctr.driverdata;
2143
2055 /* decode message */ 2144 /* decode message */
2056 capi_message2cmsg(&iif->acmsg, skb->data); 2145 if (capi_message2cmsg(&iif->acmsg, skb->data)) {
2146 dev_err(cs->dev, "%s: message parser failure\n", __func__);
2147 dev_kfree_skb_any(skb);
2148 return;
2149 }
2057 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 2150 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2058 send_conf(iif, ap, skb, 2151 send_conf(iif, ap, skb,
2059 CapiResetProcedureNotSupportedByCurrentProtocol); 2152 CapiResetProcedureNotSupportedByCurrentProtocol);
@@ -2066,8 +2159,14 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
2066 struct gigaset_capi_appl *ap, 2159 struct gigaset_capi_appl *ap,
2067 struct sk_buff *skb) 2160 struct sk_buff *skb)
2068{ 2161{
2162 struct cardstate *cs = iif->ctr.driverdata;
2163
2069 /* decode message */ 2164 /* decode message */
2070 capi_message2cmsg(&iif->acmsg, skb->data); 2165 if (capi_message2cmsg(&iif->acmsg, skb->data)) {
2166 dev_err(cs->dev, "%s: message parser failure\n", __func__);
2167 dev_kfree_skb_any(skb);
2168 return;
2169 }
2071 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 2170 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2072 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); 2171 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
2073} 2172}
@@ -2079,8 +2178,14 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
2079 struct gigaset_capi_appl *ap, 2178 struct gigaset_capi_appl *ap,
2080 struct sk_buff *skb) 2179 struct sk_buff *skb)
2081{ 2180{
2181 struct cardstate *cs = iif->ctr.driverdata;
2182
2082 /* decode message */ 2183 /* decode message */
2083 capi_message2cmsg(&iif->acmsg, skb->data); 2184 if (capi_message2cmsg(&iif->acmsg, skb->data)) {
2185 dev_err(cs->dev, "%s: message parser failure\n", __func__);
2186 dev_kfree_skb_any(skb);
2187 return;
2188 }
2084 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); 2189 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2085 dev_kfree_skb_any(skb); 2190 dev_kfree_skb_any(skb);
2086} 2191}
@@ -2357,7 +2462,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2357 struct gigaset_capi_ctr *iif; 2462 struct gigaset_capi_ctr *iif;
2358 int rc; 2463 int rc;
2359 2464
2360 iif = kmalloc(sizeof(*iif), GFP_KERNEL); 2465 iif = kzalloc(sizeof(*iif), GFP_KERNEL);
2361 if (!iif) { 2466 if (!iif) {
2362 pr_err("%s: out of memory\n", __func__); 2467 pr_err("%s: out of memory\n", __func__);
2363 return -ENOMEM; 2468 return -ENOMEM;
@@ -2366,7 +2471,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2366 /* prepare controller structure */ 2471 /* prepare controller structure */
2367 iif->ctr.owner = THIS_MODULE; 2472 iif->ctr.owner = THIS_MODULE;
2368 iif->ctr.driverdata = cs; 2473 iif->ctr.driverdata = cs;
2369 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); 2474 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1);
2370 iif->ctr.driver_name = "gigaset"; 2475 iif->ctr.driver_name = "gigaset";
2371 iif->ctr.load_firmware = NULL; 2476 iif->ctr.load_firmware = NULL;
2372 iif->ctr.reset_ctr = NULL; 2477 iif->ctr.reset_ctr = NULL;
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index dcae14aef376..c8ced12fa452 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -604,14 +604,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
604} 604}
605EXPORT_SYMBOL_GPL(gigaset_handle_modem_response); 605EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
606 606
607/* disconnect 607/* disconnect_nobc
608 * process closing of connection associated with given AT state structure 608 * process closing of connection associated with given AT state structure
609 * without B channel
609 */ 610 */
610static void disconnect(struct at_state_t **at_state_p) 611static void disconnect_nobc(struct at_state_t **at_state_p,
612 struct cardstate *cs)
611{ 613{
612 unsigned long flags; 614 unsigned long flags;
613 struct bc_state *bcs = (*at_state_p)->bcs;
614 struct cardstate *cs = (*at_state_p)->cs;
615 615
616 spin_lock_irqsave(&cs->lock, flags); 616 spin_lock_irqsave(&cs->lock, flags);
617 ++(*at_state_p)->seq_index; 617 ++(*at_state_p)->seq_index;
@@ -622,23 +622,44 @@ static void disconnect(struct at_state_t **at_state_p)
622 gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE"); 622 gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
623 cs->commands_pending = 1; 623 cs->commands_pending = 1;
624 } 624 }
625 spin_unlock_irqrestore(&cs->lock, flags);
626 625
627 if (bcs) { 626 /* check for and deallocate temporary AT state */
628 /* B channel assigned: invoke hardware specific handler */ 627 if (!list_empty(&(*at_state_p)->list)) {
629 cs->ops->close_bchannel(bcs);
630 /* notify LL */
631 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
632 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
633 gigaset_isdn_hupD(bcs);
634 }
635 } else {
636 /* no B channel assigned: just deallocate */
637 spin_lock_irqsave(&cs->lock, flags);
638 list_del(&(*at_state_p)->list); 628 list_del(&(*at_state_p)->list);
639 kfree(*at_state_p); 629 kfree(*at_state_p);
640 *at_state_p = NULL; 630 *at_state_p = NULL;
641 spin_unlock_irqrestore(&cs->lock, flags); 631 }
632
633 spin_unlock_irqrestore(&cs->lock, flags);
634}
635
636/* disconnect_bc
637 * process closing of connection associated with given AT state structure
638 * and B channel
639 */
640static void disconnect_bc(struct at_state_t *at_state,
641 struct cardstate *cs, struct bc_state *bcs)
642{
643 unsigned long flags;
644
645 spin_lock_irqsave(&cs->lock, flags);
646 ++at_state->seq_index;
647
648 /* revert to selected idle mode */
649 if (!cs->cidmode) {
650 cs->at_state.pending_commands |= PC_UMMODE;
651 gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
652 cs->commands_pending = 1;
653 }
654 spin_unlock_irqrestore(&cs->lock, flags);
655
656 /* invoke hardware specific handler */
657 cs->ops->close_bchannel(bcs);
658
659 /* notify LL */
660 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
661 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
662 gigaset_isdn_hupD(bcs);
642 } 663 }
643} 664}
644 665
@@ -646,7 +667,7 @@ static void disconnect(struct at_state_t **at_state_p)
646 * get a free AT state structure: either one of those associated with the 667 * get a free AT state structure: either one of those associated with the
647 * B channels of the Gigaset device, or if none of those is available, 668 * B channels of the Gigaset device, or if none of those is available,
648 * a newly allocated one with bcs=NULL 669 * a newly allocated one with bcs=NULL
649 * The structure should be freed by calling disconnect() after use. 670 * The structure should be freed by calling disconnect_nobc() after use.
650 */ 671 */
651static inline struct at_state_t *get_free_channel(struct cardstate *cs, 672static inline struct at_state_t *get_free_channel(struct cardstate *cs,
652 int cid) 673 int cid)
@@ -1057,7 +1078,7 @@ static void do_action(int action, struct cardstate *cs,
1057 struct event_t *ev) 1078 struct event_t *ev)
1058{ 1079{
1059 struct at_state_t *at_state = *p_at_state; 1080 struct at_state_t *at_state = *p_at_state;
1060 struct at_state_t *at_state2; 1081 struct bc_state *bcs2;
1061 unsigned long flags; 1082 unsigned long flags;
1062 1083
1063 int channel; 1084 int channel;
@@ -1156,8 +1177,8 @@ static void do_action(int action, struct cardstate *cs,
1156 break; 1177 break;
1157 case ACT_RING: 1178 case ACT_RING:
1158 /* get fresh AT state structure for new CID */ 1179 /* get fresh AT state structure for new CID */
1159 at_state2 = get_free_channel(cs, ev->parameter); 1180 at_state = get_free_channel(cs, ev->parameter);
1160 if (!at_state2) { 1181 if (!at_state) {
1161 dev_warn(cs->dev, 1182 dev_warn(cs->dev,
1162 "RING ignored: could not allocate channel structure\n"); 1183 "RING ignored: could not allocate channel structure\n");
1163 break; 1184 break;
@@ -1166,16 +1187,16 @@ static void do_action(int action, struct cardstate *cs,
1166 /* initialize AT state structure 1187 /* initialize AT state structure
1167 * note that bcs may be NULL if no B channel is free 1188 * note that bcs may be NULL if no B channel is free
1168 */ 1189 */
1169 at_state2->ConState = 700; 1190 at_state->ConState = 700;
1170 for (i = 0; i < STR_NUM; ++i) { 1191 for (i = 0; i < STR_NUM; ++i) {
1171 kfree(at_state2->str_var[i]); 1192 kfree(at_state->str_var[i]);
1172 at_state2->str_var[i] = NULL; 1193 at_state->str_var[i] = NULL;
1173 } 1194 }
1174 at_state2->int_var[VAR_ZCTP] = -1; 1195 at_state->int_var[VAR_ZCTP] = -1;
1175 1196
1176 spin_lock_irqsave(&cs->lock, flags); 1197 spin_lock_irqsave(&cs->lock, flags);
1177 at_state2->timer_expires = RING_TIMEOUT; 1198 at_state->timer_expires = RING_TIMEOUT;
1178 at_state2->timer_active = 1; 1199 at_state->timer_active = 1;
1179 spin_unlock_irqrestore(&cs->lock, flags); 1200 spin_unlock_irqrestore(&cs->lock, flags);
1180 break; 1201 break;
1181 case ACT_ICALL: 1202 case ACT_ICALL:
@@ -1213,14 +1234,17 @@ static void do_action(int action, struct cardstate *cs,
1213 case ACT_DISCONNECT: 1234 case ACT_DISCONNECT:
1214 cs->cur_at_seq = SEQ_NONE; 1235 cs->cur_at_seq = SEQ_NONE;
1215 at_state->cid = -1; 1236 at_state->cid = -1;
1216 if (bcs && cs->onechannel && cs->dle) { 1237 if (!bcs) {
1238 disconnect_nobc(p_at_state, cs);
1239 } else if (cs->onechannel && cs->dle) {
1217 /* Check for other open channels not needed: 1240 /* Check for other open channels not needed:
1218 * DLE only used for M10x with one B channel. 1241 * DLE only used for M10x with one B channel.
1219 */ 1242 */
1220 at_state->pending_commands |= PC_DLE0; 1243 at_state->pending_commands |= PC_DLE0;
1221 cs->commands_pending = 1; 1244 cs->commands_pending = 1;
1222 } else 1245 } else {
1223 disconnect(p_at_state); 1246 disconnect_bc(at_state, cs, bcs);
1247 }
1224 break; 1248 break;
1225 case ACT_FAKEDLE0: 1249 case ACT_FAKEDLE0:
1226 at_state->int_var[VAR_ZDLE] = 0; 1250 at_state->int_var[VAR_ZDLE] = 0;
@@ -1228,25 +1252,27 @@ static void do_action(int action, struct cardstate *cs,
1228 /* fall through */ 1252 /* fall through */
1229 case ACT_DLE0: 1253 case ACT_DLE0:
1230 cs->cur_at_seq = SEQ_NONE; 1254 cs->cur_at_seq = SEQ_NONE;
1231 at_state2 = &cs->bcs[cs->curchannel].at_state; 1255 bcs2 = cs->bcs + cs->curchannel;
1232 disconnect(&at_state2); 1256 disconnect_bc(&bcs2->at_state, cs, bcs2);
1233 break; 1257 break;
1234 case ACT_ABORTHUP: 1258 case ACT_ABORTHUP:
1235 cs->cur_at_seq = SEQ_NONE; 1259 cs->cur_at_seq = SEQ_NONE;
1236 dev_warn(cs->dev, "Could not hang up.\n"); 1260 dev_warn(cs->dev, "Could not hang up.\n");
1237 at_state->cid = -1; 1261 at_state->cid = -1;
1238 if (bcs && cs->onechannel) 1262 if (!bcs)
1263 disconnect_nobc(p_at_state, cs);
1264 else if (cs->onechannel)
1239 at_state->pending_commands |= PC_DLE0; 1265 at_state->pending_commands |= PC_DLE0;
1240 else 1266 else
1241 disconnect(p_at_state); 1267 disconnect_bc(at_state, cs, bcs);
1242 schedule_init(cs, MS_RECOVER); 1268 schedule_init(cs, MS_RECOVER);
1243 break; 1269 break;
1244 case ACT_FAILDLE0: 1270 case ACT_FAILDLE0:
1245 cs->cur_at_seq = SEQ_NONE; 1271 cs->cur_at_seq = SEQ_NONE;
1246 dev_warn(cs->dev, "Error leaving DLE mode.\n"); 1272 dev_warn(cs->dev, "Error leaving DLE mode.\n");
1247 cs->dle = 0; 1273 cs->dle = 0;
1248 at_state2 = &cs->bcs[cs->curchannel].at_state; 1274 bcs2 = cs->bcs + cs->curchannel;
1249 disconnect(&at_state2); 1275 disconnect_bc(&bcs2->at_state, cs, bcs2);
1250 schedule_init(cs, MS_RECOVER); 1276 schedule_init(cs, MS_RECOVER);
1251 break; 1277 break;
1252 case ACT_FAILDLE1: 1278 case ACT_FAILDLE1:
@@ -1275,14 +1301,14 @@ static void do_action(int action, struct cardstate *cs,
1275 if (reinit_and_retry(cs, channel) < 0) { 1301 if (reinit_and_retry(cs, channel) < 0) {
1276 dev_warn(cs->dev, 1302 dev_warn(cs->dev,
1277 "Could not get a call ID. Cannot dial.\n"); 1303 "Could not get a call ID. Cannot dial.\n");
1278 at_state2 = &cs->bcs[channel].at_state; 1304 bcs2 = cs->bcs + channel;
1279 disconnect(&at_state2); 1305 disconnect_bc(&bcs2->at_state, cs, bcs2);
1280 } 1306 }
1281 break; 1307 break;
1282 case ACT_ABORTCID: 1308 case ACT_ABORTCID:
1283 cs->cur_at_seq = SEQ_NONE; 1309 cs->cur_at_seq = SEQ_NONE;
1284 at_state2 = &cs->bcs[cs->curchannel].at_state; 1310 bcs2 = cs->bcs + cs->curchannel;
1285 disconnect(&at_state2); 1311 disconnect_bc(&bcs2->at_state, cs, bcs2);
1286 break; 1312 break;
1287 1313
1288 case ACT_DIALING: 1314 case ACT_DIALING:
@@ -1291,7 +1317,10 @@ static void do_action(int action, struct cardstate *cs,
1291 break; 1317 break;
1292 1318
1293 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */ 1319 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */
1294 disconnect(p_at_state); 1320 if (bcs)
1321 disconnect_bc(at_state, cs, bcs);
1322 else
1323 disconnect_nobc(p_at_state, cs);
1295 break; 1324 break;
1296 1325
1297 case ACT_ABORTDIAL: /* error/timeout during dial preparation */ 1326 case ACT_ABORTDIAL: /* error/timeout during dial preparation */
@@ -1380,6 +1409,11 @@ static void do_action(int action, struct cardstate *cs,
1380 /* events from the LL */ 1409 /* events from the LL */
1381 1410
1382 case ACT_DIAL: 1411 case ACT_DIAL:
1412 if (!ev->ptr) {
1413 *p_genresp = 1;
1414 *p_resp_code = RSP_ERROR;
1415 break;
1416 }
1383 start_dial(at_state, ev->ptr, ev->parameter); 1417 start_dial(at_state, ev->ptr, ev->parameter);
1384 break; 1418 break;
1385 case ACT_ACCEPT: 1419 case ACT_ACCEPT:
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 82e91ba1acd3..a8e652dac54d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -497,6 +497,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
497static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) 497static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
498{ 498{
499 unsigned long flags; 499 unsigned long flags;
500 int len;
500 501
501 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 502 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
502 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 503 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
@@ -515,10 +516,11 @@ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
515 spin_unlock_irqrestore(&cs->cmdlock, flags); 516 spin_unlock_irqrestore(&cs->cmdlock, flags);
516 517
517 spin_lock_irqsave(&cs->lock, flags); 518 spin_lock_irqsave(&cs->lock, flags);
519 len = cb->len;
518 if (cs->connected) 520 if (cs->connected)
519 tasklet_schedule(&cs->write_tasklet); 521 tasklet_schedule(&cs->write_tasklet);
520 spin_unlock_irqrestore(&cs->lock, flags); 522 spin_unlock_irqrestore(&cs->lock, flags);
521 return cb->len; 523 return len;
522} 524}
523 525
524static int gigaset_write_room(struct cardstate *cs) 526static int gigaset_write_room(struct cardstate *cs)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 776e965dc9f4..05b0ca3bf71d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -21,8 +21,12 @@
21 21
22static int reg_read(struct dsa_switch *ds, int addr, int reg) 22static int reg_read(struct dsa_switch *ds, int addr, int reg)
23{ 23{
24 return mdiobus_read(to_mii_bus(ds->master_dev), 24 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
25 ds->pd->sw_addr + addr, reg); 25
26 if (bus == NULL)
27 return -EINVAL;
28
29 return mdiobus_read(bus, ds->pd->sw_addr + addr, reg);
26} 30}
27 31
28#define REG_READ(addr, reg) \ 32#define REG_READ(addr, reg) \
@@ -38,8 +42,12 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
38 42
39static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) 43static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
40{ 44{
41 return mdiobus_write(to_mii_bus(ds->master_dev), 45 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
42 ds->pd->sw_addr + addr, reg, val); 46
47 if (bus == NULL)
48 return -EINVAL;
49
50 return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val);
43} 51}
44 52
45#define REG_WRITE(addr, reg, val) \ 53#define REG_WRITE(addr, reg, val) \
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 6365e30138af..1020a7af67cf 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -206,7 +206,7 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
206 */ 206 */
207 val = 0x0433; 207 val = 0x0433;
208 if (dsa_is_cpu_port(ds, p)) { 208 if (dsa_is_cpu_port(ds, p)) {
209 if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) 209 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
210 val |= 0x3300; 210 val |= 0x3300;
211 else 211 else
212 val |= 0x0100; 212 val |= 0x0100;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index d6f6428b27dc..a6c90cf5634d 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -75,11 +75,14 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
75int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) 75int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
76{ 76{
77 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 77 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
78 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
78 int ret; 79 int ret;
79 80
81 if (bus == NULL)
82 return -EINVAL;
83
80 mutex_lock(&ps->smi_mutex); 84 mutex_lock(&ps->smi_mutex);
81 ret = __mv88e6xxx_reg_read(to_mii_bus(ds->master_dev), 85 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
82 ds->pd->sw_addr, addr, reg);
83 mutex_unlock(&ps->smi_mutex); 86 mutex_unlock(&ps->smi_mutex);
84 87
85 return ret; 88 return ret;
@@ -119,11 +122,14 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
119int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) 122int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
120{ 123{
121 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 124 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
125 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
122 int ret; 126 int ret;
123 127
128 if (bus == NULL)
129 return -EINVAL;
130
124 mutex_lock(&ps->smi_mutex); 131 mutex_lock(&ps->smi_mutex);
125 ret = __mv88e6xxx_reg_write(to_mii_bus(ds->master_dev), 132 ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
126 ds->pd->sw_addr, addr, reg, val);
127 mutex_unlock(&ps->smi_mutex); 133 mutex_unlock(&ps->smi_mutex);
128 134
129 return ret; 135 return ret;
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 589b35247713..68be565548c0 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -2,6 +2,6 @@
2# Makefile for APM X-Gene Ethernet Driver. 2# Makefile for APM X-Gene Ethernet Driver.
3# 3#
4 4
5xgene-enet-objs := xgene_enet_hw.o xgene_enet_xgmac.o \ 5xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
6 xgene_enet_main.o xgene_enet_ethtool.o 6 xgene_enet_main.o xgene_enet_ethtool.o
7obj-$(CONFIG_NET_XGENE) += xgene-enet.o 7obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index c1c997b92342..416d6ebfc2ce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -64,16 +64,25 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
64 return -ENODEV; 64 return -ENODEV;
65 65
66 return phy_ethtool_gset(phydev, cmd); 66 return phy_ethtool_gset(phydev, cmd);
67 } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
68 cmd->supported = SUPPORTED_1000baseT_Full |
69 SUPPORTED_Autoneg | SUPPORTED_MII;
70 cmd->advertising = cmd->supported;
71 ethtool_cmd_speed_set(cmd, SPEED_1000);
72 cmd->duplex = DUPLEX_FULL;
73 cmd->port = PORT_MII;
74 cmd->transceiver = XCVR_INTERNAL;
75 cmd->autoneg = AUTONEG_ENABLE;
76 } else {
77 cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
78 cmd->advertising = cmd->supported;
79 ethtool_cmd_speed_set(cmd, SPEED_10000);
80 cmd->duplex = DUPLEX_FULL;
81 cmd->port = PORT_FIBRE;
82 cmd->transceiver = XCVR_INTERNAL;
83 cmd->autoneg = AUTONEG_DISABLE;
67 } 84 }
68 85
69 cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
70 cmd->advertising = cmd->supported;
71 ethtool_cmd_speed_set(cmd, SPEED_10000);
72 cmd->duplex = DUPLEX_FULL;
73 cmd->port = PORT_FIBRE;
74 cmd->transceiver = XCVR_EXTERNAL;
75 cmd->autoneg = AUTONEG_DISABLE;
76
77 return 0; 86 return 0;
78} 87}
79 88
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c8f3824f7606..63ea1941e973 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -410,7 +410,6 @@ static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
410 addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | 410 addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
411 (dev_addr[1] << 8) | dev_addr[0]; 411 (dev_addr[1] << 8) | dev_addr[0];
412 addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); 412 addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
413 addr1 |= pdata->phy_addr & 0xFFFF;
414 413
415 xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); 414 xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
416 xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); 415 xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 15ec4267779c..38558584080e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -44,6 +44,7 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
44 44
45enum xgene_enet_rm { 45enum xgene_enet_rm {
46 RM0, 46 RM0,
47 RM1,
47 RM3 = 3 48 RM3 = 3
48}; 49};
49 50
@@ -143,6 +144,8 @@ enum xgene_enet_rm {
143#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) 144#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
144#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) 145#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
145#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16) 146#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16)
147#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0))
148#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16))
146#define ICM_CONFIG0_REG_0_ADDR 0x0400 149#define ICM_CONFIG0_REG_0_ADDR 0x0400
147#define ICM_CONFIG2_REG_0_ADDR 0x0410 150#define ICM_CONFIG2_REG_0_ADDR 0x0410
148#define RX_DV_GATE_REG_0_ADDR 0x05fc 151#define RX_DV_GATE_REG_0_ADDR 0x05fc
@@ -179,7 +182,6 @@ enum xgene_enet_rm {
179#define TUND_ADDR 0x4a 182#define TUND_ADDR 0x4a
180 183
181#define TSO_IPPROTO_TCP 1 184#define TSO_IPPROTO_TCP 1
182#define FULL_DUPLEX 2
183 185
184#define USERINFO_POS 0 186#define USERINFO_POS 0
185#define USERINFO_LEN 32 187#define USERINFO_LEN 32
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 9b85239ceedf..3c208cc6f6bb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -21,6 +21,7 @@
21 21
22#include "xgene_enet_main.h" 22#include "xgene_enet_main.h"
23#include "xgene_enet_hw.h" 23#include "xgene_enet_hw.h"
24#include "xgene_enet_sgmac.h"
24#include "xgene_enet_xgmac.h" 25#include "xgene_enet_xgmac.h"
25 26
26static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 27static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
@@ -813,6 +814,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
813 return pdata->phy_mode; 814 return pdata->phy_mode;
814 } 815 }
815 if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && 816 if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
817 pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
816 pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { 818 pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
817 dev_err(dev, "Incorrect phy-connection-type specified\n"); 819 dev_err(dev, "Incorrect phy-connection-type specified\n");
818 return -ENODEV; 820 return -ENODEV;
@@ -830,14 +832,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
830 pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; 832 pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
831 pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; 833 pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
832 pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; 834 pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
833 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { 835 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
836 pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
834 pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; 837 pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
835 pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; 838 pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
836 pdata->rm = RM3;
837 } else { 839 } else {
838 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; 840 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
839 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; 841 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
840 pdata->rm = RM0;
841 } 842 }
842 pdata->rx_buff_cnt = NUM_PKT_BUF; 843 pdata->rx_buff_cnt = NUM_PKT_BUF;
843 844
@@ -881,10 +882,17 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
881 case PHY_INTERFACE_MODE_RGMII: 882 case PHY_INTERFACE_MODE_RGMII:
882 pdata->mac_ops = &xgene_gmac_ops; 883 pdata->mac_ops = &xgene_gmac_ops;
883 pdata->port_ops = &xgene_gport_ops; 884 pdata->port_ops = &xgene_gport_ops;
885 pdata->rm = RM3;
886 break;
887 case PHY_INTERFACE_MODE_SGMII:
888 pdata->mac_ops = &xgene_sgmac_ops;
889 pdata->port_ops = &xgene_sgport_ops;
890 pdata->rm = RM1;
884 break; 891 break;
885 default: 892 default:
886 pdata->mac_ops = &xgene_xgmac_ops; 893 pdata->mac_ops = &xgene_xgmac_ops;
887 pdata->port_ops = &xgene_xgport_ops; 894 pdata->port_ops = &xgene_xgport_ops;
895 pdata->rm = RM0;
888 break; 896 break;
889 } 897 }
890} 898}
@@ -895,6 +903,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
895 struct xgene_enet_pdata *pdata; 903 struct xgene_enet_pdata *pdata;
896 struct device *dev = &pdev->dev; 904 struct device *dev = &pdev->dev;
897 struct napi_struct *napi; 905 struct napi_struct *napi;
906 struct xgene_mac_ops *mac_ops;
898 int ret; 907 int ret;
899 908
900 ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); 909 ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -937,10 +946,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
937 946
938 napi = &pdata->rx_ring->napi; 947 napi = &pdata->rx_ring->napi;
939 netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); 948 netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
949 mac_ops = pdata->mac_ops;
940 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) 950 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
941 ret = xgene_enet_mdio_config(pdata); 951 ret = xgene_enet_mdio_config(pdata);
942 else 952 else
943 INIT_DELAYED_WORK(&pdata->link_work, xgene_enet_link_state); 953 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
944 954
945 return ret; 955 return ret;
946err: 956err:
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 86cf68b65584..874e5a01161f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -39,6 +39,9 @@
39#define NUM_PKT_BUF 64 39#define NUM_PKT_BUF 64
40#define NUM_BUFPOOL 32 40#define NUM_BUFPOOL 32
41 41
42#define PHY_POLL_LINK_ON (10 * HZ)
43#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
44
42/* software context of a descriptor ring */ 45/* software context of a descriptor ring */
43struct xgene_enet_desc_ring { 46struct xgene_enet_desc_ring {
44 struct net_device *ndev; 47 struct net_device *ndev;
@@ -76,6 +79,7 @@ struct xgene_mac_ops {
76 void (*tx_disable)(struct xgene_enet_pdata *pdata); 79 void (*tx_disable)(struct xgene_enet_pdata *pdata);
77 void (*rx_disable)(struct xgene_enet_pdata *pdata); 80 void (*rx_disable)(struct xgene_enet_pdata *pdata);
78 void (*set_mac_addr)(struct xgene_enet_pdata *pdata); 81 void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
82 void (*link_state)(struct work_struct *work);
79}; 83};
80 84
81struct xgene_port_ops { 85struct xgene_port_ops {
@@ -109,7 +113,6 @@ struct xgene_enet_pdata {
109 void __iomem *base_addr; 113 void __iomem *base_addr;
110 void __iomem *ring_csr_addr; 114 void __iomem *ring_csr_addr;
111 void __iomem *ring_cmd_addr; 115 void __iomem *ring_cmd_addr;
112 u32 phy_addr;
113 int phy_mode; 116 int phy_mode;
114 enum xgene_enet_rm rm; 117 enum xgene_enet_rm rm;
115 struct rtnl_link_stats64 stats; 118 struct rtnl_link_stats64 stats;
@@ -118,6 +121,13 @@ struct xgene_enet_pdata {
118 struct delayed_work link_work; 121 struct delayed_work link_work;
119}; 122};
120 123
124struct xgene_indirect_ctl {
125 void __iomem *addr;
126 void __iomem *ctl;
127 void __iomem *cmd;
128 void __iomem *cmd_done;
129};
130
121/* Set the specified value into a bit-field defined by its starting position 131/* Set the specified value into a bit-field defined by its starting position
122 * and length within a single u64. 132 * and length within a single u64.
123 */ 133 */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
new file mode 100644
index 000000000000..e6d24c210198
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -0,0 +1,389 @@
1/* Applied Micro X-Gene SoC Ethernet Driver
2 *
3 * Copyright (c) 2014, Applied Micro Circuits Corporation
4 * Authors: Iyappan Subramanian <isubramanian@apm.com>
5 * Keyur Chudgar <kchudgar@apm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include "xgene_enet_main.h"
22#include "xgene_enet_hw.h"
23#include "xgene_enet_sgmac.h"
24
25static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
26{
27 iowrite32(val, p->eth_csr_addr + offset);
28}
29
30static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
31 u32 offset, u32 val)
32{
33 iowrite32(val, p->eth_ring_if_addr + offset);
34}
35
36static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
37 u32 offset, u32 val)
38{
39 iowrite32(val, p->eth_diag_csr_addr + offset);
40}
41
42static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
43 u32 wr_addr, u32 wr_data)
44{
45 int i;
46
47 iowrite32(wr_addr, ctl->addr);
48 iowrite32(wr_data, ctl->ctl);
49 iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
50
51 /* wait for write command to complete */
52 for (i = 0; i < 10; i++) {
53 if (ioread32(ctl->cmd_done)) {
54 iowrite32(0, ctl->cmd);
55 return true;
56 }
57 udelay(1);
58 }
59
60 return false;
61}
62
63static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
64 u32 wr_addr, u32 wr_data)
65{
66 struct xgene_indirect_ctl ctl = {
67 .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
68 .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
69 .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
70 .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
71 };
72
73 if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
74 netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
75}
76
77static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
78{
79 return ioread32(p->eth_csr_addr + offset);
80}
81
82static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
83{
84 return ioread32(p->eth_diag_csr_addr + offset);
85}
86
87static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
88{
89 u32 rd_data;
90 int i;
91
92 iowrite32(rd_addr, ctl->addr);
93 iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
94
95 /* wait for read command to complete */
96 for (i = 0; i < 10; i++) {
97 if (ioread32(ctl->cmd_done)) {
98 rd_data = ioread32(ctl->ctl);
99 iowrite32(0, ctl->cmd);
100
101 return rd_data;
102 }
103 udelay(1);
104 }
105
106 pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
107
108 return 0;
109}
110
111static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
112{
113 struct xgene_indirect_ctl ctl = {
114 .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
115 .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
116 .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
117 .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
118 };
119
120 return xgene_enet_rd_indirect(&ctl, rd_addr);
121}
122
123static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
124{
125 struct net_device *ndev = p->ndev;
126 u32 data;
127 int i;
128
129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
130 for (i = 0; i < 10 && data != ~0U ; i++) {
131 usleep_range(100, 110);
132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
133 }
134
135 if (data != ~0U) {
136 netdev_err(ndev, "Failed to release memory from shutdown\n");
137 return -ENODEV;
138 }
139
140 return 0;
141}
142
143static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
144{
145 u32 val = 0xffffffff;
146
147 xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
148 xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
149}
150
151static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
152 u32 reg, u16 data)
153{
154 u32 addr, wr_data, done;
155 int i;
156
157 addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
158 xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
159
160 wr_data = PHY_CONTROL(data);
161 xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
162
163 for (i = 0; i < 10; i++) {
164 done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
165 if (!(done & BUSY_MASK))
166 return;
167 usleep_range(10, 20);
168 }
169
170 netdev_err(p->ndev, "MII_MGMT write failed\n");
171}
172
173static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
174{
175 u32 addr, data, done;
176 int i;
177
178 addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
179 xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
180 xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
181
182 for (i = 0; i < 10; i++) {
183 done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
184 if (!(done & BUSY_MASK)) {
185 data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
186 xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
187
188 return data;
189 }
190 usleep_range(10, 20);
191 }
192
193 netdev_err(p->ndev, "MII_MGMT read failed\n");
194
195 return 0;
196}
197
/* Pulse the MAC soft-reset bit: assert SOFT_RESET1 in MAC_CONFIG_1,
 * then clear the register to release the MAC from reset.
 */
static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}
203
204static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
205{
206 u32 addr0, addr1;
207 u8 *dev_addr = p->ndev->dev_addr;
208
209 addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
210 (dev_addr[1] << 8) | dev_addr[0];
211 xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
212
213 addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
214 addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
215 xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
216}
217
218static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
219{
220 u32 data;
221
222 data = xgene_mii_phy_read(p, INT_PHY_ADDR,
223 SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
224
225 return data & LINK_UP;
226}
227
/* Full MAC/SGMII bring-up: reset the MAC, autonegotiate the internal
 * SGMII link, then program framing, station address and the CSR-side
 * classifier/gating configuration. Statement order follows the
 * hardware bring-up sequence — do not reorder.
 */
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmac_reset(p);

	/* Enable auto-negotiation */
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);

	/* poll (up to ~200us) for autoneg completion and link up */
	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(10, 20);
	}
	/* warn but continue: link may still come up later via polling */
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");

	/* interface mode 2 with full duplex (per FULL_DUPLEX2 bit name) */
	data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	ENET_INTERFACE_MODE2_SET(&data, 2);
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);

	/* stay idle toward MPA while the QMI side is empty */
	data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);

	xgene_sgmac_set_mac_addr(p);

	/* bypass the unisec TX/RX blocks (per CFG_BYPASS_UNISEC_* names) */
	data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
	data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
	xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
}
281
282static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
283{
284 u32 data;
285
286 data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
287
288 if (set)
289 data |= bits;
290 else
291 data &= ~bits;
292
293 xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
294}
295
/* Enable the MAC receive path (set RX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}
300
/* Enable the MAC transmit path (set TX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}
305
/* Disable the MAC receive path (clear RX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}
310
/* Disable the MAC transmit path (clear TX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}
315
/* Port-level reset: cycle the clock (enable -> disable -> enable) —
 * presumably to reset the Ethernet block, TODO confirm against the SoC
 * manual — then release the internal memories from shutdown and set up
 * the QMI ring-interface associations.
 * NOTE(review): the xgene_enet_ecc_init() return value is ignored here;
 * a failure leaves the port configured against unready memories.
 */
static void xgene_enet_reset(struct xgene_enet_pdata *p)
{
	clk_prepare_enable(p->clk);
	clk_disable_unprepare(p->clk);
	clk_prepare_enable(p->clk);

	xgene_enet_ecc_init(p);
	xgene_enet_config_ring_if_assoc(p);
}
325
326static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
327 u32 dst_ring_num, u16 bufpool_id)
328{
329 u32 data, fpsel;
330
331 data = CFG_CLE_BYPASS_EN0;
332 xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);
333
334 fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
335 data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
336 xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
337}
338
/* Power down the port by gating its clock. */
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	clk_disable_unprepare(p->clk);
}
343
344static void xgene_enet_link_state(struct work_struct *work)
345{
346 struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
347 struct xgene_enet_pdata, link_work);
348 struct net_device *ndev = p->ndev;
349 u32 link, poll_interval;
350
351 link = xgene_enet_link_status(p);
352 if (link) {
353 if (!netif_carrier_ok(ndev)) {
354 netif_carrier_on(ndev);
355 xgene_sgmac_init(p);
356 xgene_sgmac_rx_enable(p);
357 xgene_sgmac_tx_enable(p);
358 netdev_info(ndev, "Link is Up - 1Gbps\n");
359 }
360 poll_interval = PHY_POLL_LINK_ON;
361 } else {
362 if (netif_carrier_ok(ndev)) {
363 xgene_sgmac_rx_disable(p);
364 xgene_sgmac_tx_disable(p);
365 netif_carrier_off(ndev);
366 netdev_info(ndev, "Link is Down\n");
367 }
368 poll_interval = PHY_POLL_LINK_OFF;
369 }
370
371 schedule_delayed_work(&p->link_work, poll_interval);
372}
373
/* MAC operations exported to the core driver for SGMII-attached ports. */
struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.link_state	= xgene_enet_link_state
};
384
/* Port operations exported to the core driver for SGMII-attached ports. */
struct xgene_port_ops xgene_sgport_ops = {
	.reset		= xgene_enet_reset,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
new file mode 100644
index 000000000000..de432465009c
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
@@ -0,0 +1,41 @@
1/* Applied Micro X-Gene SoC Ethernet Driver
2 *
3 * Copyright (c) 2014, Applied Micro Circuits Corporation
4 * Authors: Iyappan Subramanian <isubramanian@apm.com>
5 * Keyur Chudgar <kchudgar@apm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
#ifndef __XGENE_ENET_SGMAC_H__
#define __XGENE_ENET_SGMAC_H__

/* MII management address-register field encodings */
#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8))
#define REG_ADDR(src) ((src) & GENMASK(4, 0))
#define PHY_CONTROL(src) ((src) & GENMASK(15, 0))
/* MII address of the internal SGMII PHY */
#define INT_PHY_ADDR 0x1e
/* Internal PHY register byte offsets (code shifts these >> 2 for MII) */
#define SGMII_TBI_CONTROL_ADDR 0x44
#define SGMII_CONTROL_ADDR 0x00
#define SGMII_STATUS_ADDR 0x04
#define SGMII_BASE_PAGE_ABILITY_ADDR 0x14
/* SGMII_STATUS bits */
#define AUTO_NEG_COMPLETE BIT(5)
#define LINK_STATUS BIT(2)
/* SGMII_BASE_PAGE_ABILITY bit */
#define LINK_UP BIT(15)
/* ENET_SPARE_CFG bit */
#define MPA_IDLE_WITH_QMI_EMPTY BIT(12)
/* SGMII RX data-valid gating register */
#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc

/* Defined in xgene_enet_sgmac.c */
extern struct xgene_mac_ops xgene_sgmac_ops;
extern struct xgene_port_ops xgene_sgport_ops;

#endif /* __XGENE_ENET_SGMAC_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index cd64b9f18b58..67d07206b3c7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -284,7 +284,7 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
284 clk_disable_unprepare(pdata->clk); 284 clk_disable_unprepare(pdata->clk);
285} 285}
286 286
287void xgene_enet_link_state(struct work_struct *work) 287static void xgene_enet_link_state(struct work_struct *work)
288{ 288{
289 struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), 289 struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
290 struct xgene_enet_pdata, link_work); 290 struct xgene_enet_pdata, link_work);
@@ -322,6 +322,7 @@ struct xgene_mac_ops xgene_xgmac_ops = {
322 .rx_disable = xgene_xgmac_rx_disable, 322 .rx_disable = xgene_xgmac_rx_disable,
323 .tx_disable = xgene_xgmac_tx_disable, 323 .tx_disable = xgene_xgmac_tx_disable,
324 .set_mac_addr = xgene_xgmac_set_mac_addr, 324 .set_mac_addr = xgene_xgmac_set_mac_addr,
325 .link_state = xgene_enet_link_state
325}; 326};
326 327
327struct xgene_port_ops xgene_xgport_ops = { 328struct xgene_port_ops xgene_xgport_ops = {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index d2d59e7ed9ab..5a5296a6d1df 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -47,10 +47,6 @@
47#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 47#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
48#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 48#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
49 49
50#define PHY_POLL_LINK_ON (10 * HZ)
51#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
52
53void xgene_enet_link_state(struct work_struct *work);
54extern struct xgene_mac_ops xgene_xgmac_ops; 50extern struct xgene_mac_ops xgene_xgmac_ops;
55extern struct xgene_port_ops xgene_xgport_ops; 51extern struct xgene_port_ops xgene_xgport_ops;
56 52
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ba499489969a..dbb41c1923e6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8099,9 +8099,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8099 /* Sync BD data before updating mailbox */ 8099 /* Sync BD data before updating mailbox */
8100 wmb(); 8100 wmb();
8101 8101
8102 /* Packets are ready, update Tx producer idx local and on card. */
8103 tw32_tx_mbox(tnapi->prodmbox, entry);
8104
8105 tnapi->tx_prod = entry; 8102 tnapi->tx_prod = entry;
8106 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8103 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8107 netif_tx_stop_queue(txq); 8104 netif_tx_stop_queue(txq);
@@ -8116,7 +8113,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8116 netif_tx_wake_queue(txq); 8113 netif_tx_wake_queue(txq);
8117 } 8114 }
8118 8115
8119 mmiowb(); 8116 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8117 /* Packets are ready, update Tx producer idx on card. */
8118 tw32_tx_mbox(tnapi->prodmbox, entry);
8119 mmiowb();
8120 }
8121
8120 return NETDEV_TX_OK; 8122 return NETDEV_TX_OK;
8121 8123
8122dma_error: 8124dma_error:
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 153cafac323c..c3861de9dc81 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
552 552
553 len = (vec == nvecs) ? 553 len = (vec == nvecs) ?
554 last_fraglen : unmap->vector.len; 554 last_fraglen : unmap->vector.len;
555 skb->truesize += unmap->vector.len;
555 totlen += len; 556 totlen += len;
556 557
557 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 558 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
563 564
564 skb->len += totlen; 565 skb->len += totlen;
565 skb->data_len += totlen; 566 skb->data_len += totlen;
566 skb->truesize += totlen;
567} 567}
568 568
569static inline void 569static inline void
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index c3ce9df0041a..ac6473f75eb9 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -68,7 +68,7 @@ config CHELSIO_T3
68 68
69config CHELSIO_T4 69config CHELSIO_T4
70 tristate "Chelsio Communications T4/T5 Ethernet support" 70 tristate "Chelsio Communications T4/T5 Ethernet support"
71 depends on PCI 71 depends on PCI && (IPV6 || IPV6=n)
72 select FW_LOADER 72 select FW_LOADER
73 select MDIO 73 select MDIO
74 ---help--- 74 ---help---
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 410ed5805a9a..3c481b260745 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -986,6 +986,8 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
986int t4_seeprom_wp(struct adapter *adapter, bool enable); 986int t4_seeprom_wp(struct adapter *adapter, bool enable);
987int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 987int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
988int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 988int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
989int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
990 const u8 *fw_data, unsigned int size, int force);
989unsigned int t4_flash_cfg_addr(struct adapter *adapter); 991unsigned int t4_flash_cfg_addr(struct adapter *adapter);
990int t4_get_fw_version(struct adapter *adapter, u32 *vers); 992int t4_get_fw_version(struct adapter *adapter, u32 *vers);
991int t4_get_tp_version(struct adapter *adapter, u32 *vers); 993int t4_get_tp_version(struct adapter *adapter, u32 *vers);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5b38e955af6e..3f60070f2519 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2929,16 +2929,26 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2929 int ret; 2929 int ret;
2930 const struct firmware *fw; 2930 const struct firmware *fw;
2931 struct adapter *adap = netdev2adap(netdev); 2931 struct adapter *adap = netdev2adap(netdev);
2932 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2932 2933
2933 ef->data[sizeof(ef->data) - 1] = '\0'; 2934 ef->data[sizeof(ef->data) - 1] = '\0';
2934 ret = request_firmware(&fw, ef->data, adap->pdev_dev); 2935 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2935 if (ret < 0) 2936 if (ret < 0)
2936 return ret; 2937 return ret;
2937 2938
2938 ret = t4_load_fw(adap, fw->data, fw->size); 2939 /* If the adapter has been fully initialized then we'll go ahead and
2940 * try to get the firmware's cooperation in upgrading to the new
2941 * firmware image otherwise we'll try to do the entire job from the
2942 * host ... and we always "force" the operation in this path.
2943 */
2944 if (adap->flags & FULL_INIT_DONE)
2945 mbox = adap->mbox;
2946
2947 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2939 release_firmware(fw); 2948 release_firmware(fw);
2940 if (!ret) 2949 if (!ret)
2941 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); 2950 dev_info(adap->pdev_dev, "loaded firmware %s,"
2951 " reload cxgb4 driver\n", ef->data);
2942 return ret; 2952 return ret;
2943} 2953}
2944 2954
@@ -4359,6 +4369,7 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
4359 * success (true) if it belongs otherwise failure (false). 4369 * success (true) if it belongs otherwise failure (false).
4360 * Called with rcu_read_lock() held. 4370 * Called with rcu_read_lock() held.
4361 */ 4371 */
4372#if IS_ENABLED(CONFIG_IPV6)
4362static bool cxgb4_netdev(const struct net_device *netdev) 4373static bool cxgb4_netdev(const struct net_device *netdev)
4363{ 4374{
4364 struct adapter *adap; 4375 struct adapter *adap;
@@ -4480,6 +4491,13 @@ static int update_root_dev_clip(struct net_device *dev)
4480 return ret; 4491 return ret;
4481 4492
4482 /* Parse all bond and vlan devices layered on top of the physical dev */ 4493 /* Parse all bond and vlan devices layered on top of the physical dev */
4494 root_dev = netdev_master_upper_dev_get_rcu(dev);
4495 if (root_dev) {
4496 ret = update_dev_clip(root_dev, dev);
4497 if (ret)
4498 return ret;
4499 }
4500
4483 for (i = 0; i < VLAN_N_VID; i++) { 4501 for (i = 0; i < VLAN_N_VID; i++) {
4484 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); 4502 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4485 if (!root_dev) 4503 if (!root_dev)
@@ -4512,6 +4530,7 @@ static void update_clip(const struct adapter *adap)
4512 } 4530 }
4513 rcu_read_unlock(); 4531 rcu_read_unlock();
4514} 4532}
4533#endif /* IS_ENABLED(CONFIG_IPV6) */
4515 4534
4516/** 4535/**
4517 * cxgb_up - enable the adapter 4536 * cxgb_up - enable the adapter
@@ -4558,7 +4577,9 @@ static int cxgb_up(struct adapter *adap)
4558 t4_intr_enable(adap); 4577 t4_intr_enable(adap);
4559 adap->flags |= FULL_INIT_DONE; 4578 adap->flags |= FULL_INIT_DONE;
4560 notify_ulds(adap, CXGB4_STATE_UP); 4579 notify_ulds(adap, CXGB4_STATE_UP);
4580#if IS_ENABLED(CONFIG_IPV6)
4561 update_clip(adap); 4581 update_clip(adap);
4582#endif
4562 out: 4583 out:
4563 return err; 4584 return err;
4564 irq_err: 4585 irq_err:
@@ -6852,14 +6873,18 @@ static int __init cxgb4_init_module(void)
6852 if (ret < 0) 6873 if (ret < 0)
6853 debugfs_remove(cxgb4_debugfs_root); 6874 debugfs_remove(cxgb4_debugfs_root);
6854 6875
6876#if IS_ENABLED(CONFIG_IPV6)
6855 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6877 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6878#endif
6856 6879
6857 return ret; 6880 return ret;
6858} 6881}
6859 6882
6860static void __exit cxgb4_cleanup_module(void) 6883static void __exit cxgb4_cleanup_module(void)
6861{ 6884{
6885#if IS_ENABLED(CONFIG_IPV6)
6862 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6886 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6887#endif
6863 pci_unregister_driver(&cxgb4_driver); 6888 pci_unregister_driver(&cxgb4_driver);
6864 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ 6889 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6865} 6890}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 1fff1495fe31..a9d9d74e4f09 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,8 +37,6 @@
37#include "t4_regs.h" 37#include "t4_regs.h"
38#include "t4fw_api.h" 38#include "t4fw_api.h"
39 39
40static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
41 const u8 *fw_data, unsigned int size, int force);
42/** 40/**
43 * t4_wait_op_done_val - wait until an operation is completed 41 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation 42 * @adapter: the adapter performing the operation
@@ -3076,8 +3074,8 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3076 * positive errno indicates that the adapter is ~probably~ intact, a 3074 * positive errno indicates that the adapter is ~probably~ intact, a
3077 * negative errno indicates that things are looking bad ... 3075 * negative errno indicates that things are looking bad ...
3078 */ 3076 */
3079static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 3077int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3080 const u8 *fw_data, unsigned int size, int force) 3078 const u8 *fw_data, unsigned int size, int force)
3081{ 3079{
3082 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 3080 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3083 int reset, ret; 3081 int reset, ret;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1d5e1822bb2c..9af296a1ca99 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -367,6 +367,56 @@ struct bufdesc_ex {
367#define FEC_VLAN_TAG_LEN 0x04 367#define FEC_VLAN_TAG_LEN 0x04
368#define FEC_ETHTYPE_LEN 0x02 368#define FEC_ETHTYPE_LEN 0x02
369 369
370/* Controller is ENET-MAC */
371#define FEC_QUIRK_ENET_MAC (1 << 0)
372/* Controller needs driver to swap frame */
373#define FEC_QUIRK_SWAP_FRAME (1 << 1)
374/* Controller uses gasket */
375#define FEC_QUIRK_USE_GASKET (1 << 2)
376/* Controller has GBIT support */
377#define FEC_QUIRK_HAS_GBIT (1 << 3)
378/* Controller has extend desc buffer */
379#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
380/* Controller has hardware checksum support */
381#define FEC_QUIRK_HAS_CSUM (1 << 5)
382/* Controller has hardware vlan support */
383#define FEC_QUIRK_HAS_VLAN (1 << 6)
384/* ENET IP errata ERR006358
385 *
386 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
387 * detected as not set during a prior frame transmission, then the
388 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
389 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
390 * frames not being transmitted until there is a 0-to-1 transition on
391 * ENET_TDAR[TDAR].
392 */
393#define FEC_QUIRK_ERR006358 (1 << 7)
394/* ENET IP hw AVB
395 *
396 * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
397 * - Two class indicators on receive with configurable priority
398 * - Two class indicators and line speed timer on transmit allowing
399 * implementation class credit based shapers externally
400 * - Additional DMA registers provisioned to allow managing up to 3
401 * independent rings
402 */
403#define FEC_QUIRK_HAS_AVB (1 << 8)
404/* There is a TDAR race condition for mutliQ when the software sets TDAR
405 * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
406 * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
407 * The issue exist at i.MX6SX enet IP.
408 */
409#define FEC_QUIRK_ERR007885 (1 << 9)
410/* ENET Block Guide/ Chapter for the iMX6SX (PELE) address one issue:
411 * After set ENET_ATCR[Capture], there need some time cycles before the counter
412 * value is capture in the register clock domain.
413 * The wait-time-cycles is at least 6 clock cycles of the slower clock between
414 * the register clock and the 1588 clock. The 1588 ts_clk is fixed to 25Mhz,
415 * register clock is 66Mhz, so the wait-time-cycles must be greater than 240ns
416 * (40ns * 6).
417 */
418#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
419
370struct fec_enet_priv_tx_q { 420struct fec_enet_priv_tx_q {
371 int index; 421 int index;
372 unsigned char *tx_bounce[TX_RING_SIZE]; 422 unsigned char *tx_bounce[TX_RING_SIZE];
@@ -484,12 +534,22 @@ struct fec_enet_private {
484 unsigned int itr_clk_rate; 534 unsigned int itr_clk_rate;
485 535
486 u32 rx_copybreak; 536 u32 rx_copybreak;
537
538 /* ptp clock period in ns*/
539 unsigned int ptp_inc;
540
541 /* pps */
542 int pps_channel;
543 unsigned int reload_period;
544 int pps_enable;
545 unsigned int next_counter;
487}; 546};
488 547
489void fec_ptp_init(struct platform_device *pdev); 548void fec_ptp_init(struct platform_device *pdev);
490void fec_ptp_start_cyclecounter(struct net_device *ndev); 549void fec_ptp_start_cyclecounter(struct net_device *ndev);
491int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 550int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
492int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 551int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
552uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
493 553
494/****************************************************************************/ 554/****************************************************************************/
495#endif /* FEC_H */ 555#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 87975b5dda94..81b96cf87574 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -78,47 +78,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
78#define FEC_ENET_RAFL_V 0x8 78#define FEC_ENET_RAFL_V 0x8
79#define FEC_ENET_OPD_V 0xFFF0 79#define FEC_ENET_OPD_V 0xFFF0
80 80
81/* Controller is ENET-MAC */
82#define FEC_QUIRK_ENET_MAC (1 << 0)
83/* Controller needs driver to swap frame */
84#define FEC_QUIRK_SWAP_FRAME (1 << 1)
85/* Controller uses gasket */
86#define FEC_QUIRK_USE_GASKET (1 << 2)
87/* Controller has GBIT support */
88#define FEC_QUIRK_HAS_GBIT (1 << 3)
89/* Controller has extend desc buffer */
90#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
91/* Controller has hardware checksum support */
92#define FEC_QUIRK_HAS_CSUM (1 << 5)
93/* Controller has hardware vlan support */
94#define FEC_QUIRK_HAS_VLAN (1 << 6)
95/* ENET IP errata ERR006358
96 *
97 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
98 * detected as not set during a prior frame transmission, then the
99 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
100 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
101 * frames not being transmitted until there is a 0-to-1 transition on
102 * ENET_TDAR[TDAR].
103 */
104#define FEC_QUIRK_ERR006358 (1 << 7)
105/* ENET IP hw AVB
106 *
107 * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
108 * - Two class indicators on receive with configurable priority
109 * - Two class indicators and line speed timer on transmit allowing
110 * implementation class credit based shapers externally
111 * - Additional DMA registers provisioned to allow managing up to 3
112 * independent rings
113 */
114#define FEC_QUIRK_HAS_AVB (1 << 8)
115/* There is a TDAR race condition for mutliQ when the software sets TDAR
116 * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
117 * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
118 * The issue exist at i.MX6SX enet IP.
119 */
120#define FEC_QUIRK_ERR007885 (1 << 9)
121
122static struct platform_device_id fec_devtype[] = { 81static struct platform_device_id fec_devtype[] = {
123 { 82 {
124 /* keep it for coldfire */ 83 /* keep it for coldfire */
@@ -146,7 +105,7 @@ static struct platform_device_id fec_devtype[] = {
146 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 105 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
147 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 106 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
148 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 107 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
149 FEC_QUIRK_ERR007885, 108 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
150 }, { 109 }, {
151 /* sentinel */ 110 /* sentinel */
152 } 111 }
@@ -1622,6 +1581,8 @@ fec_enet_interrupt(int irq, void *dev_id)
1622 complete(&fep->mdio_done); 1581 complete(&fep->mdio_done);
1623 } 1582 }
1624 1583
1584 fec_ptp_check_pps_event(fep);
1585
1625 return ret; 1586 return ret;
1626} 1587}
1627 1588
@@ -2912,20 +2873,12 @@ static void fec_poll_controller(struct net_device *dev)
2912#endif 2873#endif
2913 2874
2914#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM 2875#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
2915 2876static inline void fec_enet_set_netdev_features(struct net_device *netdev,
2916static int fec_set_features(struct net_device *netdev,
2917 netdev_features_t features) 2877 netdev_features_t features)
2918{ 2878{
2919 struct fec_enet_private *fep = netdev_priv(netdev); 2879 struct fec_enet_private *fep = netdev_priv(netdev);
2920 netdev_features_t changed = features ^ netdev->features; 2880 netdev_features_t changed = features ^ netdev->features;
2921 2881
2922 /* Quiesce the device if necessary */
2923 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2924 napi_disable(&fep->napi);
2925 netif_tx_lock_bh(netdev);
2926 fec_stop(netdev);
2927 }
2928
2929 netdev->features = features; 2882 netdev->features = features;
2930 2883
2931 /* Receive checksum has been changed */ 2884 /* Receive checksum has been changed */
@@ -2935,13 +2888,25 @@ static int fec_set_features(struct net_device *netdev,
2935 else 2888 else
2936 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 2889 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
2937 } 2890 }
2891}
2892
2893static int fec_set_features(struct net_device *netdev,
2894 netdev_features_t features)
2895{
2896 struct fec_enet_private *fep = netdev_priv(netdev);
2897 netdev_features_t changed = features ^ netdev->features;
2938 2898
2939 /* Resume the device after updates */
2940 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { 2899 if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2900 napi_disable(&fep->napi);
2901 netif_tx_lock_bh(netdev);
2902 fec_stop(netdev);
2903 fec_enet_set_netdev_features(netdev, features);
2941 fec_restart(netdev); 2904 fec_restart(netdev);
2942 netif_tx_wake_all_queues(netdev); 2905 netif_tx_wake_all_queues(netdev);
2943 netif_tx_unlock_bh(netdev); 2906 netif_tx_unlock_bh(netdev);
2944 napi_enable(&fep->napi); 2907 napi_enable(&fep->napi);
2908 } else {
2909 fec_enet_set_netdev_features(netdev, features);
2945 } 2910 }
2946 2911
2947 return 0; 2912 return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index cca3617a2321..992c8c3db553 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -61,6 +61,24 @@
61#define FEC_T_INC_CORR_MASK 0x00007f00 61#define FEC_T_INC_CORR_MASK 0x00007f00
62#define FEC_T_INC_CORR_OFFSET 8 62#define FEC_T_INC_CORR_OFFSET 8
63 63
64#define FEC_T_CTRL_PINPER 0x00000080
65#define FEC_T_TF0_MASK 0x00000001
66#define FEC_T_TF0_OFFSET 0
67#define FEC_T_TF1_MASK 0x00000002
68#define FEC_T_TF1_OFFSET 1
69#define FEC_T_TF2_MASK 0x00000004
70#define FEC_T_TF2_OFFSET 2
71#define FEC_T_TF3_MASK 0x00000008
72#define FEC_T_TF3_OFFSET 3
73#define FEC_T_TDRE_MASK 0x00000001
74#define FEC_T_TDRE_OFFSET 0
75#define FEC_T_TMODE_MASK 0x0000003C
76#define FEC_T_TMODE_OFFSET 2
77#define FEC_T_TIE_MASK 0x00000040
78#define FEC_T_TIE_OFFSET 6
79#define FEC_T_TF_MASK 0x00000080
80#define FEC_T_TF_OFFSET 7
81
64#define FEC_ATIME_CTRL 0x400 82#define FEC_ATIME_CTRL 0x400
65#define FEC_ATIME 0x404 83#define FEC_ATIME 0x404
66#define FEC_ATIME_EVT_OFFSET 0x408 84#define FEC_ATIME_EVT_OFFSET 0x408
@@ -69,7 +87,143 @@
69#define FEC_ATIME_INC 0x414 87#define FEC_ATIME_INC 0x414
70#define FEC_TS_TIMESTAMP 0x418 88#define FEC_TS_TIMESTAMP 0x418
71 89
90#define FEC_TGSR 0x604
91#define FEC_TCSR(n) (0x608 + n * 0x08)
92#define FEC_TCCR(n) (0x60C + n * 0x08)
93#define MAX_TIMER_CHANNEL 3
94#define FEC_TMODE_TOGGLE 0x05
95#define FEC_HIGH_PULSE 0x0F
96
72#define FEC_CC_MULT (1 << 31) 97#define FEC_CC_MULT (1 << 31)
98#define FEC_COUNTER_PERIOD (1 << 31)
99#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
100#define FEC_CHANNLE_0 0
101#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
102
103/**
104 * fec_ptp_enable_pps
105 * @fep: the fec_enet_private structure handle
106 * @enable: enable the channel pps output
107 *
108 * This function enble the PPS ouput on the timer channel.
109 */
110static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
111{
112 unsigned long flags;
113 u32 val, tempval;
114 int inc;
115 struct timespec ts;
116 u64 ns;
117 u32 remainder;
118 val = 0;
119
120 if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
121 dev_err(&fep->pdev->dev, "No ptp stack is running\n");
122 return -EINVAL;
123 }
124
125 if (fep->pps_enable == enable)
126 return 0;
127
128 fep->pps_channel = DEFAULT_PPS_CHANNEL;
129 fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
130 inc = fep->ptp_inc;
131
132 spin_lock_irqsave(&fep->tmreg_lock, flags);
133
134 if (enable) {
135 /* clear capture or output compare interrupt status if have.
136 */
137 writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
138
139 /* It is recommended to doulbe check the TMODE field in the
140 * TCSR register to be cleared before the first compare counter
141 * is written into TCCR register. Just add a double check.
142 */
143 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
144 do {
145 val &= ~(FEC_T_TMODE_MASK);
146 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
147 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
148 } while (val & FEC_T_TMODE_MASK);
149
150 /* Dummy read counter to update the counter */
151 timecounter_read(&fep->tc);
152 /* We want to find the first compare event in the next
153 * second point. So we need to know what the ptp time
154 * is now and how many nanoseconds is ahead to get next second.
155 * The remaining nanosecond ahead before the next second would be
156 * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
157 * to current timer would be next second.
158 */
159 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
160 tempval |= FEC_T_CTRL_CAPTURE;
161 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
162
163 tempval = readl(fep->hwp + FEC_ATIME);
164 /* Convert the ptp local counter to 1588 timestamp */
165 ns = timecounter_cyc2time(&fep->tc, tempval);
166 ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
167 ts.tv_nsec = remainder;
168
169 /* The tempval is less than 3 seconds, and so val is less than
170 * 4 seconds. No overflow for 32bit calculation.
171 */
172 val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
173
174 /* Need to consider the situation that the current time is
175 * very close to the second point, which means NSEC_PER_SEC
176 * - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
177 * is still running when we calculate the first compare event, it is
178 * possible that the remaining nanoseonds run out before the compare
179 * counter is calculated and written into TCCR register. To avoid
180 * this possibility, we will set the compare event to be the next
181 * of next second. The current setting is 31-bit timer and wrap
182 * around over 2 seconds. So it is okay to set the next of next
183 * seond for the timer.
184 */
185 val += NSEC_PER_SEC;
186
187 /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to current
188 * ptp counter, which maybe cause 32-bit wrap. Since the
189 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 second.
190 * We can ensure the wrap will not cause issue. If the offset
191 * is bigger than fep->cc.mask would be a error.
192 */
193 val &= fep->cc.mask;
194 writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
195
196 /* Calculate the second the compare event timestamp */
197 fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
198
199 /* * Enable compare event when overflow */
200 val = readl(fep->hwp + FEC_ATIME_CTRL);
201 val |= FEC_T_CTRL_PINPER;
202 writel(val, fep->hwp + FEC_ATIME_CTRL);
203
204 /* Compare channel setting. */
205 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
206 val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
207 val &= ~(1 << FEC_T_TDRE_OFFSET);
208 val &= ~(FEC_T_TMODE_MASK);
209 val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
210 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
211
212 /* Write the second compare event timestamp and calculate
213 * the third timestamp. Refer the TCCR register detail in the spec.
214 */
215 writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
216 fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
217 } else {
218 writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
219 }
220
221 fep->pps_enable = enable;
222 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
223
224 return 0;
225}
226
73/** 227/**
74 * fec_ptp_read - read raw cycle counter (to be used by time counter) 228 * fec_ptp_read - read raw cycle counter (to be used by time counter)
75 * @cc: the cyclecounter structure 229 * @cc: the cyclecounter structure
@@ -82,12 +236,17 @@ static cycle_t fec_ptp_read(const struct cyclecounter *cc)
82{ 236{
83 struct fec_enet_private *fep = 237 struct fec_enet_private *fep =
84 container_of(cc, struct fec_enet_private, cc); 238 container_of(cc, struct fec_enet_private, cc);
239 const struct platform_device_id *id_entry =
240 platform_get_device_id(fep->pdev);
85 u32 tempval; 241 u32 tempval;
86 242
87 tempval = readl(fep->hwp + FEC_ATIME_CTRL); 243 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
88 tempval |= FEC_T_CTRL_CAPTURE; 244 tempval |= FEC_T_CTRL_CAPTURE;
89 writel(tempval, fep->hwp + FEC_ATIME_CTRL); 245 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
90 246
247 if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
248 udelay(1);
249
91 return readl(fep->hwp + FEC_ATIME); 250 return readl(fep->hwp + FEC_ATIME);
92} 251}
93 252
@@ -113,14 +272,15 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
113 /* 1ns counter */ 272 /* 1ns counter */
114 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); 273 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
115 274
116 /* use free running count */ 275 /* use 31-bit timer counter */
117 writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD); 276 writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
118 277
119 writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL); 278 writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
279 fep->hwp + FEC_ATIME_CTRL);
120 280
121 memset(&fep->cc, 0, sizeof(fep->cc)); 281 memset(&fep->cc, 0, sizeof(fep->cc));
122 fep->cc.read = fec_ptp_read; 282 fep->cc.read = fec_ptp_read;
123 fep->cc.mask = CLOCKSOURCE_MASK(32); 283 fep->cc.mask = CLOCKSOURCE_MASK(31);
124 fep->cc.shift = 31; 284 fep->cc.shift = 31;
125 fep->cc.mult = FEC_CC_MULT; 285 fep->cc.mult = FEC_CC_MULT;
126 286
@@ -143,32 +303,59 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
143 */ 303 */
144static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 304static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
145{ 305{
146 u64 diff;
147 unsigned long flags; 306 unsigned long flags;
148 int neg_adj = 0; 307 int neg_adj = 0;
149 u32 mult = FEC_CC_MULT; 308 u32 i, tmp;
309 u32 corr_inc, corr_period;
310 u32 corr_ns;
311 u64 lhs, rhs;
150 312
151 struct fec_enet_private *fep = 313 struct fec_enet_private *fep =
152 container_of(ptp, struct fec_enet_private, ptp_caps); 314 container_of(ptp, struct fec_enet_private, ptp_caps);
153 315
316 if (ppb == 0)
317 return 0;
318
154 if (ppb < 0) { 319 if (ppb < 0) {
155 ppb = -ppb; 320 ppb = -ppb;
156 neg_adj = 1; 321 neg_adj = 1;
157 } 322 }
158 323
159 diff = mult; 324 /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
160 diff *= ppb; 325 * Try to find the corr_inc between 1 to fep->ptp_inc to
161 diff = div_u64(diff, 1000000000ULL); 326 * meet adjustment requirement.
327 */
328 lhs = NSEC_PER_SEC;
329 rhs = (u64)ppb * (u64)fep->ptp_inc;
330 for (i = 1; i <= fep->ptp_inc; i++) {
331 if (lhs >= rhs) {
332 corr_inc = i;
333 corr_period = div_u64(lhs, rhs);
334 break;
335 }
336 lhs += NSEC_PER_SEC;
337 }
338 /* Not found? Set it to high value - double speed
339 * correct in every clock step.
340 */
341 if (i > fep->ptp_inc) {
342 corr_inc = fep->ptp_inc;
343 corr_period = 1;
344 }
345
346 if (neg_adj)
347 corr_ns = fep->ptp_inc - corr_inc;
348 else
349 corr_ns = fep->ptp_inc + corr_inc;
162 350
163 spin_lock_irqsave(&fep->tmreg_lock, flags); 351 spin_lock_irqsave(&fep->tmreg_lock, flags);
164 /*
165 * dummy read to set cycle_last in tc to now.
166 * So use adjusted mult to calculate when next call
167 * timercounter_read.
168 */
169 timecounter_read(&fep->tc);
170 352
171 fep->cc.mult = neg_adj ? mult - diff : mult + diff; 353 tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
354 tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
355 writel(tmp, fep->hwp + FEC_ATIME_INC);
356 writel(corr_period, fep->hwp + FEC_ATIME_CORR);
357 /* dummy read to update the timer. */
358 timecounter_read(&fep->tc);
172 359
173 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 360 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
174 361
@@ -188,12 +375,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
188 container_of(ptp, struct fec_enet_private, ptp_caps); 375 container_of(ptp, struct fec_enet_private, ptp_caps);
189 unsigned long flags; 376 unsigned long flags;
190 u64 now; 377 u64 now;
378 u32 counter;
191 379
192 spin_lock_irqsave(&fep->tmreg_lock, flags); 380 spin_lock_irqsave(&fep->tmreg_lock, flags);
193 381
194 now = timecounter_read(&fep->tc); 382 now = timecounter_read(&fep->tc);
195 now += delta; 383 now += delta;
196 384
385 /* Get the timer value based on adjusted timestamp.
386 * Update the counter with the masked value.
387 */
388 counter = now & fep->cc.mask;
389 writel(counter, fep->hwp + FEC_ATIME);
390
197 /* reset the timecounter */ 391 /* reset the timecounter */
198 timecounter_init(&fep->tc, &fep->cc, now); 392 timecounter_init(&fep->tc, &fep->cc, now);
199 393
@@ -244,6 +438,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
244 438
245 u64 ns; 439 u64 ns;
246 unsigned long flags; 440 unsigned long flags;
441 u32 counter;
247 442
248 mutex_lock(&fep->ptp_clk_mutex); 443 mutex_lock(&fep->ptp_clk_mutex);
249 /* Check the ptp clock */ 444 /* Check the ptp clock */
@@ -254,8 +449,13 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
254 449
255 ns = ts->tv_sec * 1000000000ULL; 450 ns = ts->tv_sec * 1000000000ULL;
256 ns += ts->tv_nsec; 451 ns += ts->tv_nsec;
452 /* Get the timer value based on timestamp.
453 * Update the counter with the masked value.
454 */
455 counter = ns & fep->cc.mask;
257 456
258 spin_lock_irqsave(&fep->tmreg_lock, flags); 457 spin_lock_irqsave(&fep->tmreg_lock, flags);
458 writel(counter, fep->hwp + FEC_ATIME);
259 timecounter_init(&fep->tc, &fep->cc, ns); 459 timecounter_init(&fep->tc, &fep->cc, ns);
260 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 460 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
261 mutex_unlock(&fep->ptp_clk_mutex); 461 mutex_unlock(&fep->ptp_clk_mutex);
@@ -272,6 +472,15 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
272static int fec_ptp_enable(struct ptp_clock_info *ptp, 472static int fec_ptp_enable(struct ptp_clock_info *ptp,
273 struct ptp_clock_request *rq, int on) 473 struct ptp_clock_request *rq, int on)
274{ 474{
475 struct fec_enet_private *fep =
476 container_of(ptp, struct fec_enet_private, ptp_caps);
477 int ret = 0;
478
479 if (rq->type == PTP_CLK_REQ_PPS) {
480 ret = fec_ptp_enable_pps(fep, on);
481
482 return ret;
483 }
275 return -EOPNOTSUPP; 484 return -EOPNOTSUPP;
276} 485}
277 486
@@ -386,7 +595,7 @@ void fec_ptp_init(struct platform_device *pdev)
386 fep->ptp_caps.n_ext_ts = 0; 595 fep->ptp_caps.n_ext_ts = 0;
387 fep->ptp_caps.n_per_out = 0; 596 fep->ptp_caps.n_per_out = 0;
388 fep->ptp_caps.n_pins = 0; 597 fep->ptp_caps.n_pins = 0;
389 fep->ptp_caps.pps = 0; 598 fep->ptp_caps.pps = 1;
390 fep->ptp_caps.adjfreq = fec_ptp_adjfreq; 599 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
391 fep->ptp_caps.adjtime = fec_ptp_adjtime; 600 fep->ptp_caps.adjtime = fec_ptp_adjtime;
392 fep->ptp_caps.gettime = fec_ptp_gettime; 601 fep->ptp_caps.gettime = fec_ptp_gettime;
@@ -394,6 +603,7 @@ void fec_ptp_init(struct platform_device *pdev)
394 fep->ptp_caps.enable = fec_ptp_enable; 603 fep->ptp_caps.enable = fec_ptp_enable;
395 604
396 fep->cycle_speed = clk_get_rate(fep->clk_ptp); 605 fep->cycle_speed = clk_get_rate(fep->clk_ptp);
606 fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
397 607
398 spin_lock_init(&fep->tmreg_lock); 608 spin_lock_init(&fep->tmreg_lock);
399 609
@@ -409,3 +619,36 @@ void fec_ptp_init(struct platform_device *pdev)
409 619
410 schedule_delayed_work(&fep->time_keep, HZ); 620 schedule_delayed_work(&fep->time_keep, HZ);
411} 621}
622
623/**
624 * fec_ptp_check_pps_event
625 * @fep: the fec_enet_private structure handle
626 *
627 * This function check the pps event and reload the timer compare counter.
628 */
629uint fec_ptp_check_pps_event(struct fec_enet_private *fep)
630{
631 u32 val;
632 u8 channel = fep->pps_channel;
633 struct ptp_clock_event event;
634
635 val = readl(fep->hwp + FEC_TCSR(channel));
636 if (val & FEC_T_TF_MASK) {
637 /* Write the next next compare(not the next according the spec)
638 * value to the register
639 */
640 writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
641 do {
642 writel(val, fep->hwp + FEC_TCSR(channel));
643 } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
644
645 /* Update the counter; */
646 fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
647
648 event.type = PTP_CLOCK_PPS;
649 ptp_clock_event(fep->ptp_clock, &event);
650 return 1;
651 }
652
653 return 0;
654}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 379b1a578d3d..4fdf0aa16978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -338,7 +338,7 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
338 338
339static void gfar_rx_buff_size_config(struct gfar_private *priv) 339static void gfar_rx_buff_size_config(struct gfar_private *priv)
340{ 340{
341 int frame_size = priv->ndev->mtu + ETH_HLEN; 341 int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
342 342
343 /* set this when rx hw offload (TOE) functions are being used */ 343 /* set this when rx hw offload (TOE) functions are being used */
344 priv->uses_rxfcb = 0; 344 priv->uses_rxfcb = 0;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 6919adb66f53..5b8300a32bf5 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -320,4 +320,15 @@ config FM10K
320 To compile this driver as a module, choose M here. The module 320 To compile this driver as a module, choose M here. The module
321 will be called fm10k. MSI-X interrupt support is required 321 will be called fm10k. MSI-X interrupt support is required
322 322
323config FM10K_VXLAN
324 bool "Virtual eXtensible Local Area Network Support"
325 default n
326 depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
327 ---help---
328 This allows one to create VXLAN virtual interfaces that provide
329 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
330 to tunnel virtual network infrastructure in virtualized environments.
331 Say Y here if you want to use Virtual eXtensible Local Area Network
332 (VXLAN) in the driver.
333
323endif # NET_VENDOR_INTEL 334endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 9d7118a0d67a..e645af412e76 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -929,6 +929,30 @@ static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
929 return i == tx_ring->count; 929 return i == tx_ring->count;
930} 930}
931 931
932static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
933{
934 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
935
936 smp_mb();
937
938 /* We need to check again in a case another CPU has just
939 * made room available. */
940 if (likely(fm10k_desc_unused(tx_ring) < size))
941 return -EBUSY;
942
943 /* A reprieve! - use start_queue because it doesn't call schedule */
944 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
945 ++tx_ring->tx_stats.restart_queue;
946 return 0;
947}
948
949static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
950{
951 if (likely(fm10k_desc_unused(tx_ring) >= size))
952 return 0;
953 return __fm10k_maybe_stop_tx(tx_ring, size);
954}
955
932static void fm10k_tx_map(struct fm10k_ring *tx_ring, 956static void fm10k_tx_map(struct fm10k_ring *tx_ring,
933 struct fm10k_tx_buffer *first) 957 struct fm10k_tx_buffer *first)
934{ 958{
@@ -1022,13 +1046,18 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
1022 1046
1023 tx_ring->next_to_use = i; 1047 tx_ring->next_to_use = i;
1024 1048
1049 /* Make sure there is space in the ring for the next send. */
1050 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1051
1025 /* notify HW of packet */ 1052 /* notify HW of packet */
1026 writel(i, tx_ring->tail); 1053 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
1054 writel(i, tx_ring->tail);
1027 1055
1028 /* we need this if more than one processor can write to our tail 1056 /* we need this if more than one processor can write to our tail
1029 * at a time, it synchronizes IO on IA64/Altix systems 1057 * at a time, it synchronizes IO on IA64/Altix systems
1030 */ 1058 */
1031 mmiowb(); 1059 mmiowb();
1060 }
1032 1061
1033 return; 1062 return;
1034dma_error: 1063dma_error:
@@ -1048,30 +1077,6 @@ dma_error:
1048 tx_ring->next_to_use = i; 1077 tx_ring->next_to_use = i;
1049} 1078}
1050 1079
1051static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
1052{
1053 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1054
1055 smp_mb();
1056
1057 /* We need to check again in a case another CPU has just
1058 * made room available. */
1059 if (likely(fm10k_desc_unused(tx_ring) < size))
1060 return -EBUSY;
1061
1062 /* A reprieve! - use start_queue because it doesn't call schedule */
1063 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1064 ++tx_ring->tx_stats.restart_queue;
1065 return 0;
1066}
1067
1068static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
1069{
1070 if (likely(fm10k_desc_unused(tx_ring) >= size))
1071 return 0;
1072 return __fm10k_maybe_stop_tx(tx_ring, size);
1073}
1074
1075netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, 1080netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1076 struct fm10k_ring *tx_ring) 1081 struct fm10k_ring *tx_ring)
1077{ 1082{
@@ -1116,8 +1121,6 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1116 1121
1117 fm10k_tx_map(tx_ring, first); 1122 fm10k_tx_map(tx_ring, first);
1118 1123
1119 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1120
1121 return NETDEV_TX_OK; 1124 return NETDEV_TX_OK;
1122 1125
1123out_drop: 1126out_drop:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index bf44a8fe711f..8811364b91cb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,9 +20,9 @@
20 20
21#include "fm10k.h" 21#include "fm10k.h"
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#if IS_ENABLED(CONFIG_VXLAN) 23#if IS_ENABLED(CONFIG_FM10K_VXLAN)
24#include <net/vxlan.h> 24#include <net/vxlan.h>
25#endif /* CONFIG_VXLAN */ 25#endif /* CONFIG_FM10K_VXLAN */
26 26
27/** 27/**
28 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) 28 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -556,7 +556,7 @@ int fm10k_open(struct net_device *netdev)
556 if (err) 556 if (err)
557 goto err_set_queues; 557 goto err_set_queues;
558 558
559#if IS_ENABLED(CONFIG_VXLAN) 559#if IS_ENABLED(CONFIG_FM10K_VXLAN)
560 /* update VXLAN port configuration */ 560 /* update VXLAN port configuration */
561 vxlan_get_rx_port(netdev); 561 vxlan_get_rx_port(netdev);
562 562
@@ -785,14 +785,14 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
785 if (!(netdev->flags & IFF_PROMISC)) { 785 if (!(netdev->flags & IFF_PROMISC)) {
786 err = hw->mac.ops.update_vlan(hw, vid, 0, set); 786 err = hw->mac.ops.update_vlan(hw, vid, 0, set);
787 if (err) 787 if (err)
788 return err; 788 goto err_out;
789 } 789 }
790 790
791 /* update our base MAC address */ 791 /* update our base MAC address */
792 err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, 792 err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
793 vid, set, 0); 793 vid, set, 0);
794 if (err) 794 if (err)
795 return err; 795 goto err_out;
796 796
797 /* set vid prior to syncing/unsyncing the VLAN */ 797 /* set vid prior to syncing/unsyncing the VLAN */
798 interface->vid = vid + (set ? VLAN_N_VID : 0); 798 interface->vid = vid + (set ? VLAN_N_VID : 0);
@@ -801,9 +801,10 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
801 __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); 801 __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
802 __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); 802 __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);
803 803
804err_out:
804 fm10k_mbx_unlock(interface); 805 fm10k_mbx_unlock(interface);
805 806
806 return 0; 807 return err;
807} 808}
808 809
809static int fm10k_vlan_rx_add_vid(struct net_device *netdev, 810static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e02036c427b9..a0cb74ab3dc6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1489,6 +1489,7 @@ void fm10k_up(struct fm10k_intfc *interface)
1489 netif_tx_start_all_queues(interface->netdev); 1489 netif_tx_start_all_queues(interface->netdev);
1490 1490
1491 /* kick off the service timer */ 1491 /* kick off the service timer */
1492 hw->mac.get_host_state = 1;
1492 mod_timer(&interface->service_timer, jiffies); 1493 mod_timer(&interface->service_timer, jiffies);
1493} 1494}
1494 1495
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 706fc69aa0c5..97c85b859536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1261,6 +1261,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1261 struct ixgbe_hw *hw = &adapter->hw; 1261 struct ixgbe_hw *hw = &adapter->hw;
1262 u32 regval; 1262 u32 regval;
1263 1263
1264 if (vf >= adapter->num_vfs)
1265 return -EINVAL;
1266
1264 adapter->vfinfo[vf].spoofchk_enabled = setting; 1267 adapter->vfinfo[vf].spoofchk_enabled = setting;
1265 1268
1266 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 1269 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index ffbae293cef5..6e6f18fc5d76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -11,7 +11,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
11 {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, 11 {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
12 {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, 12 {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
13 {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, 13 {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
14 {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, 14 {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
15 {QLCNIC_CMD_INTRPT_TEST, 4, 1}, 15 {QLCNIC_CMD_INTRPT_TEST, 4, 1},
16 {QLCNIC_CMD_SET_MTU, 4, 1}, 16 {QLCNIC_CMD_SET_MTU, 4, 1},
17 {QLCNIC_CMD_READ_PHY, 4, 2}, 17 {QLCNIC_CMD_READ_PHY, 4, 2},
@@ -32,7 +32,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
32 {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, 32 {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
33 {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, 33 {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, 34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
35 {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, 35 {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
36 {QLCNIC_CMD_CONFIG_PORT, 4, 1}, 36 {QLCNIC_CMD_CONFIG_PORT, 4, 1},
37 {QLCNIC_CMD_TEMP_SIZE, 4, 4}, 37 {QLCNIC_CMD_TEMP_SIZE, 4, 4},
38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
@@ -129,7 +129,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
129 } 129 }
130 130
131 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); 131 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
132 for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++) 132 for (i = 1; i < cmd->req.num; i++)
133 QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); 133 QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
134 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, 134 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
135 QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); 135 QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 60f85149fc4c..f77cce034ad4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -71,9 +71,17 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; 71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
72} 72}
73 73
74/* Report whether the NIC considers this TX queue empty, given the 74/* Get partner of a TX queue, seen as part of the same net core queue */
75 * write_count used for the last doorbell push. May return false 75static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
76 * negative. 76{
77 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
78 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
79 else
80 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
81}
82
83/* Report whether this TX queue would be empty for the given write_count.
84 * May return false negative.
77 */ 85 */
78static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, 86static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
79 unsigned int write_count) 87 unsigned int write_count)
@@ -86,9 +94,18 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
86 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 94 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
87} 95}
88 96
89static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) 97/* Decide whether we can use TX PIO, ie. write packet data directly into
98 * a buffer on the device. This can reduce latency at the expense of
99 * throughput, so we only do this if both hardware and software TX rings
100 * are empty. This also ensures that only one packet at a time can be
101 * using the PIO buffer.
102 */
103static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
90{ 104{
91 return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count); 105 struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
106 return tx_queue->piobuf &&
107 __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
108 __efx_nic_tx_is_empty(partner, partner->insert_count);
92} 109}
93 110
94/* Decide whether to push a TX descriptor to the NIC vs merely writing 111/* Decide whether to push a TX descriptor to the NIC vs merely writing
@@ -96,6 +113,8 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
96 * descriptor to an empty queue, but is otherwise pointless. Further, 113 * descriptor to an empty queue, but is otherwise pointless. Further,
97 * Falcon and Siena have hardware bugs (SF bug 33851) that may be 114 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
98 * triggered if we don't check this. 115 * triggered if we don't check this.
116 * We use the write_count used for the last doorbell push, to get the
117 * NIC's view of the tx queue.
99 */ 118 */
100static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, 119static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
101 unsigned int write_count) 120 unsigned int write_count)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 320609842211..ee84a90e371c 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -132,15 +132,6 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
132 return max_descs; 132 return max_descs;
133} 133}
134 134
135/* Get partner of a TX queue, seen as part of the same net core queue */
136static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
137{
138 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
139 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
140 else
141 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
142}
143
144static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) 135static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
145{ 136{
146 /* We need to consider both queues that the net core sees as one */ 137 /* We need to consider both queues that the net core sees as one */
@@ -344,6 +335,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
344 struct efx_nic *efx = tx_queue->efx; 335 struct efx_nic *efx = tx_queue->efx;
345 struct device *dma_dev = &efx->pci_dev->dev; 336 struct device *dma_dev = &efx->pci_dev->dev;
346 struct efx_tx_buffer *buffer; 337 struct efx_tx_buffer *buffer;
338 unsigned int old_insert_count = tx_queue->insert_count;
347 skb_frag_t *fragment; 339 skb_frag_t *fragment;
348 unsigned int len, unmap_len = 0; 340 unsigned int len, unmap_len = 0;
349 dma_addr_t dma_addr, unmap_addr = 0; 341 dma_addr_t dma_addr, unmap_addr = 0;
@@ -351,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
351 unsigned short dma_flags; 343 unsigned short dma_flags;
352 int i = 0; 344 int i = 0;
353 345
354 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 346 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
355 347
356 if (skb_shinfo(skb)->gso_size) 348 if (skb_shinfo(skb)->gso_size)
357 return efx_enqueue_skb_tso(tx_queue, skb); 349 return efx_enqueue_skb_tso(tx_queue, skb);
@@ -369,9 +361,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
369 361
370 /* Consider using PIO for short packets */ 362 /* Consider using PIO for short packets */
371#ifdef EFX_USE_PIO 363#ifdef EFX_USE_PIO
372 if (skb->len <= efx_piobuf_size && tx_queue->piobuf && 364 if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
373 efx_nic_tx_is_empty(tx_queue) && 365 efx_nic_may_tx_pio(tx_queue)) {
374 efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
375 buffer = efx_enqueue_skb_pio(tx_queue, skb); 366 buffer = efx_enqueue_skb_pio(tx_queue, skb);
376 dma_flags = EFX_TX_BUF_OPTION; 367 dma_flags = EFX_TX_BUF_OPTION;
377 goto finish_packet; 368 goto finish_packet;
@@ -439,13 +430,14 @@ finish_packet:
439 430
440 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 431 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
441 432
433 efx_tx_maybe_stop_queue(tx_queue);
434
442 /* Pass off to hardware */ 435 /* Pass off to hardware */
443 efx_nic_push_buffers(tx_queue); 436 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
437 efx_nic_push_buffers(tx_queue);
444 438
445 tx_queue->tx_packets++; 439 tx_queue->tx_packets++;
446 440
447 efx_tx_maybe_stop_queue(tx_queue);
448
449 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
450 442
451 dma_err: 443 dma_err:
@@ -458,7 +450,7 @@ finish_packet:
458 dev_kfree_skb_any(skb); 450 dev_kfree_skb_any(skb);
459 451
460 /* Work backwards until we hit the original insert pointer value */ 452 /* Work backwards until we hit the original insert pointer value */
461 while (tx_queue->insert_count != tx_queue->write_count) { 453 while (tx_queue->insert_count != old_insert_count) {
462 unsigned int pkts_compl = 0, bytes_compl = 0; 454 unsigned int pkts_compl = 0, bytes_compl = 0;
463 --tx_queue->insert_count; 455 --tx_queue->insert_count;
464 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 456 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
@@ -989,12 +981,13 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
989/* Remove buffers put into a tx_queue. None of the buffers must have 981/* Remove buffers put into a tx_queue. None of the buffers must have
990 * an skb attached. 982 * an skb attached.
991 */ 983 */
992static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 984static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
985 unsigned int insert_count)
993{ 986{
994 struct efx_tx_buffer *buffer; 987 struct efx_tx_buffer *buffer;
995 988
996 /* Work backwards until we hit the original insert pointer value */ 989 /* Work backwards until we hit the original insert pointer value */
997 while (tx_queue->insert_count != tx_queue->write_count) { 990 while (tx_queue->insert_count != insert_count) {
998 --tx_queue->insert_count; 991 --tx_queue->insert_count;
999 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 992 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
1000 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 993 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
@@ -1258,13 +1251,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1258 struct sk_buff *skb) 1251 struct sk_buff *skb)
1259{ 1252{
1260 struct efx_nic *efx = tx_queue->efx; 1253 struct efx_nic *efx = tx_queue->efx;
1254 unsigned int old_insert_count = tx_queue->insert_count;
1261 int frag_i, rc; 1255 int frag_i, rc;
1262 struct tso_state state; 1256 struct tso_state state;
1263 1257
1264 /* Find the packet protocol and sanity-check it */ 1258 /* Find the packet protocol and sanity-check it */
1265 state.protocol = efx_tso_check_protocol(skb); 1259 state.protocol = efx_tso_check_protocol(skb);
1266 1260
1267 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 1261 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
1268 1262
1269 rc = tso_start(&state, efx, skb); 1263 rc = tso_start(&state, efx, skb);
1270 if (rc) 1264 if (rc)
@@ -1308,11 +1302,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1308 1302
1309 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 1303 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1310 1304
1311 /* Pass off to hardware */
1312 efx_nic_push_buffers(tx_queue);
1313
1314 efx_tx_maybe_stop_queue(tx_queue); 1305 efx_tx_maybe_stop_queue(tx_queue);
1315 1306
1307 /* Pass off to hardware */
1308 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
1309 efx_nic_push_buffers(tx_queue);
1310
1316 tx_queue->tso_bursts++; 1311 tx_queue->tso_bursts++;
1317 return NETDEV_TX_OK; 1312 return NETDEV_TX_OK;
1318 1313
@@ -1336,6 +1331,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1336 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, 1331 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1337 state.header_unmap_len, DMA_TO_DEVICE); 1332 state.header_unmap_len, DMA_TO_DEVICE);
1338 1333
1339 efx_enqueue_unwind(tx_queue); 1334 efx_enqueue_unwind(tx_queue, old_insert_count);
1340 return NETDEV_TX_OK; 1335 return NETDEV_TX_OK;
1341} 1336}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 552bbc17863c..ccfe7e510418 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited 4 * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
5 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> 5 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
6 * 6 * Contributors: Giuseppe Cavallaro <peppe.cavallaro@st.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -22,45 +22,22 @@
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_net.h> 23#include <linux/of_net.h>
24 24
25#define DWMAC_125MHZ 125000000
26#define DWMAC_50MHZ 50000000
27#define DWMAC_25MHZ 25000000
28#define DWMAC_2_5MHZ 2500000
29
30#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
31 iface == PHY_INTERFACE_MODE_RGMII_ID || \
32 iface == PHY_INTERFACE_MODE_RGMII_RXID || \
33 iface == PHY_INTERFACE_MODE_RGMII_TXID)
34
35#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
36 iface == PHY_INTERFACE_MODE_GMII)
37
38/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */
39
25/** 40/**
26 * STi GMAC glue logic.
27 * --------------------
28 *
29 * _
30 * | \
31 * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
32 * phyclk | |___________________________________________
33 * | | | (phyclk-in)
34 * --------|1 / |
35 * int-clk |_ / |
36 * | _
37 * | | \
38 * |_______|1 \ ETH_SEL_TX_RETIME_CLK
39 * | |___________________________
40 * | | (tx-retime-clk)
41 * _______|0 /
42 * | |_ /
43 * _ |
44 * | \ |
45 * --------|0 \ |
46 * clk_125 | |__|
47 * | | ETH_SEL_TXCLK_NOT_CLK125
48 * --------|1 /
49 * txclk |_ /
50 *
51 *
52 * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can
53 * generate 50MHz clock or MAC can generate it.
54 * This bit is configured by "st,ext-phyclk" property.
55 *
56 * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz
57 * clock either comes from clk-125 pin or txclk pin. This configuration is
58 * totally driven by the board wiring. This bit is configured by
59 * "st,tx-retime-src" property.
60 *
61 * TXCLK configuration is different for different phy interface modes
62 * and changes according to link speed in modes like RGMII.
63 *
64 * Below table summarizes the clock requirement and clock sources for 41 * Below table summarizes the clock requirement and clock sources for
65 * supported phy interface modes with link speeds. 42 * supported phy interface modes with link speeds.
66 * ________________________________________________ 43 * ________________________________________________
@@ -74,44 +51,58 @@
74 * ------------------------------------------------ 51 * ------------------------------------------------
75 *| RGMII | 125Mhz | 25Mhz | 52 *| RGMII | 125Mhz | 25Mhz |
76 *| | clk-125/txclk | clkgen | 53 *| | clk-125/txclk | clkgen |
54 *| | clkgen | |
77 * ------------------------------------------------ 55 * ------------------------------------------------
78 *| RMII | n/a | 25Mhz | 56 *| RMII | n/a | 25Mhz |
79 *| | |clkgen/phyclk-in | 57 *| | |clkgen/phyclk-in |
80 * ------------------------------------------------ 58 * ------------------------------------------------
81 * 59 *
82 * TX lines are always retimed with a clk, which can vary depending 60 * Register Configuration
83 * on the board configuration. Below is the table of these bits 61 *-------------------------------
84 * in eth configuration register depending on source of retime clk. 62 * src |BIT(8)| BIT(7)| BIT(6)|
85 * 63 *-------------------------------
86 *--------------------------------------------------------------- 64 * txclk | 0 | n/a | 1 |
87 * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| 65 *-------------------------------
88 *--------------------------------------------------------------- 66 * ck_125| 0 | n/a | 0 |
89 * txclk | 0 | n/a | 1 | 67 *-------------------------------
90 *--------------------------------------------------------------- 68 * phyclk| 1 | 0 | n/a |
91 * ck_125| 0 | n/a | 0 | 69 *-------------------------------
92 *--------------------------------------------------------------- 70 * clkgen| 1 | 1 | n/a |
93 * phyclk| 1 | 0 | n/a | 71 *-------------------------------
94 *---------------------------------------------------------------
95 * clkgen| 1 | 1 | n/a |
96 *---------------------------------------------------------------
97 */ 72 */
98 73
99 /* Register definition */ 74#define STIH4XX_RETIME_SRC_MASK GENMASK(8, 6)
75#define STIH4XX_ETH_SEL_TX_RETIME_CLK BIT(8)
76#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
77#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
78
79/* STiD127 register definitions */
100 80
101 /* 3 bits [8:6] 81/**
102 * [6:6] ETH_SEL_TXCLK_NOT_CLK125 82 *-----------------------
103 * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK 83 * src |BIT(6)| BIT(7)|
104 * [8:8] ETH_SEL_TX_RETIME_CLK 84 *-----------------------
105 * 85 * MII | 1 | n/a |
106 */ 86 *-----------------------
87 * RMII | n/a | 1 |
88 * clkgen| | |
89 *-----------------------
90 * RMII | n/a | 0 |
91 * phyclk| | |
92 *-----------------------
93 * RGMII | 1 | n/a |
94 * clkgen| | |
95 *-----------------------
96 */
107 97
108#define TX_RETIME_SRC_MASK GENMASK(8, 6) 98#define STID127_RETIME_SRC_MASK GENMASK(7, 6)
109#define ETH_SEL_TX_RETIME_CLK BIT(8) 99#define STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
110#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) 100#define STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK BIT(6)
111#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
112 101
113#define ENMII_MASK GENMASK(5, 5) 102#define ENMII_MASK GENMASK(5, 5)
114#define ENMII BIT(5) 103#define ENMII BIT(5)
104#define EN_MASK GENMASK(1, 1)
105#define EN BIT(1)
115 106
116/** 107/**
117 * 3 bits [4:2] 108 * 3 bits [4:2]
@@ -120,29 +111,23 @@
120 * 010-SGMII 111 * 010-SGMII
121 * 100-RMII 112 * 100-RMII
122*/ 113*/
123#define MII_PHY_SEL_MASK GENMASK(4, 2) 114#define MII_PHY_SEL_MASK GENMASK(4, 2)
124#define ETH_PHY_SEL_RMII BIT(4) 115#define ETH_PHY_SEL_RMII BIT(4)
125#define ETH_PHY_SEL_SGMII BIT(3) 116#define ETH_PHY_SEL_SGMII BIT(3)
126#define ETH_PHY_SEL_RGMII BIT(2) 117#define ETH_PHY_SEL_RGMII BIT(2)
127#define ETH_PHY_SEL_GMII 0x0 118#define ETH_PHY_SEL_GMII 0x0
128#define ETH_PHY_SEL_MII 0x0 119#define ETH_PHY_SEL_MII 0x0
129
130#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
131 iface == PHY_INTERFACE_MODE_RGMII_ID || \
132 iface == PHY_INTERFACE_MODE_RGMII_RXID || \
133 iface == PHY_INTERFACE_MODE_RGMII_TXID)
134
135#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
136 iface == PHY_INTERFACE_MODE_GMII)
137 120
138struct sti_dwmac { 121struct sti_dwmac {
139 int interface; 122 int interface; /* MII interface */
140 bool ext_phyclk; 123 bool ext_phyclk; /* Clock from external PHY */
141 bool is_tx_retime_src_clk_125; 124 u32 tx_retime_src; /* TXCLK Retiming*/
142 struct clk *clk; 125 struct clk *clk; /* PHY clock */
143 int reg; 126 int ctrl_reg; /* GMAC glue-logic control register */
127 int clk_sel_reg; /* GMAC ext clk selection register */
144 struct device *dev; 128 struct device *dev;
145 struct regmap *regmap; 129 struct regmap *regmap;
130 u32 speed;
146}; 131};
147 132
148static u32 phy_intf_sels[] = { 133static u32 phy_intf_sels[] = {
@@ -162,74 +147,133 @@ enum {
162 TX_RETIME_SRC_CLKGEN, 147 TX_RETIME_SRC_CLKGEN,
163}; 148};
164 149
165static const char *const tx_retime_srcs[] = { 150static u32 stih4xx_tx_retime_val[] = {
166 [TX_RETIME_SRC_NA] = "", 151 [TX_RETIME_SRC_TXCLK] = STIH4XX_ETH_SEL_TXCLK_NOT_CLK125,
167 [TX_RETIME_SRC_TXCLK] = "txclk",
168 [TX_RETIME_SRC_CLK_125] = "clk_125",
169 [TX_RETIME_SRC_PHYCLK] = "phyclk",
170 [TX_RETIME_SRC_CLKGEN] = "clkgen",
171};
172
173static u32 tx_retime_val[] = {
174 [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
175 [TX_RETIME_SRC_CLK_125] = 0x0, 152 [TX_RETIME_SRC_CLK_125] = 0x0,
176 [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, 153 [TX_RETIME_SRC_PHYCLK] = STIH4XX_ETH_SEL_TX_RETIME_CLK,
177 [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | 154 [TX_RETIME_SRC_CLKGEN] = STIH4XX_ETH_SEL_TX_RETIME_CLK
178 ETH_SEL_INTERNAL_NOTEXT_PHYCLK, 155 | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
179}; 156};
180 157
181static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd) 158static void stih4xx_fix_retime_src(void *priv, u32 spd)
182{ 159{
183 u32 src = 0, freq = 0; 160 struct sti_dwmac *dwmac = priv;
184 161 u32 src = dwmac->tx_retime_src;
185 if (spd == SPEED_100) { 162 u32 reg = dwmac->ctrl_reg;
186 if (dwmac->interface == PHY_INTERFACE_MODE_MII || 163 u32 freq = 0;
187 dwmac->interface == PHY_INTERFACE_MODE_GMII) { 164
188 src = TX_RETIME_SRC_TXCLK; 165 if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
189 } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { 166 src = TX_RETIME_SRC_TXCLK;
190 if (dwmac->ext_phyclk) { 167 } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
191 src = TX_RETIME_SRC_PHYCLK; 168 if (dwmac->ext_phyclk) {
192 } else { 169 src = TX_RETIME_SRC_PHYCLK;
193 src = TX_RETIME_SRC_CLKGEN; 170 } else {
194 freq = 50000000;
195 }
196
197 } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
198 src = TX_RETIME_SRC_CLKGEN; 171 src = TX_RETIME_SRC_CLKGEN;
199 freq = 25000000; 172 freq = DWMAC_50MHZ;
200 } 173 }
174 } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
175 /* On GiGa clk source can be either ext or from clkgen */
176 if (spd == SPEED_1000) {
177 freq = DWMAC_125MHZ;
178 } else {
179 /* Switch to clkgen for these speeds */
180 src = TX_RETIME_SRC_CLKGEN;
181 if (spd == SPEED_100)
182 freq = DWMAC_25MHZ;
183 else if (spd == SPEED_10)
184 freq = DWMAC_2_5MHZ;
185 }
186 }
201 187
202 if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) 188 if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
203 clk_set_rate(dwmac->clk, freq); 189 clk_set_rate(dwmac->clk, freq);
204 190
205 } else if (spd == SPEED_1000) { 191 regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
206 if (dwmac->is_tx_retime_src_clk_125) 192 stih4xx_tx_retime_val[src]);
207 src = TX_RETIME_SRC_CLK_125; 193}
208 else 194
209 src = TX_RETIME_SRC_TXCLK; 195static void stid127_fix_retime_src(void *priv, u32 spd)
196{
197 struct sti_dwmac *dwmac = priv;
198 u32 reg = dwmac->ctrl_reg;
199 u32 freq = 0;
200 u32 val = 0;
201
202 if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
203 val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
204 } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
205 if (!dwmac->ext_phyclk) {
206 val = STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK;
207 freq = DWMAC_50MHZ;
208 }
209 } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
210 val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
211 if (spd == SPEED_1000)
212 freq = DWMAC_125MHZ;
213 else if (spd == SPEED_100)
214 freq = DWMAC_25MHZ;
215 else if (spd == SPEED_10)
216 freq = DWMAC_2_5MHZ;
210 } 217 }
211 218
212 regmap_update_bits(dwmac->regmap, dwmac->reg, 219 if (dwmac->clk && freq)
213 TX_RETIME_SRC_MASK, tx_retime_val[src]); 220 clk_set_rate(dwmac->clk, freq);
221
222 regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
214} 223}
215 224
216static void sti_dwmac_exit(struct platform_device *pdev, void *priv) 225static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
217{ 226{
218 struct sti_dwmac *dwmac = priv; 227 struct regmap *regmap = dwmac->regmap;
228 int iface = dwmac->interface;
229 struct device *dev = dwmac->dev;
230 struct device_node *np = dev->of_node;
231 u32 reg = dwmac->ctrl_reg;
232 u32 val;
219 233
220 if (dwmac->clk) 234 if (dwmac->clk)
221 clk_disable_unprepare(dwmac->clk); 235 clk_prepare_enable(dwmac->clk);
236
237 if (of_property_read_bool(np, "st,gmac_en"))
238 regmap_update_bits(regmap, reg, EN_MASK, EN);
239
240 regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
241
242 val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
243 regmap_update_bits(regmap, reg, ENMII_MASK, val);
244}
245
246static int stix4xx_init(struct platform_device *pdev, void *priv)
247{
248 struct sti_dwmac *dwmac = priv;
249 u32 spd = dwmac->speed;
250
251 sti_dwmac_ctrl_init(dwmac);
252
253 stih4xx_fix_retime_src(priv, spd);
254
255 return 0;
222} 256}
223 257
224static void sti_fix_mac_speed(void *priv, unsigned int spd) 258static int stid127_init(struct platform_device *pdev, void *priv)
225{ 259{
226 struct sti_dwmac *dwmac = priv; 260 struct sti_dwmac *dwmac = priv;
261 u32 spd = dwmac->speed;
227 262
228 setup_retime_src(dwmac, spd); 263 sti_dwmac_ctrl_init(dwmac);
229 264
230 return; 265 stid127_fix_retime_src(priv, spd);
266
267 return 0;
231} 268}
232 269
270static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
271{
272 struct sti_dwmac *dwmac = priv;
273
274 if (dwmac->clk)
275 clk_disable_unprepare(dwmac->clk);
276}
233static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, 277static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
234 struct platform_device *pdev) 278 struct platform_device *pdev)
235{ 279{
@@ -245,6 +289,13 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); 289 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
246 if (!res) 290 if (!res)
247 return -ENODATA; 291 return -ENODATA;
292 dwmac->ctrl_reg = res->start;
293
294 /* clk selection from extra syscfg register */
295 dwmac->clk_sel_reg = -ENXIO;
296 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
297 if (res)
298 dwmac->clk_sel_reg = res->start;
248 299
249 regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); 300 regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
250 if (IS_ERR(regmap)) 301 if (IS_ERR(regmap))
@@ -253,53 +304,31 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
253 dwmac->dev = dev; 304 dwmac->dev = dev;
254 dwmac->interface = of_get_phy_mode(np); 305 dwmac->interface = of_get_phy_mode(np);
255 dwmac->regmap = regmap; 306 dwmac->regmap = regmap;
256 dwmac->reg = res->start;
257 dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); 307 dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
258 dwmac->is_tx_retime_src_clk_125 = false; 308 dwmac->tx_retime_src = TX_RETIME_SRC_NA;
309 dwmac->speed = SPEED_100;
259 310
260 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 311 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
261 const char *rs; 312 const char *rs;
313 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
262 314
263 err = of_property_read_string(np, "st,tx-retime-src", &rs); 315 err = of_property_read_string(np, "st,tx-retime-src", &rs);
264 if (err < 0) { 316 if (err < 0)
265 dev_err(dev, "st,tx-retime-src not specified\n"); 317 dev_warn(dev, "Use internal clock source\n");
266 return err;
267 }
268 318
269 if (!strcasecmp(rs, "clk_125")) 319 if (!strcasecmp(rs, "clk_125"))
270 dwmac->is_tx_retime_src_clk_125 = true; 320 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
321 else if (!strcasecmp(rs, "txclk"))
322 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
323
324 dwmac->speed = SPEED_1000;
271 } 325 }
272 326
273 dwmac->clk = devm_clk_get(dev, "sti-ethclk"); 327 dwmac->clk = devm_clk_get(dev, "sti-ethclk");
274 328 if (IS_ERR(dwmac->clk)) {
275 if (IS_ERR(dwmac->clk)) 329 dev_warn(dev, "No phy clock provided...\n");
276 dwmac->clk = NULL; 330 dwmac->clk = NULL;
277 331 }
278 return 0;
279}
280
281static int sti_dwmac_init(struct platform_device *pdev, void *priv)
282{
283 struct sti_dwmac *dwmac = priv;
284 struct regmap *regmap = dwmac->regmap;
285 int iface = dwmac->interface;
286 u32 reg = dwmac->reg;
287 u32 val, spd;
288
289 if (dwmac->clk)
290 clk_prepare_enable(dwmac->clk);
291
292 regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
293
294 val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
295 regmap_update_bits(regmap, reg, ENMII_MASK, val);
296
297 if (IS_PHY_IF_MODE_GBIT(iface))
298 spd = SPEED_1000;
299 else
300 spd = SPEED_100;
301
302 setup_retime_src(dwmac, spd);
303 332
304 return 0; 333 return 0;
305} 334}
@@ -322,9 +351,16 @@ static void *sti_dwmac_setup(struct platform_device *pdev)
322 return dwmac; 351 return dwmac;
323} 352}
324 353
325const struct stmmac_of_data sti_gmac_data = { 354const struct stmmac_of_data stih4xx_dwmac_data = {
326 .fix_mac_speed = sti_fix_mac_speed, 355 .fix_mac_speed = stih4xx_fix_retime_src,
356 .setup = sti_dwmac_setup,
357 .init = stix4xx_init,
358 .exit = sti_dwmac_exit,
359};
360
361const struct stmmac_of_data stid127_dwmac_data = {
362 .fix_mac_speed = stid127_fix_retime_src,
327 .setup = sti_dwmac_setup, 363 .setup = sti_dwmac_setup,
328 .init = sti_dwmac_init, 364 .init = stid127_init,
329 .exit = sti_dwmac_exit, 365 .exit = sti_dwmac_exit,
330}; 366};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 44528896355d..c3c40650b309 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -144,7 +144,8 @@ extern const struct stmmac_of_data meson6_dwmac_data;
144extern const struct stmmac_of_data sun7i_gmac_data; 144extern const struct stmmac_of_data sun7i_gmac_data;
145#endif 145#endif
146#ifdef CONFIG_DWMAC_STI 146#ifdef CONFIG_DWMAC_STI
147extern const struct stmmac_of_data sti_gmac_data; 147extern const struct stmmac_of_data stih4xx_dwmac_data;
148extern const struct stmmac_of_data stid127_dwmac_data;
148#endif 149#endif
149#ifdef CONFIG_DWMAC_SOCFPGA 150#ifdef CONFIG_DWMAC_SOCFPGA
150extern const struct stmmac_of_data socfpga_gmac_data; 151extern const struct stmmac_of_data socfpga_gmac_data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 652171706258..db56fa7ce8f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -37,9 +37,10 @@ static const struct of_device_id stmmac_dt_ids[] = {
37 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 37 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
38#endif 38#endif
39#ifdef CONFIG_DWMAC_STI 39#ifdef CONFIG_DWMAC_STI
40 { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, 40 { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
41 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, 41 { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
42 { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, 42 { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
43 { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
43#endif 44#endif
44#ifdef CONFIG_DWMAC_SOCFPGA 45#ifdef CONFIG_DWMAC_SOCFPGA
45 { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, 46 { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
@@ -160,11 +161,16 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
160 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 161 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
161 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 162 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
162 163
163 plat->mdio_bus_data = devm_kzalloc(&pdev->dev, 164 if (plat->phy_bus_name)
164 sizeof(struct stmmac_mdio_bus_data), 165 plat->mdio_bus_data = NULL;
165 GFP_KERNEL); 166 else
167 plat->mdio_bus_data =
168 devm_kzalloc(&pdev->dev,
169 sizeof(struct stmmac_mdio_bus_data),
170 GFP_KERNEL);
166 171
167 plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); 172 plat->force_sf_dma_mode =
173 of_property_read_bool(np, "snps,force_sf_dma_mode");
168 174
169 /* Set the maxmtu to a default of JUMBO_LEN in case the 175 /* Set the maxmtu to a default of JUMBO_LEN in case the
170 * parameter is not present in the device tree. 176 * parameter is not present in the device tree.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ab167dc49ce4..952e1e4764b7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2392,6 +2392,15 @@ clean_ndev_ret:
2392 return ret; 2392 return ret;
2393} 2393}
2394 2394
2395static int cpsw_remove_child_device(struct device *dev, void *c)
2396{
2397 struct platform_device *pdev = to_platform_device(dev);
2398
2399 of_device_unregister(pdev);
2400
2401 return 0;
2402}
2403
2395static int cpsw_remove(struct platform_device *pdev) 2404static int cpsw_remove(struct platform_device *pdev)
2396{ 2405{
2397 struct net_device *ndev = platform_get_drvdata(pdev); 2406 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -2406,6 +2415,7 @@ static int cpsw_remove(struct platform_device *pdev)
2406 cpdma_chan_destroy(priv->rxch); 2415 cpdma_chan_destroy(priv->rxch);
2407 cpdma_ctlr_destroy(priv->dma); 2416 cpdma_ctlr_destroy(priv->dma);
2408 pm_runtime_disable(&pdev->dev); 2417 pm_runtime_disable(&pdev->dev);
2418 device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
2409 if (priv->data.dual_emac) 2419 if (priv->data.dual_emac)
2410 free_netdev(cpsw_get_slave_ndev(priv, 1)); 2420 free_netdev(cpsw_get_slave_ndev(priv, 1));
2411 free_netdev(ndev); 2421 free_netdev(ndev);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4a000f6dd6fc..657b65bf5cac 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -193,12 +193,9 @@ fail:
193 193
194static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) 194static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
195{ 195{
196 unsigned long flags;
197
198 if (!pool) 196 if (!pool)
199 return; 197 return;
200 198
201 spin_lock_irqsave(&pool->lock, flags);
202 WARN_ON(pool->used_desc); 199 WARN_ON(pool->used_desc);
203 if (pool->cpumap) { 200 if (pool->cpumap) {
204 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, 201 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
@@ -206,7 +203,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
206 } else { 203 } else {
207 iounmap(pool->iomap); 204 iounmap(pool->iomap);
208 } 205 }
209 spin_unlock_irqrestore(&pool->lock, flags);
210} 206}
211 207
212static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, 208static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -561,7 +557,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
561 cpdma_chan_stop(chan); 557 cpdma_chan_stop(chan);
562 ctlr->channels[chan->chan_num] = NULL; 558 ctlr->channels[chan->chan_num] = NULL;
563 spin_unlock_irqrestore(&ctlr->lock, flags); 559 spin_unlock_irqrestore(&ctlr->lock, flags);
564 kfree(chan);
565 return 0; 560 return 0;
566} 561}
567EXPORT_SYMBOL_GPL(cpdma_chan_destroy); 562EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0fcb5e7eb073..9e17d1a91e71 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -162,7 +162,7 @@ union sub_key {
162 * data: network byte order 162 * data: network byte order
163 * return: host byte order 163 * return: host byte order
164 */ 164 */
165static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen) 165static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
166{ 166{
167 union sub_key subk; 167 union sub_key subk;
168 int k_next = 4; 168 int k_next = 4;
@@ -176,7 +176,7 @@ static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
176 for (i = 0; i < dlen; i++) { 176 for (i = 0; i < dlen; i++) {
177 subk.kb = key[k_next]; 177 subk.kb = key[k_next];
178 k_next = (k_next + 1) % klen; 178 k_next = (k_next + 1) % klen;
179 dt = data[i]; 179 dt = ((u8 *)data)[i];
180 for (j = 0; j < 8; j++) { 180 for (j = 0; j < 8; j++) {
181 if (dt & 0x80) 181 if (dt & 0x80)
182 ret ^= subk.ka; 182 ret ^= subk.ka;
@@ -190,26 +190,20 @@ static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
190 190
191static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) 191static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
192{ 192{
193 struct iphdr *iphdr; 193 struct flow_keys flow;
194 int data_len; 194 int data_len;
195 bool ret = false;
196 195
197 if (eth_hdr(skb)->h_proto != htons(ETH_P_IP)) 196 if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP))
198 return false; 197 return false;
199 198
200 iphdr = ip_hdr(skb); 199 if (flow.ip_proto == IPPROTO_TCP)
200 data_len = 12;
201 else
202 data_len = 8;
201 203
202 if (iphdr->version == 4) { 204 *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
203 if (iphdr->protocol == IPPROTO_TCP)
204 data_len = 12;
205 else
206 data_len = 8;
207 *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
208 (u8 *)&iphdr->saddr, data_len);
209 ret = true;
210 }
211 205
212 return ret; 206 return true;
213} 207}
214 208
215static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, 209static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0c6adaaf898c..65e2892342bd 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -298,7 +298,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
298 */ 298 */
299 if (q->flags & IFF_VNET_HDR) 299 if (q->flags & IFF_VNET_HDR)
300 features |= vlan->tap_features; 300 features |= vlan->tap_features;
301 if (netif_needs_gso(skb, features)) { 301 if (netif_needs_gso(dev, skb, features)) {
302 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 302 struct sk_buff *segs = __skb_gso_segment(skb, features, false);
303 303
304 if (IS_ERR(segs)) 304 if (IS_ERR(segs))
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 492435fce1d4..8c2a29a9bd7f 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -198,8 +198,10 @@ static int ksz8021_config_init(struct phy_device *phydev)
198 if (rc) 198 if (rc)
199 dev_err(&phydev->dev, "failed to set led mode\n"); 199 dev_err(&phydev->dev, "failed to set led mode\n");
200 200
201 phy_write(phydev, MII_KSZPHY_OMSO, val);
202 rc = ksz_config_flags(phydev); 201 rc = ksz_config_flags(phydev);
202 if (rc < 0)
203 return rc;
204 rc = phy_write(phydev, MII_KSZPHY_OMSO, val);
203 return rc < 0 ? rc : 0; 205 return rc < 0 ? rc : 0;
204} 206}
205 207
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 864159eb744e..e3d84c322e4e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3189,31 +3189,39 @@ static void r8153_init(struct r8152 *tp)
3189static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3189static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3190{ 3190{
3191 struct r8152 *tp = usb_get_intfdata(intf); 3191 struct r8152 *tp = usb_get_intfdata(intf);
3192 struct net_device *netdev = tp->netdev;
3193 int ret = 0;
3192 3194
3193 mutex_lock(&tp->control); 3195 mutex_lock(&tp->control);
3194 3196
3195 if (PMSG_IS_AUTO(message)) 3197 if (PMSG_IS_AUTO(message)) {
3198 if (netif_running(netdev) && work_busy(&tp->schedule.work)) {
3199 ret = -EBUSY;
3200 goto out1;
3201 }
3202
3196 set_bit(SELECTIVE_SUSPEND, &tp->flags); 3203 set_bit(SELECTIVE_SUSPEND, &tp->flags);
3197 else 3204 } else {
3198 netif_device_detach(tp->netdev); 3205 netif_device_detach(netdev);
3206 }
3199 3207
3200 if (netif_running(tp->netdev)) { 3208 if (netif_running(netdev)) {
3201 clear_bit(WORK_ENABLE, &tp->flags); 3209 clear_bit(WORK_ENABLE, &tp->flags);
3202 usb_kill_urb(tp->intr_urb); 3210 usb_kill_urb(tp->intr_urb);
3203 cancel_delayed_work_sync(&tp->schedule);
3204 tasklet_disable(&tp->tl); 3211 tasklet_disable(&tp->tl);
3205 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3212 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3206 rtl_stop_rx(tp); 3213 rtl_stop_rx(tp);
3207 rtl_runtime_suspend_enable(tp, true); 3214 rtl_runtime_suspend_enable(tp, true);
3208 } else { 3215 } else {
3216 cancel_delayed_work_sync(&tp->schedule);
3209 tp->rtl_ops.down(tp); 3217 tp->rtl_ops.down(tp);
3210 } 3218 }
3211 tasklet_enable(&tp->tl); 3219 tasklet_enable(&tp->tl);
3212 } 3220 }
3213 3221out1:
3214 mutex_unlock(&tp->control); 3222 mutex_unlock(&tp->control);
3215 3223
3216 return 0; 3224 return ret;
3217} 3225}
3218 3226
3219static int rtl8152_resume(struct usb_interface *intf) 3227static int rtl8152_resume(struct usb_interface *intf)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d0ce4468ce6..13d0a8bc8bf3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -920,6 +920,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
920 int qnum = skb_get_queue_mapping(skb); 920 int qnum = skb_get_queue_mapping(skb);
921 struct send_queue *sq = &vi->sq[qnum]; 921 struct send_queue *sq = &vi->sq[qnum];
922 int err; 922 int err;
923 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
924 bool kick = !skb->xmit_more;
923 925
924 /* Free up any pending old buffers before queueing new ones. */ 926 /* Free up any pending old buffers before queueing new ones. */
925 free_old_xmit_skbs(sq); 927 free_old_xmit_skbs(sq);
@@ -956,7 +958,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
956 } 958 }
957 } 959 }
958 960
959 if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more) 961 if (kick || netif_xmit_stopped(txq))
960 virtqueue_kick(sq->vq); 962 virtqueue_kick(sq->vq);
961 963
962 return NETDEV_TX_OK; 964 return NETDEV_TX_OK;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2a51e6e48e1e..ca309820d39e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1437,9 +1437,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1437 if (!in6_dev) 1437 if (!in6_dev)
1438 goto out; 1438 goto out;
1439 1439
1440 if (!pskb_may_pull(skb, skb->len))
1441 goto out;
1442
1443 iphdr = ipv6_hdr(skb); 1440 iphdr = ipv6_hdr(skb);
1444 saddr = &iphdr->saddr; 1441 saddr = &iphdr->saddr;
1445 daddr = &iphdr->daddr; 1442 daddr = &iphdr->daddr;
@@ -1668,6 +1665,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1668 struct pcpu_sw_netstats *tx_stats, *rx_stats; 1665 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1669 union vxlan_addr loopback; 1666 union vxlan_addr loopback;
1670 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1667 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1668 struct net_device *dev = skb->dev;
1669 int len = skb->len;
1671 1670
1672 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1671 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1673 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1672 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
@@ -1691,16 +1690,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1691 1690
1692 u64_stats_update_begin(&tx_stats->syncp); 1691 u64_stats_update_begin(&tx_stats->syncp);
1693 tx_stats->tx_packets++; 1692 tx_stats->tx_packets++;
1694 tx_stats->tx_bytes += skb->len; 1693 tx_stats->tx_bytes += len;
1695 u64_stats_update_end(&tx_stats->syncp); 1694 u64_stats_update_end(&tx_stats->syncp);
1696 1695
1697 if (netif_rx(skb) == NET_RX_SUCCESS) { 1696 if (netif_rx(skb) == NET_RX_SUCCESS) {
1698 u64_stats_update_begin(&rx_stats->syncp); 1697 u64_stats_update_begin(&rx_stats->syncp);
1699 rx_stats->rx_packets++; 1698 rx_stats->rx_packets++;
1700 rx_stats->rx_bytes += skb->len; 1699 rx_stats->rx_bytes += len;
1701 u64_stats_update_end(&rx_stats->syncp); 1700 u64_stats_update_end(&rx_stats->syncp);
1702 } else { 1701 } else {
1703 skb->dev->stats.rx_dropped++; 1702 dev->stats.rx_dropped++;
1704 } 1703 }
1705} 1704}
1706 1705
@@ -1878,7 +1877,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1878 return arp_reduce(dev, skb); 1877 return arp_reduce(dev, skb);
1879#if IS_ENABLED(CONFIG_IPV6) 1878#if IS_ENABLED(CONFIG_IPV6)
1880 else if (ntohs(eth->h_proto) == ETH_P_IPV6 && 1879 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
1881 skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && 1880 pskb_may_pull(skb, sizeof(struct ipv6hdr)
1881 + sizeof(struct nd_msg)) &&
1882 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { 1882 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
1883 struct nd_msg *msg; 1883 struct nd_msg *msg;
1884 1884
@@ -1887,6 +1887,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1887 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) 1887 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
1888 return neigh_reduce(dev, skb); 1888 return neigh_reduce(dev, skb);
1889 } 1889 }
1890 eth = eth_hdr(skb);
1890#endif 1891#endif
1891 } 1892 }
1892 1893
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index fa671442f420..cca871346a0f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -638,7 +638,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
638 638
639 if (unlikely(!netif_carrier_ok(dev) || 639 if (unlikely(!netif_carrier_ok(dev) ||
640 (slots > 1 && !xennet_can_sg(dev)) || 640 (slots > 1 && !xennet_can_sg(dev)) ||
641 netif_needs_gso(skb, netif_skb_features(skb)))) { 641 netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
642 spin_unlock_irqrestore(&queue->tx_lock, flags); 642 spin_unlock_irqrestore(&queue->tx_lock, flags);
643 goto drop; 643 goto drop;
644 } 644 }
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 02e69e7ee4a3..3e0a0d315f72 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -259,6 +259,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
259 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 259 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
260} 260}
261 261
262#if IS_ENABLED(CONFIG_IPV6)
262static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, 263static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
263 struct l2t_entry *e) 264 struct l2t_entry *e)
264{ 265{
@@ -344,6 +345,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
344 345
345 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 346 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
346} 347}
348#endif
347 349
348static void send_close_req(struct cxgbi_sock *csk) 350static void send_close_req(struct cxgbi_sock *csk)
349{ 351{
@@ -756,7 +758,7 @@ static int act_open_rpl_status_to_errno(int status)
756 758
757static void csk_act_open_retry_timer(unsigned long data) 759static void csk_act_open_retry_timer(unsigned long data)
758{ 760{
759 struct sk_buff *skb; 761 struct sk_buff *skb = NULL;
760 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; 762 struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
761 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 763 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
762 void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, 764 void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
@@ -781,9 +783,11 @@ static void csk_act_open_retry_timer(unsigned long data)
781 if (csk->csk_family == AF_INET) { 783 if (csk->csk_family == AF_INET) {
782 send_act_open_func = send_act_open_req; 784 send_act_open_func = send_act_open_req;
783 skb = alloc_wr(size, 0, GFP_ATOMIC); 785 skb = alloc_wr(size, 0, GFP_ATOMIC);
786#if IS_ENABLED(CONFIG_IPV6)
784 } else { 787 } else {
785 send_act_open_func = send_act_open_req6; 788 send_act_open_func = send_act_open_req6;
786 skb = alloc_wr(size6, 0, GFP_ATOMIC); 789 skb = alloc_wr(size6, 0, GFP_ATOMIC);
790#endif
787 } 791 }
788 792
789 if (!skb) 793 if (!skb)
@@ -1313,11 +1317,6 @@ static int init_act_open(struct cxgbi_sock *csk)
1313 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1317 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1314 cxgbi_sock_get(csk); 1318 cxgbi_sock_get(csk);
1315 1319
1316 n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
1317 if (!n) {
1318 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1319 goto rel_resource;
1320 }
1321 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); 1320 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1322 if (!csk->l2t) { 1321 if (!csk->l2t) {
1323 pr_err("%s, cannot alloc l2t.\n", ndev->name); 1322 pr_err("%s, cannot alloc l2t.\n", ndev->name);
@@ -1335,8 +1334,10 @@ static int init_act_open(struct cxgbi_sock *csk)
1335 1334
1336 if (csk->csk_family == AF_INET) 1335 if (csk->csk_family == AF_INET)
1337 skb = alloc_wr(size, 0, GFP_NOIO); 1336 skb = alloc_wr(size, 0, GFP_NOIO);
1337#if IS_ENABLED(CONFIG_IPV6)
1338 else 1338 else
1339 skb = alloc_wr(size6, 0, GFP_NOIO); 1339 skb = alloc_wr(size6, 0, GFP_NOIO);
1340#endif
1340 1341
1341 if (!skb) 1342 if (!skb)
1342 goto rel_resource; 1343 goto rel_resource;
@@ -1370,8 +1371,10 @@ static int init_act_open(struct cxgbi_sock *csk)
1370 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1371 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1371 if (csk->csk_family == AF_INET) 1372 if (csk->csk_family == AF_INET)
1372 send_act_open_req(csk, skb, csk->l2t); 1373 send_act_open_req(csk, skb, csk->l2t);
1374#if IS_ENABLED(CONFIG_IPV6)
1373 else 1375 else
1374 send_act_open_req6(csk, skb, csk->l2t); 1376 send_act_open_req6(csk, skb, csk->l2t);
1377#endif
1375 neigh_release(n); 1378 neigh_release(n);
1376 1379
1377 return 0; 1380 return 0;
@@ -1635,129 +1638,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1635 return 0; 1638 return 0;
1636} 1639}
1637 1640
1638#if IS_ENABLED(CONFIG_IPV6)
1639static int cxgbi_inet6addr_handler(struct notifier_block *this,
1640 unsigned long event, void *data)
1641{
1642 struct inet6_ifaddr *ifa = data;
1643 struct net_device *event_dev = ifa->idev->dev;
1644 struct cxgbi_device *cdev;
1645 int ret = NOTIFY_DONE;
1646
1647 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1648 event_dev = vlan_dev_real_dev(event_dev);
1649
1650 cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
1651
1652 if (!cdev)
1653 return ret;
1654
1655 switch (event) {
1656 case NETDEV_UP:
1657 ret = cxgb4_clip_get(event_dev,
1658 (const struct in6_addr *)
1659 ((ifa)->addr.s6_addr));
1660 if (ret < 0)
1661 return ret;
1662
1663 ret = NOTIFY_OK;
1664 break;
1665
1666 case NETDEV_DOWN:
1667 cxgb4_clip_release(event_dev,
1668 (const struct in6_addr *)
1669 ((ifa)->addr.s6_addr));
1670 ret = NOTIFY_OK;
1671 break;
1672
1673 default:
1674 break;
1675 }
1676
1677 return ret;
1678}
1679
1680static struct notifier_block cxgbi_inet6addr_notifier = {
1681 .notifier_call = cxgbi_inet6addr_handler
1682};
1683
1684/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
1685 * a physical device.
1686 * The physical device reference is needed to send the actual CLIP command.
1687 */
1688static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
1689{
1690 struct inet6_dev *idev = NULL;
1691 struct inet6_ifaddr *ifa;
1692 int ret = 0;
1693
1694 idev = __in6_dev_get(root_dev);
1695 if (!idev)
1696 return ret;
1697
1698 read_lock_bh(&idev->lock);
1699 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1700 pr_info("updating the clip for addr %pI6\n",
1701 ifa->addr.s6_addr);
1702 ret = cxgb4_clip_get(dev, (const struct in6_addr *)
1703 ifa->addr.s6_addr);
1704 if (ret < 0)
1705 break;
1706 }
1707
1708 read_unlock_bh(&idev->lock);
1709 return ret;
1710}
1711
1712static int update_root_dev_clip(struct net_device *dev)
1713{
1714 struct net_device *root_dev = NULL;
1715 int i, ret = 0;
1716
1717 /* First populate the real net device's IPv6 address */
1718 ret = update_dev_clip(dev, dev);
1719 if (ret)
1720 return ret;
1721
1722 /* Parse all bond and vlan devices layered on top of the physical dev */
1723 root_dev = netdev_master_upper_dev_get(dev);
1724 if (root_dev) {
1725 ret = update_dev_clip(root_dev, dev);
1726 if (ret)
1727 return ret;
1728 }
1729
1730 for (i = 0; i < VLAN_N_VID; i++) {
1731 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
1732 if (!root_dev)
1733 continue;
1734
1735 ret = update_dev_clip(root_dev, dev);
1736 if (ret)
1737 break;
1738 }
1739 return ret;
1740}
1741
1742static void cxgbi_update_clip(struct cxgbi_device *cdev)
1743{
1744 int i;
1745
1746 rcu_read_lock();
1747
1748 for (i = 0; i < cdev->nports; i++) {
1749 struct net_device *dev = cdev->ports[i];
1750 int ret = 0;
1751
1752 if (dev)
1753 ret = update_root_dev_clip(dev);
1754 if (ret < 0)
1755 break;
1756 }
1757 rcu_read_unlock();
1758}
1759#endif /* IS_ENABLED(CONFIG_IPV6) */
1760
1761static void *t4_uld_add(const struct cxgb4_lld_info *lldi) 1641static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1762{ 1642{
1763 struct cxgbi_device *cdev; 1643 struct cxgbi_device *cdev;
@@ -1876,10 +1756,6 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1876 switch (state) { 1756 switch (state) {
1877 case CXGB4_STATE_UP: 1757 case CXGB4_STATE_UP:
1878 pr_info("cdev 0x%p, UP.\n", cdev); 1758 pr_info("cdev 0x%p, UP.\n", cdev);
1879#if IS_ENABLED(CONFIG_IPV6)
1880 cxgbi_update_clip(cdev);
1881#endif
1882 /* re-initialize */
1883 break; 1759 break;
1884 case CXGB4_STATE_START_RECOVERY: 1760 case CXGB4_STATE_START_RECOVERY:
1885 pr_info("cdev 0x%p, RECOVERY.\n", cdev); 1761 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
@@ -1910,17 +1786,11 @@ static int __init cxgb4i_init_module(void)
1910 return rc; 1786 return rc;
1911 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); 1787 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
1912 1788
1913#if IS_ENABLED(CONFIG_IPV6)
1914 register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
1915#endif
1916 return 0; 1789 return 0;
1917} 1790}
1918 1791
1919static void __exit cxgb4i_exit_module(void) 1792static void __exit cxgb4i_exit_module(void)
1920{ 1793{
1921#if IS_ENABLED(CONFIG_IPV6)
1922 unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
1923#endif
1924 cxgb4_unregister_uld(CXGB4_ULD_ISCSI); 1794 cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
1925 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); 1795 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
1926 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); 1796 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 6a2001d6b442..54fa6e0bc1bb 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -275,6 +275,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
275} 275}
276EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); 276EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
277 277
278#if IS_ENABLED(CONFIG_IPV6)
278static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, 279static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
279 int *port) 280 int *port)
280{ 281{
@@ -307,6 +308,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
307 ndev, ndev->name); 308 ndev, ndev->name);
308 return NULL; 309 return NULL;
309} 310}
311#endif
310 312
311void cxgbi_hbas_remove(struct cxgbi_device *cdev) 313void cxgbi_hbas_remove(struct cxgbi_device *cdev)
312{ 314{
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index c0894dd8827b..667c31101b8b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -178,12 +178,12 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
178#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \ 178#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
179 nla = ntb[attr_nr]; \ 179 nla = ntb[attr_nr]; \
180 if (nla) { \ 180 if (nla) { \
181 if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ 181 if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
182 pr_info("<< must not change invariant attr: %s\n", #name); \ 182 pr_info("<< must not change invariant attr: %s\n", #name); \
183 return -EEXIST; \ 183 return -EEXIST; \
184 } \ 184 } \
185 assignment; \ 185 assignment; \
186 } else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ 186 } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
187 /* attribute missing from payload, */ \ 187 /* attribute missing from payload, */ \
188 /* which was expected */ \ 188 /* which was expected */ \
189 } else if ((attr_flag) & DRBD_F_REQUIRED) { \ 189 } else if ((attr_flag) & DRBD_F_REQUIRED) { \
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 9be37da93680..e985ba679c4a 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -41,7 +41,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
41u16 capi20_get_version(u32 contr, struct capi_version *verp); 41u16 capi20_get_version(u32 contr, struct capi_version *verp);
42u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); 42u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
43u16 capi20_get_profile(u32 contr, struct capi_profile *profp); 43u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
44int capi20_manufacturer(unsigned int cmd, void __user *data); 44int capi20_manufacturer(unsigned long cmd, void __user *data);
45 45
46#define CAPICTR_UP 0 46#define CAPICTR_UP 0
47#define CAPICTR_DOWN 1 47#define CAPICTR_DOWN 1
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 838407aea705..74fd5d37f15a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -998,6 +998,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
998 * Callback to use for xmit over the accelerated station. This 998 * Callback to use for xmit over the accelerated station. This
999 * is used in place of ndo_start_xmit on accelerated net 999 * is used in place of ndo_start_xmit on accelerated net
1000 * devices. 1000 * devices.
1001 * bool (*ndo_gso_check) (struct sk_buff *skb,
1002 * struct net_device *dev);
1003 * Called by core transmit path to determine if device is capable of
1004 * performing GSO on a packet. The device returns true if it is
1005 * able to GSO the packet, false otherwise. If the return value is
1006 * false the stack will do software GSO.
1001 */ 1007 */
1002struct net_device_ops { 1008struct net_device_ops {
1003 int (*ndo_init)(struct net_device *dev); 1009 int (*ndo_init)(struct net_device *dev);
@@ -1147,6 +1153,8 @@ struct net_device_ops {
1147 struct net_device *dev, 1153 struct net_device *dev,
1148 void *priv); 1154 void *priv);
1149 int (*ndo_get_lock_subclass)(struct net_device *dev); 1155 int (*ndo_get_lock_subclass)(struct net_device *dev);
1156 bool (*ndo_gso_check) (struct sk_buff *skb,
1157 struct net_device *dev);
1150}; 1158};
1151 1159
1152/** 1160/**
@@ -3572,10 +3580,12 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
3572 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 3580 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
3573} 3581}
3574 3582
3575static inline bool netif_needs_gso(struct sk_buff *skb, 3583static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
3576 netdev_features_t features) 3584 netdev_features_t features)
3577{ 3585{
3578 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 3586 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
3587 (dev->netdev_ops->ndo_gso_check &&
3588 !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
3579 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 3589 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3580 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 3590 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
3581} 3591}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3ab0749d6875..a59d9343c25b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1203,7 +1203,12 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1203 might_sleep_if(pri & __GFP_WAIT); 1203 might_sleep_if(pri & __GFP_WAIT);
1204 if (skb_cloned(skb)) { 1204 if (skb_cloned(skb)) {
1205 struct sk_buff *nskb = skb_copy(skb, pri); 1205 struct sk_buff *nskb = skb_copy(skb, pri);
1206 kfree_skb(skb); /* Free our shared copy */ 1206
1207 /* Free our shared copy */
1208 if (likely(nskb))
1209 consume_skb(skb);
1210 else
1211 kfree_skb(skb);
1207 skb = nskb; 1212 skb = nskb;
1208 } 1213 }
1209 return skb; 1214 return skb;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 58ad8c6492db..b76559293535 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/phy.h> 19#include <linux/phy.h>
20#include <linux/phy_fixed.h> 20#include <linux/phy_fixed.h>
21#include <linux/ethtool.h>
21 22
22enum dsa_tag_protocol { 23enum dsa_tag_protocol {
23 DSA_TAG_PROTO_NONE = 0, 24 DSA_TAG_PROTO_NONE = 0,
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 55a8d4056cc9..98e5f9578f86 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,7 +146,6 @@ struct ifacaddr6 {
146 struct ifacaddr6 *aca_next; 146 struct ifacaddr6 *aca_next;
147 int aca_users; 147 int aca_users;
148 atomic_t aca_refcnt; 148 atomic_t aca_refcnt;
149 spinlock_t aca_lock;
150 unsigned long aca_cstamp; 149 unsigned long aca_cstamp;
151 unsigned long aca_tstamp; 150 unsigned long aca_tstamp;
152}; 151};
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index ae0613544308..d1d272843b3b 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -80,7 +80,8 @@ static inline struct sock *__inet6_lookup(struct net *net,
80static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, 80static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
81 struct sk_buff *skb, 81 struct sk_buff *skb,
82 const __be16 sport, 82 const __be16 sport,
83 const __be16 dport) 83 const __be16 dport,
84 int iif)
84{ 85{
85 struct sock *sk = skb_steal_sock(skb); 86 struct sock *sk = skb_steal_sock(skb);
86 87
@@ -90,7 +91,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
90 return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, 91 return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
91 &ipv6_hdr(skb)->saddr, sport, 92 &ipv6_hdr(skb)->saddr, sport,
92 &ipv6_hdr(skb)->daddr, ntohs(dport), 93 &ipv6_hdr(skb)->daddr, ntohs(dport),
93 inet6_iif(skb)); 94 iif);
94} 95}
95 96
96struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, 97struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 6c1076275aaa..7b903e1bdbbb 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -431,7 +431,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
431/** 431/**
432 * nlmsg_put - Add a new netlink message to an skb 432 * nlmsg_put - Add a new netlink message to an skb
433 * @skb: socket buffer to store message in 433 * @skb: socket buffer to store message in
434 * @portid: netlink process id 434 * @portid: netlink PORTID of requesting application
435 * @seq: sequence number of message 435 * @seq: sequence number of message
436 * @type: message type 436 * @type: message type
437 * @payload: length of message payload 437 * @payload: length of message payload
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 9fbd856e6713..856f01cb51dd 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -426,6 +426,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
426 asoc->pmtu_pending = 0; 426 asoc->pmtu_pending = 0;
427} 427}
428 428
429static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
430{
431 return !list_empty(&chunk->list);
432}
433
429/* Walk through a list of TLV parameters. Don't trust the 434/* Walk through a list of TLV parameters. Don't trust the
430 * individual parameter lengths and instead depend on 435 * individual parameter lengths and instead depend on
431 * the chunk length to indicate when to stop. Make sure 436 * the chunk length to indicate when to stop. Make sure
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 7f4eeb340a54..72a31db47ded 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
248 int, __be16); 248 int, __be16);
249struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, 249struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
250 union sctp_addr *addr); 250 union sctp_addr *addr);
251int sctp_verify_asconf(const struct sctp_association *asoc, 251bool sctp_verify_asconf(const struct sctp_association *asoc,
252 struct sctp_paramhdr *param_hdr, void *chunk_end, 252 struct sctp_chunk *chunk, bool addr_param_needed,
253 struct sctp_paramhdr **errp); 253 struct sctp_paramhdr **errp);
254struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, 254struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
255 struct sctp_chunk *asconf); 255 struct sctp_chunk *asconf);
256int sctp_process_asconf_ack(struct sctp_association *asoc, 256int sctp_process_asconf_ack(struct sctp_association *asoc,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 74efeda994b3..c9766f89deba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -468,8 +468,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
468/* From syncookies.c */ 468/* From syncookies.c */
469int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, 469int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
470 u32 cookie); 470 u32 cookie);
471struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 471struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
472 struct ip_options *opt);
473#ifdef CONFIG_SYN_COOKIES 472#ifdef CONFIG_SYN_COOKIES
474 473
475/* Syncookies use a monotonic timer which increments every 60 seconds. 474/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -730,6 +729,15 @@ struct tcp_skb_cb {
730 729
731#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 730#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
732 731
732
733/* This is the variant of inet6_iif() that must be used by TCP,
734 * as TCP moves IP6CB into a different location in skb->cb[]
735 */
736static inline int tcp_v6_iif(const struct sk_buff *skb)
737{
738 return TCP_SKB_CB(skb)->header.h6.iif;
739}
740
733/* Due to TSO, an SKB can be composed of multiple actual 741/* Due to TSO, an SKB can be composed of multiple actual
734 * packets. To keep these tracked properly, we use this. 742 * packets. To keep these tracked properly, we use this.
735 */ 743 */
@@ -1666,4 +1674,24 @@ int tcpv4_offload_init(void);
1666void tcp_v4_init(void); 1674void tcp_v4_init(void);
1667void tcp_init(void); 1675void tcp_init(void);
1668 1676
1677/*
1678 * Save and compile IPv4 options, return a pointer to it
1679 */
1680static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1681{
1682 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1683 struct ip_options_rcu *dopt = NULL;
1684
1685 if (opt->optlen) {
1686 int opt_size = sizeof(*dopt) + opt->optlen;
1687
1688 dopt = kmalloc(opt_size, GFP_ATOMIC);
1689 if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1690 kfree(dopt);
1691 dopt = NULL;
1692 }
1693 }
1694 return dopt;
1695}
1696
1669#endif /* _TCP_H */ 1697#endif /* _TCP_H */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 3cc8e1c2b996..6cad97485bad 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -68,6 +68,7 @@ header-y += binfmts.h
68header-y += blkpg.h 68header-y += blkpg.h
69header-y += blktrace_api.h 69header-y += blktrace_api.h
70header-y += bpf.h 70header-y += bpf.h
71header-y += bpf_common.h
71header-y += bpqether.h 72header-y += bpqether.h
72header-y += bsg.h 73header-y += bsg.h
73header-y += btrfs.h 74header-y += btrfs.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 31b0ac208a52..d18316f9e9c4 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -8,6 +8,7 @@
8#define _UAPI__LINUX_BPF_H__ 8#define _UAPI__LINUX_BPF_H__
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/bpf_common.h>
11 12
12/* Extended instruction set based on top of classic BPF */ 13/* Extended instruction set based on top of classic BPF */
13 14
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
new file mode 100644
index 000000000000..a5c220e0828f
--- /dev/null
+++ b/include/uapi/linux/bpf_common.h
@@ -0,0 +1,55 @@
1#ifndef _UAPI__LINUX_BPF_COMMON_H__
2#define _UAPI__LINUX_BPF_COMMON_H__
3
4/* Instruction classes */
5#define BPF_CLASS(code) ((code) & 0x07)
6#define BPF_LD 0x00
7#define BPF_LDX 0x01
8#define BPF_ST 0x02
9#define BPF_STX 0x03
10#define BPF_ALU 0x04
11#define BPF_JMP 0x05
12#define BPF_RET 0x06
13#define BPF_MISC 0x07
14
15/* ld/ldx fields */
16#define BPF_SIZE(code) ((code) & 0x18)
17#define BPF_W 0x00
18#define BPF_H 0x08
19#define BPF_B 0x10
20#define BPF_MODE(code) ((code) & 0xe0)
21#define BPF_IMM 0x00
22#define BPF_ABS 0x20
23#define BPF_IND 0x40
24#define BPF_MEM 0x60
25#define BPF_LEN 0x80
26#define BPF_MSH 0xa0
27
28/* alu/jmp fields */
29#define BPF_OP(code) ((code) & 0xf0)
30#define BPF_ADD 0x00
31#define BPF_SUB 0x10
32#define BPF_MUL 0x20
33#define BPF_DIV 0x30
34#define BPF_OR 0x40
35#define BPF_AND 0x50
36#define BPF_LSH 0x60
37#define BPF_RSH 0x70
38#define BPF_NEG 0x80
39#define BPF_MOD 0x90
40#define BPF_XOR 0xa0
41
42#define BPF_JA 0x00
43#define BPF_JEQ 0x10
44#define BPF_JGT 0x20
45#define BPF_JGE 0x30
46#define BPF_JSET 0x40
47#define BPF_SRC(code) ((code) & 0x08)
48#define BPF_K 0x00
49#define BPF_X 0x08
50
51#ifndef BPF_MAXINSNS
52#define BPF_MAXINSNS 4096
53#endif
54
55#endif /* _UAPI__LINUX_BPF_COMMON_H__ */
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 253b4d42cf2b..47785d5ecf17 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -7,7 +7,7 @@
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include <linux/types.h> 9#include <linux/types.h>
10 10#include <linux/bpf_common.h>
11 11
12/* 12/*
13 * Current version of the filter code architecture. 13 * Current version of the filter code architecture.
@@ -32,56 +32,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
32 struct sock_filter __user *filter; 32 struct sock_filter __user *filter;
33}; 33};
34 34
35/*
36 * Instruction classes
37 */
38
39#define BPF_CLASS(code) ((code) & 0x07)
40#define BPF_LD 0x00
41#define BPF_LDX 0x01
42#define BPF_ST 0x02
43#define BPF_STX 0x03
44#define BPF_ALU 0x04
45#define BPF_JMP 0x05
46#define BPF_RET 0x06
47#define BPF_MISC 0x07
48
49/* ld/ldx fields */
50#define BPF_SIZE(code) ((code) & 0x18)
51#define BPF_W 0x00
52#define BPF_H 0x08
53#define BPF_B 0x10
54#define BPF_MODE(code) ((code) & 0xe0)
55#define BPF_IMM 0x00
56#define BPF_ABS 0x20
57#define BPF_IND 0x40
58#define BPF_MEM 0x60
59#define BPF_LEN 0x80
60#define BPF_MSH 0xa0
61
62/* alu/jmp fields */
63#define BPF_OP(code) ((code) & 0xf0)
64#define BPF_ADD 0x00
65#define BPF_SUB 0x10
66#define BPF_MUL 0x20
67#define BPF_DIV 0x30
68#define BPF_OR 0x40
69#define BPF_AND 0x50
70#define BPF_LSH 0x60
71#define BPF_RSH 0x70
72#define BPF_NEG 0x80
73#define BPF_MOD 0x90
74#define BPF_XOR 0xa0
75
76#define BPF_JA 0x00
77#define BPF_JEQ 0x10
78#define BPF_JGT 0x20
79#define BPF_JGE 0x30
80#define BPF_JSET 0x40
81#define BPF_SRC(code) ((code) & 0x08)
82#define BPF_K 0x00
83#define BPF_X 0x08
84
85/* ret - BPF_K and BPF_X also apply */ 35/* ret - BPF_K and BPF_X also apply */
86#define BPF_RVAL(code) ((code) & 0x18) 36#define BPF_RVAL(code) ((code) & 0x18)
87#define BPF_A 0x10 37#define BPF_A 0x10
@@ -91,10 +41,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
91#define BPF_TAX 0x00 41#define BPF_TAX 0x00
92#define BPF_TXA 0x80 42#define BPF_TXA 0x80
93 43
94#ifndef BPF_MAXINSNS
95#define BPF_MAXINSNS 4096
96#endif
97
98/* 44/*
99 * Macros for filter block array initializers. 45 * Macros for filter block array initializers.
100 */ 46 */
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index ba02db022900..5cd44f001f64 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -87,13 +87,12 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
87{ 87{
88 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); 88 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
89 89
90 if (!this) { 90 if (!this)
91 pr_warn("Out of memory\n");
92 return NULL; 91 return NULL;
93 } 92
94 caif_assert(offsetof(struct cfusbl, layer) == 0); 93 caif_assert(offsetof(struct cfusbl, layer) == 0);
95 94
96 memset(this, 0, sizeof(struct cflayer)); 95 memset(&this->layer, 0, sizeof(this->layer));
97 this->layer.receive = cfusbl_receive; 96 this->layer.receive = cfusbl_receive;
98 this->layer.transmit = cfusbl_transmit; 97 this->layer.transmit = cfusbl_transmit;
99 this->layer.ctrlcmd = cfusbl_ctrlcmd; 98 this->layer.ctrlcmd = cfusbl_ctrlcmd;
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 8c5d6386319f..510aa5a753f0 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -47,10 +47,10 @@ static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
47 47
48struct cflayer *cfmuxl_create(void) 48struct cflayer *cfmuxl_create(void)
49{ 49{
50 struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); 50 struct cfmuxl *this = kzalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
51
51 if (!this) 52 if (!this)
52 return NULL; 53 return NULL;
53 memset(this, 0, sizeof(*this));
54 this->layer.receive = cfmuxl_receive; 54 this->layer.receive = cfmuxl_receive;
55 this->layer.transmit = cfmuxl_transmit; 55 this->layer.transmit = cfmuxl_transmit;
56 this->layer.ctrlcmd = cfmuxl_ctrlcmd; 56 this->layer.ctrlcmd = cfmuxl_ctrlcmd;
diff --git a/net/core/dev.c b/net/core/dev.c
index 6470716ddba4..b793e3521a36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2675,7 +2675,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
2675 if (skb->encapsulation) 2675 if (skb->encapsulation)
2676 features &= dev->hw_enc_features; 2676 features &= dev->hw_enc_features;
2677 2677
2678 if (netif_needs_gso(skb, features)) { 2678 if (netif_needs_gso(dev, skb, features)) {
2679 struct sk_buff *segs; 2679 struct sk_buff *segs;
2680 2680
2681 segs = skb_gso_segment(skb, features); 2681 segs = skb_gso_segment(skb, features);
diff --git a/net/core/sock.c b/net/core/sock.c
index b4f3ea2fce60..15e0c67b1069 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1718,6 +1718,8 @@ EXPORT_SYMBOL(sock_kmalloc);
1718 */ 1718 */
1719void sock_kfree_s(struct sock *sk, void *mem, int size) 1719void sock_kfree_s(struct sock *sk, void *mem, int size)
1720{ 1720{
1721 if (WARN_ON_ONCE(!mem))
1722 return;
1721 kfree(mem); 1723 kfree(mem);
1722 atomic_sub(size, &sk->sk_omem_alloc); 1724 atomic_sub(size, &sk->sk_omem_alloc);
1723} 1725}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ad2acfe1ca61..6bcaa33cd804 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -757,7 +757,8 @@ static int dccp_v6_rcv(struct sk_buff *skb)
757 /* Step 2: 757 /* Step 2:
758 * Look up flow ID in table and get corresponding socket */ 758 * Look up flow ID in table and get corresponding socket */
759 sk = __inet6_lookup_skb(&dccp_hashinfo, skb, 759 sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
760 dh->dccph_sport, dh->dccph_dport); 760 dh->dccph_sport, dh->dccph_dport,
761 inet6_iif(skb));
761 /* 762 /*
762 * Step 2: 763 * Step 2:
763 * If no socket ... 764 * If no socket ...
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8030489d9cbe..a851e9f14118 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/phy.h> 13#include <linux/phy.h>
14#include <linux/phy_fixed.h>
14#include <linux/of_net.h> 15#include <linux/of_net.h>
15#include <linux/of_mdio.h> 16#include <linux/of_mdio.h>
16#include "dsa_priv.h" 17#include "dsa_priv.h"
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5b6efb3d2308..f99f41bd15b8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -537,7 +537,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
537 return 1; 537 return 1;
538 538
539 attrlen = rtnh_attrlen(rtnh); 539 attrlen = rtnh_attrlen(rtnh);
540 if (attrlen < 0) { 540 if (attrlen > 0) {
541 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 541 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
542 542
543 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 543 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index efa70ad44906..32e78924e246 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -87,6 +87,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
87 if (!pskb_may_pull(skb, len)) 87 if (!pskb_may_pull(skb, len))
88 goto drop; 88 goto drop;
89 89
90 uh = udp_hdr(skb);
91 guehdr = (struct guehdr *)&uh[1];
92
90 if (guehdr->version != 0) 93 if (guehdr->version != 0)
91 goto drop; 94 goto drop;
92 95
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e35b71289156..88e5ef2c7f51 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1535,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1535 struct sk_buff *nskb; 1535 struct sk_buff *nskb;
1536 struct sock *sk; 1536 struct sock *sk;
1537 struct inet_sock *inet; 1537 struct inet_sock *inet;
1538 int err;
1538 1539
1539 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) 1540 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
1540 return; 1541 return;
@@ -1574,8 +1575,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1574 sock_net_set(sk, net); 1575 sock_net_set(sk, net);
1575 __skb_queue_head_init(&sk->sk_write_queue); 1576 __skb_queue_head_init(&sk->sk_write_queue);
1576 sk->sk_sndbuf = sysctl_wmem_default; 1577 sk->sk_sndbuf = sysctl_wmem_default;
1577 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, 1578 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1578 &ipc, &rt, MSG_DONTWAIT); 1579 len, 0, &ipc, &rt, MSG_DONTWAIT);
1580 if (unlikely(err)) {
1581 ip_flush_pending_frames(sk);
1582 goto out;
1583 }
1584
1579 nskb = skb_peek(&sk->sk_write_queue); 1585 nskb = skb_peek(&sk->sk_write_queue);
1580 if (nskb) { 1586 if (nskb) {
1581 if (arg->csumoffset >= 0) 1587 if (arg->csumoffset >= 0)
@@ -1587,7 +1593,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
1587 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); 1593 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
1588 ip_push_pending_frames(sk, &fl4); 1594 ip_push_pending_frames(sk, &fl4);
1589 } 1595 }
1590 1596out:
1591 put_cpu_var(unicast_sock); 1597 put_cpu_var(unicast_sock);
1592 1598
1593 ip_rt_put(rt); 1599 ip_rt_put(rt);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index f4c987bb7e94..88c386cf7d85 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
91 skb_pull_rcsum(skb, hdr_len); 91 skb_pull_rcsum(skb, hdr_len);
92 92
93 if (inner_proto == htons(ETH_P_TEB)) { 93 if (inner_proto == htons(ETH_P_TEB)) {
94 struct ethhdr *eh = (struct ethhdr *)skb->data; 94 struct ethhdr *eh;
95 95
96 if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) 96 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
97 return -ENOMEM; 97 return -ENOMEM;
98 98
99 eh = (struct ethhdr *)skb->data;
99 if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN)) 100 if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
100 skb->protocol = eh->h_proto; 101 skb->protocol = eh->h_proto;
101 else 102 else
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index af660030e3c7..32b98d0207b4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -255,9 +255,9 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
255} 255}
256EXPORT_SYMBOL(cookie_check_timestamp); 256EXPORT_SYMBOL(cookie_check_timestamp);
257 257
258struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 258struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
259 struct ip_options *opt)
260{ 259{
260 struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
261 struct tcp_options_received tcp_opt; 261 struct tcp_options_received tcp_opt;
262 struct inet_request_sock *ireq; 262 struct inet_request_sock *ireq;
263 struct tcp_request_sock *treq; 263 struct tcp_request_sock *treq;
@@ -317,15 +317,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
317 /* We throwed the options of the initial SYN away, so we hope 317 /* We throwed the options of the initial SYN away, so we hope
318 * the ACK carries the same options again (see RFC1122 4.2.3.8) 318 * the ACK carries the same options again (see RFC1122 4.2.3.8)
319 */ 319 */
320 if (opt && opt->optlen) { 320 ireq->opt = tcp_v4_save_options(skb);
321 int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
322
323 ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
324 if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
325 kfree(ireq->opt);
326 ireq->opt = NULL;
327 }
328 }
329 321
330 if (security_inet_conn_request(sk, skb, req)) { 322 if (security_inet_conn_request(sk, skb, req)) {
331 reqsk_free(req); 323 reqsk_free(req);
@@ -344,7 +336,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
344 flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark, 336 flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
345 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, 337 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
346 inet_sk_flowi_flags(sk), 338 inet_sk_flowi_flags(sk),
347 (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr, 339 opt->srr ? opt->faddr : ireq->ir_rmt_addr,
348 ireq->ir_loc_addr, th->source, th->dest); 340 ireq->ir_loc_addr, th->source, th->dest);
349 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 341 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
350 rt = ip_route_output_key(sock_net(sk), &fl4); 342 rt = ip_route_output_key(sock_net(sk), &fl4);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 00a41499d52c..a12b455928e5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -68,6 +68,7 @@
68#include <linux/module.h> 68#include <linux/module.h>
69#include <linux/sysctl.h> 69#include <linux/sysctl.h>
70#include <linux/kernel.h> 70#include <linux/kernel.h>
71#include <linux/prefetch.h>
71#include <net/dst.h> 72#include <net/dst.h>
72#include <net/tcp.h> 73#include <net/tcp.h>
73#include <net/inet_common.h> 74#include <net/inet_common.h>
@@ -3029,6 +3030,21 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3029 return packets_acked; 3030 return packets_acked;
3030} 3031}
3031 3032
3033static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3034 u32 prior_snd_una)
3035{
3036 const struct skb_shared_info *shinfo;
3037
3038 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3039 if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)))
3040 return;
3041
3042 shinfo = skb_shinfo(skb);
3043 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
3044 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
3045 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3046}
3047
3032/* Remove acknowledged frames from the retransmission queue. If our packet 3048/* Remove acknowledged frames from the retransmission queue. If our packet
3033 * is before the ack sequence we can discard it as it's confirmed to have 3049 * is before the ack sequence we can discard it as it's confirmed to have
3034 * arrived at the other end. 3050 * arrived at the other end.
@@ -3052,14 +3068,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3052 first_ackt.v64 = 0; 3068 first_ackt.v64 = 0;
3053 3069
3054 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 3070 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
3055 struct skb_shared_info *shinfo = skb_shinfo(skb);
3056 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3071 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3057 u8 sacked = scb->sacked; 3072 u8 sacked = scb->sacked;
3058 u32 acked_pcount; 3073 u32 acked_pcount;
3059 3074
3060 if (unlikely(shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3075 tcp_ack_tstamp(sk, skb, prior_snd_una);
3061 between(shinfo->tskey, prior_snd_una, tp->snd_una - 1))
3062 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3063 3076
3064 /* Determine how many packets and what bytes were acked, tso and else */ 3077 /* Determine how many packets and what bytes were acked, tso and else */
3065 if (after(scb->end_seq, tp->snd_una)) { 3078 if (after(scb->end_seq, tp->snd_una)) {
@@ -3073,10 +3086,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3073 3086
3074 fully_acked = false; 3087 fully_acked = false;
3075 } else { 3088 } else {
3089 /* Speedup tcp_unlink_write_queue() and next loop */
3090 prefetchw(skb->next);
3076 acked_pcount = tcp_skb_pcount(skb); 3091 acked_pcount = tcp_skb_pcount(skb);
3077 } 3092 }
3078 3093
3079 if (sacked & TCPCB_RETRANS) { 3094 if (unlikely(sacked & TCPCB_RETRANS)) {
3080 if (sacked & TCPCB_SACKED_RETRANS) 3095 if (sacked & TCPCB_SACKED_RETRANS)
3081 tp->retrans_out -= acked_pcount; 3096 tp->retrans_out -= acked_pcount;
3082 flag |= FLAG_RETRANS_DATA_ACKED; 3097 flag |= FLAG_RETRANS_DATA_ACKED;
@@ -3107,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3107 * connection startup slow start one packet too 3122 * connection startup slow start one packet too
3108 * quickly. This is severely frowned upon behavior. 3123 * quickly. This is severely frowned upon behavior.
3109 */ 3124 */
3110 if (!(scb->tcp_flags & TCPHDR_SYN)) { 3125 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3111 flag |= FLAG_DATA_ACKED; 3126 flag |= FLAG_DATA_ACKED;
3112 } else { 3127 } else {
3113 flag |= FLAG_SYN_ACKED; 3128 flag |= FLAG_SYN_ACKED;
@@ -3119,9 +3134,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3119 3134
3120 tcp_unlink_write_queue(skb, sk); 3135 tcp_unlink_write_queue(skb, sk);
3121 sk_wmem_free_skb(sk, skb); 3136 sk_wmem_free_skb(sk, skb);
3122 if (skb == tp->retransmit_skb_hint) 3137 if (unlikely(skb == tp->retransmit_skb_hint))
3123 tp->retransmit_skb_hint = NULL; 3138 tp->retransmit_skb_hint = NULL;
3124 if (skb == tp->lost_skb_hint) 3139 if (unlikely(skb == tp->lost_skb_hint))
3125 tp->lost_skb_hint = NULL; 3140 tp->lost_skb_hint = NULL;
3126 } 3141 }
3127 3142
@@ -3132,7 +3147,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3132 flag |= FLAG_SACK_RENEGING; 3147 flag |= FLAG_SACK_RENEGING;
3133 3148
3134 skb_mstamp_get(&now); 3149 skb_mstamp_get(&now);
3135 if (first_ackt.v64) { 3150 if (likely(first_ackt.v64)) {
3136 seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt); 3151 seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
3137 ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt); 3152 ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
3138 } 3153 }
@@ -3394,6 +3409,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3394 int acked = 0; /* Number of packets newly acked */ 3409 int acked = 0; /* Number of packets newly acked */
3395 long sack_rtt_us = -1L; 3410 long sack_rtt_us = -1L;
3396 3411
3412 /* We very likely will need to access write queue head. */
3413 prefetchw(sk->sk_write_queue.next);
3414
3397 /* If the ack is older than previous acks 3415 /* If the ack is older than previous acks
3398 * then we can probably ignore it. 3416 * then we can probably ignore it.
3399 */ 3417 */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 552e87e3c269..94d1a7757ff7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -880,26 +880,6 @@ bool tcp_syn_flood_action(struct sock *sk,
880} 880}
881EXPORT_SYMBOL(tcp_syn_flood_action); 881EXPORT_SYMBOL(tcp_syn_flood_action);
882 882
883/*
884 * Save and compile IPv4 options into the request_sock if needed.
885 */
886static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
887{
888 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
889 struct ip_options_rcu *dopt = NULL;
890
891 if (opt && opt->optlen) {
892 int opt_size = sizeof(*dopt) + opt->optlen;
893
894 dopt = kmalloc(opt_size, GFP_ATOMIC);
895 if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
896 kfree(dopt);
897 dopt = NULL;
898 }
899 }
900 return dopt;
901}
902
903#ifdef CONFIG_TCP_MD5SIG 883#ifdef CONFIG_TCP_MD5SIG
904/* 884/*
905 * RFC2385 MD5 checksumming requires a mapping of 885 * RFC2385 MD5 checksumming requires a mapping of
@@ -1428,7 +1408,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1428 1408
1429#ifdef CONFIG_SYN_COOKIES 1409#ifdef CONFIG_SYN_COOKIES
1430 if (!th->syn) 1410 if (!th->syn)
1431 sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt); 1411 sk = cookie_v4_check(sk, skb);
1432#endif 1412#endif
1433 return sk; 1413 return sk;
1434} 1414}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index becd98ce9a1c..3af21296d967 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -839,26 +839,38 @@ void tcp_wfree(struct sk_buff *skb)
839{ 839{
840 struct sock *sk = skb->sk; 840 struct sock *sk = skb->sk;
841 struct tcp_sock *tp = tcp_sk(sk); 841 struct tcp_sock *tp = tcp_sk(sk);
842 int wmem;
843
844 /* Keep one reference on sk_wmem_alloc.
845 * Will be released by sk_free() from here or tcp_tasklet_func()
846 */
847 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
848
849 /* If this softirq is serviced by ksoftirqd, we are likely under stress.
850 * Wait until our queues (qdisc + devices) are drained.
851 * This gives :
852 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
853 * - chance for incoming ACK (processed by another cpu maybe)
854 * to migrate this flow (skb->ooo_okay will be eventually set)
855 */
856 if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
857 goto out;
842 858
843 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && 859 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
844 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { 860 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
845 unsigned long flags; 861 unsigned long flags;
846 struct tsq_tasklet *tsq; 862 struct tsq_tasklet *tsq;
847 863
848 /* Keep a ref on socket.
849 * This last ref will be released in tcp_tasklet_func()
850 */
851 atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
852
853 /* queue this socket to tasklet queue */ 864 /* queue this socket to tasklet queue */
854 local_irq_save(flags); 865 local_irq_save(flags);
855 tsq = this_cpu_ptr(&tsq_tasklet); 866 tsq = this_cpu_ptr(&tsq_tasklet);
856 list_add(&tp->tsq_node, &tsq->head); 867 list_add(&tp->tsq_node, &tsq->head);
857 tasklet_schedule(&tsq->tasklet); 868 tasklet_schedule(&tsq->tasklet);
858 local_irq_restore(flags); 869 local_irq_restore(flags);
859 } else { 870 return;
860 sock_wfree(skb);
861 } 871 }
872out:
873 sk_free(sk);
862} 874}
863 875
864/* This routine actually transmits TCP packets queued in by 876/* This routine actually transmits TCP packets queued in by
@@ -914,9 +926,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
914 tcp_ca_event(sk, CA_EVENT_TX_START); 926 tcp_ca_event(sk, CA_EVENT_TX_START);
915 927
916 /* if no packet is in qdisc/device queue, then allow XPS to select 928 /* if no packet is in qdisc/device queue, then allow XPS to select
917 * another queue. 929 * another queue. We can be called from tcp_tsq_handler()
930 * which holds one reference to sk_wmem_alloc.
931 *
932 * TODO: Ideally, in-flight pure ACK packets should not matter here.
933 * One way to get this would be to set skb->truesize = 2 on them.
918 */ 934 */
919 skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; 935 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
920 936
921 skb_push(skb, tcp_header_size); 937 skb_push(skb, tcp_header_size);
922 skb_reset_transport_header(skb); 938 skb_reset_transport_header(skb);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f5e319a8d4e2..baf2742d1ec4 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -235,7 +235,6 @@ static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
235 /* aca_tstamp should be updated upon changes */ 235 /* aca_tstamp should be updated upon changes */
236 aca->aca_cstamp = aca->aca_tstamp = jiffies; 236 aca->aca_cstamp = aca->aca_tstamp = jiffies;
237 atomic_set(&aca->aca_refcnt, 1); 237 atomic_set(&aca->aca_refcnt, 1);
238 spin_lock_init(&aca->aca_lock);
239 238
240 return aca; 239 return aca;
241} 240}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e25b633266c3..2f25cb6347ca 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -214,7 +214,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
214 /* So that link locals have meaning */ 214 /* So that link locals have meaning */
215 if (!sk->sk_bound_dev_if && 215 if (!sk->sk_bound_dev_if &&
216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
217 ireq->ir_iif = inet6_iif(skb); 217 ireq->ir_iif = tcp_v6_iif(skb);
218 218
219 ireq->ir_mark = inet_request_mark(sk, skb); 219 ireq->ir_mark = inet_request_mark(sk, skb);
220 220
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cf2e45ab2fa4..831495529b82 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -424,6 +424,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
424 if (sock_owned_by_user(sk)) 424 if (sock_owned_by_user(sk))
425 goto out; 425 goto out;
426 426
427 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
427 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr, 428 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
428 &hdr->saddr, inet6_iif(skb)); 429 &hdr->saddr, inet6_iif(skb));
429 if (!req) 430 if (!req)
@@ -738,7 +739,7 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
738 /* So that link locals have meaning */ 739 /* So that link locals have meaning */
739 if (!sk->sk_bound_dev_if && 740 if (!sk->sk_bound_dev_if &&
740 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 741 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
741 ireq->ir_iif = inet6_iif(skb); 742 ireq->ir_iif = tcp_v6_iif(skb);
742 743
743 if (!TCP_SKB_CB(skb)->tcp_tw_isn && 744 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
744 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || 745 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
@@ -860,7 +861,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
860 861
861 fl6.flowi6_proto = IPPROTO_TCP; 862 fl6.flowi6_proto = IPPROTO_TCP;
862 if (rt6_need_strict(&fl6.daddr) && !oif) 863 if (rt6_need_strict(&fl6.daddr) && !oif)
863 fl6.flowi6_oif = inet6_iif(skb); 864 fl6.flowi6_oif = tcp_v6_iif(skb);
864 else 865 else
865 fl6.flowi6_oif = oif; 866 fl6.flowi6_oif = oif;
866 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 867 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
@@ -918,7 +919,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
918 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), 919 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
919 &tcp_hashinfo, &ipv6h->saddr, 920 &tcp_hashinfo, &ipv6h->saddr,
920 th->source, &ipv6h->daddr, 921 th->source, &ipv6h->daddr,
921 ntohs(th->source), inet6_iif(skb)); 922 ntohs(th->source), tcp_v6_iif(skb));
922 if (!sk1) 923 if (!sk1)
923 return; 924 return;
924 925
@@ -1000,13 +1001,14 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1000 /* Find possible connection requests. */ 1001 /* Find possible connection requests. */
1001 req = inet6_csk_search_req(sk, &prev, th->source, 1002 req = inet6_csk_search_req(sk, &prev, th->source,
1002 &ipv6_hdr(skb)->saddr, 1003 &ipv6_hdr(skb)->saddr,
1003 &ipv6_hdr(skb)->daddr, inet6_iif(skb)); 1004 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
1004 if (req) 1005 if (req)
1005 return tcp_check_req(sk, skb, req, prev, false); 1006 return tcp_check_req(sk, skb, req, prev, false);
1006 1007
1007 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, 1008 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1008 &ipv6_hdr(skb)->saddr, th->source, 1009 &ipv6_hdr(skb)->saddr, th->source,
1009 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); 1010 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1011 tcp_v6_iif(skb));
1010 1012
1011 if (nsk) { 1013 if (nsk) {
1012 if (nsk->sk_state != TCP_TIME_WAIT) { 1014 if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1090,7 +1092,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1090 newnp->ipv6_fl_list = NULL; 1092 newnp->ipv6_fl_list = NULL;
1091 newnp->pktoptions = NULL; 1093 newnp->pktoptions = NULL;
1092 newnp->opt = NULL; 1094 newnp->opt = NULL;
1093 newnp->mcast_oif = inet6_iif(skb); 1095 newnp->mcast_oif = tcp_v6_iif(skb);
1094 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1096 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1095 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); 1097 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1096 if (np->repflow) 1098 if (np->repflow)
@@ -1174,7 +1176,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1174 skb_set_owner_r(newnp->pktoptions, newsk); 1176 skb_set_owner_r(newnp->pktoptions, newsk);
1175 } 1177 }
1176 newnp->opt = NULL; 1178 newnp->opt = NULL;
1177 newnp->mcast_oif = inet6_iif(skb); 1179 newnp->mcast_oif = tcp_v6_iif(skb);
1178 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1180 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1179 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); 1181 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1180 if (np->repflow) 1182 if (np->repflow)
@@ -1360,7 +1362,7 @@ ipv6_pktoptions:
1360 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && 1362 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1361 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 1363 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1362 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) 1364 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1363 np->mcast_oif = inet6_iif(opt_skb); 1365 np->mcast_oif = tcp_v6_iif(opt_skb);
1364 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1366 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1365 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1367 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1366 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) 1368 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
@@ -1427,7 +1429,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1427 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); 1429 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1428 TCP_SKB_CB(skb)->sacked = 0; 1430 TCP_SKB_CB(skb)->sacked = 0;
1429 1431
1430 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); 1432 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1433 tcp_v6_iif(skb));
1431 if (!sk) 1434 if (!sk)
1432 goto no_tcp_socket; 1435 goto no_tcp_socket;
1433 1436
@@ -1514,7 +1517,7 @@ do_time_wait:
1514 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, 1517 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1515 &ipv6_hdr(skb)->saddr, th->source, 1518 &ipv6_hdr(skb)->saddr, th->source,
1516 &ipv6_hdr(skb)->daddr, 1519 &ipv6_hdr(skb)->daddr,
1517 ntohs(th->dest), inet6_iif(skb)); 1520 ntohs(th->dest), tcp_v6_iif(skb));
1518 if (sk2 != NULL) { 1521 if (sk2 != NULL) {
1519 struct inet_timewait_sock *tw = inet_twsk(sk); 1522 struct inet_timewait_sock *tw = inet_twsk(sk);
1520 inet_twsk_deschedule(tw, &tcp_death_row); 1523 inet_twsk_deschedule(tw, &tcp_death_row);
@@ -1553,6 +1556,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
1553 if (th->doff < sizeof(struct tcphdr) / 4) 1556 if (th->doff < sizeof(struct tcphdr) / 4)
1554 return; 1557 return;
1555 1558
1559 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1556 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, 1560 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1557 &hdr->saddr, th->source, 1561 &hdr->saddr, th->source,
1558 &hdr->daddr, ntohs(th->dest), 1562 &hdr->daddr, ntohs(th->dest),
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71cf1bffea06..1b06a1fcf3e8 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -30,7 +30,7 @@
30#include <linux/skbuff.h> 30#include <linux/skbuff.h>
31#include <net/net_namespace.h> 31#include <net/net_namespace.h>
32#include <net/sock.h> 32#include <net/sock.h>
33#include <asm/uaccess.h> 33#include <linux/uaccess.h>
34#include <linux/fcntl.h> 34#include <linux/fcntl.h>
35#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 35#include <linux/termios.h> /* For TIOCINQ/OUTQ */
36#include <linux/mm.h> 36#include <linux/mm.h>
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 743262becd6e..6ae063cebf7d 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -20,8 +20,8 @@
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/if_ether.h> /* For the statistics structure. */ 21#include <linux/if_ether.h> /* For the statistics structure. */
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/uaccess.h>
23 24
24#include <asm/uaccess.h>
25#include <asm/io.h> 25#include <asm/io.h>
26 26
27#include <linux/inet.h> 27#include <linux/inet.h>
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index c3073a2ef634..80dbd0beb516 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -23,7 +23,7 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp_states.h> 25#include <net/tcp_states.h>
26#include <asm/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/fcntl.h> 27#include <linux/fcntl.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 0b4bcb2bf38f..00fbf1419ec6 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -22,7 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/fcntl.h> 26#include <linux/fcntl.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b976d5eff2de..96b64d2f6dbf 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -25,7 +25,7 @@
25#include <linux/if_arp.h> 25#include <linux/if_arp.h>
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <net/sock.h> 27#include <net/sock.h>
28#include <asm/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/fcntl.h> 29#include <linux/fcntl.h>
30#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 30#include <linux/termios.h> /* For TIOCINQ/OUTQ */
31#include <linux/mm.h> 31#include <linux/mm.h>
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index ca40e2298f5a..029c8bb90f4c 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -22,7 +22,7 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/fcntl.h> 26#include <linux/fcntl.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index ff2c1b142f57..94d05806a9a2 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -23,7 +23,7 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp_states.h> 25#include <net/tcp_states.h>
26#include <asm/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/fcntl.h> 27#include <linux/fcntl.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 62db02ba36bc..2b78789ea7c5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -274,6 +274,8 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
274 key->ip.frag = OVS_FRAG_TYPE_LATER; 274 key->ip.frag = OVS_FRAG_TYPE_LATER;
275 else 275 else
276 key->ip.frag = OVS_FRAG_TYPE_FIRST; 276 key->ip.frag = OVS_FRAG_TYPE_FIRST;
277 } else {
278 key->ip.frag = OVS_FRAG_TYPE_NONE;
277 } 279 }
278 280
279 nh_len = payload_ofs - nh_ofs; 281 nh_len = payload_ofs - nh_ofs;
@@ -358,6 +360,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
358 */ 360 */
359 key->tp.src = htons(icmp->icmp6_type); 361 key->tp.src = htons(icmp->icmp6_type);
360 key->tp.dst = htons(icmp->icmp6_code); 362 key->tp.dst = htons(icmp->icmp6_code);
363 memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
361 364
362 if (icmp->icmp6_code == 0 && 365 if (icmp->icmp6_code == 0 &&
363 (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || 366 (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -557,10 +560,11 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
557 } else if (key->eth.type == htons(ETH_P_ARP) || 560 } else if (key->eth.type == htons(ETH_P_ARP) ||
558 key->eth.type == htons(ETH_P_RARP)) { 561 key->eth.type == htons(ETH_P_RARP)) {
559 struct arp_eth_header *arp; 562 struct arp_eth_header *arp;
563 bool arp_available = arphdr_ok(skb);
560 564
561 arp = (struct arp_eth_header *)skb_network_header(skb); 565 arp = (struct arp_eth_header *)skb_network_header(skb);
562 566
563 if (arphdr_ok(skb) && 567 if (arp_available &&
564 arp->ar_hrd == htons(ARPHRD_ETHER) && 568 arp->ar_hrd == htons(ARPHRD_ETHER) &&
565 arp->ar_pro == htons(ETH_P_IP) && 569 arp->ar_pro == htons(ETH_P_IP) &&
566 arp->ar_hln == ETH_ALEN && 570 arp->ar_hln == ETH_ALEN &&
@@ -673,9 +677,6 @@ int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
673 key->ovs_flow_hash = 0; 677 key->ovs_flow_hash = 0;
674 key->recirc_id = 0; 678 key->recirc_id = 0;
675 679
676 /* Flags are always used as part of stats */
677 key->tp.flags = 0;
678
679 return key_extract(skb, key); 680 return key_extract(skb, key);
680} 681}
681 682
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 368f23307911..939bcb32100f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -103,10 +103,19 @@ static void update_range__(struct sw_flow_match *match,
103 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ 103 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
104 value_p, len, is_mask) 104 value_p, len, is_mask)
105 105
106static u16 range_n_bytes(const struct sw_flow_key_range *range) 106#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
107{ 107 do { \
108 return range->end - range->start; 108 update_range__(match, offsetof(struct sw_flow_key, field), \
109} 109 sizeof((match)->key->field), is_mask); \
110 if (is_mask) { \
111 if ((match)->mask) \
112 memset((u8 *)&(match)->mask->key.field, value,\
113 sizeof((match)->mask->key.field)); \
114 } else { \
115 memset((u8 *)&(match)->key->field, value, \
116 sizeof((match)->key->field)); \
117 } \
118 } while (0)
110 119
111static bool match_validate(const struct sw_flow_match *match, 120static bool match_validate(const struct sw_flow_match *match,
112 u64 key_attrs, u64 mask_attrs) 121 u64 key_attrs, u64 mask_attrs)
@@ -809,13 +818,26 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
809 return 0; 818 return 0;
810} 819}
811 820
812static void sw_flow_mask_set(struct sw_flow_mask *mask, 821static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
813 struct sw_flow_key_range *range, u8 val)
814{ 822{
815 u8 *m = (u8 *)&mask->key + range->start; 823 struct nlattr *nla;
824 int rem;
825
826 /* The nlattr stream should already have been validated */
827 nla_for_each_nested(nla, attr, rem) {
828 /* We assume that ovs_key_lens[type] == -1 means that type is a
829 * nested attribute
830 */
831 if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
832 nlattr_set(nla, val, false);
833 else
834 memset(nla_data(nla), val, nla_len(nla));
835 }
836}
816 837
817 mask->range = *range; 838static void mask_set_nlattr(struct nlattr *attr, u8 val)
818 memset(m, val, range_n_bytes(range)); 839{
840 nlattr_set(attr, val, true);
819} 841}
820 842
821/** 843/**
@@ -836,6 +858,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
836{ 858{
837 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; 859 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
838 const struct nlattr *encap; 860 const struct nlattr *encap;
861 struct nlattr *newmask = NULL;
839 u64 key_attrs = 0; 862 u64 key_attrs = 0;
840 u64 mask_attrs = 0; 863 u64 mask_attrs = 0;
841 bool encap_valid = false; 864 bool encap_valid = false;
@@ -882,18 +905,44 @@ int ovs_nla_get_match(struct sw_flow_match *match,
882 if (err) 905 if (err)
883 return err; 906 return err;
884 907
908 if (match->mask && !mask) {
909 /* Create an exact match mask. We need to set to 0xff all the
910 * 'match->mask' fields that have been touched in 'match->key'.
911 * We cannot simply memset 'match->mask', because padding bytes
912 * and fields not specified in 'match->key' should be left to 0.
913 * Instead, we use a stream of netlink attributes, copied from
914 * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
915 * of filling 'match->mask' appropriately.
916 */
917 newmask = kmemdup(key, nla_total_size(nla_len(key)),
918 GFP_KERNEL);
919 if (!newmask)
920 return -ENOMEM;
921
922 mask_set_nlattr(newmask, 0xff);
923
924 /* The userspace does not send tunnel attributes that are 0,
925 * but we should not wildcard them nonetheless.
926 */
927 if (match->key->tun_key.ipv4_dst)
928 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);
929
930 mask = newmask;
931 }
932
885 if (mask) { 933 if (mask) {
886 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); 934 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
887 if (err) 935 if (err)
888 return err; 936 goto free_newmask;
889 937
890 if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { 938 if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
891 __be16 eth_type = 0; 939 __be16 eth_type = 0;
892 __be16 tci = 0; 940 __be16 tci = 0;
893 941
894 if (!encap_valid) { 942 if (!encap_valid) {
895 OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); 943 OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
896 return -EINVAL; 944 err = -EINVAL;
945 goto free_newmask;
897 } 946 }
898 947
899 mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); 948 mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
@@ -904,10 +953,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
904 mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); 953 mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
905 encap = a[OVS_KEY_ATTR_ENCAP]; 954 encap = a[OVS_KEY_ATTR_ENCAP];
906 err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); 955 err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
956 if (err)
957 goto free_newmask;
907 } else { 958 } else {
908 OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", 959 OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
909 ntohs(eth_type)); 960 ntohs(eth_type));
910 return -EINVAL; 961 err = -EINVAL;
962 goto free_newmask;
911 } 963 }
912 964
913 if (a[OVS_KEY_ATTR_VLAN]) 965 if (a[OVS_KEY_ATTR_VLAN])
@@ -915,23 +967,22 @@ int ovs_nla_get_match(struct sw_flow_match *match,
915 967
916 if (!(tci & htons(VLAN_TAG_PRESENT))) { 968 if (!(tci & htons(VLAN_TAG_PRESENT))) {
917 OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); 969 OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
918 return -EINVAL; 970 err = -EINVAL;
971 goto free_newmask;
919 } 972 }
920 } 973 }
921 974
922 err = ovs_key_from_nlattrs(match, mask_attrs, a, true); 975 err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
923 if (err) 976 if (err)
924 return err; 977 goto free_newmask;
925 } else {
926 /* Populate exact match flow's key mask. */
927 if (match->mask)
928 sw_flow_mask_set(match->mask, &match->range, 0xff);
929 } 978 }
930 979
931 if (!match_validate(match, key_attrs, mask_attrs)) 980 if (!match_validate(match, key_attrs, mask_attrs))
932 return -EINVAL; 981 err = -EINVAL;
933 982
934 return 0; 983free_newmask:
984 kfree(newmask);
985 return err;
935} 986}
936 987
937/** 988/**
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 910b3ef2c0d5..106a9d80b663 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -30,7 +30,7 @@
30 30
31/** 31/**
32 * struct geneve_port - Keeps track of open UDP ports 32 * struct geneve_port - Keeps track of open UDP ports
33 * @sock: The socket created for this port number. 33 * @gs: The socket created for this port number.
34 * @name: vport name. 34 * @name: vport name.
35 */ 35 */
36struct geneve_port { 36struct geneve_port {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 53001b020ca7..6015802ebe6f 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -408,13 +408,13 @@ int ovs_vport_get_upcall_portids(const struct vport *vport,
408 * 408 *
409 * Returns the portid of the target socket. Must be called with rcu_read_lock. 409 * Returns the portid of the target socket. Must be called with rcu_read_lock.
410 */ 410 */
411u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb) 411u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
412{ 412{
413 struct vport_portids *ids; 413 struct vport_portids *ids;
414 u32 ids_index; 414 u32 ids_index;
415 u32 hash; 415 u32 hash;
416 416
417 ids = rcu_dereference(p->upcall_portids); 417 ids = rcu_dereference(vport->upcall_portids);
418 418
419 if (ids->n_ids == 1 && ids->ids[0] == 0) 419 if (ids->n_ids == 1 && ids->ids[0] == 0)
420 return 0; 420 return 0;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4e37c1cbe8b2..40084d843e9f 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -564,12 +564,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
564 564
565 if (rs->rs_bound_addr == 0) { 565 if (rs->rs_bound_addr == 0) {
566 ret = -ENOTCONN; /* XXX not a great errno */ 566 ret = -ENOTCONN; /* XXX not a great errno */
567 goto out; 567 goto out_ret;
568 } 568 }
569 569
570 if (args->nr_local > UIO_MAXIOV) { 570 if (args->nr_local > UIO_MAXIOV) {
571 ret = -EMSGSIZE; 571 ret = -EMSGSIZE;
572 goto out; 572 goto out_ret;
573 } 573 }
574 574
575 /* Check whether to allocate the iovec area */ 575 /* Check whether to allocate the iovec area */
@@ -578,7 +578,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
578 iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); 578 iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
579 if (!iovs) { 579 if (!iovs) {
580 ret = -ENOMEM; 580 ret = -ENOMEM;
581 goto out; 581 goto out_ret;
582 } 582 }
583 } 583 }
584 584
@@ -696,6 +696,7 @@ out:
696 if (iovs != iovstack) 696 if (iovs != iovstack)
697 sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); 697 sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
698 kfree(pages); 698 kfree(pages);
699out_ret:
699 if (ret) 700 if (ret)
700 rds_rdma_free_op(op); 701 rds_rdma_free_op(op);
701 else 702 else
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a88b8524846e..f791edd64d6c 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1668,6 +1668,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1668 * ack chunk whose serial number matches that of the request. 1668 * ack chunk whose serial number matches that of the request.
1669 */ 1669 */
1670 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { 1670 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1671 if (sctp_chunk_pending(ack))
1672 continue;
1671 if (ack->subh.addip_hdr->serial == serial) { 1673 if (ack->subh.addip_hdr->serial == serial) {
1672 sctp_chunk_hold(ack); 1674 sctp_chunk_hold(ack);
1673 return ack; 1675 return ack;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 4de12afa13d4..7e8a16c77039 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
140 } else { 140 } else {
141 /* Nothing to do. Next chunk in the packet, please. */ 141 /* Nothing to do. Next chunk in the packet, please. */
142 ch = (sctp_chunkhdr_t *) chunk->chunk_end; 142 ch = (sctp_chunkhdr_t *) chunk->chunk_end;
143
144 /* Force chunk->skb->data to chunk->chunk_end. */ 143 /* Force chunk->skb->data to chunk->chunk_end. */
145 skb_pull(chunk->skb, 144 skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
146 chunk->chunk_end - chunk->skb->data); 145 /* We are guaranteed to pull a SCTP header. */
147
148 /* Verify that we have at least chunk headers
149 * worth of buffer left.
150 */
151 if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
152 sctp_chunk_free(chunk);
153 chunk = queue->in_progress = NULL;
154 }
155 } 146 }
156 } 147 }
157 148
@@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
187 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); 178 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
188 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 179 chunk->subh.v = NULL; /* Subheader is no longer valid. */
189 180
190 if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { 181 if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
182 skb_tail_pointer(chunk->skb)) {
191 /* This is not a singleton */ 183 /* This is not a singleton */
192 chunk->singleton = 0; 184 chunk->singleton = 0;
193 } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { 185 } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
194 /* RFC 2960, Section 6.10 Bundling 186 /* Discard inside state machine. */
195 * 187 chunk->pdiscard = 1;
196 * Partial chunks MUST NOT be placed in an SCTP packet. 188 chunk->chunk_end = skb_tail_pointer(chunk->skb);
197 * If the receiver detects a partial chunk, it MUST drop
198 * the chunk.
199 *
200 * Since the end of the chunk is past the end of our buffer
201 * (which contains the whole packet, we can freely discard
202 * the whole packet.
203 */
204 sctp_chunk_free(chunk);
205 chunk = queue->in_progress = NULL;
206
207 return NULL;
208 } else { 189 } else {
209 /* We are at the end of the packet, so mark the chunk 190 /* We are at the end of the packet, so mark the chunk
210 * in case we need to send a SACK. 191 * in case we need to send a SACK.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ae0e616a7ca5..ab734be8cb20 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3110,50 +3110,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3110 return SCTP_ERROR_NO_ERROR; 3110 return SCTP_ERROR_NO_ERROR;
3111} 3111}
3112 3112
3113/* Verify the ASCONF packet before we process it. */ 3113/* Verify the ASCONF packet before we process it. */
3114int sctp_verify_asconf(const struct sctp_association *asoc, 3114bool sctp_verify_asconf(const struct sctp_association *asoc,
3115 struct sctp_paramhdr *param_hdr, void *chunk_end, 3115 struct sctp_chunk *chunk, bool addr_param_needed,
3116 struct sctp_paramhdr **errp) { 3116 struct sctp_paramhdr **errp)
3117 sctp_addip_param_t *asconf_param; 3117{
3118 sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
3118 union sctp_params param; 3119 union sctp_params param;
3119 int length, plen; 3120 bool addr_param_seen = false;
3120
3121 param.v = (sctp_paramhdr_t *) param_hdr;
3122 while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
3123 length = ntohs(param.p->length);
3124 *errp = param.p;
3125 3121
3126 if (param.v > chunk_end - length || 3122 sctp_walk_params(param, addip, addip_hdr.params) {
3127 length < sizeof(sctp_paramhdr_t)) 3123 size_t length = ntohs(param.p->length);
3128 return 0;
3129 3124
3125 *errp = param.p;
3130 switch (param.p->type) { 3126 switch (param.p->type) {
3127 case SCTP_PARAM_ERR_CAUSE:
3128 break;
3129 case SCTP_PARAM_IPV4_ADDRESS:
3130 if (length != sizeof(sctp_ipv4addr_param_t))
3131 return false;
3132 addr_param_seen = true;
3133 break;
3134 case SCTP_PARAM_IPV6_ADDRESS:
3135 if (length != sizeof(sctp_ipv6addr_param_t))
3136 return false;
3137 addr_param_seen = true;
3138 break;
3131 case SCTP_PARAM_ADD_IP: 3139 case SCTP_PARAM_ADD_IP:
3132 case SCTP_PARAM_DEL_IP: 3140 case SCTP_PARAM_DEL_IP:
3133 case SCTP_PARAM_SET_PRIMARY: 3141 case SCTP_PARAM_SET_PRIMARY:
3134 asconf_param = (sctp_addip_param_t *)param.v; 3142 /* In ASCONF chunks, these need to be first. */
3135 plen = ntohs(asconf_param->param_hdr.length); 3143 if (addr_param_needed && !addr_param_seen)
3136 if (plen < sizeof(sctp_addip_param_t) + 3144 return false;
3137 sizeof(sctp_paramhdr_t)) 3145 length = ntohs(param.addip->param_hdr.length);
3138 return 0; 3146 if (length < sizeof(sctp_addip_param_t) +
3147 sizeof(sctp_paramhdr_t))
3148 return false;
3139 break; 3149 break;
3140 case SCTP_PARAM_SUCCESS_REPORT: 3150 case SCTP_PARAM_SUCCESS_REPORT:
3141 case SCTP_PARAM_ADAPTATION_LAYER_IND: 3151 case SCTP_PARAM_ADAPTATION_LAYER_IND:
3142 if (length != sizeof(sctp_addip_param_t)) 3152 if (length != sizeof(sctp_addip_param_t))
3143 return 0; 3153 return false;
3144
3145 break; 3154 break;
3146 default: 3155 default:
3147 break; 3156 /* This is unkown to us, reject! */
3157 return false;
3148 } 3158 }
3149
3150 param.v += WORD_ROUND(length);
3151 } 3159 }
3152 3160
3153 if (param.v != chunk_end) 3161 /* Remaining sanity checks. */
3154 return 0; 3162 if (addr_param_needed && !addr_param_seen)
3163 return false;
3164 if (!addr_param_needed && addr_param_seen)
3165 return false;
3166 if (param.v != chunk->chunk_end)
3167 return false;
3155 3168
3156 return 1; 3169 return true;
3157} 3170}
3158 3171
3159/* Process an incoming ASCONF chunk with the next expected serial no. and 3172/* Process an incoming ASCONF chunk with the next expected serial no. and
@@ -3162,16 +3175,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
3162struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, 3175struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3163 struct sctp_chunk *asconf) 3176 struct sctp_chunk *asconf)
3164{ 3177{
3178 sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
3179 bool all_param_pass = true;
3180 union sctp_params param;
3165 sctp_addiphdr_t *hdr; 3181 sctp_addiphdr_t *hdr;
3166 union sctp_addr_param *addr_param; 3182 union sctp_addr_param *addr_param;
3167 sctp_addip_param_t *asconf_param; 3183 sctp_addip_param_t *asconf_param;
3168 struct sctp_chunk *asconf_ack; 3184 struct sctp_chunk *asconf_ack;
3169
3170 __be16 err_code; 3185 __be16 err_code;
3171 int length = 0; 3186 int length = 0;
3172 int chunk_len; 3187 int chunk_len;
3173 __u32 serial; 3188 __u32 serial;
3174 int all_param_pass = 1;
3175 3189
3176 chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); 3190 chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
3177 hdr = (sctp_addiphdr_t *)asconf->skb->data; 3191 hdr = (sctp_addiphdr_t *)asconf->skb->data;
@@ -3199,9 +3213,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3199 goto done; 3213 goto done;
3200 3214
3201 /* Process the TLVs contained within the ASCONF chunk. */ 3215 /* Process the TLVs contained within the ASCONF chunk. */
3202 while (chunk_len > 0) { 3216 sctp_walk_params(param, addip, addip_hdr.params) {
3217 /* Skip preceeding address parameters. */
3218 if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
3219 param.p->type == SCTP_PARAM_IPV6_ADDRESS)
3220 continue;
3221
3203 err_code = sctp_process_asconf_param(asoc, asconf, 3222 err_code = sctp_process_asconf_param(asoc, asconf,
3204 asconf_param); 3223 param.addip);
3205 /* ADDIP 4.1 A7) 3224 /* ADDIP 4.1 A7)
3206 * If an error response is received for a TLV parameter, 3225 * If an error response is received for a TLV parameter,
3207 * all TLVs with no response before the failed TLV are 3226 * all TLVs with no response before the failed TLV are
@@ -3209,28 +3228,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3209 * the failed response are considered unsuccessful unless 3228 * the failed response are considered unsuccessful unless
3210 * a specific success indication is present for the parameter. 3229 * a specific success indication is present for the parameter.
3211 */ 3230 */
3212 if (SCTP_ERROR_NO_ERROR != err_code) 3231 if (err_code != SCTP_ERROR_NO_ERROR)
3213 all_param_pass = 0; 3232 all_param_pass = false;
3214
3215 if (!all_param_pass) 3233 if (!all_param_pass)
3216 sctp_add_asconf_response(asconf_ack, 3234 sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
3217 asconf_param->crr_id, err_code, 3235 err_code, param.addip);
3218 asconf_param);
3219 3236
3220 /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add 3237 /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
3221 * an IP address sends an 'Out of Resource' in its response, it 3238 * an IP address sends an 'Out of Resource' in its response, it
3222 * MUST also fail any subsequent add or delete requests bundled 3239 * MUST also fail any subsequent add or delete requests bundled
3223 * in the ASCONF. 3240 * in the ASCONF.
3224 */ 3241 */
3225 if (SCTP_ERROR_RSRC_LOW == err_code) 3242 if (err_code == SCTP_ERROR_RSRC_LOW)
3226 goto done; 3243 goto done;
3227
3228 /* Move to the next ASCONF param. */
3229 length = ntohs(asconf_param->param_hdr.length);
3230 asconf_param = (void *)asconf_param + length;
3231 chunk_len -= length;
3232 } 3244 }
3233
3234done: 3245done:
3235 asoc->peer.addip_serial++; 3246 asoc->peer.addip_serial++;
3236 3247
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c8f606324134..3ee27b7704ff 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
170{ 170{
171 __u16 chunk_length = ntohs(chunk->chunk_hdr->length); 171 __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
172 172
173 /* Previously already marked? */
174 if (unlikely(chunk->pdiscard))
175 return 0;
173 if (unlikely(chunk_length < required_length)) 176 if (unlikely(chunk_length < required_length))
174 return 0; 177 return 0;
175 178
@@ -3591,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
3591 struct sctp_chunk *asconf_ack = NULL; 3594 struct sctp_chunk *asconf_ack = NULL;
3592 struct sctp_paramhdr *err_param = NULL; 3595 struct sctp_paramhdr *err_param = NULL;
3593 sctp_addiphdr_t *hdr; 3596 sctp_addiphdr_t *hdr;
3594 union sctp_addr_param *addr_param;
3595 __u32 serial; 3597 __u32 serial;
3596 int length;
3597 3598
3598 if (!sctp_vtag_verify(chunk, asoc)) { 3599 if (!sctp_vtag_verify(chunk, asoc)) {
3599 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3600 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3618,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
3618 hdr = (sctp_addiphdr_t *)chunk->skb->data; 3619 hdr = (sctp_addiphdr_t *)chunk->skb->data;
3619 serial = ntohl(hdr->serial); 3620 serial = ntohl(hdr->serial);
3620 3621
3621 addr_param = (union sctp_addr_param *)hdr->params;
3622 length = ntohs(addr_param->p.length);
3623 if (length < sizeof(sctp_paramhdr_t))
3624 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3625 (void *)addr_param, commands);
3626
3627 /* Verify the ASCONF chunk before processing it. */ 3622 /* Verify the ASCONF chunk before processing it. */
3628 if (!sctp_verify_asconf(asoc, 3623 if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
3629 (sctp_paramhdr_t *)((void *)addr_param + length),
3630 (void *)chunk->chunk_end,
3631 &err_param))
3632 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, 3624 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3633 (void *)err_param, commands); 3625 (void *)err_param, commands);
3634 3626
@@ -3745,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
3745 rcvd_serial = ntohl(addip_hdr->serial); 3737 rcvd_serial = ntohl(addip_hdr->serial);
3746 3738
3747 /* Verify the ASCONF-ACK chunk before processing it. */ 3739 /* Verify the ASCONF-ACK chunk before processing it. */
3748 if (!sctp_verify_asconf(asoc, 3740 if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
3749 (sctp_paramhdr_t *)addip_hdr->params,
3750 (void *)asconf_ack->chunk_end,
3751 &err_param))
3752 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, 3741 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3753 (void *)err_param, commands); 3742 (void *)err_param, commands);
3754 3743
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 65410e18b8a6..1db162aa64a5 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1924,7 +1924,12 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
1924 } 1924 }
1925 omsg = buf_msg(obuf); 1925 omsg = buf_msg(obuf);
1926 pos += align(msg_size(omsg)); 1926 pos += align(msg_size(omsg));
1927 if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) { 1927 if (msg_isdata(omsg)) {
1928 if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
1929 tipc_sk_mcast_rcv(obuf);
1930 else
1931 tipc_sk_rcv(obuf);
1932 } else if (msg_user(omsg) == CONN_MANAGER) {
1928 tipc_sk_rcv(obuf); 1933 tipc_sk_rcv(obuf);
1929 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { 1934 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
1930 tipc_named_rcv(obuf); 1935 tipc_named_rcv(obuf);