author		Linus Torvalds <torvalds@linux-foundation.org>	2014-11-13 20:54:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-11-13 20:54:08 -0500
commit		5cf52037042d3ad7432df1aec004a935e83939a6 (patch)
tree		f66190e4a21171d08417bc9815df75dd75c49dff
parent		971ad4e4d6833d5f250d0db332ff863c599ae19f (diff)
parent		19ca9fc1445b76b60d34148f7ff837b055f5dcf3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) sunhme driver lacks DMA mapping error checks, based upon a report by Meelis Roos.

 2) Fix memory leak in mvpp2 driver, from Sudip Mukherjee.

 3) DMA memory allocation sizes are wrong in systemport ethernet driver, fix from Florian Fainelli.

 4) Fix use after free in mac80211 defragmentation code, from Johannes Berg.

 5) Some networking uapi headers missing from Kbuild file, from Stephen Hemminger.

 6) TUN driver gets csum_start offset wrong when VLAN accel is enabled, and macvtap has a similar bug, from Herbert Xu.

 7) Adjust several tunneling drivers to set dev->iflink after registry, because registry sets that to -1 overwriting whatever we did.  From Steffen Klassert.

 8) Geneve forgets to set inner tunneling type, causing GSO segmentation to fail on some NICs.  From Jesse Gross.

 9) Fix several locking bugs in stmmac driver, from Fabrice Gasnier and Giuseppe CAVALLARO.

10) Fix spurious timeouts with NewReno on low traffic connections, from Marcelo Leitner.

11) Fix descriptor updates in enic driver, from Govindarajulu Varadarajan.

12) PPP calls bpf_prog_create() with locks held, which isn't kosher.  Fix from Takashi Iwai.

13) Fix NULL deref in SCTP with malformed INIT packets, from Daniel Borkmann.

14) psock_fanout selftest accesses past the end of the mmap ring, fix from Shuah Khan.

15) Fix PTP timestamping for VLAN packets, from Richard Cochran.

16) netlink_unbind() calls in netlink pass wrong initial argument, from Hiroaki SHIMODA.

17) vxlan socket reuse accidentally reuses a socket when the address family is different, so we have to explicitly check this, from Marcelo Leitner.

18) Fix missing include in nft_reject_bridge.c breaking the build on ppc and other architectures, from Guenter Roeck.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
  vxlan: Do not reuse sockets for a different address family
  smsc911x: power-up phydev before doing a software reset.
  lib: rhashtable - Remove weird non-ASCII characters from comments
  net/smsc911x: Fix delays in the PHY enable/disable routines
  net/smsc911x: Fix rare soft reset timeout issue due to PHY power-down mode
  netlink: Properly unbind in error conditions.
  net: ptp: fix time stamp matching logic for VLAN packets.
  cxgb4 : dcb open-lldp interop fixes
  selftests/net: psock_fanout seg faults in sock_fanout_read_ring()
  net: bcmgenet: apply MII configuration in bcmgenet_open()
  net: bcmgenet: connect and disconnect from the PHY state machine
  net: qualcomm: Fix dependency
  ixgbe: phy: fix uninitialized status in ixgbe_setup_phy_link_tnx
  net: phy: Correctly handle MII ioctl which changes autonegotiation.
  ipv6: fix IPV6_PKTINFO with v4 mapped
  net: sctp: fix memory leak in auth key management
  net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
  net: ppp: Don't call bpf_prog_create() in ppp_lock
  net/mlx4_en: Advertize encapsulation offloads features only when VXLAN tunnel is set
  cxgb4 : Fix bug in DCB app deletion
  ...
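A recurring theme in this batch is fix #1: a streaming DMA mapping can fail and must be
checked with dma_mapping_error() before the bus address is handed to the NIC.  The sketch
below illustrates only that pattern; the driver, my_tx_map() and struct my_desc are
hypothetical stand-ins, not the actual sunhme code:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	/* Hypothetical TX descriptor; real NICs define their own layout. */
	struct my_desc {
		dma_addr_t addr;
		u32 len;
	};

	/* Map an skb for transmit, checking for mapping failure (the
	 * pattern fix #1 adds to sunhme).  On error the caller drops the
	 * packet instead of handing an invalid bus address to hardware.
	 */
	static int my_tx_map(struct device *dev, struct my_desc *d,
			     struct sk_buff *skb)
	{
		dma_addr_t mapping;

		mapping = dma_map_single(dev, skb->data, skb->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mapping))
			return -ENOMEM;

		d->addr = mapping;
		d->len = skb->len;
		return 0;
	}

Fix #3 (systemport) is the adjacent failure mode: the mapping calls were fine, but the
size passed to dma_zalloc_coherent() was wrong, as the bcmsysport hunks below show.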
-rw-r--r--  Documentation/networking/ip-sysctl.txt  14
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi  10
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c  18
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h  4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c  11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h  5
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c  7
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c  7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  13
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  11
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c  9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c  31
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  51
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h  10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h  8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c  136
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c  28
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  20
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  18
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c  27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  3
-rw-r--r--  drivers/net/ethernet/qualcomm/Kconfig  3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  3
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c  20
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c  61
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  52
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c  62
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c  1
-rw-r--r--  drivers/net/ethernet/ti/cpts.c  2
-rw-r--r--  drivers/net/macvtap.c  2
-rw-r--r--  drivers/net/phy/dp83640.c  4
-rw-r--r--  drivers/net/phy/phy.c  36
-rw-r--r--  drivers/net/ppp/ppp_generic.c  40
-rw-r--r--  drivers/net/tun.c  28
-rw-r--r--  drivers/net/usb/asix_devices.c  14
-rw-r--r--  drivers/net/vxlan.c  31
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c  10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c  1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h  1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c  12
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c  4
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c  4
-rw-r--r--  include/linux/socket.h  2
-rw-r--r--  include/net/9p/transport.h  1
-rw-r--r--  include/net/udp_tunnel.h  9
-rw-r--r--  include/uapi/linux/Kbuild  4
-rw-r--r--  include/uapi/linux/if_bridge.h  1
-rw-r--r--  lib/rhashtable.c  10
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c  1
-rw-r--r--  net/dsa/slave.c  7
-rw-r--r--  net/ipv4/fou.c  2
-rw-r--r--  net/ipv4/geneve.c  3
-rw-r--r--  net/ipv4/ip_sockglue.c  2
-rw-r--r--  net/ipv4/tcp_input.c  60
-rw-r--r--  net/ipv6/ip6_gre.c  5
-rw-r--r--  net/ipv6/ip6_tunnel.c  10
-rw-r--r--  net/ipv6/ip6_vti.c  11
-rw-r--r--  net/ipv6/sit.c  15
-rw-r--r--  net/mac80211/ibss.c  2
-rw-r--r--  net/mac80211/ieee80211_i.h  3
-rw-r--r--  net/mac80211/iface.c  18
-rw-r--r--  net/mac80211/mesh.c  2
-rw-r--r--  net/mac80211/mlme.c  5
-rw-r--r--  net/mac80211/rx.c  14
-rw-r--r--  net/mac80211/spectmgmt.c  18
-rw-r--r--  net/netlink/af_netlink.c  5
-rw-r--r--  net/sctp/auth.c  2
-rw-r--r--  net/sctp/sm_make_chunk.c  3
-rw-r--r--  tools/testing/selftests/net/psock_fanout.c  2
77 files changed, 784 insertions, 376 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 0307e2875f21..a476b08a43e0 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
 	0 - disabled
 	1 - enabled
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv4 reply packets that are not
+	associated with a socket for example, TCP RSTs or ICMP echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to.
+	Default: 0
+
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel.  Increase
 	this when using large numbers of interfaces and/or routes.
@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
 proxy_ndp - BOOLEAN
 	Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv6 reply packets that are not
+	associated with a socket for example, TCP RSTs or ICMPv6 echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to.
+	Default: 0
+
 conf/interface/*:
 	Change special settings per interface.
 
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 295c72d52a1f..f1ad9c2ab2e9 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -599,7 +599,7 @@
 			compatible = "apm,xgene-enet";
 			status = "disabled";
 			reg = <0x0 0x17020000 0x0 0xd100>,
-			      <0x0 0X17030000 0x0 0X400>,
+			      <0x0 0X17030000 0x0 0Xc300>,
 			      <0x0 0X10000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0x3c 0x4>;
@@ -624,9 +624,9 @@
 		sgenet0: ethernet@1f210000 {
 			compatible = "apm,xgene-enet";
 			status = "disabled";
-			reg = <0x0 0x1f210000 0x0 0x10000>,
-			      <0x0 0x1f200000 0x0 0X10000>,
-			      <0x0 0x1B000000 0x0 0X20000>;
+			reg = <0x0 0x1f210000 0x0 0xd100>,
+			      <0x0 0x1f200000 0x0 0Xc300>,
+			      <0x0 0x1B000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0xA0 0x4>;
 			dma-coherent;
@@ -639,7 +639,7 @@
 			compatible = "apm,xgene-enet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
-			      <0x0 0x1f600000 0x0 0X400>,
+			      <0x0 0x1f600000 0x0 0Xc300>,
 			      <0x0 0x18000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0x60 0x4>;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+		return false;
+
+	if (ioread32(p->ring_csr_addr + SRST_ADDR))
+		return false;
+
+	return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
 	u32 val;
 
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
 	clk_prepare_enable(pdata->clk);
 	clk_disable_unprepare(pdata->clk);
 	clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	val |= SCAN_AUTO_INCR;
 	MGMT_CLOCK_SEL_SET(&val, 1);
 	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+	return 0;
 }
 
 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
 #define BLOCK_ETH_MAC_OFFSET		0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
 
+#define CLKEN_ADDR			0xc208
+#define SRST_ADDR			0xc200
+
 #define MAC_ADDR_REG_OFFSET		0x00
 #define MAC_COMMAND_REG_OFFSET		0x04
 #define MAC_WRITE_REG_OFFSET		0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
-	u8 cpu_bufnum = 0, eth_bufnum = 0;
-	u8 bp_bufnum = 0x20;
-	u16 ring_id, ring_num = 0;
+	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+	u8 bp_bufnum = START_BP_BUFNUM;
+	u16 ring_id, ring_num = START_RING_NUM;
 	int ret;
 
 	/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	u16 dst_ring_num;
 	int ret;
 
-	pdata->port_ops->reset(pdata);
+	ret = pdata->port_ops->reset(pdata);
+	if (ret)
+		return ret;
 
 	ret = xgene_enet_create_desc_rings(ndev);
 	if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
 	return ret;
 err:
+	unregister_netdev(ndev);
 	free_netdev(ndev);
 	return ret;
 }
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
+#define START_ETH_BUFNUM	2
+#define START_BP_BUFNUM		0x22
+#define START_RING_NUM		8
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };
 
 struct xgene_port_ops {
-	void (*reset)(struct xgene_enet_pdata *pdata);
+	int (*reset)(struct xgene_enet_pdata *pdata);
 	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
 			   u32 dst_ring_num, u16 bufpool_id);
 	void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index c22f32622fa9..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
 	xgene_sgmac_rxtx(p, TX_EN, false);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+	if (!xgene_ring_mgr_init(p))
+		return -ENODEV;
+
 	clk_prepare_enable(p->clk);
 	clk_disable_unprepare(p->clk);
 	clk_prepare_enable(p->clk);
 
 	xgene_enet_ecc_init(p);
 	xgene_enet_config_ring_if_assoc(p);
+
+	return 0;
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
 	clk_prepare_enable(pdata->clk);
 	clk_disable_unprepare(pdata->clk);
 	clk_prepare_enable(pdata->clk);
 
 	xgene_enet_ecc_init(pdata);
 	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3a6778a667f4..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	/* We just need one DMA descriptor which is DMA-able, since writing to
 	 * the port will allocate a new descriptor in its internal linked-list
 	 */
-	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+				GFP_KERNEL);
 	if (!p) {
 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
 		return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	if (!(reg & TDMA_DISABLED))
 		netdev_warn(priv->netdev, "TDMA not stopped!\n");
 
+	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+	 * fail, so by checking this pointer we know whether the TX ring was
+	 * fully initialized or not.
+	 */
+	if (!ring->cbs)
+		return;
+
 	napi_disable(&ring->napi);
 	netif_napi_del(&ring->napi);
 
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	ring->cbs = NULL;
 
 	if (ring->desc_dma) {
-		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
 		ring->desc_dma = 0;
 	}
 	ring->size = 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
 		goto err_irq0;
 	}
 
+	/* Re-configure the port multiplexer towards the PHY device */
+	bcmgenet_mii_config(priv->dev, false);
+
+	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+			   priv->phy_interface);
+
 	bcmgenet_netif_start(dev);
 
 	return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
 
 	bcmgenet_netif_stop(dev);
 
+	/* Really kill the PHY state machine and disconnect from it */
+	phy_disconnect(priv->phydev);
+
 	/* Disable MAC receive */
 	umac_enable_set(priv, CMD_RX_EN, false);
 
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
 
 	phy_init_hw(priv->phydev);
 	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev);
+	bcmgenet_mii_config(priv->dev, false);
 
 	/* disable ethernet MAC while updating its registers */
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
 
 /* Wake-on-LAN routines */
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 /* setup netdev link state when PHY link status change and
  * update UMAC and RGMII block when link up
  */
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
 		return -EINVAL;
 	}
 
-	dev_info(kdev, "configuring instance for %s\n", phy_name);
+	if (init)
+		dev_info(kdev, "configuring instance for %s\n", phy_name);
 
 	return 0;
 }
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
 	 * PHY speed which is needed for bcmgenet_mii_config() to configure
 	 * things appropriately.
 	 */
-	ret = bcmgenet_mii_config(dev);
+	ret = bcmgenet_mii_config(dev, true);
 	if (ret) {
 		phy_disconnect(priv->phydev);
 		return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6fe300e316c3..cca604994003 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
 		app.protocol = dcb->app_priority[i].protocolid;
 
 		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.priority = dcb->app_priority[i].user_prio_map;
 			app.selector = dcb->app_priority[i].sel_field + 1;
-			err = dcb_ieee_setapp(dev, &app);
+			err = dcb_ieee_delapp(dev, &app);
 		} else {
 			app.selector = !!(dcb->app_priority[i].sel_field);
 			err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 	case CXGB4_DCB_INPUT_FW_ENABLED: {
 		/* we're going to use Firmware DCB */
 		dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-		dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+		dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+			dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+		else
+			dcb->supported |= DCB_CAP_DCBX_VER_CEE;
 		break;
 	}
 
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
 	*up_tc_map = (1 << tc);
 
 	/* prio_type is link strict */
-	*prio_type = 0x2;
+	if (*pgid != 0xF)
+		*prio_type = 0x2;
 }
 
 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
 				u8 *prio_type, u8 *pgid, u8 *bw_per,
 				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 1);
 }
 
 
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
 			       u8 *prio_type, u8 *pgid, u8 *bw_per,
 			       u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 0);
 }
 
 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	struct fw_port_cmd pcmd;
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = pi->adapter;
+	int fw_tc = 7 - tc;
 	u32 _pgid;
 	int err;
 
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	}
 
 	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-	_pgid &= ~(0xF << (tc * 4));
-	_pgid |= pgid << (tc * 4);
+	_pgid &= ~(0xF << (fw_tc * 4));
+	_pgid |= pgid << (fw_tc * 4);
 	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
 
 	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
 	    priority >= CXGB4_MAX_PRIORITY)
 		*pfccfg = 0;
 	else
-		*pfccfg = (pi->dcb.pfcen >> priority) & 1;
+		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }
 
 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
 	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
 
 	if (pfccfg)
-		pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
 	else
-		pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
 
 	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
 	if (err != FW_PORT_DCB_CFG_SUCCESS) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	u32 sge_control, sge_conm_ctrl;
+	u32 sge_control, sge_control2, sge_conm_ctrl;
+	unsigned int ingpadboundary, ingpackboundary;
 	int ret, egress_threshold;
 
 	/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
 	sge_control = t4_read_reg(adap, SGE_CONTROL);
 	s->pktshift = PKTSHIFT_GET(sge_control);
 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-			    X_INGPADBOUNDARY_SHIFT);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adap->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
 
 	if (adap->flags & USING_SOFT_PARAMS)
 		ret = t4_sge_init_soft(adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 		     HOSTPAGESIZEPF6(sge_hps) |
 		     HOSTPAGESIZEPF7(sge_hps));
 
-	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY_MASK |
-			 EGRSTATUSPAGESIZE_MASK,
-			 INGPADBOUNDARY(fl_align_log - 5) |
-			 EGRSTATUSPAGESIZE(stat_len != 64));
-
+	if (is_t4(adap->params.chip)) {
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(fl_align_log - 5) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+	} else {
+		/* T5 introduced the separation of the Free List Padding and
+		 * Packing Boundaries.  Thus, we can select a smaller Padding
+		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
+		 * Bandwidth, and use a Packing Boundary which is large enough
+		 * to avoid false sharing between CPUs, etc.
+		 *
+		 * For the PCI Link, the smaller the Padding Boundary the
+		 * better.  For the Memory Controller, a smaller Padding
+		 * Boundary is better until we cross under the Memory Line
+		 * Size (the minimum unit of transfer to/from Memory).  If we
+		 * have a Padding Boundary which is smaller than the Memory
+		 * Line Size, that'll involve a Read-Modify-Write cycle on the
+		 * Memory Controller which is never good.  For T5 the smallest
+		 * Padding Boundary which we can select is 32 bytes which is
+		 * larger than any known Memory Controller Line Size so we'll
+		 * use that.
+		 *
+		 * T5 has a different interpretation of the "0" value for the
+		 * Packing Boundary.  This corresponds to 16 bytes instead of
+		 * the expected 32 bytes.  We never have a Packing Boundary
+		 * less than 32 bytes so we can't use that special value but
+		 * on the other hand, if we wanted 32 bytes, the best we can
+		 * really do is 64 bytes.
+		 */
+		if (fl_align <= 32) {
+			fl_align = 64;
+			fl_align_log = 6;
+		}
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+		t4_set_reg_field(adap, SGE_CONTROL2_A,
+				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+				 INGPACKBOUNDARY_V(fl_align_log -
+						   INGPACKBOUNDARY_SHIFT_X));
+	}
 	/*
 	 * Adjust various SGE Free List Host Buffer Sizes.
 	 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
 #define  X_INGPADBOUNDARY_SHIFT  5
 
 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A		0x1124
 #define  DCASYSTYPE             0x00080000U
 #define  RXPKTCPLMODE_MASK      0x00040000U
 #define  RXPKTCPLMODE_SHIFT     18
@@ -106,6 +107,7 @@
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
 #define  PKTSHIFT_GET(x)	(((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define  INGPCIEBOUNDARY_32B_X	0
 #define  INGPCIEBOUNDARY_MASK   0x00000380U
 #define  INGPCIEBOUNDARY_SHIFT  7
 #define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
 #define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY_GET(x)	(((x) & INGPADBOUNDARY_MASK) \
 				 >> INGPADBOUNDARY_SHIFT)
+#define  INGPACKBOUNDARY_16B_X	0
+#define  INGPACKBOUNDARY_SHIFT_X 5
+
+#define  INGPACKBOUNDARY_S	16
+#define  INGPACKBOUNDARY_M	0x7U
+#define  INGPACKBOUNDARY_V(x)	((x) << INGPACKBOUNDARY_S)
+#define  INGPACKBOUNDARY_G(x)	(((x) >> INGPACKBOUNDARY_S) \
+				 & INGPACKBOUNDARY_M)
 #define  EGRPCIEBOUNDARY_MASK   0x0000000eU
 #define  EGRPCIEBOUNDARY_SHIFT  1
 #define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
 	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
 	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */
 
+	/* Decoded Adapter Parameters.
+	 */
+	u32 fl_pg_order;		/* large page allocation size */
+	u32 stat_len;			/* length of status page at ring end */
+	u32 pktshift;			/* padding between CPL & packet data */
+	u32 fl_align;			/* response queue message alignment */
+	u32 fl_starve_thres;		/* Free List starvation threshold */
+
 	/*
 	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
 	 * The absolute Queue IDs are in a compact range which start at a
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
 #include "../cxgb4/t4_msg.h"
 
 /*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;		/* large page allocation size */
-static u32 STAT_LEN;		/* length of status page at ring end */
-static u32 PKTSHIFT;		/* padding between CPL and packet data */
-static u32 FL_ALIGN;		/* response queue message alignment */
-
-/*
  * Constants ...
  */
 enum {
@@ -102,12 +94,6 @@ enum {
 	MAX_TIMER_TX_RECLAIM = 100,
 
 	/*
-	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-	 * timer will attempt to refill it.
-	 */
-	FL_STARVE_THRES = 4,
-
-	/*
 	 * Suspend an Ethernet TX queue with fewer available descriptors than
 	 * this.  We always want to have room for a maximum sized packet:
 	 * inline immediate data + MAX_SKB_FRAGS.  This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  *	fl_starving - return whether a Free List is starving.
+ *	@adapter: pointer to the adapter
  *	@fl: the Free List
  *
  *	Tests specified Free List to see whether the number of buffers
  *	available to the hardware has falled below our "starvation"
  *	threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
 {
-	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  *	get_buf_size - return the size of an RX Free List buffer.
+ *	@adapter: pointer to the associated adapter
  *	@sdesc: pointer to the software buffer descriptor
  */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+			       const struct rx_sw_desc *sdesc)
 {
-	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-		? (PAGE_SIZE << FL_PG_ORDER)
-		: PAGE_SIZE;
+	const struct sge *s = &adapter->sge;
+
+	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
 		if (is_buf_mapped(sdesc))
 			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+				       get_buf_size(adapter, sdesc),
+				       PCI_DMA_FROMDEVICE);
 		put_page(sdesc->page);
 		sdesc->page = NULL;
 		if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
 	if (is_buf_mapped(sdesc))
 		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adapter, sdesc),
+			       PCI_DMA_FROMDEVICE);
 	sdesc->page = NULL;
 	if (++fl->cidx == fl->size)
 		fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			      int n, gfp_t gfp)
 {
+	struct sge *s = &adapter->sge;
 	struct page *page;
 	dma_addr_t dma_addr;
 	unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 * If we don't support large pages, drop directly into the small page
 	 * allocation code.
 	 */
-	if (FL_PG_ORDER == 0)
+	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;
 
 	while (n) {
 		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
+				   s->fl_pg_order);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed inour attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			fl->large_alloc_failed++;
 			break;
 		}
-		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-					PAGE_SIZE << FL_PG_ORDER,
+					PAGE_SIZE << s->fl_pg_order,
 					PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 			/*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			 * because DMA mapping resources are typically
 			 * critical resources once they become scarse.
 			 */
-			__free_pages(page, FL_PG_ORDER);
+			__free_pages(page, s->fl_pg_order);
 			goto out;
 		}
 		dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
 	fl->pend_cred += cred;
 	ring_fl_db(adapter, fl);
 
-	if (unlikely(fl_starving(fl))) {
+	if (unlikely(fl_starving(adapter, fl))) {
 		smp_wmb();
 		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
 	}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb, gl, PKTSHIFT);
-	skb->len = gl->tot_len - PKTSHIFT;
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
 		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		rxq->stats.rx_drops++;
 		return 0;
 	}
-	__skb_pull(skb, PKTSHIFT);
+	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 	int budget_left = budget;
 
 	while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			BUG_ON(frag >= MAX_SKB_FRAGS);
 			BUG_ON(rxq->fl.avail == 0);
 			sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-			bufsz = get_buf_size(sdesc);
+			bufsz = get_buf_size(adapter, sdesc);
 			fp->page = sdesc->page;
 			fp->offset = rspq->offset;
 			fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(fp->size, FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
 		 * schedule napi but the FL is no longer starving.
 		 * No biggie.
 		 */
-		if (fl_starving(fl)) {
+		if (fl_starving(adapter, fl)) {
 			struct sge_eth_rxq *rxq;
 
 			rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		       int intr_dest,
 		       struct sge_fl *fl, rspq_handler_t hnd)
 {
+	struct sge *s = &adapter->sge;
 	struct port_info *pi = netdev_priv(dev);
 	struct fw_iq_cmd cmd, rpl;
 	int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 				      sizeof(__be64), sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, STAT_LEN);
+				      &fl->addr, &fl->sdesc, s->stat_len);
 		if (!fl->desc) {
 			ret = -ENOMEM;
 			goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		 * free list ring) in Egress Queue Units.
 		 */
 		flsz = (fl->size / FL_PER_EQ_UNIT +
-			STAT_LEN / EQ_UNIT);
+			s->stat_len / EQ_UNIT);
 
 		/*
 		 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 		       struct net_device *dev, struct netdev_queue *devq,
 		       unsigned int iqid)
 {
+	struct sge *s = &adapter->sge;
 	int ret, nentries;
 	struct fw_eq_eth_cmd cmd, rpl;
 	struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	 * Calculate the size of the hardware TX Queue (including the Status
 	 * Page on the end of the TX Queue) in units of TX Descriptors.
 	 */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	/*
 	 * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
 				 sizeof(struct tx_desc),
 				 sizeof(struct tx_sw_desc),
-				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
  */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+	struct sge *s = &adapter->sge;
+
 	dma_free_coherent(adapter->pdev_dev,
-			  tq->size * sizeof(*tq->desc) + STAT_LEN,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
 			  tq->desc, tq->phys_addr);
 	tq->cntxt_id = 0;
 	tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 			 struct sge_fl *fl)
 {
+	struct sge *s = &adapter->sge;
 	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
 	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 	if (fl) {
 		free_rx_bufs(adapter, fl, fl->avail);
 		dma_free_coherent(adapter->pdev_dev,
-				  fl->size * sizeof(*fl->desc) + STAT_LEN,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
 				  fl->desc, fl->addr);
 		kfree(fl->sdesc);
 		fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	u32 fl0 = sge_params->sge_fl_buffer_size[0];
 	u32 fl1 = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
 
 	/*
 	 * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
2443 * Now translate the adapter parameters into our internal forms. 2451 * Now translate the adapter parameters into our internal forms.
2444 */ 2452 */
2445 if (fl1) 2453 if (fl1)
2446 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; 2454 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2447 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) 2455 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2448 ? 128 : 64); 2456 ? 128 : 64);
2449 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); 2457 s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
2450 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2458
2451 SGE_INGPADBOUNDARY_SHIFT); 2459 /* T4 uses a single control field to specify both the PCIe Padding and
2460 * Packing Boundary. T5 introduced the ability to specify these
2461 * separately. The actual Ingress Packet Data alignment boundary
2462 * within Packed Buffer Mode is the maximum of these two
2463 * specifications. (Note that it makes no real practical sense to
2464 * have the Pading Boudary be larger than the Packing Boundary but you
2465 * could set the chip up that way and, in fact, legacy T4 code would
2466 * end doing this because it would initialize the Padding Boundary and
2467 * leave the Packing Boundary initialized to 0 (16 bytes).)
2468 */
2469 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2470 X_INGPADBOUNDARY_SHIFT);
2471 if (is_t4(adapter->params.chip)) {
2472 s->fl_align = ingpadboundary;
2473 } else {
2474 /* T5 has a different interpretation of one of the PCIe Packing
2475 * Boundary values.
2476 */
2477 ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
2478 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2479 ingpackboundary = 16;
2480 else
2481 ingpackboundary = 1 << (ingpackboundary +
2482 INGPACKBOUNDARY_SHIFT_X);
2483
2484 s->fl_align = max(ingpadboundary, ingpackboundary);
2485 }
2486
2487 /* A FL with <= fl_starve_thres buffers is starving and a periodic
2488 * timer will attempt to refill it. This needs to be larger than the
2489 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2490 * stuck waiting for new packets while the SGE is waiting for us to
2491 * give it more Free List entries. (Note that the SGE's Egress
2492 * Congestion Threshold is in units of 2 Free List pointers.)
2493 */
2494 s->fl_starve_thres
2495 = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
2452 2496
2453 /* 2497 /*
2454 * Set up tasklet timers. 2498 * Set up tasklet timers.
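The T4 vs T5 branch above is pure bit-field arithmetic on SGE_CONTROL/SGE_CONTROL2. A minimal user-space sketch of the fl_align computation; the field shifts and masks here are illustrative stand-ins for the real Chelsio register definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the Chelsio register field accessors;
     * the real shifts/masks live in the adapter's register headers. */
    #define INGPADBOUNDARY_GET(x)   (((x) >> 4) & 0x7)
    #define X_INGPADBOUNDARY_SHIFT  5                 /* 32-byte minimum */
    #define INGPACKBOUNDARY_G(x)    (((x) >> 16) & 0x7)
    #define INGPACKBOUNDARY_16B_X   0                 /* special encoding */
    #define INGPACKBOUNDARY_SHIFT_X 5

    static unsigned int fl_align(uint32_t ctrl, uint32_t ctrl2, int is_t4)
    {
        unsigned int pad, pack;

        pad = 1u << (INGPADBOUNDARY_GET(ctrl) + X_INGPADBOUNDARY_SHIFT);
        if (is_t4)
            return pad;

        /* T5: encoding 0 means 16 bytes; others are powers of two. */
        pack = INGPACKBOUNDARY_G(ctrl2);
        if (pack == INGPACKBOUNDARY_16B_X)
            pack = 16;
        else
            pack = 1u << (pack + INGPACKBOUNDARY_SHIFT_X);

        /* The effective alignment is the larger of the two boundaries. */
        return pad > pack ? pad : pack;
    }

    int main(void)
    {
        printf("T4: %u\n", fl_align(0, 0, 1));  /* pad field 0 -> 32  */
        printf("T5: %u\n", fl_align(0, 0, 0));  /* max(32, 16) -> 32  */
        return 0;
    }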
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61dcb4ce..4b6a6d14d86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
134 */ 134 */
135struct sge_params { 135struct sge_params {
136 u32 sge_control; /* padding, boundaries, lengths, etc. */ 136 u32 sge_control; /* padding, boundaries, lengths, etc. */
137 u32 sge_control2; /* T5: more of the same */
137 u32 sge_host_page_size; /* RDMA page sizes */ 138 u32 sge_host_page_size; /* RDMA page sizes */
138 u32 sge_queues_per_page; /* RDMA queues/page */ 139 u32 sge_queues_per_page; /* RDMA queues/page */
139 u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ 140 u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
140 u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ 141 u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
141 u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ 142 u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
143 u32 sge_congestion_control; /* congestion thresholds, etc. */
142 u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ 144 u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
143 u32 sge_timer_value_2_and_3; 145 u32 sge_timer_value_2_and_3;
144 u32 sge_timer_value_4_and_5; 146 u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc48ba2..1e896b923234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
468 sge_params->sge_timer_value_2_and_3 = vals[5]; 468 sge_params->sge_timer_value_2_and_3 = vals[5];
469 sge_params->sge_timer_value_4_and_5 = vals[6]; 469 sge_params->sge_timer_value_4_and_5 = vals[6];
470 470
471 /* T4 uses a single control field to specify both the PCIe Padding and
472 * Packing Boundary. T5 introduced the ability to specify these
473 * separately, with the Padding Boundary in SGE_CONTROL and the Packing
474 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
475 * SGE_CONTROL2 in order to determine how ingress packet data will be
476 * laid out in Packed Buffer Mode. Unfortunately, older versions of
477 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
478 * failure grabbing it we throw an error since we can't figure out the
479 * right value.
480 */
481 if (!is_t4(adapter->params.chip)) {
482 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
483 FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
484 v = t4vf_query_params(adapter, 1, params, vals);
485 if (v != FW_SUCCESS) {
486 dev_err(adapter->pdev_dev,
487 "Unable to get SGE Control2; "
488 "probably old firmware.\n");
489 return v;
490 }
491 sge_params->sge_control2 = vals[0];
492 }
493
471 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 494 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
472 FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); 495 FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
473 v = t4vf_query_params(adapter, 1, params, vals); 496 params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
497 FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
498 v = t4vf_query_params(adapter, 2, params, vals);
474 if (v) 499 if (v)
475 return v; 500 return v;
476 sge_params->sge_ingress_rx_threshold = vals[0]; 501 sge_params->sge_ingress_rx_threshold = vals[0];
502 sge_params->sge_congestion_control = vals[1];
477 503
478 return 0; 504 return 0;
479} 505}
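The query rework fetches two registers per firmware round trip and treats a missing SGE_CONTROL2 as a hard error rather than guessing. A rough stand-alone model of that pattern, with a stub query function and made-up register IDs:

    #include <stdio.h>
    #include <stdint.h>

    enum { REG_RX_THRESHOLD, REG_CONM_CTRL, REG_CONTROL2 }; /* made-up IDs */

    /* Stub firmware mailbox: pretend old firmware that does not know
     * about SGE_CONTROL2 and rejects the whole query. */
    static int query_params(int n, const int *regs, uint32_t *vals)
    {
        for (int i = 0; i < n; i++) {
            if (regs[i] == REG_CONTROL2)
                return -1;
            vals[i] = 0x1234;       /* canned register value */
        }
        return 0;
    }

    int main(void)
    {
        int regs[] = { REG_RX_THRESHOLD, REG_CONM_CTRL };
        uint32_t vals[2], v2;
        int reg2 = REG_CONTROL2;

        /* One round trip for two registers instead of two calls. */
        if (query_params(2, regs, vals))
            return 1;

        /* A register the firmware cannot report is a hard error. */
        if (query_params(1, &reg2, &v2)) {
            fprintf(stderr, "Unable to get SGE Control2; probably old firmware.\n");
            return 1;
        }
        return 0;
    }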
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 180e53fa628f..73cf1653a4a3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
940 struct vnic_rq_buf *buf = rq->to_use; 940 struct vnic_rq_buf *buf = rq->to_use;
941 941
942 if (buf->os_buf) { 942 if (buf->os_buf) {
943 buf = buf->next; 943 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
944 rq->to_use = buf; 944 buf->len);
945 rq->ring.desc_avail--;
946 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
947 /* Adding write memory barrier prevents compiler and/or
948 * CPU reordering, thus avoiding descriptor posting
949 * before descriptor is initialized. Otherwise, hardware
950 * can read stale descriptor fields.
951 */
952 wmb();
953 iowrite32(buf->index, &rq->ctrl->posted_index);
954 }
955 945
956 return 0; 946 return 0;
957 } 947 }
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1037 enic->rq_truncated_pkts++; 1027 enic->rq_truncated_pkts++;
1038 } 1028 }
1039 1029
1030 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
1031 PCI_DMA_FROMDEVICE);
1040 dev_kfree_skb_any(skb); 1032 dev_kfree_skb_any(skb);
1033 buf->os_buf = NULL;
1041 1034
1042 return; 1035 return;
1043 } 1036 }
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1088 /* Buffer overflow 1081 /* Buffer overflow
1089 */ 1082 */
1090 1083
1084 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
1085 PCI_DMA_FROMDEVICE);
1091 dev_kfree_skb_any(skb); 1086 dev_kfree_skb_any(skb);
1087 buf->os_buf = NULL;
1092 } 1088 }
1093} 1089}
1094 1090
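The enic hunks route buffer reuse through the normal descriptor-posting helper instead of open-coding ring bookkeeping, and make the drop paths unmap the buffer and clear os_buf. A compilable sketch of the reuse fast path, with stub types standing in for the vnic structures:

    #include <stdio.h>

    struct rq_buf { void *os_buf; unsigned long dma_addr; unsigned int len; };
    struct rq { struct rq_buf *to_use; unsigned int desc_avail; };

    /* Stub for enic_queue_rq_desc(): the one helper that advances the
     * ring, decrements desc_avail and posts the index (with the write
     * barrier, in the real driver). */
    static void queue_rq_desc(struct rq *rq, void *os_buf,
                              unsigned long dma_addr, unsigned int len)
    {
        rq->desc_avail--;
        printf("posted buf %p len %u (avail %u)\n", os_buf, len, rq->desc_avail);
    }

    static int rq_alloc_buf(struct rq *rq)
    {
        struct rq_buf *buf = rq->to_use;

        if (buf->os_buf) {
            /* Reuse: go through the same helper as fresh buffers
             * instead of open-coding index/avail bookkeeping. */
            queue_rq_desc(rq, buf->os_buf, buf->dma_addr, buf->len);
            return 0;
        }
        /* ... otherwise allocate, dma-map and post a new buffer ... */
        return -1;
    }

    int main(void)
    {
        char page[2048];
        struct rq_buf buf = { page, 0x1000, sizeof(page) };
        struct rq rq = { &buf, 64 };
        return rq_alloc_buf(&rq);
    }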
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50a851db2852..3dca494797bd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
298 return bufaddr; 298 return bufaddr;
299} 299}
300 300
301static void swap_buffer2(void *dst_buf, void *src_buf, int len)
302{
303 int i;
304 unsigned int *src = src_buf;
305 unsigned int *dst = dst_buf;
306
307 for (i = 0; i < len; i += 4, src++, dst++)
308 *dst = swab32p(src);
309}
310
301static void fec_dump(struct net_device *ndev) 311static void fec_dump(struct net_device *ndev)
302{ 312{
303 struct fec_enet_private *fep = netdev_priv(ndev); 313 struct fec_enet_private *fep = netdev_priv(ndev);
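swap_buffer2() is a straight 32-bit byte-swapping copy, which lets the copybreak path swap while copying instead of copying and then swapping in place. A stand-alone equivalent, with a local swab32() in place of the kernel's swab32p():

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swab32(uint32_t x)      /* local stand-in for swab32p() */
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    /* Byte-swap 32-bit words from src into dst while copying. */
    static void swap_buffer2(void *dst_buf, const void *src_buf, int len)
    {
        const uint32_t *src = src_buf;
        uint32_t *dst = dst_buf;

        for (int i = 0; i < len; i += 4, src++, dst++)
            *dst = swab32(*src);
    }

    int main(void)
    {
        uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t dst[8];

        swap_buffer2(dst, src, sizeof(src));
        for (int i = 0; i < 8; i++)         /* prints: 4 3 2 1 8 7 6 5 */
            printf("%d ", dst[i]);
        printf("\n");
        return 0;
    }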
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1307} 1317}
1308 1318
1309static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, 1319static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1310 struct bufdesc *bdp, u32 length) 1320 struct bufdesc *bdp, u32 length, bool swap)
1311{ 1321{
1312 struct fec_enet_private *fep = netdev_priv(ndev); 1322 struct fec_enet_private *fep = netdev_priv(ndev);
1313 struct sk_buff *new_skb; 1323 struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1322 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1332 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1323 FEC_ENET_RX_FRSIZE - fep->rx_align, 1333 FEC_ENET_RX_FRSIZE - fep->rx_align,
1324 DMA_FROM_DEVICE); 1334 DMA_FROM_DEVICE);
1325 memcpy(new_skb->data, (*skb)->data, length); 1335 if (!swap)
1336 memcpy(new_skb->data, (*skb)->data, length);
1337 else
1338 swap_buffer2(new_skb->data, (*skb)->data, length);
1326 *skb = new_skb; 1339 *skb = new_skb;
1327 1340
1328 return true; 1341 return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1352 u16 vlan_tag; 1365 u16 vlan_tag;
1353 int index = 0; 1366 int index = 0;
1354 bool is_copybreak; 1367 bool is_copybreak;
1368 bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
1355 1369
1356#ifdef CONFIG_M532x 1370#ifdef CONFIG_M532x
1357 flush_cache_all(); 1371 flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1415 * include that when passing upstream as it messes up 1429 * include that when passing upstream as it messes up
1416 * bridging applications. 1430 * bridging applications.
1417 */ 1431 */
1418 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); 1432 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
1433 need_swap);
1419 if (!is_copybreak) { 1434 if (!is_copybreak) {
1420 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 1435 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1421 if (unlikely(!skb_new)) { 1436 if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1430 prefetch(skb->data - NET_IP_ALIGN); 1445 prefetch(skb->data - NET_IP_ALIGN);
1431 skb_put(skb, pkt_len - 4); 1446 skb_put(skb, pkt_len - 4);
1432 data = skb->data; 1447 data = skb->data;
1433 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 1448 if (!is_copybreak && need_swap)
1434 swap_buffer(data, pkt_len); 1449 swap_buffer(data, pkt_len);
1435 1450
1436 /* Extract the enhanced buffer descriptor */ 1451 /* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
3343 netif_device_detach(ndev); 3358 netif_device_detach(ndev);
3344 netif_tx_unlock_bh(ndev); 3359 netif_tx_unlock_bh(ndev);
3345 fec_stop(ndev); 3360 fec_stop(ndev);
3361 fec_enet_clk_enable(ndev, false);
3362 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3346 } 3363 }
3347 rtnl_unlock(); 3364 rtnl_unlock();
3348 3365
3349 fec_enet_clk_enable(ndev, false);
3350 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3351
3352 if (fep->reg_phy) 3366 if (fep->reg_phy)
3353 regulator_disable(fep->reg_phy); 3367 regulator_disable(fep->reg_phy);
3354 3368
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
3367 return ret; 3381 return ret;
3368 } 3382 }
3369 3383
3370 pinctrl_pm_select_default_state(&fep->pdev->dev);
3371 ret = fec_enet_clk_enable(ndev, true);
3372 if (ret)
3373 goto failed_clk;
3374
3375 rtnl_lock(); 3384 rtnl_lock();
3376 if (netif_running(ndev)) { 3385 if (netif_running(ndev)) {
3386 pinctrl_pm_select_default_state(&fep->pdev->dev);
3387 ret = fec_enet_clk_enable(ndev, true);
3388 if (ret) {
3389 rtnl_unlock();
3390 goto failed_clk;
3391 }
3377 fec_restart(ndev); 3392 fec_restart(ndev);
3378 netif_tx_lock_bh(ndev); 3393 netif_tx_lock_bh(ndev);
3379 netif_device_attach(ndev); 3394 netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..28b81ae09b5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
635 **/ 635 **/
636s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) 636s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
637{ 637{
638 s32 status;
639 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; 638 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
640 bool autoneg = false; 639 bool autoneg = false;
641 ixgbe_link_speed speed; 640 ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
700 699
701 hw->phy.ops.write_reg(hw, MDIO_CTRL1, 700 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
702 MDIO_MMD_AN, autoneg_reg); 701 MDIO_MMD_AN, autoneg_reg);
703 702 return 0;
704 return status;
705} 703}
706 704
707/** 705/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a949f352..d44560d1d268 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1047 int tx_index; 1047 int tx_index;
1048 struct tx_desc *desc; 1048 struct tx_desc *desc;
1049 u32 cmd_sts; 1049 u32 cmd_sts;
1050 struct sk_buff *skb;
1051 1050
1052 tx_index = txq->tx_used_desc; 1051 tx_index = txq->tx_used_desc;
1053 desc = &txq->tx_desc_area[tx_index]; 1052 desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1066 reclaimed++; 1065 reclaimed++;
1067 txq->tx_desc_count--; 1066 txq->tx_desc_count--;
1068 1067
1069 skb = NULL; 1068 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1070 if (cmd_sts & TX_LAST_DESC) 1069 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1071 skb = __skb_dequeue(&txq->tx_skb); 1070 desc->byte_cnt, DMA_TO_DEVICE);
1071
1072 if (cmd_sts & TX_ENABLE_INTERRUPT) {
1073 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1074
1075 if (!WARN_ON(!skb))
1076 dev_kfree_skb(skb);
1077 }
1072 1078
1073 if (cmd_sts & ERROR_SUMMARY) { 1079 if (cmd_sts & ERROR_SUMMARY) {
1074 netdev_info(mp->dev, "tx error\n"); 1080 netdev_info(mp->dev, "tx error\n");
1075 mp->dev->stats.tx_errors++; 1081 mp->dev->stats.tx_errors++;
1076 } 1082 }
1077 1083
1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1080 desc->byte_cnt, DMA_TO_DEVICE);
1081 dev_kfree_skb(skb);
1082 } 1084 }
1083 1085
1084 __netif_tx_unlock_bh(nq); 1086 __netif_tx_unlock_bh(nq);
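The reordered txq_reclaim() unmaps every descriptor that is not a TSO header (those live in a dedicated DMA region) and frees an skb only on descriptors flagged for interrupt, where exactly one skb is queued. A compact model of that walk, with invented flag names:

    #include <stdio.h>

    #define F_TSO_HDR 1    /* invented stand-ins for the descriptor bits */
    #define F_INTR    2

    struct desc { int flags; int buf; };

    static void unmap(int buf)    { printf("unmap buf %d\n", buf); }
    static void free_skb(int buf) { printf("free skb at buf %d\n", buf); }

    static void reclaim(struct desc *d, int n)
    {
        for (int i = 0; i < n; i++) {
            /* TSO headers come from a dedicated DMA region and are
             * never unmapped per descriptor. */
            if (!(d[i].flags & F_TSO_HDR))
                unmap(d[i].buf);
            /* Exactly one queued skb per interrupt-flagged descriptor. */
            if (d[i].flags & F_INTR)
                free_skb(d[i].buf);
        }
    }

    int main(void)
    {
        struct desc ring[] = { { F_TSO_HDR, 0 }, { 0, 1 }, { F_INTR, 2 } };
        reclaim(ring, 3);
        return 0;
    }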
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f101526..fdf3e382e464 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1692{ 1692{
1693 struct mvpp2_prs_entry *pe; 1693 struct mvpp2_prs_entry *pe;
1694 int tid_aux, tid; 1694 int tid_aux, tid;
1695 int ret = 0;
1695 1696
1696 pe = mvpp2_prs_vlan_find(priv, tpid, ai); 1697 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1697 1698
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1723 break; 1724 break;
1724 } 1725 }
1725 1726
1726 if (tid <= tid_aux) 1727 if (tid <= tid_aux) {
1727 return -EINVAL; 1728 ret = -EINVAL;
1729 goto error;
1730 }
1728 1731
1729 memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); 1732 memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
1730 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1733 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1756 1759
1757 mvpp2_prs_hw_write(priv, pe); 1760 mvpp2_prs_hw_write(priv, pe);
1758 1761
1762error:
1759 kfree(pe); 1763 kfree(pe);
1760 1764
1761 return 0; 1765 return ret;
1762} 1766}
1763 1767
1764/* Get first free double vlan ai number */ 1768/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1821 unsigned int port_map) 1825 unsigned int port_map)
1822{ 1826{
1823 struct mvpp2_prs_entry *pe; 1827 struct mvpp2_prs_entry *pe;
1824 int tid_aux, tid, ai; 1828 int tid_aux, tid, ai, ret = 0;
1825 1829
1826 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); 1830 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1827 1831
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1838 1842
1839 /* Set ai value for new double vlan entry */ 1843 /* Set ai value for new double vlan entry */
1840 ai = mvpp2_prs_double_vlan_ai_free_get(priv); 1844 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1841 if (ai < 0) 1845 if (ai < 0) {
1842 return ai; 1846 ret = ai;
1847 goto error;
1848 }
1843 1849
1844 /* Get first single/triple vlan tid */ 1850 /* Get first single/triple vlan tid */
1845 for (tid_aux = MVPP2_PE_FIRST_FREE_TID; 1851 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1859 break; 1865 break;
1860 } 1866 }
1861 1867
1862 if (tid >= tid_aux) 1868 if (tid >= tid_aux) {
1863 return -ERANGE; 1869 ret = -ERANGE;
1870 goto error;
1871 }
1864 1872
1865 memset(pe, 0, sizeof(struct mvpp2_prs_entry)); 1873 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1866 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1874 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1887 mvpp2_prs_tcam_port_map_set(pe, port_map); 1895 mvpp2_prs_tcam_port_map_set(pe, port_map);
1888 mvpp2_prs_hw_write(priv, pe); 1896 mvpp2_prs_hw_write(priv, pe);
1889 1897
1898error:
1890 kfree(pe); 1899 kfree(pe);
1891 return 0; 1900 return ret;
1892} 1901}
1893 1902
1894/* IPv4 header parsing for fragmentation and L4 offset */ 1903/* IPv4 header parsing for fragmentation and L4 offset */
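Both mvpp2 fixes are the same leak repair: early returns inside a function that owns a heap allocation become goto error so that kfree(pe) runs on every path. The shape of the pattern as a self-contained sketch:

    #include <stdlib.h>
    #include <errno.h>

    struct prs_entry { int tid; };

    static int vlan_add(int tid, int tid_aux)
    {
        struct prs_entry *pe;
        int ret = 0;

        pe = malloc(sizeof(*pe));       /* owned by this function */
        if (!pe)
            return -ENOMEM;

        if (tid <= tid_aux) {
            ret = -EINVAL;              /* was: return -EINVAL (leaked pe) */
            goto error;
        }

        pe->tid = tid;
        /* ... program the hardware entry here ... */

    error:
        free(pe);                       /* single exit frees on all paths */
        return ret;
    }

    int main(void)
    {
        return vlan_add(1, 2) == -EINVAL ? 0 : 1;
    }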
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fec8fce..02266e3de514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2282 VXLAN_STEER_BY_OUTER_MAC, 1); 2282 VXLAN_STEER_BY_OUTER_MAC, 1);
2283out: 2283out:
2284 if (ret) 2284 if (ret) {
2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2286 return;
2287 }
2288
2289 /* set offloads */
2290 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2291 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2292 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2293 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2286} 2294}
2287 2295
2288static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2296static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2290 int ret; 2298 int ret;
2291 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2299 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2292 vxlan_del_task); 2300 vxlan_del_task);
2301 /* unset offloads */
2302 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2303 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2304 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2305 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2293 2306
2294 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2307 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2295 VXLAN_STEER_BY_OUTER_MAC, 0); 2308 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2568 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 2581 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2569 dev->priv_flags |= IFF_UNICAST_FLT; 2582 dev->priv_flags |= IFF_UNICAST_FLT;
2570 2583
2571 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2572 dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2573 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2574 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2575 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2576 }
2577
2578 mdev->pndev[port] = dev; 2584 mdev->pndev[port] = dev;
2579 2585
2580 netif_carrier_off(dev); 2586 netif_carrier_off(dev);
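The mlx4_en change ties offload advertisement to the firmware call: the GSO_UDP_TUNNEL feature bits are set only after mlx4_SET_PORT_VXLAN() succeeds and cleared again before the tunnel is torn down. A small model of advertising only what the hardware just confirmed:

    #include <stdio.h>

    #define F_GSO_UDP_TUNNEL 0x1u       /* mock feature bit */

    static unsigned int dev_features;

    /* Stub for mlx4_SET_PORT_VXLAN(); return nonzero to model failure. */
    static int set_port_vxlan(int enable) { (void)enable; return 0; }

    static void add_vxlan_offloads(void)
    {
        if (set_port_vxlan(1))
            return;                     /* failed: advertise nothing */
        dev_features |= F_GSO_UDP_TUNNEL;
    }

    static void del_vxlan_offloads(void)
    {
        dev_features &= ~F_GSO_UDP_TUNNEL;  /* unset before teardown */
        set_port_vxlan(0);
    }

    int main(void)
    {
        add_vxlan_offloads();
        printf("after add: %#x\n", dev_features);
        del_vxlan_offloads();
        printf("after del: %#x\n", dev_features);
        return 0;
    }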
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a278238a2db6..ad2c96a02a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", 374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
375 name, pci_name(dev->pdev)); 375 name, pci_name(dev->pdev));
376 eq->eqn = out.eq_number; 376 eq->eqn = out.eq_number;
377 eq->irqn = vecidx;
378 eq->dev = dev;
379 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
377 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 380 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
378 eq->name, eq); 381 eq->name, eq);
379 if (err) 382 if (err)
380 goto err_eq; 383 goto err_eq;
381 384
382 eq->irqn = vecidx;
383 eq->dev = dev;
384 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
385
386 err = mlx5_debug_eq_add(dev, eq); 385 err = mlx5_debug_eq_add(dev, eq);
387 if (err) 386 if (err)
388 goto err_irq; 387 goto err_irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e489b2d..71b10b210792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
864 dev->profile = &profile[prof_sel]; 864 dev->profile = &profile[prof_sel];
865 dev->event = mlx5_core_event; 865 dev->event = mlx5_core_event;
866 866
867 INIT_LIST_HEAD(&priv->ctx_list);
868 spin_lock_init(&priv->ctx_lock);
867 err = mlx5_dev_init(dev, pdev); 869 err = mlx5_dev_init(dev, pdev);
868 if (err) { 870 if (err) {
869 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); 871 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
870 goto out; 872 goto out;
871 } 873 }
872 874
873 INIT_LIST_HEAD(&priv->ctx_list);
874 spin_lock_init(&priv->ctx_lock);
875 err = mlx5_register_device(dev); 875 err = mlx5_register_device(dev);
876 if (err) { 876 if (err) {
877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); 877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
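Both mlx5 hunks fix the same class of bug: a callback can run the moment it is registered, so every field it touches must be initialized first. A compilable illustration of the ordering rule, where the stub register function fires the handler immediately:

    #include <stdio.h>

    struct eq { int irqn; void *doorbell; };

    static void handler(struct eq *eq)
    {
        printf("irq %d doorbell %p\n", eq->irqn, eq->doorbell);
    }

    /* Stand-in for request_irq(): the kernel may deliver an interrupt
     * as soon as registration completes, modeled by calling the
     * handler immediately. */
    static void register_irq(void (*h)(struct eq *), struct eq *eq)
    {
        h(eq);
    }

    int main(void)
    {
        static char uar_page[4096];
        struct eq eq;

        /* Fill in everything the handler reads BEFORE registering. */
        eq.irqn = 3;
        eq.doorbell = uar_page + 0x40;
        register_irq(handler, &eq);
        return 0;
    }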
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1ccd276d..613037584d08 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
2762 if (test_bit(__NX_RESETTING, &adapter->state)) 2762 if (test_bit(__NX_RESETTING, &adapter->state))
2763 goto reschedule; 2763 goto reschedule;
2764 2764
2765 if (test_bit(__NX_DEV_UP, &adapter->state)) { 2765 if (test_bit(__NX_DEV_UP, &adapter->state) &&
2766 !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
2766 if (!adapter->has_link_events) { 2767 if (!adapter->has_link_events) {
2767 2768
2768 netxen_nic_handle_phy_intr(adapter); 2769 netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a47147937d..9a49f42ac2ba 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
5config NET_VENDOR_QUALCOMM 5config NET_VENDOR_QUALCOMM
6 bool "Qualcomm devices" 6 bool "Qualcomm devices"
7 default y 7 default y
8 depends on SPI_MASTER && OF_GPIO
9 ---help--- 8 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 9 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 10 and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
20 19
21config QCA7000 20config QCA7000
22 tristate "Qualcomm Atheros QCA7000 support" 21 tristate "Qualcomm Atheros QCA7000 support"
23 depends on SPI_MASTER && OF_GPIO 22 depends on SPI_MASTER && OF
24 ---help--- 23 ---help---
25 This SPI protocol driver supports the Qualcomm Atheros QCA7000. 24 This SPI protocol driver supports the Qualcomm Atheros QCA7000.
26 25
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..a77f05ce8325 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
180 EFX_MAX_CHANNELS, 180 EFX_MAX_CHANNELS,
181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / 181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); 182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
183 BUG_ON(efx->max_channels == 0); 183 if (WARN_ON(efx->max_channels == 0))
184 return -EIO;
184 185
185 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 186 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
186 if (!nic_data) 187 if (!nic_data)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 2c62208077fe..6cc3cf6f17c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
2243 const struct of_device_id *match = NULL; 2243 const struct of_device_id *match = NULL;
2244 struct smc_local *lp; 2244 struct smc_local *lp;
2245 struct net_device *ndev; 2245 struct net_device *ndev;
2246 struct resource *res, *ires; 2246 struct resource *res;
2247 unsigned int __iomem *addr; 2247 unsigned int __iomem *addr;
2248 unsigned long irq_flags = SMC_IRQ_FLAGS; 2248 unsigned long irq_flags = SMC_IRQ_FLAGS;
2249 unsigned long irq_resflags;
2249 int ret; 2250 int ret;
2250 2251
2251 ndev = alloc_etherdev(sizeof(struct smc_local)); 2252 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
2337 goto out_free_netdev; 2338 goto out_free_netdev;
2338 } 2339 }
2339 2340
2340 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2341 ndev->irq = platform_get_irq(pdev, 0);
2341 if (!ires) { 2342 if (ndev->irq <= 0) {
2342 ret = -ENODEV; 2343 ret = -ENODEV;
2343 goto out_release_io; 2344 goto out_release_io;
2344 } 2345 }
2345 2346 /*
2346 ndev->irq = ires->start; 2347 * If this platform does not specify any special irqflags, or if
2347 2348 * the resource supplies a trigger, override the irqflags with
2348 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) 2349 * the trigger flags from the resource.
2349 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2350 */
2351 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2352 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2353 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2350 2354
2351 ret = smc_request_attrib(pdev, ndev); 2355 ret = smc_request_attrib(pdev, ndev);
2352 if (ret) 2356 if (ret)
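The smc91x conversion drops the IORESOURCE_IRQ lookup in favor of platform_get_irq() plus irqd_get_trigger_type(). The flag-merging logic reduces to a couple of lines; a stand-alone model with a stub trigger lookup:

    #include <stdio.h>

    #define IRQF_TRIGGER_MASK 0xfUL

    /* Stub for irqd_get_trigger_type(): trigger recorded for the irq
     * by the platform/device tree. */
    static unsigned long irq_trigger_type(int irq) { (void)irq; return 0x2; }

    int main(void)
    {
        long irq_flags = -1;            /* "no special flags" sentinel */
        unsigned long irq_resflags = irq_trigger_type(42);

        /* If the platform gave no flags, or the irq itself carries a
         * trigger, take the trigger from the irq. */
        if (irq_flags == -1 || (irq_resflags & IRQF_TRIGGER_MASK))
            irq_flags = irq_resflags & IRQF_TRIGGER_MASK;

        printf("irq_flags = %#lx\n", (unsigned long)irq_flags);
        return 0;
    }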
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29da353e..77ed74561e5f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1342 spin_unlock(&pdata->mac_lock); 1342 spin_unlock(&pdata->mac_lock);
1343} 1343}
1344 1344
1345static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1346{
1347 int rc = 0;
1348
1349 if (!pdata->phy_dev)
1350 return rc;
1351
1352 /* If the internal PHY is in General Power-Down mode, everything except
1353 * the management interface is powered down and stays in that condition as
1354 * long as PHY register bit 0.11 is HIGH.
1355 *
1356 * In that case, clear bit 0.11 so the PHY powers up and we can
1357 * access the PHY registers.
1358 */
1359 rc = phy_read(pdata->phy_dev, MII_BMCR);
1360 if (rc < 0) {
1361 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
1362 return rc;
1363 }
1364
1365 /* If the PHY general power-down bit is not set, it is not necessary
1366 * to disable the general power-down mode.
1367 */
1368 if (rc & BMCR_PDOWN) {
1369 rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
1370 if (rc < 0) {
1371 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1372 return rc;
1373 }
1374
1375 usleep_range(1000, 1500);
1376 }
1377
1378 return 0;
1379}
1380
1345static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) 1381static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1346{ 1382{
1347 int rc = 0; 1383 int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1356 return rc; 1392 return rc;
1357 } 1393 }
1358 1394
1359 /* 1395 /* Only disable if energy detect mode is already enabled */
1360 * If energy is detected the PHY is already awake so is not necessary 1396 if (rc & MII_LAN83C185_EDPWRDOWN) {
1361 * to disable the energy detect power-down mode.
1362 */
1363 if ((rc & MII_LAN83C185_EDPWRDOWN) &&
1364 !(rc & MII_LAN83C185_ENERGYON)) {
1365 /* Disable energy detect mode for this SMSC Transceivers */ 1397 /* Disable energy detect mode for this SMSC Transceivers */
1366 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1367 rc & (~MII_LAN83C185_EDPWRDOWN)); 1399 rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1370 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1371 return rc; 1403 return rc;
1372 } 1404 }
1373 1405 /* Allow PHY to wake up */
1374 mdelay(1); 1406 mdelay(2);
1375 } 1407 }
1376 1408
1377 return 0; 1409 return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1393 1425
1394 /* Only enable if energy detect mode is already disabled */ 1426 /* Only enable if energy detect mode is already disabled */
1395 if (!(rc & MII_LAN83C185_EDPWRDOWN)) { 1427 if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
1396 mdelay(100);
1397 /* Enable energy detect mode for this SMSC Transceivers */ 1428 /* Enable energy detect mode for this SMSC Transceivers */
1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1429 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1399 rc | MII_LAN83C185_EDPWRDOWN); 1430 rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1433 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1403 return rc; 1434 return rc;
1404 } 1435 }
1405
1406 mdelay(1);
1407 } 1436 }
1408 return 0; 1437 return 0;
1409} 1438}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1415 int ret; 1444 int ret;
1416 1445
1417 /* 1446 /*
1447 * Make sure to power up the PHY chip before doing a reset, otherwise
1448 * the reset fails.
1449 */
1450 ret = smsc911x_phy_general_power_up(pdata);
1451 if (ret) {
1452 SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
1453 return ret;
1454 }
1455
1456 /*
1418 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that 1457 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
1419 * are initialized in a Energy Detect Power-Down mode that prevents 1458 * are initialized in a Energy Detect Power-Down mode that prevents
1420 * the MAC chip to be software reseted. So we have to wakeup the PHY 1459 * the MAC chip to be software reseted. So we have to wakeup the PHY
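smsc911x_phy_general_power_up() reads BMCR, clears the power-down bit only if it is set, and waits for the PHY to come back. A user-space model with stubbed MDIO accessors:

    #include <stdio.h>

    #define BMCR_PDOWN 0x0800           /* MII register 0, bit 11 */

    static int bmcr = BMCR_PDOWN;       /* stub PHY: starts powered down */
    static int phy_read(int reg)           { (void)reg; return bmcr; }
    static int phy_write(int reg, int val) { (void)reg; bmcr = val; return 0; }

    /* Clear BMCR bit 0.11 only when it is actually set, then give the
     * PHY time to come back (usleep_range() in the driver). */
    static int phy_general_power_up(void)
    {
        int rc = phy_read(0);

        if (rc < 0)
            return rc;
        if (rc & BMCR_PDOWN)
            return phy_write(0, rc & ~BMCR_PDOWN);
        return 0;
    }

    int main(void)
    {
        phy_general_power_up();
        printf("BMCR now %#x (PDOWN cleared)\n", bmcr);
        return 0;
    }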
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
276bool stmmac_eee_init(struct stmmac_priv *priv) 276bool stmmac_eee_init(struct stmmac_priv *priv)
277{ 277{
278 char *phy_bus_name = priv->plat->phy_bus_name; 278 char *phy_bus_name = priv->plat->phy_bus_name;
279 unsigned long flags;
279 bool ret = false; 280 bool ret = false;
280 281
281 /* Using PCS we cannot dial with the phy registers at this stage 282 /* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
300 * changed). 301 * changed).
301 * In that case the driver disable own timers. 302 * In that case the driver disable own timers.
302 */ 303 */
304 spin_lock_irqsave(&priv->lock, flags);
303 if (priv->eee_active) { 305 if (priv->eee_active) {
304 pr_debug("stmmac: disable EEE\n"); 306 pr_debug("stmmac: disable EEE\n");
305 del_timer_sync(&priv->eee_ctrl_timer); 307 del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
307 tx_lpi_timer); 309 tx_lpi_timer);
308 } 310 }
309 priv->eee_active = 0; 311 priv->eee_active = 0;
312 spin_unlock_irqrestore(&priv->lock, flags);
310 goto out; 313 goto out;
311 } 314 }
312 /* Activate the EEE and start timers */ 315 /* Activate the EEE and start timers */
316 spin_lock_irqsave(&priv->lock, flags);
313 if (!priv->eee_active) { 317 if (!priv->eee_active) {
314 priv->eee_active = 1; 318 priv->eee_active = 1;
315 init_timer(&priv->eee_ctrl_timer); 319 init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
325 /* Set HW EEE according to the speed */ 329 /* Set HW EEE according to the speed */
326 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); 330 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
327 331
328 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
329
330 ret = true; 332 ret = true;
333 spin_unlock_irqrestore(&priv->lock, flags);
334
335 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
331 } 336 }
332out: 337out:
333 return ret; 338 return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
760 if (new_state && netif_msg_link(priv)) 765 if (new_state && netif_msg_link(priv))
761 phy_print_status(phydev); 766 phy_print_status(phydev);
762 767
768 spin_unlock_irqrestore(&priv->lock, flags);
769
763 /* At this stage, it could be needed to setup the EEE or adjust some 770 /* At this stage, it could be needed to setup the EEE or adjust some
764 * MAC related HW registers. 771 * MAC related HW registers.
765 */ 772 */
766 priv->eee_enabled = stmmac_eee_init(priv); 773 priv->eee_enabled = stmmac_eee_init(priv);
767
768 spin_unlock_irqrestore(&priv->lock, flags);
769} 774}
770 775
771/** 776/**
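The stmmac hunks enforce one locking rule: priv->lock must not be held when stmmac_eee_init() is called, because that function now takes the same lock itself. A pthreads illustration of releasing a lock before calling a self-locking helper (kernel spinlocks are not recursive, so the old ordering would deadlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int eee_enabled;

    /* Like stmmac_eee_init() after the patch: takes the lock itself. */
    static int eee_init(void)
    {
        pthread_mutex_lock(&lock);
        /* ... toggle EEE state under the lock ... */
        pthread_mutex_unlock(&lock);
        return 1;
    }

    static void adjust_link(void)
    {
        pthread_mutex_lock(&lock);
        /* ... update speed/duplex state under the lock ... */
        pthread_mutex_unlock(&lock);

        /* Only call the self-locking helper AFTER dropping the lock;
         * with the lock still held this would self-deadlock. */
        eee_enabled = eee_init();
    }

    int main(void)
    {
        adjust_link();
        printf("eee_enabled=%d\n", eee_enabled);
        return 0;
    }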
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
959} 964}
960 965
961static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, 966static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
962 int i) 967 int i, gfp_t flags)
963{ 968{
964 struct sk_buff *skb; 969 struct sk_buff *skb;
965 970
966 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, 971 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
967 GFP_KERNEL); 972 flags);
968 if (!skb) { 973 if (!skb) {
969 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 974 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
970 return -ENOMEM; 975 return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1006 * and allocates the socket buffers. It suppors the chained and ring 1011 * and allocates the socket buffers. It suppors the chained and ring
1007 * modes. 1012 * modes.
1008 */ 1013 */
1009static int init_dma_desc_rings(struct net_device *dev) 1014static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1010{ 1015{
1011 int i; 1016 int i;
1012 struct stmmac_priv *priv = netdev_priv(dev); 1017 struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
1041 else 1046 else
1042 p = priv->dma_rx + i; 1047 p = priv->dma_rx + i;
1043 1048
1044 ret = stmmac_init_rx_buffers(priv, p, i); 1049 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1045 if (ret) 1050 if (ret)
1046 goto err_init_rx_buffers; 1051 goto err_init_rx_buffers;
1047 1052
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1647 struct stmmac_priv *priv = netdev_priv(dev); 1652 struct stmmac_priv *priv = netdev_priv(dev);
1648 int ret; 1653 int ret;
1649 1654
1650 ret = init_dma_desc_rings(dev);
1651 if (ret < 0) {
1652 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1653 return ret;
1654 }
1655 /* DMA initialization and SW reset */ 1655 /* DMA initialization and SW reset */
1656 ret = stmmac_init_dma_engine(priv); 1656 ret = stmmac_init_dma_engine(priv);
1657 if (ret < 0) { 1657 if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1705 } 1705 }
1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1707 1707
1708 priv->eee_enabled = stmmac_eee_init(priv);
1709
1710 stmmac_init_tx_coalesce(priv);
1711
1712 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { 1708 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1713 priv->rx_riwt = MAX_DMA_RIWT; 1709 priv->rx_riwt = MAX_DMA_RIWT;
1714 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 1710 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
1761 goto dma_desc_error; 1757 goto dma_desc_error;
1762 } 1758 }
1763 1759
1760 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1761 if (ret < 0) {
1762 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1763 goto init_error;
1764 }
1765
1764 ret = stmmac_hw_setup(dev); 1766 ret = stmmac_hw_setup(dev);
1765 if (ret < 0) { 1767 if (ret < 0) {
1766 pr_err("%s: Hw setup failed\n", __func__); 1768 pr_err("%s: Hw setup failed\n", __func__);
1767 goto init_error; 1769 goto init_error;
1768 } 1770 }
1769 1771
1772 stmmac_init_tx_coalesce(priv);
1773
1770 if (priv->phydev) 1774 if (priv->phydev)
1771 phy_start(priv->phydev); 1775 phy_start(priv->phydev);
1772 1776
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1894 unsigned int nopaged_len = skb_headlen(skb); 1898 unsigned int nopaged_len = skb_headlen(skb);
1895 unsigned int enh_desc = priv->plat->enh_desc; 1899 unsigned int enh_desc = priv->plat->enh_desc;
1896 1900
1901 spin_lock(&priv->tx_lock);
1902
1897 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 1903 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1904 spin_unlock(&priv->tx_lock);
1898 if (!netif_queue_stopped(dev)) { 1905 if (!netif_queue_stopped(dev)) {
1899 netif_stop_queue(dev); 1906 netif_stop_queue(dev);
1900 /* This is a hard error, log it. */ 1907 /* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1903 return NETDEV_TX_BUSY; 1910 return NETDEV_TX_BUSY;
1904 } 1911 }
1905 1912
1906 spin_lock(&priv->tx_lock);
1907
1908 if (priv->tx_path_in_lpi_mode) 1913 if (priv->tx_path_in_lpi_mode)
1909 stmmac_disable_eee_mode(priv); 1914 stmmac_disable_eee_mode(priv);
1910 1915
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2025 return NETDEV_TX_OK; 2030 return NETDEV_TX_OK;
2026 2031
2027dma_map_err: 2032dma_map_err:
2033 spin_unlock(&priv->tx_lock);
2028 dev_err(priv->device, "Tx dma map failed\n"); 2034 dev_err(priv->device, "Tx dma map failed\n");
2029 dev_kfree_skb(skb); 2035 dev_kfree_skb(skb);
2030 priv->dev->stats.tx_dropped++; 2036 priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
2281{ 2287{
2282 struct stmmac_priv *priv = netdev_priv(dev); 2288 struct stmmac_priv *priv = netdev_priv(dev);
2283 2289
2284 spin_lock(&priv->lock);
2285 priv->hw->mac->set_filter(priv->hw, dev); 2290 priv->hw->mac->set_filter(priv->hw, dev);
2286 spin_unlock(&priv->lock);
2287} 2291}
2288 2292
2289/** 2293/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
2950 stmmac_set_mac(priv->ioaddr, false); 2954 stmmac_set_mac(priv->ioaddr, false);
2951 pinctrl_pm_select_sleep_state(priv->device); 2955 pinctrl_pm_select_sleep_state(priv->device);
2952 /* Disable clock in case of PWM is off */ 2956 /* Disable clock in case of PWM is off */
2953 clk_disable_unprepare(priv->stmmac_clk); 2957 clk_disable(priv->stmmac_clk);
2954 } 2958 }
2955 spin_unlock_irqrestore(&priv->lock, flags); 2959 spin_unlock_irqrestore(&priv->lock, flags);
2956 2960
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
2982 } else { 2986 } else {
2983 pinctrl_pm_select_default_state(priv->device); 2987 pinctrl_pm_select_default_state(priv->device);
2984 /* enable the clk prevously disabled */ 2988 /* enable the clk prevously disabled */
2985 clk_prepare_enable(priv->stmmac_clk); 2989 clk_enable(priv->stmmac_clk);
2986 /* reset the phy so that it's ready */ 2990 /* reset the phy so that it's ready */
2987 if (priv->mii) 2991 if (priv->mii)
2988 stmmac_mdio_reset(priv->mii); 2992 stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
2990 2994
2991 netif_device_attach(ndev); 2995 netif_device_attach(ndev);
2992 2996
2997 init_dma_desc_rings(ndev, GFP_ATOMIC);
2993 stmmac_hw_setup(ndev); 2998 stmmac_hw_setup(ndev);
2999 stmmac_init_tx_coalesce(priv);
2994 3000
2995 napi_enable(&priv->napi); 3001 napi_enable(&priv->napi);
2996 3002
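Passing gfp_t into init_dma_desc_rings() is what lets resume reuse it: open runs in process context and passes GFP_KERNEL, while resume cannot sleep and passes GFP_ATOMIC. A sketch of threading an allocation-context flag through a helper, with mock flag names:

    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { MAY_SLEEP, ATOMIC } alloc_ctx;   /* mock gfp_t */

    /* The ring-init helper no longer hard-codes GFP_KERNEL. */
    static void *alloc_rx_buffer(size_t sz, alloc_ctx ctx)
    {
        /* A kernel allocator may sleep for GFP_KERNEL but must not
         * for GFP_ATOMIC; malloc() stands in for both here. */
        printf("alloc %zu bytes (%s)\n", sz,
               ctx == ATOMIC ? "atomic" : "may sleep");
        return malloc(sz);
    }

    static int init_rings(alloc_ctx ctx)
    {
        void *b = alloc_rx_buffer(2048, ctx);

        if (!b)
            return -1;
        free(b);
        return 0;
    }

    int main(void)
    {
        init_rings(MAY_SLEEP);  /* open path: process context  */
        init_rings(ATOMIC);     /* resume path: cannot sleep   */
        return 0;
    }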
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1262 HMD(("init rxring, ")); 1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 u32 mapping;
1265 1266
1266 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1267 if (!skb) { 1268 if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1272 1273
1273 /* Because we reserve afterwards. */ 1274 /* Because we reserve afterwards. */
1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1275 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1276 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), 1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1277 dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, 1285 mapping);
1278 DMA_FROM_DEVICE));
1279 skb_reserve(skb, RX_OFFSET); 1286 skb_reserve(skb, RX_OFFSET);
1280 } 1287 }
1281 1288
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2020 skb = hp->rx_skbs[elem]; 2027 skb = hp->rx_skbs[elem];
2021 if (len > RX_COPY_THRESHOLD) { 2028 if (len > RX_COPY_THRESHOLD) {
2022 struct sk_buff *new_skb; 2029 struct sk_buff *new_skb;
2030 u32 mapping;
2023 2031
2024 /* Now refill the entry, if we can. */ 2032 /* Now refill the entry, if we can. */
2025 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2027 drops++; 2035 drops++;
2028 goto drop_it; 2036 goto drop_it;
2029 } 2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2031 hp->rx_skbs[elem] = new_skb; 2049 hp->rx_skbs[elem] = new_skb;
2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2033 hme_write_rxd(hp, this, 2050 hme_write_rxd(hp, this,
2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2035 dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, 2052 mapping);
2036 DMA_FROM_DEVICE));
2037 skb_reserve(new_skb, RX_OFFSET); 2053 skb_reserve(new_skb, RX_OFFSET);
2038 2054
2039 /* Trim the original skb for the netif. */ 2055 /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
2248 netif_wake_queue(dev); 2264 netif_wake_queue(dev);
2249} 2265}
2250 2266
2267static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269{
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
2282 dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
2283 }
2284}
2285
2251static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, 2286static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2252 struct net_device *dev) 2287 struct net_device *dev)
2253{ 2288{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2284 2319
2285 len = skb->len; 2320 len = skb->len;
2286 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); 2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2287 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); 2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2288 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], 2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2289 (tx_flags | (len & TXFLAG_SIZE)), 2326 (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2299 first_len = skb_headlen(skb); 2336 first_len = skb_headlen(skb);
2300 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, 2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2301 DMA_TO_DEVICE); 2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2302 entry = NEXT_TX(entry); 2341 entry = NEXT_TX(entry);
2303 2342
2304 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2308 len = skb_frag_size(this_frag); 2347 len = skb_frag_size(this_frag);
2309 mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2310 0, len, DMA_TO_DEVICE); 2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2311 this_txflags = tx_flags; 2355 this_txflags = tx_flags;
2312 if (frag == skb_shinfo(skb)->nr_frags - 1) 2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2313 this_txflags |= TXFLAG_EOP; 2357 this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2333 2377
2334 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); 2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2335 return NETDEV_TX_OK; 2379 return NETDEV_TX_OK;
2380
2381out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2336} 2388}
2337 2389
2338static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2390static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
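Every new sunhme hunk applies the same discipline: check dma_mapping_error() after each map and, on failure, unwind whatever was already mapped before dropping the packet. A compilable model of mapping an N-part packet with full unwind, using a stub mapper:

    #include <stdio.h>
    #include <stdint.h>

    #define BAD_DMA ((uintptr_t)-1)

    /* Stub mapper: a NULL fragment models a mapping failure. */
    static uintptr_t dma_map(const void *p)   { return p ? (uintptr_t)p : BAD_DMA; }
    static int dma_mapping_error(uintptr_t a) { return a == BAD_DMA; }
    static void dma_unmap(uintptr_t a)        { printf("unmap %#lx\n", (unsigned long)a); }

    static int map_fragments(const void **frags, int n, uintptr_t *addr)
    {
        for (int i = 0; i < n; i++) {
            addr[i] = dma_map(frags[i]);
            if (dma_mapping_error(addr[i])) {
                /* Unwind the mappings made so far; the caller then
                 * frees the skb and bumps tx_dropped. */
                while (--i >= 0)
                    dma_unmap(addr[i]);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        char a, b;
        const void *frags[3] = { &a, &b, NULL };    /* third map fails */
        uintptr_t addr[3];

        return map_fragments(frags, 3, addr) ? 0 : 1;
    }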
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 3ae83879a75f..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
785{ 785{
786 if (!ale) 786 if (!ale)
787 return -EINVAL; 787 return -EINVAL;
788 cpsw_ale_stop(ale);
789 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); 788 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
790 kfree(ale); 789 kfree(ale);
791 return 0; 790 return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
264 264
265 switch (ptp_class & PTP_CLASS_PMASK) { 265 switch (ptp_class & PTP_CLASS_PMASK) {
266 case PTP_CLASS_IPV4: 266 case PTP_CLASS_IPV4:
267 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 267 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
268 break; 268 break;
269 case PTP_CLASS_IPV6: 269 case PTP_CLASS_IPV6:
270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
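The cpts and dp83640 changes are the same one-liner: on a VLAN-tagged frame the IPv4 header is shifted by the tag, so the IHL byte must be read at data + offset, not data. IPV4_HLEN() already adds the 14-byte Ethernet header internally (OFF_IHL in linux/ptp_classify.h); a tiny demonstration:

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN 4
    #define OFF_IHL   14    /* as in linux/ptp_classify.h */
    #define IPV4_HLEN(p) ((((const uint8_t *)(p))[OFF_IHL] & 0x0f) << 2)

    int main(void)
    {
        /* 802.1Q frame: 14-byte Ethernet header, 4-byte tag, then an
         * IPv4 header whose first byte is 0x45 (version 4, IHL 5). */
        uint8_t frame[64] = { 0 };
        unsigned int offset = VLAN_HLEN;    /* from PTP classification */

        frame[ETH_HLEN + VLAN_HLEN] = 0x45;

        /* Broken: reads a byte of the VLAN tag, giving a bogus IHL. */
        printf("IPV4_HLEN(data)          = %u\n", IPV4_HLEN(frame));
        /* Fixed: reads the real version/IHL byte past the tag. */
        printf("IPV4_HLEN(data + offset) = %u\n", IPV4_HLEN(frame + offset));
        return 0;
    }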
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f226de655a4..880cc090dc44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
629 if (skb->ip_summed == CHECKSUM_PARTIAL) { 629 if (skb->ip_summed == CHECKSUM_PARTIAL) {
630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
631 vnet_hdr->csum_start = skb_checksum_start_offset(skb); 631 vnet_hdr->csum_start = skb_checksum_start_offset(skb);
632 if (vlan_tx_tag_present(skb))
633 vnet_hdr->csum_start += VLAN_HLEN;
632 vnet_hdr->csum_offset = skb->csum_offset; 634 vnet_hdr->csum_offset = skb->csum_offset;
633 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 635 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
634 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 636 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
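This macvtap hunk and the tun hunks further down correct the same accounting: when the VLAN tag is reinserted into the frame handed to userspace, every offset measured from the start of the packet, including csum_start, shifts by VLAN_HLEN. The arithmetic:

    #include <stdio.h>

    #define VLAN_HLEN 4

    int main(void)
    {
        unsigned int csum_start = 14;   /* e.g. checksum starts after the
                                           Ethernet header in the skb   */
        int tag_present = 1;

        /* The frame handed to userspace has the 4-byte tag reinserted,
         * so the checksum start moves right by VLAN_HLEN. */
        unsigned int wire_csum_start =
            csum_start + (tag_present ? VLAN_HLEN : 0);

        printf("in-skb csum_start %u, on-wire csum_start %u\n",
               csum_start, wire_csum_start);
        return 0;
    }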
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052706e8..e22e602beef3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
791 791
792 switch (type & PTP_CLASS_PMASK) { 792 switch (type & PTP_CLASS_PMASK) {
793 case PTP_CLASS_IPV4: 793 case PTP_CLASS_IPV4:
794 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 794 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
795 break; 795 break;
796 case PTP_CLASS_IPV6: 796 case PTP_CLASS_IPV6:
797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
934 934
935 switch (type & PTP_CLASS_PMASK) { 935 switch (type & PTP_CLASS_PMASK) {
936 case PTP_CLASS_IPV4: 936 case PTP_CLASS_IPV4:
937 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 937 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
938 break; 938 break;
939 case PTP_CLASS_IPV6: 939 case PTP_CLASS_IPV6:
940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc9dfc3..767cd110f496 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
352{ 352{
353 struct mii_ioctl_data *mii_data = if_mii(ifr); 353 struct mii_ioctl_data *mii_data = if_mii(ifr);
354 u16 val = mii_data->val_in; 354 u16 val = mii_data->val_in;
355 bool change_autoneg = false;
355 356
356 switch (cmd) { 357 switch (cmd) {
357 case SIOCGMIIPHY: 358 case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
367 if (mii_data->phy_id == phydev->addr) { 368 if (mii_data->phy_id == phydev->addr) {
368 switch (mii_data->reg_num) { 369 switch (mii_data->reg_num) {
369 case MII_BMCR: 370 case MII_BMCR:
370 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) 371 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
372 if (phydev->autoneg == AUTONEG_ENABLE)
373 change_autoneg = true;
371 phydev->autoneg = AUTONEG_DISABLE; 374 phydev->autoneg = AUTONEG_DISABLE;
372 else 375 if (val & BMCR_FULLDPLX)
376 phydev->duplex = DUPLEX_FULL;
377 else
378 phydev->duplex = DUPLEX_HALF;
379 if (val & BMCR_SPEED1000)
380 phydev->speed = SPEED_1000;
381 else if (val & BMCR_SPEED100)
382 phydev->speed = SPEED_100;
383 else phydev->speed = SPEED_10;
384 }
385 else {
386 if (phydev->autoneg == AUTONEG_DISABLE)
387 change_autoneg = true;
373 phydev->autoneg = AUTONEG_ENABLE; 388 phydev->autoneg = AUTONEG_ENABLE;
374 if (!phydev->autoneg && (val & BMCR_FULLDPLX)) 389 }
375 phydev->duplex = DUPLEX_FULL;
376 else
377 phydev->duplex = DUPLEX_HALF;
378 if (!phydev->autoneg && (val & BMCR_SPEED1000))
379 phydev->speed = SPEED_1000;
380 else if (!phydev->autoneg &&
381 (val & BMCR_SPEED100))
382 phydev->speed = SPEED_100;
383 break; 390 break;
384 case MII_ADVERTISE: 391 case MII_ADVERTISE:
385 phydev->advertising = val; 392 phydev->advertising = mii_adv_to_ethtool_adv_t(val);
393 change_autoneg = true;
386 break; 394 break;
387 default: 395 default:
388 /* do nothing */ 396 /* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
396 if (mii_data->reg_num == MII_BMCR && 404 if (mii_data->reg_num == MII_BMCR &&
397 val & BMCR_RESET) 405 val & BMCR_RESET)
398 return phy_init_hw(phydev); 406 return phy_init_hw(phydev);
407
408 if (change_autoneg)
409 return phy_start_aneg(phydev);
410
399 return 0; 411 return 0;
400 412
401 case SIOCSHWTSTAMP: 413 case SIOCSHWTSTAMP:
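The reworked SIOCSMIIREG handler decodes forced speed and duplex straight from the BMCR value and restarts autonegotiation only when the mode actually changed. Decoding BMCR is plain bit arithmetic (standard MII bit values); a simplified stand-alone version that keys only on BMCR_ANENABLE:

    #include <stdio.h>

    #define BMCR_ANENABLE  0x1000
    #define BMCR_FULLDPLX  0x0100
    #define BMCR_SPEED1000 0x0040
    #define BMCR_SPEED100  0x2000

    static void decode_bmcr(unsigned int val)
    {
        if (val & BMCR_ANENABLE) {
            printf("autoneg on\n");
            return;
        }
        printf("forced %s duplex, ", (val & BMCR_FULLDPLX) ? "full" : "half");
        if (val & BMCR_SPEED1000)
            printf("1000 Mb/s\n");
        else if (val & BMCR_SPEED100)
            printf("100 Mb/s\n");
        else
            printf("10 Mb/s\n");
    }

    int main(void)
    {
        decode_bmcr(BMCR_SPEED100 | BMCR_FULLDPLX); /* forced 100/full */
        decode_bmcr(BMCR_ANENABLE);                 /* autonegotiation */
        return 0;
    }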
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f4e0ab..794a47329368 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
         err = get_filter(argp, &code);
         if (err >= 0) {
+            struct bpf_prog *pass_filter = NULL;
             struct sock_fprog_kern fprog = {
                 .len = err,
                 .filter = code,
             };
 
-            ppp_lock(ppp);
-            if (ppp->pass_filter) {
-                bpf_prog_destroy(ppp->pass_filter);
-                ppp->pass_filter = NULL;
+            err = 0;
+            if (fprog.filter)
+                err = bpf_prog_create(&pass_filter, &fprog);
+            if (!err) {
+                ppp_lock(ppp);
+                if (ppp->pass_filter)
+                    bpf_prog_destroy(ppp->pass_filter);
+                ppp->pass_filter = pass_filter;
+                ppp_unlock(ppp);
             }
-            if (fprog.filter != NULL)
-                err = bpf_prog_create(&ppp->pass_filter,
-                                      &fprog);
-            else
-                err = 0;
             kfree(code);
-            ppp_unlock(ppp);
         }
         break;
     }
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
         err = get_filter(argp, &code);
         if (err >= 0) {
+            struct bpf_prog *active_filter = NULL;
             struct sock_fprog_kern fprog = {
                 .len = err,
                 .filter = code,
             };
 
-            ppp_lock(ppp);
-            if (ppp->active_filter) {
-                bpf_prog_destroy(ppp->active_filter);
-                ppp->active_filter = NULL;
+            err = 0;
+            if (fprog.filter)
+                err = bpf_prog_create(&active_filter, &fprog);
+            if (!err) {
+                ppp_lock(ppp);
+                if (ppp->active_filter)
+                    bpf_prog_destroy(ppp->active_filter);
+                ppp->active_filter = active_filter;
+                ppp_unlock(ppp);
             }
-            if (fprog.filter != NULL)
-                err = bpf_prog_create(&ppp->active_filter,
-                                      &fprog);
-            else
-                err = 0;
             kfree(code);
-            ppp_unlock(ppp);
         }
         break;
     }
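The reordering in both hunks is the classic prepare-outside-lock, publish-under-lock pattern: the sleeping call (bpf_prog_create()) moves out of ppp_lock(), and only the pointer swap is serialized. A generic user-space sketch of the same pattern, assuming pthreads (names are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *current_filter;

/* Allocate the new object with the lock dropped, publish it under the
 * lock, and free the previous one; only the swap is serialized. */
int replace_filter(size_t size)
{
    void *next = malloc(size);    /* may sleep or fail: done unlocked */

    if (!next)
        return -1;

    pthread_mutex_lock(&lock);
    free(current_filter);         /* retire the previously published one */
    current_filter = next;
    pthread_mutex_unlock(&lock);
    return 0;
}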
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7302398f0b1f..9dd3746994a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
     struct tun_pi pi = { 0, skb->protocol };
     ssize_t total = 0;
     int vlan_offset = 0, copied;
+    int vlan_hlen = 0;
+    int vnet_hdr_sz = 0;
+
+    if (vlan_tx_tag_present(skb))
+        vlan_hlen = VLAN_HLEN;
+
+    if (tun->flags & TUN_VNET_HDR)
+        vnet_hdr_sz = tun->vnet_hdr_sz;
 
     if (!(tun->flags & TUN_NO_PI)) {
         if ((len -= sizeof(pi)) < 0)
             return -EINVAL;
 
-        if (len < skb->len) {
+        if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
             /* Packet will be striped */
             pi.flags |= TUN_PKT_STRIP;
         }
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
         total += sizeof(pi);
     }
 
-    if (tun->flags & TUN_VNET_HDR) {
+    if (vnet_hdr_sz) {
         struct virtio_net_hdr gso = { 0 }; /* no info leak */
-        if ((len -= tun->vnet_hdr_sz) < 0)
+        if ((len -= vnet_hdr_sz) < 0)
             return -EINVAL;
 
         if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
             gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-            gso.csum_start = skb_checksum_start_offset(skb);
+            gso.csum_start = skb_checksum_start_offset(skb) +
+                             vlan_hlen;
             gso.csum_offset = skb->csum_offset;
         } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
             gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
         if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
                                        sizeof(gso))))
             return -EFAULT;
-        total += tun->vnet_hdr_sz;
+        total += vnet_hdr_sz;
     }
 
     copied = total;
-    total += skb->len;
-    if (!vlan_tx_tag_present(skb)) {
-        len = min_t(int, skb->len, len);
-    } else {
+    len = min_t(int, skb->len + vlan_hlen, len);
+    total += skb->len + vlan_hlen;
+    if (vlan_hlen) {
         int copy, ret;
         struct {
             __be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
         veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
         vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-        len = min_t(int, skb->len + VLAN_HLEN, len);
-        total += VLAN_HLEN;
 
         copy = min_t(int, vlan_offset, len);
         ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
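The csum_start fix accounts for the 4-byte 802.1Q tag that tun_put_user() reinserts into the headers it hands to user space: the skb carries the tag out of band, so every offset taken from the skb that lies past the insertion point must shift by VLAN_HLEN. A small sketch of the arithmetic, assuming an IPv4/TCP frame (offsets are illustrative):

#include <stdio.h>

#define VLAN_HLEN 4    /* 802.1Q tag: TPID + TCI */

int main(void)
{
    /* Checksum start as seen inside the kernel skb, where the VLAN
     * tag is carried out of band (hardware-accelerated). */
    unsigned int csum_start_skb = 14 + 20;  /* Ethernet + IPv4 header */

    /* The same offset as seen by the reader of the tap fd, after the
     * tag has been reinserted ahead of the checksum start. */
    unsigned int csum_start_user = csum_start_skb + VLAN_HLEN;

    printf("csum_start: %u in-skb, %u as delivered to userspace\n",
           csum_start_skb, csum_start_user);
    return 0;
}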
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6cdb12f..816d511e34d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
         return ret;
     }
 
-    ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
-    if (ret < 0)
-        return ret;
-
-    msleep(150);
-
-    ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
-    if (ret < 0)
-        return ret;
-
-    msleep(150);
-
-    ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+    ax88772_reset(dev);
 
     /* Read PHYID register *AFTER* the PHY was reset properly */
     phyid = asix_get_phyid(dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca309820d39e..fa9dc45b75a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
     return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+                                          sa_family_t family, __be16 port)
 {
     struct vxlan_sock *vs;
 
     hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
-        if (inet_sk(vs->sock->sk)->inet_sport == port)
+        if (inet_sk(vs->sock->sk)->inet_sport == port &&
+            inet_sk(vs->sock->sk)->sk.sk_family == family)
             return vs;
     }
     return NULL;
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 }
 
 /* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+                                        sa_family_t family, __be16 port)
 {
     struct vxlan_sock *vs;
 
-    vs = vxlan_find_sock(net, port);
+    vs = vxlan_find_sock(net, family, port);
     if (!vs)
         return NULL;
 
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
     int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
     int err = -ENOSYS;
 
+    udp_tunnel_gro_complete(skb, nhoff);
+
     eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
     type = eh->h_proto;
 
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
             struct vxlan_dev *dst_vxlan;
 
             ip_rt_put(rt);
-            dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+            dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                       dst->sa.sa_family, dst_port);
             if (!dst_vxlan)
                 goto tx_error;
             vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
             struct vxlan_dev *dst_vxlan;
 
             dst_release(ndst);
-            dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+            dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+                                       dst->sa.sa_family, dst_port);
             if (!dst_vxlan)
                 goto tx_error;
             vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
     struct vxlan_dev *vxlan = netdev_priv(dev);
     struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
     struct vxlan_sock *vs;
+    bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
 
     dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
     if (!dev->tstats)
         return -ENOMEM;
 
     spin_lock(&vn->sock_lock);
-    vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+    vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+                         vxlan->dst_port);
     if (vs) {
         /* If we have a socket with same port already, reuse it */
         atomic_inc(&vs->refcnt);
@@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 {
     struct vxlan_net *vn = net_generic(net, vxlan_net_id);
     struct vxlan_sock *vs;
+    bool ipv6 = flags & VXLAN_F_IPV6;
 
     vs = vxlan_socket_create(net, port, rcv, data, flags);
     if (!IS_ERR(vs))
@@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
         return vs;
 
     spin_lock(&vn->sock_lock);
-    vs = vxlan_find_sock(net, port);
+    vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
     if (vs) {
         if (vs->rcv == rcv)
             atomic_inc(&vs->refcnt);
@@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
         nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
         vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-    if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+    if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+                       vxlan->dst_port)) {
         pr_info("duplicate VNI %u\n", vni);
         return -EEXIST;
     }
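With dual-stack VXLAN an AF_INET and an AF_INET6 socket can legitimately share the same UDP port, so a port-only lookup can hand back a socket of the wrong family. A user-space sketch of the corrected predicate, with types simplified and field names illustrative:

#include <stdbool.h>
#include <sys/socket.h>

struct tunnel_sock {
    sa_family_t family;     /* AF_INET or AF_INET6 */
    unsigned short port;    /* local UDP port, network byte order */
};

/* A socket only matches when both the port and the address family do;
 * matching on the port alone would conflate v4 and v6 tunnels. */
static bool sock_matches(const struct tunnel_sock *vs,
                         sa_family_t family, unsigned short port)
{
    return vs->port == port && vs->family == family;
}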
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e0d9f19650b0..eb03943f8463 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
     lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON_ONCE(mvm->init_ucode_complete))
+    if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
         return 0;
 
     iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
         goto out;
     }
 
+    mvm->calibrating = true;
+
     /* Send TX valid antennas before triggering calibrations */
     ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
     if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
             MVM_UCODE_CALIB_TIMEOUT);
     if (!ret)
         mvm->init_ucode_complete = true;
+
+    if (ret && iwl_mvm_is_radio_killed(mvm)) {
+        IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+        ret = 1;
+    }
     goto out;
 
 error:
     iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
+    mvm->calibrating = false;
     if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
         /* we want to debug INIT and we have no NVM - fake */
         mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 585fe5b7100f..b62405865b25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
     mvm->scan_status = IWL_MVM_SCAN_NONE;
     mvm->ps_disabled = false;
+    mvm->calibrating = false;
 
     /* just in case one was running */
     ieee80211_remain_on_channel_expired(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced7015b..845429c88cf4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
     enum iwl_ucode_type cur_ucode;
     bool ucode_loaded;
     bool init_ucode_complete;
+    bool calibrating;
     u32 error_event_table;
     u32 log_event_table;
     u32 umac_error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 48cb25a93591..5b719ee8e789 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     }
     mvm->sf_state = SF_UNINIT;
     mvm->low_latency_agg_frame_limit = 6;
+    mvm->cur_ucode = IWL_UCODE_INIT;
 
     mutex_init(&mvm->mutex);
     mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
     struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+    bool calibrating = ACCESS_ONCE(mvm->calibrating);
 
     if (state)
         set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 
     wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 
-    return state && mvm->cur_ucode != IWL_UCODE_INIT;
+    /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+    if (calibrating)
+        iwl_abort_notification_waits(&mvm->notif_wait);
+
+    /*
+     * Stop the device if we run OPERATIONAL firmware or if we are in the
+     * middle of the calibrations.
+     */
+    return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 3781b029e54a..160c3ebc48d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
      * restart. So don't process again if the device is
      * already dead.
      */
-    if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+    if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+        IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
         iwl_pcie_tx_stop(trans);
         iwl_pcie_rx_stop(trans);
 
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
     /* clear all status bits */
     clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
     clear_bit(STATUS_INT_ENABLED, &trans->status);
-    clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
     clear_bit(STATUS_TPOWER_PMI, &trans->status);
     clear_bit(STATUS_RFKILL, &trans->status);
 
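Replacing the test_bit()/clear_bit() pair with test_and_clear_bit() makes the stop path idempotent under concurrency: exactly one caller observes the bit set and runs the teardown. A C11 sketch of the same idea, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool device_enabled = true;

/* atomic_exchange() plays the role of test_and_clear_bit(): only the
 * caller that sees the old true value performs the teardown. */
void stop_device(void)
{
    if (atomic_exchange(&device_enabled, false)) {
        /* ... stop TX/RX, power the hardware down, etc. ... */
    }
}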
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1ce741..c9ad4cf1adfb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
     if (err != 0) {
         printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
                err);
-        goto failed_hw;
+        goto failed_bind;
     }
 
     skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
     return idx;
 
 failed_hw:
+    device_release_driver(data->dev);
+failed_bind:
     device_unregister(data->dev);
 failed_drvdata:
     ieee80211_free_hw(hw);
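The new failed_bind label keeps the unwind order aligned with the setup order: a failed device_bind_driver() must not trigger device_release_driver(), which only undoes a bind that succeeded. A minimal sketch of that label layout; the steps are stubs standing in for the real driver-core calls:

/* Stubs for illustration only. */
static int register_device(void) { return 0; }
static int bind_driver(void) { return 0; }
static int setup_hw(void) { return 0; }
static void release_driver(void) { }
static void unregister_device(void) { }

/* Each label undoes exactly the steps that completed before the
 * failure, in reverse order of setup. */
int create_radio(void)
{
    if (register_device() < 0)
        goto failed_drvdata;
    if (bind_driver() < 0)
        goto failed_bind;   /* bind failed: nothing to release */
    if (setup_hw() < 0)
        goto failed_hw;     /* bind succeeded: release it first */
    return 0;

failed_hw:
    release_driver();
failed_bind:
    unregister_device();
failed_drvdata:
    return -1;
}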
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..bb9b83640070 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -256,7 +256,7 @@ struct ucred {
 #define MSG_EOF         MSG_FIN
 
 #define MSG_FASTOPEN    0x20000000  /* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
                                        descriptor received through
                                        SCM_RIGHTS */
 #if defined(CONFIG_COMPAT)
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c41..2a25dec30211 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -34,7 +34,6 @@
  * @list: used to maintain a list of currently available transports
  * @name: the human-readable name of the transport
  * @maxsize: transport provided maximum packet size
- * @pref: Preferences of this transport
  * @def: set if this transport should be considered the default
  * @create: member function to create a new connection on this transport
  * @close: member function to discard a connection on this transport
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index a47790bcaa38..2a50a70ef587 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
     return iptunnel_handle_offloads(skb, udp_csum, type);
 }
 
+static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
+{
+    struct udphdr *uh;
+
+    uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
+    skb_shinfo(skb)->gso_type |= uh->check ?
+                SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+}
+
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
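The new helper picks the GSO type from the outer UDP checksum field, so packets re-segmented after GRO keep (or keep omitting) the tunnel checksum. A standalone sketch of just that selection; the bit values here are illustrative, not the kernel's:

#include <stdio.h>

#define SKB_GSO_UDP_TUNNEL      (1 << 0)    /* illustrative values */
#define SKB_GSO_UDP_TUNNEL_CSUM (1 << 1)

/* A zero UDP checksum means "no checksum" on the encapsulation, so
 * re-segmentation must not start computing one, and vice versa. */
static unsigned int tunnel_gso_type(unsigned short udp_check)
{
    return udp_check ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
}

int main(void)
{
    printf("%u %u\n", tunnel_gso_type(0), tunnel_gso_type(0xbeef));
    return 0;
}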
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b70237e8bc37..4c94f31a8c99 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -125,6 +125,7 @@ header-y += filter.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
+header-y += fou.h
 header-y += fs.h
 header-y += fsl_hypervisor.h
 header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
 header-y += hiddev.h
 header-y += hidraw.h
 header-y += hpet.h
+header-y += hsr_netlink.h
 header-y += hyperv.h
 header-y += hysdn_if.h
 header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
 header-y += minix_fs.h
 header-y += mman.h
 header-y += mmtimer.h
+header-y += mpls.h
 header-y += mqueue.h
 header-y += mroute.h
 header-y += mroute6.h
@@ -424,6 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
+header-y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 39f621a9fe82..da17e456908d 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/if_ether.h>
+#include <linux/in6.h>
 
 #define SYSFS_BRIDGE_ATTR   "bridge"
 #define SYSFS_BRIDGE_FDB    "brforward"
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 081be3ba9ea8..624a0b7c05ef 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
     ht->shift++;
 
     /* For each new bucket, search the corresponding old bucket
-     * for the rst entry that hashes to the new bucket, and
+     * for the first entry that hashes to the new bucket, and
      * link the new bucket to that entry. Since all the entries
      * which will end up in the new bucket appear in the same
      * old bucket, this constructs an entirely valid new hash
@@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
     }
 
     /* Publish the new table pointer. Lookups may now traverse
-     * the new table, but they will not benet from any
-     * additional efciency until later steps unzip the buckets.
+     * the new table, but they will not benefit from any
+     * additional efficiency until later steps unzip the buckets.
      */
     rcu_assign_pointer(ht->tbl, new_tbl);
 
@@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
     ht->shift--;
 
-    /* Link each bucket in the new table to the rst bucket
+    /* Link each bucket in the new table to the first bucket
      * in the old table that contains entries which will hash
      * to the new bucket.
      */
     for (i = 0; i < ntbl->size; i++) {
         ntbl->buckets[i] = tbl->buckets[i];
 
-        /* Link each bucket in the new table to the rst bucket
+        /* Link each bucket in the new table to the first bucket
          * in the old table that contains entries which will hash
          * to the new bucket.
          */
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 654c9018e3e7..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -18,6 +18,7 @@
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
 #include "../br_private.h"
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
     /* We could not connect to a designated PHY, so use the switch internal
      * MDIO bus instead
      */
-    if (!p->phy)
+    if (!p->phy) {
         p->phy = ds->slave_mii_bus->phy_map[p->port];
-    else
+        phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                           p->phy_interface);
+    } else {
         pr_info("attached PHY at address %d [%s]\n",
                 p->phy->addr, p->phy->drv->name);
+    }
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
     int err = -ENOSYS;
     const struct net_offload **offloads;
 
+    udp_tunnel_gro_complete(skb, nhoff);
+
     rcu_read_lock();
     offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
     ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
     gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
     geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+    skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
     return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
                                tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
     destroy_workqueue(geneve_wq);
+    unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
     for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
         if (!CMSG_OK(msg, cmsg))
             return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
         if (allow_ipv6 &&
             cmsg->cmsg_level == SOL_IPV6 &&
             cmsg->cmsg_type == IPV6_PKTINFO) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a12b455928e5..88fa2d160685 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+    const struct tcp_sock *tp = tcp_sk(sk);
+    struct sk_buff *skb;
+
+    if (tp->retrans_out)
+        return true;
+
+    skb = tcp_write_queue_head(sk);
+    if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+        return true;
+
+    return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
          * is ACKed. For Reno it is MUST to prevent false
          * fast retransmits (RFC2582). SACK TCP is safe. */
         tcp_moderate_cwnd(tp);
+        if (!tcp_any_retrans_done(sk))
+            tp->retrans_stamp = 0;
         return true;
     }
     tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
     return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-    const struct tcp_sock *tp = tcp_sk(sk);
-    struct sk_buff *skb;
-
-    if (tp->retrans_out)
-        return true;
-
-    skb = tcp_write_queue_head(sk);
-    if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-        return true;
-
-    return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 12c3c8ef3849..4564e1fca3eb 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
     else
         dev->flags &= ~IFF_POINTOPOINT;
 
-    dev->iflink = p->link;
-
     /* Precalculate GRE options length */
     if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
         if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
         u64_stats_init(&ip6gre_tunnel_stats->syncp);
     }
 
+    dev->iflink = tunnel->parms.link;
 
     return 0;
 }
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
     if (!dev->tstats)
         return -ENOMEM;
 
+    dev->iflink = tunnel->parms.link;
+
     return 0;
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9409887fb664..9cb94cfa0ae7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
     int err;
 
     t = netdev_priv(dev);
-    err = ip6_tnl_dev_init(dev);
-    if (err < 0)
-        goto out;
 
     err = register_netdevice(dev);
     if (err < 0)
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+    .ndo_init       = ip6_tnl_dev_init,
     .ndo_uninit     = ip6_tnl_dev_uninit,
     .ndo_start_xmit = ip6_tnl_xmit,
     .ndo_do_ioctl   = ip6_tnl_ioctl,
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
     struct ip6_tnl *t = netdev_priv(dev);
     struct net *net = dev_net(dev);
     struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-    int err = ip6_tnl_dev_init_gen(dev);
-
-    if (err)
-        return err;
 
     t->parms.proto = IPPROTO_IPV6;
     dev_hold(dev);
 
-    ip6_tnl_link_config(t);
-
     rcu_assign_pointer(ip6n->tnls_wc[0], t);
     return 0;
 }
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d440bb585524..31089d153fd3 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
     struct vti6_net *ip6n = net_generic(net, vti6_net_id);
     int err;
 
-    err = vti6_dev_init(dev);
-    if (err < 0)
-        goto out;
-
     err = register_netdevice(dev);
     if (err < 0)
         goto out;
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops vti6_netdev_ops = {
+    .ndo_init       = vti6_dev_init,
     .ndo_uninit     = vti6_dev_uninit,
     .ndo_start_xmit = vti6_tnl_xmit,
     .ndo_do_ioctl   = vti6_ioctl,
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
     struct ip6_tnl *t = netdev_priv(dev);
     struct net *net = dev_net(dev);
     struct vti6_net *ip6n = net_generic(net, vti6_net_id);
-    int err = vti6_dev_init_gen(dev);
-
-    if (err)
-        return err;
 
     t->parms.proto = IPPROTO_IPV6;
     dev_hold(dev);
 
-    vti6_link_config(t);
-
     rcu_assign_pointer(ip6n->tnls_wc[0], t);
     return 0;
 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 58e5b4710127..a24557a1c1d8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
     struct sit_net *sitn = net_generic(net, sit_net_id);
     int err;
 
-    err = ipip6_tunnel_init(dev);
-    if (err < 0)
-        goto out;
-    ipip6_tunnel_clone_6rd(dev, sitn);
+    memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
+    memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
 
     if ((__force u16)t->parms.i_flags & SIT_ISATAP)
         dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
     if (err < 0)
         goto out;
 
-    strcpy(t->parms.name, dev->name);
+    ipip6_tunnel_clone_6rd(dev, sitn);
+
     dev->rtnl_link_ops = &sit_link_ops;
 
     dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops ipip6_netdev_ops = {
+    .ndo_init       = ipip6_tunnel_init,
     .ndo_uninit     = ipip6_tunnel_uninit,
     .ndo_start_xmit = sit_tunnel_xmit,
     .ndo_do_ioctl   = ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
 
     tunnel->dev = dev;
     tunnel->net = dev_net(dev);
-
-    memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
-    memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+    strcpy(tunnel->parms.name, dev->name);
 
     ipip6_tunnel_bind_dev(dev);
     dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 
     tunnel->dev = dev;
     tunnel->net = dev_net(dev);
-    strcpy(tunnel->parms.name, dev->name);
 
     iph->version = 4;
     iph->protocol = IPPROTO_IPV6;
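The ip6_gre, ip6_tunnel, ip6_vti and sit changes all apply the same recipe: per-device setup moves from a hand-rolled call before register_netdevice() into the .ndo_init hook, which registration invokes after it has already reset dev->iflink to -1, so the tunnel's link value is no longer clobbered. A condensed sketch of why the hook wins; this is a simplification of the register_netdevice() flow, not its actual code:

/* Illustrative types only. */
struct net_device_sketch {
    int iflink;
    int (*ndo_init)(struct net_device_sketch *dev);
};

int register_netdevice_sketch(struct net_device_sketch *dev)
{
    dev->iflink = -1;            /* clobbers any earlier assignment */
    if (dev->ndo_init)
        return dev->ndo_init(dev); /* tunnel sets iflink here: it sticks */
    return 0;
}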
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56b53571c807..509bc157ce55 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
     memset(&params, 0, sizeof(params));
     memset(&csa_ie, 0, sizeof(csa_ie));
-    err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+    err = ieee80211_parse_ch_switch_ie(sdata, elems,
                                        ifibss->chandef.chan->band,
                                        sta_flags, ifibss->bssid, &csa_ie);
     /* can't switch to destination channel, fail */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c2aaec4dfcf0..8c68da30595d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs 1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs
1643 * @sdata: the sdata of the interface which has received the frame 1643 * @sdata: the sdata of the interface which has received the frame
1644 * @elems: parsed 802.11 elements received with the frame 1644 * @elems: parsed 802.11 elements received with the frame
1645 * @beacon: indicates if the frame was a beacon or probe response
1646 * @current_band: indicates the current band 1645 * @current_band: indicates the current band
1647 * @sta_flags: contains information about own capabilities and restrictions 1646 * @sta_flags: contains information about own capabilities and restrictions
1648 * to decide which channel switch announcements can be accepted. Only the 1647 * to decide which channel switch announcements can be accepted. Only the
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1656 * Return: 0 on success, <0 on error and >0 if there is nothing to parse. 1655 * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
1657 */ 1656 */
1658int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, 1657int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
1659 struct ieee802_11_elems *elems, bool beacon, 1658 struct ieee802_11_elems *elems,
1660 enum ieee80211_band current_band, 1659 enum ieee80211_band current_band,
1661 u32 sta_flags, u8 *bssid, 1660 u32 sta_flags, u8 *bssid,
1662 struct ieee80211_csa_ie *csa_ie); 1661 struct ieee80211_csa_ie *csa_ie);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index af237223a8cd..653f5eb07a27 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
766 int i, flushed; 766 int i, flushed;
767 struct ps_data *ps; 767 struct ps_data *ps;
768 struct cfg80211_chan_def chandef; 768 struct cfg80211_chan_def chandef;
769 bool cancel_scan;
769 770
770 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 771 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
771 772
772 if (rcu_access_pointer(local->scan_sdata) == sdata) 773 cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
774 if (cancel_scan)
773 ieee80211_scan_cancel(local); 775 ieee80211_scan_cancel(local);
774 776
775 /* 777 /*
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
898 list_del(&sdata->u.vlan.list); 900 list_del(&sdata->u.vlan.list);
899 mutex_unlock(&local->mtx); 901 mutex_unlock(&local->mtx);
900 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); 902 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
903 /* see comment in the default case below */
904 ieee80211_free_keys(sdata, true);
901 /* no need to tell driver */ 905 /* no need to tell driver */
902 break; 906 break;
903 case NL80211_IFTYPE_MONITOR: 907 case NL80211_IFTYPE_MONITOR:
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
923 /* 927 /*
924 * When we get here, the interface is marked down. 928 * When we get here, the interface is marked down.
925 * Free the remaining keys, if there are any 929 * Free the remaining keys, if there are any
926 * (shouldn't be, except maybe in WDS mode?) 930 * (which can happen in AP mode if userspace sets
931 * keys before the interface is operating, and maybe
932 * also in WDS mode)
927 * 933 *
928 * Force the key freeing to always synchronize_net() 934 * Force the key freeing to always synchronize_net()
929 * to wait for the RX path in case it is using this 935 * to wait for the RX path in case it is using this
930 * interface enqueuing frames * at this very time on 936 * interface enqueuing frames at this very time on
931 * another CPU. 937 * another CPU.
932 */ 938 */
933 ieee80211_free_keys(sdata, true); 939 ieee80211_free_keys(sdata, true);
934
935 /* fall through */
936 case NL80211_IFTYPE_AP:
937 skb_queue_purge(&sdata->skb_queue); 940 skb_queue_purge(&sdata->skb_queue);
938 } 941 }
939 942
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
991 994
992 ieee80211_recalc_ps(local, -1); 995 ieee80211_recalc_ps(local, -1);
993 996
997 if (cancel_scan)
998 flush_delayed_work(&local->scan_work);
999
994 if (local->open_count == 0) { 1000 if (local->open_count == 0) {
995 ieee80211_stop_device(local); 1001 ieee80211_stop_device(local);
996 1002
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e9f99c1e3fad..0c8b2a77d312 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 
     memset(&params, 0, sizeof(params));
     memset(&csa_ie, 0, sizeof(csa_ie));
-    err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
+    err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
                                        sta_flags, sdata->vif.addr,
                                        &csa_ie);
     if (err < 0)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2de88704278b..93af0f1c9d99 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
     current_band = cbss->channel->band;
     memset(&csa_ie, 0, sizeof(csa_ie));
-    res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+    res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
                                        ifmgd->flags,
                                        ifmgd->associated->bssid, &csa_ie);
     if (res < 0)
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
         ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
     else
         mod_timer(&ifmgd->chswitch_timer,
-                  TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
+                  TU_TO_EXP_TIME((csa_ie.count - 1) *
+                                 cbss->beacon_interval));
 }
 
 static bool
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b04ca4049c95..a37f9af634cb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
     sc = le16_to_cpu(hdr->seq_ctrl);
     frag = sc & IEEE80211_SCTL_FRAG;
 
-    if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-               is_multicast_ether_addr(hdr->addr1))) {
-        /* not fragmented */
+    if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+        goto out;
+
+    if (is_multicast_ether_addr(hdr->addr1)) {
+        rx->local->dot11MulticastReceivedFrameCount++;
         goto out;
     }
+
     I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
     if (skb_linearize(rx->skb))
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  out:
     if (rx->sta)
         rx->sta->rx_packets++;
-    if (is_multicast_ether_addr(hdr->addr1))
-        rx->local->dot11MulticastReceivedFrameCount++;
-    else
-        ieee80211_led_rx(rx->local);
+    ieee80211_led_rx(rx->local);
     return RX_CONTINUE;
 }
 
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 6ab009070084..efeba56c913b 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -22,7 +22,7 @@
 #include "wme.h"
 
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
-                                 struct ieee802_11_elems *elems, bool beacon,
+                                 struct ieee802_11_elems *elems,
                                  enum ieee80211_band current_band,
                                  u32 sta_flags, u8 *bssid,
                                  struct ieee80211_csa_ie *csa_ie)
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
         return -EINVAL;
     }
 
-    if (!beacon && sec_chan_offs) {
+    if (sec_chan_offs) {
         secondary_channel_offset = sec_chan_offs->sec_chan_offs;
-    } else if (beacon && ht_oper) {
-        secondary_channel_offset =
-            ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
     } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
-        /* If it's not a beacon, HT is enabled and the IE not present,
-         * it's 20 MHz, 802.11-2012 8.5.2.6:
-         * This element [the Secondary Channel Offset Element] is
-         * present when switching to a 40 MHz channel. It may be
-         * present when switching to a 20 MHz channel (in which
-         * case the secondary channel offset is set to SCN).
-         */
+        /* If the secondary channel offset IE is not present,
+         * we can't know what's the post-CSA offset, so the
+         * best we can do is use 20MHz.
+         */
         secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
     }
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f1de72de273e..0007b8180397 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
         return;
 
     for (undo = 0; undo < group; undo++)
-        if (test_bit(group, &groups))
+        if (test_bit(undo, &groups))
             nlk->netlink_unbind(undo);
 }
 
@@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
             netlink_insert(sk, net, nladdr->nl_pid) :
             netlink_autobind(sock);
         if (err) {
-            netlink_unbind(nlk->ngroups - 1, groups, nlk);
+            netlink_unbind(nlk->ngroups, groups, nlk);
             return err;
         }
     }
@@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
     nl_table[unit].module = module;
     if (cfg) {
         nl_table[unit].bind = cfg->bind;
+        nl_table[unit].unbind = cfg->unbind;
         nl_table[unit].flags = cfg->flags;
         if (cfg->compare)
             nl_table[unit].compare = cfg->compare;
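Two small but real bugs here: the unwind loop tested bit `group` on every iteration instead of bit `undo`, and the caller passed ngroups - 1, so the last group was never unwound. The corrected loop shape as a standalone sketch (plain bit tests stand in for test_bit()):

#include <stdio.h>

/* Unwind groups [0, group): test each bit for itself, and the caller
 * passes the full group count, not count - 1. */
static void unbind_groups(unsigned int group, unsigned long groups)
{
    unsigned int undo;

    for (undo = 0; undo < group; undo++)
        if (groups & (1UL << undo))   /* was: bit 'group' every time */
            printf("unbind group %u\n", undo);
}

int main(void)
{
    unbind_groups(3, 0x5UL);   /* unbinds groups 0 and 2 */
    return 0;
}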
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 0e8529113dc5..fb7976aee61c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
     list_add(&cur_key->key_list, sh_keys);
 
     cur_key->key = key;
-    sctp_auth_key_hold(key);
-
     return 0;
 nomem:
     if (!replace)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ab734be8cb20..9f32741abb1c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2609,6 +2609,9 @@ do_addr_param:
     addr_param = param.v + sizeof(sctp_addip_param_t);
 
     af = sctp_get_af_specific(param_type2af(param.p->type));
+    if (af == NULL)
+        break;
+
     af->from_addr_param(&addr, addr_param,
                         htons(asoc->peer.port), 0);
 
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 57b9c2b7c4ff..6f6733331d95 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
     struct tpacket2_hdr *header = ring;
     int count = 0;
 
-    while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+    while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
         count++;
         header = ring + (count * getpagesize());
     }
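The selftest fix is the classic short-circuit ordering rule: check the index bound before dereferencing the frame it guards, otherwise the final iteration reads one frame past the ring. In miniature:

#include <stdbool.h>

#define RING_NUM_FRAMES 8

struct frame { bool ready; };

/* Count ready frames: the bounds test must come first so that
 * frames[RING_NUM_FRAMES] is never dereferenced. */
static int count_ready(const struct frame *frames)
{
    int count = 0;

    while (count < RING_NUM_FRAMES && frames[count].ready)
        count++;
    return count;
}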