author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-19 03:16:20 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-10-19 03:16:20 -0400
commit		91b15613ce7fb3e724ca0d433eef8e6bf15322af (patch)
tree		e35e961ff6776eaef1505c2241ee7de84376edb8
parent		2a96661054452c3016c377d72a38c6d4948ea6ae (diff)
parent		d4d576f5ab7edcb757bb33e6a5600666a0b1232d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
David writes:
  "Networking

   1) Fix gro_cells leak in xfrm layer, from Li RongQing.

   2) BPF selftests change RLIMIT_MEMLOCK blindly, don't do that. From Eric Dumazet.

   3) AF_XDP calls synchronize_net() under RCU lock, fix from Björn Töpel.

   4) Out of bounds packet access in _decode_session6(), from Alexei Starovoitov.

   5) Several ethtool bugs, where we copy a struct into the kernel twice and our validations of the values in the first copy can be invalidated by the second copy due to asynchronous updates to the memory by the user. From Wenwen Wang.

   6) Missing netlink attribute validation in cls_api, from Davide Caratti.

   7) LLC SAP sockets need to be SOCK_RCU_FREE, from Cong Wang.

   8) rxrpc operates on wrong kvec, from Yue Haibing.

   9) A regression was introduced by the disassociation of route neighbour references in rt6_probe(), causing probes for neighbourless routes to not be properly rate limited. Fix from Sabrina Dubroca.

  10) Unsafe RCU locking in tipc, from Tung Nguyen.

  11) Use after free in inet6_mc_check(), from Eric Dumazet.

  12) PMTU from icmp packets should update the SCTP transport pathmtu, from Xin Long.

  13) Missing peer put on error in rxrpc, from David Howells.

  14) Fix pedit in nfp driver, from Pieter Jansen van Vuuren.

  15) Fix overflowing shift statement in qla3xxx driver, from Nathan Chancellor.

  16) Fix Spectre v1 in ptp code, from Gustavo A. R. Silva.

  17) udp6_unicast_rcv_skb() interprets udpv6_queue_rcv_skb() return value in an inverted manner, fix from Paolo Abeni.

  18) Fix missed unresolved entries in ipmr dumps, from Nikolay Aleksandrov.

  19) Fix NAPI handling under high load; we can completely miss events when NAPI has to loop more than one time in a cycle. From Heiner Kallweit."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (49 commits)
  ip6_tunnel: Fix encapsulation layout
  tipc: fix info leak from kernel tipc_event
  net: socket: fix a missing-check bug
  net: sched: Fix for duplicate class dump
  r8169: fix NAPI handling under high load
  net: ipmr: fix unresolved entry dumps
  net: mscc: ocelot: Fix comment in ocelot_vlant_wait_for_completion()
  sctp: fix the data size calculation in sctp_data_size
  virtio_net: avoid using netif_tx_disable() for serializing tx routine
  udp6: fix encap return code for resubmitting
  mlxsw: core: Fix use-after-free when flashing firmware during init
  sctp: not free the new asoc when sctp_wait_for_connect returns err
  sctp: fix race on sctp_id2asoc
  r8169: re-enable MSI-X on RTL8168g
  net: bpfilter: use get_pid_task instead of pid_task
  ptp: fix Spectre v1 vulnerability
  net: qla3xxx: Remove overflowing shift statement
  geneve, vxlan: Don't set exceptions if skb->len < mtu
  geneve, vxlan: Don't check skb_dst() twice
  sctp: get pr_assoc and pr_stream all status with SCTP_PR_SCTP_ALL instead
  ...
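Item 5 above is a classic double-fetch race: a struct is copied from user space, its fields are validated, and then the struct is copied in again, so user space can rewrite the buffer between the two copies and bypass the validation. As a minimal sketch of the defensive pattern (struct foo_req and foo_handle_req() are invented for illustration, not code from this merge):

/*
 * Hypothetical example of validating a user-supplied request header.
 * Any field checked after the first copy_from_user() must be re-checked
 * if the buffer is ever fetched again, or better, never fetched twice.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct foo_req {
	u32 cmd;		/* ioctl sub-command echoed back by user space */
	u32 len;
};

static int foo_handle_req(void __user *useraddr, u32 expected_cmd)
{
	struct foo_req req;

	if (copy_from_user(&req, useraddr, sizeof(req)))
		return -EFAULT;

	/* Validate the kernel's private copy only. */
	if (req.cmd != expected_cmd)
		return -EINVAL;

	/* ... act on req without re-reading the header from user space ... */
	return 0;
}

The net/core/ethtool.c hunks later in this diff apply the same idea: they reject requests whose re-fetched cmd/sub_command no longer matches the value that was originally validated.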
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 17
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 51
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 20
-rw-r--r--  drivers/net/geneve.c | 14
-rw-r--r--  drivers/net/virtio_net.c | 5
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/ptp/ptp_chardev.c | 4
-rw-r--r--  include/linux/mlx5/driver.h | 8
-rw-r--r--  include/net/dst.h | 10
-rw-r--r--  include/net/ip6_fib.h | 4
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/uapi/linux/sctp.h | 1
-rw-r--r--  kernel/bpf/xskmap.c | 10
-rw-r--r--  net/bpfilter/bpfilter_kern.c | 6
-rw-r--r--  net/core/ethtool.c | 11
-rw-r--r--  net/ipv4/ipmr_base.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 10
-rw-r--r--  net/ipv6/mcast.c | 16
-rw-r--r--  net/ipv6/route.c | 12
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/llc/llc_conn.c | 1
-rw-r--r--  net/rxrpc/call_accept.c | 2
-rw-r--r--  net/rxrpc/local_object.c | 2
-rw-r--r--  net/rxrpc/output.c | 3
-rw-r--r--  net/rxrpc/peer_event.c | 1
-rw-r--r--  net/sched/cls_api.c | 13
-rw-r--r--  net/sched/sch_api.c | 11
-rw-r--r--  net/sctp/associola.c | 3
-rw-r--r--  net/sctp/input.c | 1
-rw-r--r--  net/sctp/output.c | 6
-rw-r--r--  net/sctp/socket.c | 17
-rw-r--r--  net/socket.c | 11
-rw-r--r--  net/tipc/group.c | 1
-rw-r--r--  net/tipc/link.c | 1
-rw-r--r--  net/tipc/name_distr.c | 4
-rw-r--r--  net/xdp/xsk.c | 2
-rw-r--r--  net/xfrm/xfrm_interface.c | 3
-rw-r--r--  net/xfrm/xfrm_policy.c | 8
-rw-r--r--  tools/testing/selftests/net/reuseport_bpf.c | 13
57 files changed, 253 insertions, 187 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 556f902b3766..7f371d372bdd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10122,7 +10122,6 @@ L: netdev@vger.kernel.org
10122T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git 10122T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
10123T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git 10123T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
10124S: Maintained 10124S: Maintained
10125F: net/core/flow.c
10126F: net/xfrm/ 10125F: net/xfrm/
10127F: net/key/ 10126F: net/key/
10128F: net/ipv4/xfrm* 10127F: net/ipv4/xfrm*
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4241ae928d4a..34af5f1569c8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
321 phydev->advertising = phydev->supported; 321 phydev->advertising = phydev->supported;
322 322
323 /* The internal PHY has its link interrupts routed to the 323 /* The internal PHY has its link interrupts routed to the
324 * Ethernet MAC ISRs 324 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
325 * that prevents the signaling of link UP interrupts when
326 * the link operates at 10Mbps, so fallback to polling for
327 * those versions of GENET.
325 */ 328 */
326 if (priv->internal_phy) 329 if (priv->internal_phy && !GENET_IS_V5(priv))
327 dev->phydev->irq = PHY_IGNORE_INTERRUPT; 330 dev->phydev->irq = PHY_IGNORE_INTERRUPT;
328 331
329 return 0; 332 return 0;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4778b663653e..bf80855dd0dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -452,6 +452,10 @@ struct bufdesc_ex {
452 * initialisation. 452 * initialisation.
453 */ 453 */
454#define FEC_QUIRK_MIB_CLEAR (1 << 15) 454#define FEC_QUIRK_MIB_CLEAR (1 << 15)
455/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
456 * those FIFO receive registers are resolved in other platforms.
457 */
458#define FEC_QUIRK_HAS_FRREG (1 << 16)
455 459
456struct bufdesc_prop { 460struct bufdesc_prop {
457 int qid; 461 int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bf9b9fd6d2a0..7b98bb75ba8a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
91 .driver_data = 0, 91 .driver_data = 0,
92 }, { 92 }, {
93 .name = "imx25-fec", 93 .name = "imx25-fec",
94 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR, 94 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
95 FEC_QUIRK_HAS_FRREG,
95 }, { 96 }, {
96 .name = "imx27-fec", 97 .name = "imx27-fec",
97 .driver_data = FEC_QUIRK_MIB_CLEAR, 98 .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
98 }, { 99 }, {
99 .name = "imx28-fec", 100 .name = "imx28-fec",
100 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 101 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
101 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, 102 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
103 FEC_QUIRK_HAS_FRREG,
102 }, { 104 }, {
103 .name = "imx6q-fec", 105 .name = "imx6q-fec",
104 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 106 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -2164,7 +2166,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
2164 memset(buf, 0, regs->len); 2166 memset(buf, 0, regs->len);
2165 2167
2166 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2168 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
2167 off = fec_enet_register_offset[i] / 4; 2169 off = fec_enet_register_offset[i];
2170
2171 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2172 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2173 continue;
2174
2175 off >>= 2;
2168 buf[off] = readl(&theregs[off]); 2176 buf[off] = readl(&theregs[off]);
2169 } 2177 }
2170} 2178}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15d8ae28c040..00172dee5339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -432,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
432 432
433static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, 433static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
434 struct mlx5_wq_cyc *wq, 434 struct mlx5_wq_cyc *wq,
435 u16 pi, u16 frag_pi) 435 u16 pi, u16 nnops)
436{ 436{
437 struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; 437 struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
438 u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
439 438
440 edge_wi = wi + nnops; 439 edge_wi = wi + nnops;
441 440
@@ -454,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
454 struct mlx5_wq_cyc *wq = &sq->wq; 453 struct mlx5_wq_cyc *wq = &sq->wq;
455 struct mlx5e_umr_wqe *umr_wqe; 454 struct mlx5e_umr_wqe *umr_wqe;
456 u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); 455 u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
457 u16 pi, frag_pi; 456 u16 pi, contig_wqebbs_room;
458 int err; 457 int err;
459 int i; 458 int i;
460 459
461 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 460 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
462 frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); 461 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
463 462 if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
464 if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { 463 mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
465 mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
466 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 464 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
467 } 465 }
468 466
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ae73ea992845..6dacaeba2fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
290 290
291static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, 291static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
292 struct mlx5_wq_cyc *wq, 292 struct mlx5_wq_cyc *wq,
293 u16 pi, u16 frag_pi) 293 u16 pi, u16 nnops)
294{ 294{
295 struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; 295 struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
296 u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
297 296
298 edge_wi = wi + nnops; 297 edge_wi = wi + nnops;
299 298
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
348 struct mlx5e_tx_wqe_info *wi; 347 struct mlx5e_tx_wqe_info *wi;
349 348
350 struct mlx5e_sq_stats *stats = sq->stats; 349 struct mlx5e_sq_stats *stats = sq->stats;
350 u16 headlen, ihs, contig_wqebbs_room;
351 u16 ds_cnt, ds_cnt_inl = 0; 351 u16 ds_cnt, ds_cnt_inl = 0;
352 u16 headlen, ihs, frag_pi;
353 u8 num_wqebbs, opcode; 352 u8 num_wqebbs, opcode;
354 u32 num_bytes; 353 u32 num_bytes;
355 int num_dma; 354 int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
386 } 385 }
387 386
388 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
389 frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); 388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
390 if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { 389 if (unlikely(contig_wqebbs_room < num_wqebbs)) {
391 mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); 390 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
392 mlx5e_sq_fetch_wqe(sq, &wqe, &pi); 391 mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
393 } 392 }
394 393
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
636 struct mlx5e_tx_wqe_info *wi; 635 struct mlx5e_tx_wqe_info *wi;
637 636
638 struct mlx5e_sq_stats *stats = sq->stats; 637 struct mlx5e_sq_stats *stats = sq->stats;
639 u16 headlen, ihs, pi, frag_pi; 638 u16 headlen, ihs, pi, contig_wqebbs_room;
640 u16 ds_cnt, ds_cnt_inl = 0; 639 u16 ds_cnt, ds_cnt_inl = 0;
641 u8 num_wqebbs, opcode; 640 u8 num_wqebbs, opcode;
642 u32 num_bytes; 641 u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
672 } 671 }
673 672
674 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 673 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
675 frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); 674 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
676 if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { 675 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
676 if (unlikely(contig_wqebbs_room < num_wqebbs)) {
677 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
677 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 678 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
678 mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
679 } 679 }
680 680
681 mlx5i_sq_fetch_wqe(sq, &wqe, &pi); 681 mlx5i_sq_fetch_wqe(sq, &wqe, pi);
682 682
683 /* fill wqe */ 683 /* fill wqe */
684 wi = &sq->db.wqe_info[pi]; 684 wi = &sq->db.wqe_info[pi];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48864f4988a4..c1e1a16a9b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
273 case MLX5_PFAULT_SUBTYPE_WQE: 273 case MLX5_PFAULT_SUBTYPE_WQE:
274 /* WQE based event */ 274 /* WQE based event */
275 pfault->type = 275 pfault->type =
276 be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; 276 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
277 pfault->token = 277 pfault->token =
278 be32_to_cpu(pf_eqe->wqe.token); 278 be32_to_cpu(pf_eqe->wqe.token);
279 pfault->wqe.wq_num = 279 pfault->wqe.wq_num =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5645a4facad2..b8ee9101c506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
245 return ERR_PTR(res); 245 return ERR_PTR(res);
246 } 246 }
247 247
248 /* Context will be freed by wait func after completion */ 248 /* Context should be freed by the caller after completion. */
249 return context; 249 return context;
250} 250}
251 251
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
418 cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); 418 cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
419 cmd.flags = htonl(flags); 419 cmd.flags = htonl(flags);
420 context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); 420 context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
421 if (IS_ERR(context)) { 421 if (IS_ERR(context))
422 err = PTR_ERR(context); 422 return PTR_ERR(context);
423 goto out;
424 }
425 423
426 err = mlx5_fpga_ipsec_cmd_wait(context); 424 err = mlx5_fpga_ipsec_cmd_wait(context);
427 if (err) 425 if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
435 } 433 }
436 434
437out: 435out:
436 kfree(context);
438 return err; 437 return err;
439} 438}
440 439
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 08eac92fc26c..0982c579ec74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
109 109
110static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, 110static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
111 struct mlx5i_tx_wqe **wqe, 111 struct mlx5i_tx_wqe **wqe,
112 u16 *pi) 112 u16 pi)
113{ 113{
114 struct mlx5_wq_cyc *wq = &sq->wq; 114 struct mlx5_wq_cyc *wq = &sq->wq;
115 115
116 *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 116 *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
117 *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
118 memset(*wqe, 0, sizeof(**wqe)); 117 memset(*wqe, 0, sizeof(**wqe));
119} 118}
120 119
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 68e7f8df2a6d..ddca327e8950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
39 return (u32)wq->fbc.sz_m1 + 1; 39 return (u32)wq->fbc.sz_m1 + 1;
40} 40}
41 41
42u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
43{
44 return wq->fbc.frag_sz_m1 + 1;
45}
46
47u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) 42u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
48{ 43{
49 return wq->fbc.sz_m1 + 1; 44 return wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 3a1a170bb2d7..b1293d153a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
80 void *wqc, struct mlx5_wq_cyc *wq, 80 void *wqc, struct mlx5_wq_cyc *wq,
81 struct mlx5_wq_ctrl *wq_ctrl); 81 struct mlx5_wq_ctrl *wq_ctrl);
82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
83u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
84 83
85int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 84int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
86 void *qpc, struct mlx5_wq_qp *wq, 85 void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
140 return ctr & wq->fbc.sz_m1; 139 return ctr & wq->fbc.sz_m1;
141} 140}
142 141
143static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
144{
145 return ctr & wq->fbc.frag_sz_m1;
146}
147
148static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) 142static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
149{ 143{
150 return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); 144 return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
160 return mlx5_frag_buf_get_wqe(&wq->fbc, ix); 154 return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
161} 155}
162 156
157static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
158{
159 return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
160}
161
163static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) 162static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
164{ 163{
165 int equal = (cc1 == cc2); 164 int equal = (cc1 == cc2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 81533d7f395c..937d0ace699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1055err_driver_init: 1055err_driver_init:
1056 mlxsw_thermal_fini(mlxsw_core->thermal); 1056 mlxsw_thermal_fini(mlxsw_core->thermal);
1057err_thermal_init: 1057err_thermal_init:
1058 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1058err_hwmon_init: 1059err_hwmon_init:
1059 if (!reload) 1060 if (!reload)
1060 devlink_unregister(devlink); 1061 devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1088 if (mlxsw_core->driver->fini) 1089 if (mlxsw_core->driver->fini)
1089 mlxsw_core->driver->fini(mlxsw_core); 1090 mlxsw_core->driver->fini(mlxsw_core);
1090 mlxsw_thermal_fini(mlxsw_core->thermal); 1091 mlxsw_thermal_fini(mlxsw_core->thermal);
1092 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1091 if (!reload) 1093 if (!reload)
1092 devlink_unregister(devlink); 1094 devlink_unregister(devlink);
1093 mlxsw_emad_fini(mlxsw_core); 1095 mlxsw_emad_fini(mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 655ddd204ab2..c35be477856f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
359 return 0; 359 return 0;
360} 360}
361 361
362static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
363{
364}
365
362#endif 366#endif
363 367
364struct mlxsw_thermal; 368struct mlxsw_thermal;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index f6cf2896d337..e04e8162aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
303 struct device *hwmon_dev; 303 struct device *hwmon_dev;
304 int err; 304 int err;
305 305
306 mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon), 306 mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
307 GFP_KERNEL);
308 if (!mlxsw_hwmon) 307 if (!mlxsw_hwmon)
309 return -ENOMEM; 308 return -ENOMEM;
310 mlxsw_hwmon->core = mlxsw_core; 309 mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
321 mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; 320 mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
322 mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; 321 mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
323 322
324 hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev, 323 hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
325 "mlxsw", 324 "mlxsw", mlxsw_hwmon,
326 mlxsw_hwmon, 325 mlxsw_hwmon->groups);
327 mlxsw_hwmon->groups);
328 if (IS_ERR(hwmon_dev)) { 326 if (IS_ERR(hwmon_dev)) {
329 err = PTR_ERR(hwmon_dev); 327 err = PTR_ERR(hwmon_dev);
330 goto err_hwmon_register; 328 goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
337err_hwmon_register: 335err_hwmon_register:
338err_fans_init: 336err_fans_init:
339err_temp_init: 337err_temp_init:
338 kfree(mlxsw_hwmon);
340 return err; 339 return err;
341} 340}
341
342void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
343{
344 hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
345 kfree(mlxsw_hwmon);
346}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..ed4e298cd823 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
133{ 133{
134 unsigned int val, timeout = 10; 134 unsigned int val, timeout = 10;
135 135
136 /* Wait for the issued mac table command to be completed, or timeout. 136 /* Wait for the issued vlan table command to be completed, or timeout.
137 * When the command read from ANA_TABLES_MACACCESS is 137 * When the command read from ANA_TABLES_VLANACCESS is
138 * MACACCESS_CMD_IDLE, the issued command completed successfully. 138 * VLANACCESS_CMD_IDLE, the issued command completed successfully.
139 */ 139 */
140 do { 140 do {
141 val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); 141 val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 46ba0cf257c6..7a1e9cd9cc62 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -429,12 +429,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
429 429
430 switch (off) { 430 switch (off) {
431 case offsetof(struct iphdr, daddr): 431 case offsetof(struct iphdr, daddr):
432 set_ip_addr->ipv4_dst_mask = mask; 432 set_ip_addr->ipv4_dst_mask |= mask;
433 set_ip_addr->ipv4_dst = exact; 433 set_ip_addr->ipv4_dst &= ~mask;
434 set_ip_addr->ipv4_dst |= exact & mask;
434 break; 435 break;
435 case offsetof(struct iphdr, saddr): 436 case offsetof(struct iphdr, saddr):
436 set_ip_addr->ipv4_src_mask = mask; 437 set_ip_addr->ipv4_src_mask |= mask;
437 set_ip_addr->ipv4_src = exact; 438 set_ip_addr->ipv4_src &= ~mask;
439 set_ip_addr->ipv4_src |= exact & mask;
438 break; 440 break;
439 default: 441 default:
440 return -EOPNOTSUPP; 442 return -EOPNOTSUPP;
@@ -448,11 +450,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
448} 450}
449 451
450static void 452static void
451nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask, 453nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
452 struct nfp_fl_set_ipv6_addr *ip6) 454 struct nfp_fl_set_ipv6_addr *ip6)
453{ 455{
454 ip6->ipv6[idx % 4].mask = mask; 456 ip6->ipv6[word].mask |= mask;
455 ip6->ipv6[idx % 4].exact = exact; 457 ip6->ipv6[word].exact &= ~mask;
458 ip6->ipv6[word].exact |= exact & mask;
456 459
457 ip6->reserved = cpu_to_be16(0); 460 ip6->reserved = cpu_to_be16(0);
458 ip6->head.jump_id = opcode_tag; 461 ip6->head.jump_id = opcode_tag;
@@ -465,6 +468,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
465 struct nfp_fl_set_ipv6_addr *ip_src) 468 struct nfp_fl_set_ipv6_addr *ip_src)
466{ 469{
467 __be32 exact, mask; 470 __be32 exact, mask;
471 u8 word;
468 472
469 /* We are expecting tcf_pedit to return a big endian value */ 473 /* We are expecting tcf_pedit to return a big endian value */
470 mask = (__force __be32)~tcf_pedit_mask(action, idx); 474 mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -473,17 +477,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
473 if (exact & ~mask) 477 if (exact & ~mask)
474 return -EOPNOTSUPP; 478 return -EOPNOTSUPP;
475 479
476 if (off < offsetof(struct ipv6hdr, saddr)) 480 if (off < offsetof(struct ipv6hdr, saddr)) {
477 return -EOPNOTSUPP; 481 return -EOPNOTSUPP;
478 else if (off < offsetof(struct ipv6hdr, daddr)) 482 } else if (off < offsetof(struct ipv6hdr, daddr)) {
479 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx, 483 word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
484 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
480 exact, mask, ip_src); 485 exact, mask, ip_src);
481 else if (off < offsetof(struct ipv6hdr, daddr) + 486 } else if (off < offsetof(struct ipv6hdr, daddr) +
482 sizeof(struct in6_addr)) 487 sizeof(struct in6_addr)) {
483 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx, 488 word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
489 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
484 exact, mask, ip_dst); 490 exact, mask, ip_dst);
485 else 491 } else {
486 return -EOPNOTSUPP; 492 return -EOPNOTSUPP;
493 }
487 494
488 return 0; 495 return 0;
489} 496}
@@ -541,7 +548,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
541 struct nfp_fl_set_eth set_eth; 548 struct nfp_fl_set_eth set_eth;
542 enum pedit_header_type htype; 549 enum pedit_header_type htype;
543 int idx, nkeys, err; 550 int idx, nkeys, err;
544 size_t act_size; 551 size_t act_size = 0;
545 u32 offset, cmd; 552 u32 offset, cmd;
546 u8 ip_proto = 0; 553 u8 ip_proto = 0;
547 554
@@ -599,7 +606,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
599 act_size = sizeof(set_eth); 606 act_size = sizeof(set_eth);
600 memcpy(nfp_action, &set_eth, act_size); 607 memcpy(nfp_action, &set_eth, act_size);
601 *a_len += act_size; 608 *a_len += act_size;
602 } else if (set_ip_addr.head.len_lw) { 609 }
610 if (set_ip_addr.head.len_lw) {
611 nfp_action += act_size;
603 act_size = sizeof(set_ip_addr); 612 act_size = sizeof(set_ip_addr);
604 memcpy(nfp_action, &set_ip_addr, act_size); 613 memcpy(nfp_action, &set_ip_addr, act_size);
605 *a_len += act_size; 614 *a_len += act_size;
@@ -607,10 +616,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
607 /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ 616 /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
608 *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | 617 *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
609 nfp_fl_csum_l4_to_flag(ip_proto); 618 nfp_fl_csum_l4_to_flag(ip_proto);
610 } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { 619 }
620 if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
611 /* TC compiles set src and dst IPv6 address as a single action, 621 /* TC compiles set src and dst IPv6 address as a single action,
612 * the hardware requires this to be 2 separate actions. 622 * the hardware requires this to be 2 separate actions.
613 */ 623 */
624 nfp_action += act_size;
614 act_size = sizeof(set_ip6_src); 625 act_size = sizeof(set_ip6_src);
615 memcpy(nfp_action, &set_ip6_src, act_size); 626 memcpy(nfp_action, &set_ip6_src, act_size);
616 *a_len += act_size; 627 *a_len += act_size;
@@ -623,6 +634,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
623 /* Hardware will automatically fix TCP/UDP checksum. */ 634 /* Hardware will automatically fix TCP/UDP checksum. */
624 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); 635 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
625 } else if (set_ip6_dst.head.len_lw) { 636 } else if (set_ip6_dst.head.len_lw) {
637 nfp_action += act_size;
626 act_size = sizeof(set_ip6_dst); 638 act_size = sizeof(set_ip6_dst);
627 memcpy(nfp_action, &set_ip6_dst, act_size); 639 memcpy(nfp_action, &set_ip6_dst, act_size);
628 *a_len += act_size; 640 *a_len += act_size;
@@ -630,13 +642,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
630 /* Hardware will automatically fix TCP/UDP checksum. */ 642 /* Hardware will automatically fix TCP/UDP checksum. */
631 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); 643 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
632 } else if (set_ip6_src.head.len_lw) { 644 } else if (set_ip6_src.head.len_lw) {
645 nfp_action += act_size;
633 act_size = sizeof(set_ip6_src); 646 act_size = sizeof(set_ip6_src);
634 memcpy(nfp_action, &set_ip6_src, act_size); 647 memcpy(nfp_action, &set_ip6_src, act_size);
635 *a_len += act_size; 648 *a_len += act_size;
636 649
637 /* Hardware will automatically fix TCP/UDP checksum. */ 650 /* Hardware will automatically fix TCP/UDP checksum. */
638 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); 651 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
639 } else if (set_tport.head.len_lw) { 652 }
653 if (set_tport.head.len_lw) {
654 nfp_action += act_size;
640 act_size = sizeof(set_tport); 655 act_size = sizeof(set_tport);
641 memcpy(nfp_action, &set_tport, act_size); 656 memcpy(nfp_action, &set_tport, act_size);
642 *a_len += act_size; 657 *a_len += act_size;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index af3a28ec04eb..0f0aba793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", 231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
233 233
234out: 234out:
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b48f76182049..10b075bc5959 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
380 380
381 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 381 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
382 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 382 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
383 ql_write_nvram_reg(qdev, spir,
384 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
385} 383}
386 384
387/* 385/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3a5e6160bf0d..2c350099b83c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6549,17 +6549,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
6549 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); 6549 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6550 struct net_device *dev = tp->dev; 6550 struct net_device *dev = tp->dev;
6551 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; 6551 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6552 int work_done= 0; 6552 int work_done;
6553 u16 status; 6553 u16 status;
6554 6554
6555 status = rtl_get_events(tp); 6555 status = rtl_get_events(tp);
6556 rtl_ack_events(tp, status & ~tp->event_slow); 6556 rtl_ack_events(tp, status & ~tp->event_slow);
6557 6557
6558 if (status & RTL_EVENT_NAPI_RX) 6558 work_done = rtl_rx(dev, tp, (u32) budget);
6559 work_done = rtl_rx(dev, tp, (u32) budget);
6560 6559
6561 if (status & RTL_EVENT_NAPI_TX) 6560 rtl_tx(dev, tp);
6562 rtl_tx(dev, tp);
6563 6561
6564 if (status & tp->event_slow) { 6562 if (status & tp->event_slow) {
6565 enable_mask &= ~tp->event_slow; 6563 enable_mask &= ~tp->event_slow;
@@ -7093,20 +7091,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
7093{ 7091{
7094 unsigned int flags; 7092 unsigned int flags;
7095 7093
7096 switch (tp->mac_version) { 7094 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
7097 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
7098 RTL_W8(tp, Cfg9346, Cfg9346_Unlock); 7095 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
7099 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); 7096 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
7100 RTL_W8(tp, Cfg9346, Cfg9346_Lock); 7097 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
7101 flags = PCI_IRQ_LEGACY; 7098 flags = PCI_IRQ_LEGACY;
7102 break; 7099 } else {
7103 case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
7104 /* This version was reported to have issues with resume
7105 * from suspend when using MSI-X
7106 */
7107 flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
7108 break;
7109 default:
7110 flags = PCI_IRQ_ALL_TYPES; 7100 flags = PCI_IRQ_ALL_TYPES;
7111 } 7101 }
7112 7102
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6acb6b5718b9..493cd382b8aa 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -830,12 +830,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
830 if (IS_ERR(rt)) 830 if (IS_ERR(rt))
831 return PTR_ERR(rt); 831 return PTR_ERR(rt);
832 832
833 if (skb_dst(skb)) { 833 skb_tunnel_check_pmtu(skb, &rt->dst,
834 int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN - 834 GENEVE_IPV4_HLEN + info->options_len);
835 info->options_len;
836
837 skb_dst_update_pmtu(skb, mtu);
838 }
839 835
840 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 836 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
841 if (geneve->collect_md) { 837 if (geneve->collect_md) {
@@ -876,11 +872,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
876 if (IS_ERR(dst)) 872 if (IS_ERR(dst))
877 return PTR_ERR(dst); 873 return PTR_ERR(dst);
878 874
879 if (skb_dst(skb)) { 875 skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
880 int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
881
882 skb_dst_update_pmtu(skb, mtu);
883 }
884 876
885 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 877 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
886 if (geneve->collect_md) { 878 if (geneve->collect_md) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index dab504ec5e50..ddfa3f24204c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2218,8 +2218,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
2218 /* Make sure no work handler is accessing the device */ 2218 /* Make sure no work handler is accessing the device */
2219 flush_work(&vi->config_work); 2219 flush_work(&vi->config_work);
2220 2220
2221 netif_tx_lock_bh(vi->dev);
2221 netif_device_detach(vi->dev); 2222 netif_device_detach(vi->dev);
2222 netif_tx_disable(vi->dev); 2223 netif_tx_unlock_bh(vi->dev);
2223 cancel_delayed_work_sync(&vi->refill); 2224 cancel_delayed_work_sync(&vi->refill);
2224 2225
2225 if (netif_running(vi->dev)) { 2226 if (netif_running(vi->dev)) {
@@ -2255,7 +2256,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
2255 } 2256 }
2256 } 2257 }
2257 2258
2259 netif_tx_lock_bh(vi->dev);
2258 netif_device_attach(vi->dev); 2260 netif_device_attach(vi->dev);
2261 netif_tx_unlock_bh(vi->dev);
2259 return err; 2262 return err;
2260} 2263}
2261 2264
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2b8da2b7e721..27bd586b94b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2194,11 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2194 } 2194 }
2195 2195
2196 ndst = &rt->dst; 2196 ndst = &rt->dst;
2197 if (skb_dst(skb)) { 2197 skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
2198 int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
2199
2200 skb_dst_update_pmtu(skb, mtu);
2201 }
2202 2198
2203 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2199 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2204 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2200 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2231,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2235 goto out_unlock; 2231 goto out_unlock;
2236 } 2232 }
2237 2233
2238 if (skb_dst(skb)) { 2234 skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
2239 int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
2240
2241 skb_dst_update_pmtu(skb, mtu);
2242 }
2243 2235
2244 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2236 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2245 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2237 ttl = ttl ? : ip6_dst_hoplimit(ndst);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 01b0e2bb3319..2012551d93e0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -24,6 +24,8 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/timekeeping.h> 25#include <linux/timekeeping.h>
26 26
27#include <linux/nospec.h>
28
27#include "ptp_private.h" 29#include "ptp_private.h"
28 30
29static int ptp_disable_pinfunc(struct ptp_clock_info *ops, 31static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
248 err = -EINVAL; 250 err = -EINVAL;
249 break; 251 break;
250 } 252 }
253 pin_index = array_index_nospec(pin_index, ops->n_pins);
251 if (mutex_lock_interruptible(&ptp->pincfg_mux)) 254 if (mutex_lock_interruptible(&ptp->pincfg_mux))
252 return -ERESTARTSYS; 255 return -ERESTARTSYS;
253 pd = ops->pin_config[pin_index]; 256 pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
266 err = -EINVAL; 269 err = -EINVAL;
267 break; 270 break;
268 } 271 }
272 pin_index = array_index_nospec(pin_index, ops->n_pins);
269 if (mutex_lock_interruptible(&ptp->pincfg_mux)) 273 if (mutex_lock_interruptible(&ptp->pincfg_mux))
270 return -ERESTARTSYS; 274 return -ERESTARTSYS;
271 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan); 275 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
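For readers unfamiliar with array_index_nospec(), the ptp hunk above follows the standard Spectre-v1 hardening recipe: bounds-check the user-controlled index, then clamp it under speculation before using it to index memory. A minimal sketch (foo_get_entry(), table and n_entries are invented for illustration, not code from this merge):

/*
 * Hypothetical example of the pattern used in ptp_ioctl() above.
 * array_index_nospec() keeps the index within [0, n_entries) even on
 * speculative paths that have bypassed the bounds check, so it cannot
 * be used to load attacker-chosen out-of-bounds memory.
 */
#include <linux/nospec.h>
#include <linux/errno.h>

static int foo_get_entry(const int *table, unsigned int n_entries,
			 unsigned int idx)
{
	if (idx >= n_entries)
		return -EINVAL;

	idx = array_index_nospec(idx, n_entries);
	return table[idx];
}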
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 66d94b4557cf..88a041b73abf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1032,6 +1032,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
1032 ((fbc->frag_sz_m1 & ix) << fbc->log_stride); 1032 ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
1033} 1033}
1034 1034
1035static inline u32
1036mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
1037{
1038 u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
1039
1040 return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
1041}
1042
1035int mlx5_cmd_init(struct mlx5_core_dev *dev); 1043int mlx5_cmd_init(struct mlx5_core_dev *dev);
1036void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 1044void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
1037void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 1045void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
diff --git a/include/net/dst.h b/include/net/dst.h
index 7f735e76ca73..6cf0870414c7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
527 dst->ops->update_pmtu(dst, NULL, skb, mtu); 527 dst->ops->update_pmtu(dst, NULL, skb, mtu);
528} 528}
529 529
530static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
531 struct dst_entry *encap_dst,
532 int headroom)
533{
534 u32 encap_mtu = dst_mtu(encap_dst);
535
536 if (skb->len > encap_mtu - headroom)
537 skb_dst_update_pmtu(skb, encap_mtu - headroom);
538}
539
530#endif /* _NET_DST_H */ 540#endif /* _NET_DST_H */
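The include/net/dst.h hunk above adds the skb_tunnel_check_pmtu() helper that the geneve and vxlan hunks earlier in this diff now call: it lowers the route PMTU only when the current packet would actually exceed the encapsulating route's MTU after the tunnel headroom is added. A minimal, hypothetical caller (the function name and headroom constant are invented, not part of this merge):

/*
 * Hypothetical transmit-path snippet.  Compare with the geneve_xmit_skb()
 * and vxlan_xmit_one() conversions above, which pass their real
 * encapsulation headroom instead of MY_TUNNEL_HLEN.
 */
#include <linux/skbuff.h>
#include <net/dst.h>

#define MY_TUNNEL_HLEN	50	/* outer headers added by this made-up tunnel */

static void my_tunnel_update_pmtu(struct sk_buff *skb,
				  struct dst_entry *encap_dst)
{
	skb_tunnel_check_pmtu(skb, encap_dst, MY_TUNNEL_HLEN);
}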
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3d4930528db0..2d31e22babd8 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -159,6 +159,10 @@ struct fib6_info {
159 struct rt6_info * __percpu *rt6i_pcpu; 159 struct rt6_info * __percpu *rt6i_pcpu;
160 struct rt6_exception_bucket __rcu *rt6i_exception_bucket; 160 struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
161 161
162#ifdef CONFIG_IPV6_ROUTER_PREF
163 unsigned long last_probe;
164#endif
165
162 u32 fib6_metric; 166 u32 fib6_metric;
163 u8 fib6_protocol; 167 u8 fib6_protocol;
164 u8 fib6_type; 168 u8 fib6_type;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5ef1bad81ef5..9e3d32746430 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
347 __u16 size; 347 __u16 size;
348 348
349 size = ntohs(chunk->chunk_hdr->length); 349 size = ntohs(chunk->chunk_hdr->length);
350 size -= sctp_datahdr_len(&chunk->asoc->stream); 350 size -= sctp_datachk_len(&chunk->asoc->stream);
351 351
352 return size; 352 return size;
353} 353}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 28a7c8e44636..a11f93790476 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -876,6 +876,8 @@ struct sctp_transport {
876 unsigned long sackdelay; 876 unsigned long sackdelay;
877 __u32 sackfreq; 877 __u32 sackfreq;
878 878
879 atomic_t mtu_info;
880
879 /* When was the last time that we heard from this transport? We use 881 /* When was the last time that we heard from this transport? We use
880 * this to pick new active and retran paths. 882 * this to pick new active and retran paths.
881 */ 883 */
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b479db5c71d9..34dd3d497f2c 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags {
301 SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */ 301 SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
302 /* 2 bits here have been used by SCTP_PR_SCTP_MASK */ 302 /* 2 bits here have been used by SCTP_PR_SCTP_MASK */
303 SCTP_SENDALL = (1 << 6), 303 SCTP_SENDALL = (1 << 6),
304 SCTP_PR_SCTP_ALL = (1 << 7),
304 SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */ 305 SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
305 SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */ 306 SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
306}; 307};
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c..47147c9e184d 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
192 sock_hold(sock->sk); 192 sock_hold(sock->sk);
193 193
194 old_xs = xchg(&m->xsk_map[i], xs); 194 old_xs = xchg(&m->xsk_map[i], xs);
195 if (old_xs) { 195 if (old_xs)
196 /* Make sure we've flushed everything. */
197 synchronize_net();
198 sock_put((struct sock *)old_xs); 196 sock_put((struct sock *)old_xs);
199 }
200 197
201 sockfd_put(sock); 198 sockfd_put(sock);
202 return 0; 199 return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
212 return -EINVAL; 209 return -EINVAL;
213 210
214 old_xs = xchg(&m->xsk_map[k], NULL); 211 old_xs = xchg(&m->xsk_map[k], NULL);
215 if (old_xs) { 212 if (old_xs)
216 /* Make sure we've flushed everything. */
217 synchronize_net();
218 sock_put((struct sock *)old_xs); 213 sock_put((struct sock *)old_xs);
219 }
220 214
221 return 0; 215 return 0;
222} 216}
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index b64e1649993b..94e88f510c5b 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
23 23
24 if (!info->pid) 24 if (!info->pid)
25 return; 25 return;
26 tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID); 26 tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
27 if (tsk) 27 if (tsk) {
28 force_sig(SIGKILL, tsk); 28 force_sig(SIGKILL, tsk);
29 put_task_struct(tsk);
30 }
29 fput(info->pipe_to_umh); 31 fput(info->pipe_to_umh);
30 fput(info->pipe_from_umh); 32 fput(info->pipe_from_umh);
31 info->pid = 0; 33 info->pid = 0;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0762aaf8e964..aeabc4831fca 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1015,6 +1015,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
1015 return -EINVAL; 1015 return -EINVAL;
1016 } 1016 }
1017 1017
1018 if (info.cmd != cmd)
1019 return -EINVAL;
1020
1018 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 1021 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
1019 if (info.rule_cnt > 0) { 1022 if (info.rule_cnt > 0) {
1020 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) 1023 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
@@ -2469,13 +2472,17 @@ roll_back:
2469 return ret; 2472 return ret;
2470} 2473}
2471 2474
2472static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr) 2475static int ethtool_set_per_queue(struct net_device *dev,
2476 void __user *useraddr, u32 sub_cmd)
2473{ 2477{
2474 struct ethtool_per_queue_op per_queue_opt; 2478 struct ethtool_per_queue_op per_queue_opt;
2475 2479
2476 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) 2480 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
2477 return -EFAULT; 2481 return -EFAULT;
2478 2482
2483 if (per_queue_opt.sub_command != sub_cmd)
2484 return -EINVAL;
2485
2479 switch (per_queue_opt.sub_command) { 2486 switch (per_queue_opt.sub_command) {
2480 case ETHTOOL_GCOALESCE: 2487 case ETHTOOL_GCOALESCE:
2481 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); 2488 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
@@ -2846,7 +2853,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2846 rc = ethtool_get_phy_stats(dev, useraddr); 2853 rc = ethtool_get_phy_stats(dev, useraddr);
2847 break; 2854 break;
2848 case ETHTOOL_PERQUEUE: 2855 case ETHTOOL_PERQUEUE:
2849 rc = ethtool_set_per_queue(dev, useraddr); 2856 rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
2850 break; 2857 break;
2851 case ETHTOOL_GLINKSETTINGS: 2858 case ETHTOOL_GLINKSETTINGS:
2852 rc = ethtool_get_link_ksettings(dev, useraddr); 2859 rc = ethtool_get_link_ksettings(dev, useraddr);
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 1ad9aa62a97b..eab8cd5ec2f5 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -296,8 +296,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
296next_entry: 296next_entry:
297 e++; 297 e++;
298 } 298 }
299 e = 0;
300 s_e = 0;
301 299
302 spin_lock_bh(lock); 300 spin_lock_bh(lock);
303 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) { 301 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a0b6932c3afd..a9d06d4dd057 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1184,11 +1184,6 @@ route_lookup:
1184 } 1184 }
1185 skb_dst_set(skb, dst); 1185 skb_dst_set(skb, dst);
1186 1186
1187 if (encap_limit >= 0) {
1188 init_tel_txopt(&opt, encap_limit);
1189 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1190 }
1191
1192 if (hop_limit == 0) { 1187 if (hop_limit == 0) {
1193 if (skb->protocol == htons(ETH_P_IP)) 1188 if (skb->protocol == htons(ETH_P_IP))
1194 hop_limit = ip_hdr(skb)->ttl; 1189 hop_limit = ip_hdr(skb)->ttl;
@@ -1210,6 +1205,11 @@ route_lookup:
1210 if (err) 1205 if (err)
1211 return err; 1206 return err;
1212 1207
1208 if (encap_limit >= 0) {
1209 init_tel_txopt(&opt, encap_limit);
1210 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1211 }
1212
1213 skb_push(skb, sizeof(struct ipv6hdr)); 1213 skb_push(skb, sizeof(struct ipv6hdr));
1214 skb_reset_network_header(skb); 1214 skb_reset_network_header(skb);
1215 ipv6h = ipv6_hdr(skb); 1215 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4ae54aaca373..dbab62e3f0d7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2436{ 2436{
2437 int err; 2437 int err;
2438 2438
2439 /* callers have the socket lock and rtnl lock 2439 write_lock_bh(&iml->sflock);
2440 * so no other readers or writers of iml or its sflist
2441 */
2442 if (!iml->sflist) { 2440 if (!iml->sflist) {
2443 /* any-source empty exclude case */ 2441 /* any-source empty exclude case */
2444 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); 2442 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2443 } else {
2444 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2445 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2446 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2447 iml->sflist = NULL;
2445 } 2448 }
2446 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 2449 write_unlock_bh(&iml->sflock);
2447 iml->sflist->sl_count, iml->sflist->sl_addr, 0);
2448 sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
2449 iml->sflist = NULL;
2450 return err; 2450 return err;
2451} 2451}
2452 2452
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a366c05a239d..abcb5ae77319 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -520,10 +520,11 @@ static void rt6_probe_deferred(struct work_struct *w)
520 520
521static void rt6_probe(struct fib6_info *rt) 521static void rt6_probe(struct fib6_info *rt)
522{ 522{
523 struct __rt6_probe_work *work; 523 struct __rt6_probe_work *work = NULL;
524 const struct in6_addr *nh_gw; 524 const struct in6_addr *nh_gw;
525 struct neighbour *neigh; 525 struct neighbour *neigh;
526 struct net_device *dev; 526 struct net_device *dev;
527 struct inet6_dev *idev;
527 528
528 /* 529 /*
529 * Okay, this does not seem to be appropriate 530 * Okay, this does not seem to be appropriate
@@ -539,15 +540,12 @@ static void rt6_probe(struct fib6_info *rt)
539 nh_gw = &rt->fib6_nh.nh_gw; 540 nh_gw = &rt->fib6_nh.nh_gw;
540 dev = rt->fib6_nh.nh_dev; 541 dev = rt->fib6_nh.nh_dev;
541 rcu_read_lock_bh(); 542 rcu_read_lock_bh();
543 idev = __in6_dev_get(dev);
542 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); 544 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
543 if (neigh) { 545 if (neigh) {
544 struct inet6_dev *idev;
545
546 if (neigh->nud_state & NUD_VALID) 546 if (neigh->nud_state & NUD_VALID)
547 goto out; 547 goto out;
548 548
549 idev = __in6_dev_get(dev);
550 work = NULL;
551 write_lock(&neigh->lock); 549 write_lock(&neigh->lock);
552 if (!(neigh->nud_state & NUD_VALID) && 550 if (!(neigh->nud_state & NUD_VALID) &&
553 time_after(jiffies, 551 time_after(jiffies,
@@ -557,11 +555,13 @@ static void rt6_probe(struct fib6_info *rt)
557 __neigh_set_probe_once(neigh); 555 __neigh_set_probe_once(neigh);
558 } 556 }
559 write_unlock(&neigh->lock); 557 write_unlock(&neigh->lock);
560 } else { 558 } else if (time_after(jiffies, rt->last_probe +
559 idev->cnf.rtr_probe_interval)) {
561 work = kmalloc(sizeof(*work), GFP_ATOMIC); 560 work = kmalloc(sizeof(*work), GFP_ATOMIC);
562 } 561 }
563 562
564 if (work) { 563 if (work) {
564 rt->last_probe = jiffies;
565 INIT_WORK(&work->work, rt6_probe_deferred); 565 INIT_WORK(&work->work, rt6_probe_deferred);
566 work->target = *nh_gw; 566 work->target = *nh_gw;
567 dev_hold(dev); 567 dev_hold(dev);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 28c4aa5078fc..b36694b6716e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -766,11 +766,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
766 766
767 ret = udpv6_queue_rcv_skb(sk, skb); 767 ret = udpv6_queue_rcv_skb(sk, skb);
768 768
769 /* a return value > 0 means to resubmit the input, but 769 /* a return value > 0 means to resubmit the input */
770 * it wants the return to be -protocol, or 0
771 */
772 if (ret > 0) 770 if (ret > 0)
773 return -ret; 771 return ret;
774 return 0; 772 return 0;
775} 773}
776 774
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef3defaf43b9..d35bcf92969c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
146 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 146 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
147 fl6->saddr = reverse ? hdr->daddr : hdr->saddr; 147 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
148 148
149 while (nh + offset + 1 < skb->data || 149 while (nh + offset + sizeof(*exthdr) < skb->data ||
150 pskb_may_pull(skb, nh + offset + 1 - skb->data)) { 150 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
151 nh = skb_network_header(skb); 151 nh = skb_network_header(skb);
152 exthdr = (struct ipv6_opt_hdr *)(nh + offset); 152 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
153 153
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c0ac522b48a1..4ff89cb7c86f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
         llc_sk(sk)->sap = sap;
 
         spin_lock_bh(&sap->sk_lock);
+        sock_set_flag(sk, SOCK_RCU_FREE);
         sap->sk_count++;
         sk_nulls_add_node_rcu(sk, laddr_hb);
         hlist_add_head(&llc->dev_hash_node, dev_hb);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 652e314de38e..8079aacaecac 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 {
         struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
         struct rxrpc_connection *conn;
-        struct rxrpc_peer *peer;
+        struct rxrpc_peer *peer = NULL;
         struct rxrpc_call *call;
 
         _enter("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index cad0691c2bb4..0906e51d3cfb 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -139,7 +139,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
         udp_sk(usk)->gro_complete = NULL;
 
         udp_encap_enable();
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
         if (local->srx.transport.family == AF_INET6)
                 udpv6_encap_enable();
 #endif
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index e8fb8922bca8..a141ee3ab812 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -572,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                 whdr.flags ^= RXRPC_CLIENT_INITIATED;
                 whdr.flags &= RXRPC_CLIENT_INITIATED;
 
-                ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+                ret = kernel_sendmsg(local->socket, &msg,
+                                     iov, ioc, size);
                 if (ret < 0)
                         trace_rxrpc_tx_fail(local->debug_id, 0, ret,
                                             rxrpc_tx_point_reject);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 05b51bdbdd41..bd2fa3b7caa7 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -195,6 +195,7 @@ void rxrpc_error_report(struct sock *sk)
         rxrpc_store_error(peer, serr);
         rcu_read_unlock();
         rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+        rxrpc_put_peer(peer);
 
         _leave("");
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0a75cb2e5e7b..70f144ac5e1d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,8 @@
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
 
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
 /* The list of all installed classifier types */
 static LIST_HEAD(tcf_proto_base);
 
@@ -1211,7 +1213,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 replay:
         tp_created = 0;
 
-        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
         if (err < 0)
                 return err;
 
@@ -1360,7 +1362,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                 return -EPERM;
 
-        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
         if (err < 0)
                 return err;
 
@@ -1475,7 +1477,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
         void *fh = NULL;
         int err;
 
-        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
         if (err < 0)
                 return err;
 
@@ -1838,7 +1840,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
                 return -EPERM;
 
 replay:
-        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
         if (err < 0)
                 return err;
 
@@ -1949,7 +1951,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                 return skb->len;
 
-        err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+        err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                          NULL);
         if (err)
                 return err;
 
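
The cls_api changes above pass rtm_tca_policy to every nlmsg_parse() call so that TCA_* attributes are type-checked before any handler reads them. The following self-contained sketch illustrates the same table-driven idea; it is an analogy only, not the netlink API, and all names here (attr, policy, validate) are made up.

#include <stdbool.h>
#include <stdio.h>

enum attr_type { ATTR_UNSPEC, ATTR_U32, ATTR_STRING };

struct attr { int id; enum attr_type type; };
struct policy { enum attr_type type; };

#define ATTR_MAX 3

/* Expected type per attribute id, declared once, checked everywhere. */
static const struct policy policy[ATTR_MAX + 1] = {
        [1] = { .type = ATTR_STRING },  /* e.g. TCA_KIND */
        [2] = { .type = ATTR_U32 },     /* e.g. TCA_CHAIN */
};

static bool validate(const struct attr *attrs, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                int id = attrs[i].id;

                if (id < 0 || id > ATTR_MAX)
                        return false;
                if (policy[id].type != ATTR_UNSPEC &&
                    policy[id].type != attrs[i].type)
                        return false;   /* wrong type: reject before use */
        }
        return true;
}

int main(void)
{
        struct attr good[] = { { 1, ATTR_STRING }, { 2, ATTR_U32 } };
        struct attr bad[]  = { { 2, ATTR_STRING } };

        printf("good: %s\n", validate(good, 2) ? "accepted" : "rejected");
        printf("bad:  %s\n", validate(bad, 1) ? "accepted" : "rejected");
        return 0;
}
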
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 85e73f48e48f..3dc0acf54245 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1307,10 +1307,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
         return 0;
 }
 
-/*
- * Delete/get qdisc.
- */
-
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
         [TCA_KIND]      = { .type = NLA_STRING },
         [TCA_OPTIONS]   = { .type = NLA_NESTED },
@@ -1323,6 +1319,10 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
         [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
 };
 
+/*
+ * Delete/get qdisc.
+ */
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                         struct netlink_ext_ack *extack)
 {
@@ -2059,7 +2059,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
 
         if (tcm->tcm_parent) {
                 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
-                if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+                if (q && q != root &&
+                    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
                         return -1;
                 return 0;
         }
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 297d9cf960b9..a827a1f562bf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
         /* Get the lowest pmtu of all the transports. */
         list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
                 if (t->pmtu_pending && t->dst) {
-                        sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+                        sctp_transport_update_pmtu(t,
+                                                   atomic_read(&t->mtu_info));
                         t->pmtu_pending = 0;
                 }
                 if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 9bbc5f92c941..5c36a99882ed 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                 return;
 
         if (sock_owned_by_user(sk)) {
+                atomic_set(&t->mtu_info, pmtu);
                 asoc->pmtu_pending = 1;
                 t->pmtu_pending = 1;
                 return;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7f849b01ec8e..67939ad99c01 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                 sctp_assoc_sync_pmtu(asoc);
         }
 
+        if (asoc->pmtu_pending) {
+                if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                        sctp_assoc_sync_pmtu(asoc);
+                asoc->pmtu_pending = 0;
+        }
+
         /* If there a is a prepend chunk stick it on the list before
          * any other chunks get appended.
          */
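
Together, the three SCTP hunks above defer the ICMP-reported PMTU through t->mtu_info: input.c stores the value atomically when the socket is owned by the user, and associola.c/output.c later consume that stored value instead of re-deriving the MTU from the route. A small user-space sketch of the handoff, using C11 atomics in place of the kernel's atomic_t; transport, icmp_frag_needed and sync_pmtu are illustrative names, not the SCTP API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct transport {
        atomic_uint mtu_info;   /* analogous to t->mtu_info */
        bool pmtu_pending;      /* analogous to t->pmtu_pending */
        unsigned int pathmtu;
};

static void icmp_frag_needed(struct transport *t, unsigned int pmtu)
{
        atomic_store(&t->mtu_info, pmtu);       /* record what ICMP reported */
        t->pmtu_pending = true;                 /* defer the actual update */
}

static void sync_pmtu(struct transport *t)
{
        if (t->pmtu_pending) {
                /* consume the stored value, not a possibly stale route MTU */
                t->pathmtu = atomic_load(&t->mtu_info);
                t->pmtu_pending = false;
        }
}

int main(void)
{
        struct transport t = { .pathmtu = 1500 };

        icmp_frag_needed(&t, 1400);
        sync_pmtu(&t);
        printf("pathmtu now %u\n", t.pathmtu);
        return 0;
}
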
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f73e9d38d5ba..c1c1bda334a4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -271,11 +271,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
 
         spin_lock_bh(&sctp_assocs_id_lock);
         asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+        if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+                asoc = NULL;
         spin_unlock_bh(&sctp_assocs_id_lock);
 
-        if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
-                return NULL;
-
         return asoc;
 }
 
@@ -1946,8 +1945,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
         if (sp->strm_interleave) {
                 timeo = sock_sndtimeo(sk, 0);
                 err = sctp_wait_for_connect(asoc, &timeo);
-                if (err)
+                if (err) {
+                        err = -ESRCH;
                         goto err;
+                }
         } else {
                 wait_connect = true;
         }
@@ -7100,14 +7101,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
         }
 
         policy = params.sprstat_policy;
-        if (policy & ~SCTP_PR_SCTP_MASK)
+        if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
                 goto out;
 
         asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
         if (!asoc)
                 goto out;
 
-        if (policy == SCTP_PR_SCTP_NONE) {
+        if (policy & SCTP_PR_SCTP_ALL) {
                 params.sprstat_abandoned_unsent = 0;
                 params.sprstat_abandoned_sent = 0;
                 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7160,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
         }
 
         policy = params.sprstat_policy;
-        if (policy & ~SCTP_PR_SCTP_MASK)
+        if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
                 goto out;
 
         asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7176,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
                 goto out;
         }
 
-        if (policy == SCTP_PR_SCTP_NONE) {
+        if (policy == SCTP_PR_SCTP_ALL) {
                 params.sprstat_abandoned_unsent = 0;
                 params.sprstat_abandoned_sent = 0;
                 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
diff --git a/net/socket.c b/net/socket.c
index 01f3f8f32d6f..390a8ecef4bf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2875,9 +2875,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
                     copy_in_user(&rxnfc->fs.ring_cookie,
                                  &compat_rxnfc->fs.ring_cookie,
                                  (void __user *)(&rxnfc->fs.location + 1) -
-                                 (void __user *)&rxnfc->fs.ring_cookie) ||
-                    copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
-                                 sizeof(rxnfc->rule_cnt)))
+                                 (void __user *)&rxnfc->fs.ring_cookie))
+                        return -EFAULT;
+                if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+                        if (put_user(rule_cnt, &rxnfc->rule_cnt))
+                                return -EFAULT;
+                } else if (copy_in_user(&rxnfc->rule_cnt,
+                                        &compat_rxnfc->rule_cnt,
+                                        sizeof(rxnfc->rule_cnt)))
                         return -EFAULT;
         }
 
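
The ethtool compat hunk above avoids a double fetch: rule_cnt is validated against the first copy and, for ETHTOOL_GRXCLSRLALL, the already-validated value is written back with put_user() rather than copied a second time from memory the user can still modify. A user-space sketch of the general single-fetch pattern follows; it is not the kernel API, and request/handle_request are made-up names.

#include <stdint.h>
#include <stdio.h>

#define MAX_RULES 16

struct request { uint32_t rule_cnt; };

/* 'shared' stands in for memory another party may change concurrently. */
static int handle_request(const volatile struct request *shared)
{
        /* Single fetch: snapshot the field once. */
        uint32_t rule_cnt = shared->rule_cnt;

        if (rule_cnt > MAX_RULES)
                return -1;              /* validated against the snapshot */

        /* From here on, only the snapshot is used; a concurrent writer
         * changing shared->rule_cnt can no longer bypass the check above. */
        printf("processing %u rules\n", rule_cnt);
        return 0;
}

int main(void)
{
        struct request req = { .rule_cnt = 4 };

        return handle_request(&req) ? 1 : 0;
}
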
diff --git a/net/tipc/group.c b/net/tipc/group.c
index e82f13cb2dc5..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
         struct sk_buff *skb;
         struct tipc_msg *hdr;
 
+        memset(&evt, 0, sizeof(evt));
         evt.event = event;
         evt.found_lower = m->instance;
         evt.found_upper = m->instance;
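
The tipc_group_create_event() hunk above zeroes the on-stack event before filling it, so structure padding and unset members cannot leak stale kernel stack bytes into the message. A minimal user-space illustration of the same pattern; struct event and build_event are illustrative names only.

#include <stdio.h>
#include <string.h>

struct event {
        unsigned char type;     /* padding bytes typically follow this field */
        unsigned int lower;
        unsigned int upper;
};

static void build_event(struct event *evt, unsigned int instance)
{
        memset(evt, 0, sizeof(*evt));   /* clears padding and unset fields */
        evt->type = 1;
        evt->lower = instance;
        evt->upper = instance;
}

int main(void)
{
        struct event evt;

        build_event(&evt, 42);
        printf("type=%u lower=%u upper=%u\n", evt.type, evt.lower, evt.upper);
        return 0;
}
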
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f6552e4f4b43..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1041,6 +1041,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
         if (r->last_retransm != buf_seqno(skb)) {
                 r->last_retransm = buf_seqno(skb);
                 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+                r->stale_cnt = 0;
         } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                 link_retransmit_failure(l, skb);
                 if (link_is_bc_sndlink(l))
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..3cfeb9df64b0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
         struct sk_buff *buf;
         struct distr_item *item;
 
-        list_del(&publ->binding_node);
+        list_del_rcu(&publ->binding_node);
 
         if (publ->scope == TIPC_NODE_SCOPE)
                 return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
                           ITEM_SIZE) * ITEM_SIZE;
         u32 msg_rem = msg_dsz;
 
-        list_for_each_entry(publ, pls, binding_node) {
+        list_for_each_entry_rcu(publ, pls, binding_node) {
                 /* Prepare next buffer: */
                 if (!skb) {
                         skb = named_prepare_buf(net, PUBLICATION, msg_rem,
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..661504042d30 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
         sk->sk_destruct = xsk_destruct;
         sk_refcnt_debug_inc(sk);
 
+        sock_set_flag(sk, SOCK_RCU_FREE);
+
         xs = xdp_sk(sk);
         mutex_init(&xs->mutex);
         spin_lock_init(&xs->tx_completion_lock);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 31acc6f33d98..6f05e831a73e 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
 
 static void xfrmi_dev_free(struct net_device *dev)
 {
+        struct xfrm_if *xi = netdev_priv(dev);
+
+        gro_cells_destroy(&xi->gro_cells);
         free_percpu(dev->tstats);
 }
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f094d4b3520d..119a427d9b2b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
                                 break;
                 }
                 if (newpos)
-                        hlist_add_behind(&policy->bydst, newpos);
+                        hlist_add_behind_rcu(&policy->bydst, newpos);
                 else
-                        hlist_add_head(&policy->bydst, chain);
+                        hlist_add_head_rcu(&policy->bydst, chain);
         }
 
         spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
                         break;
         }
         if (newpos)
-                hlist_add_behind(&policy->bydst, newpos);
+                hlist_add_behind_rcu(&policy->bydst, newpos);
         else
-                hlist_add_head(&policy->bydst, chain);
+                hlist_add_head_rcu(&policy->bydst, chain);
         __xfrm_policy_link(policy, dir);
 
         /* After previous checking, family can either be AF_INET or AF_INET6 */
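
The xfrm_policy hunks above switch to the _rcu hlist helpers so that concurrent lockless readers never observe a half-initialised node. A rough user-space sketch of the underlying publish-with-release idea, using C11 atomics rather than the kernel's RCU primitives; node, list_publish and list_first are made-up names.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int value;
        struct node *next;
};

static _Atomic(struct node *) head;

static void list_publish(int value)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return;
        n->value = value;               /* initialise the node first... */
        n->next = atomic_load_explicit(&head, memory_order_relaxed);
        /* ...then publish with release so acquire readers see it complete. */
        atomic_store_explicit(&head, n, memory_order_release);
}

static struct node *list_first(void)
{
        return atomic_load_explicit(&head, memory_order_acquire);
}

int main(void)
{
        list_publish(1);
        list_publish(2);
        printf("first value: %d\n", list_first()->value);
        return 0;
}
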
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea92..b5277106df1f 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
         }
 }
 
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
 
 static __attribute__((constructor)) void main_ctor(void)
 {
         getrlimit(RLIMIT_MEMLOCK, &rlim_old);
-        rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
-        rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
-        setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
+        if (rlim_old.rlim_cur != RLIM_INFINITY) {
+                struct rlimit rlim_new;
+
+                rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+                rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+                setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+        }
 }
 
 static __attribute__((destructor)) void main_dtor(void)