aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-03-23 11:24:57 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-23 11:31:58 -0400
commit03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
treefbaf8738296b2e9dcba81c6daef2d515b6c4948c /net
parent6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parentf36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here... For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved. In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed. In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here. The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code. The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes: ==================== Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based. Conflicts: drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524 (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e61 (IB/mlx5: Add proper representors support) added as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch. 
Updates: drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/icmp_socket.c1
-rw-r--r--net/batman-adv/log.c1
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/batman-adv/routing.c25
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bridge/netfilter/ebt_among.c34
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/core/dev.c22
-rw-r--r--net/core/dev_ioctl.c7
-rw-r--r--net/core/devlink.c16
-rw-r--r--net/core/filter.c60
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/sock.c21
-rw-r--r--net/core/sock_diag.c12
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/dsa/legacy.c2
-rw-r--r--net/ieee802154/6lowpan/core.c12
-rw-r--r--net/ipv4/inet_diag.c3
-rw-r--r--net/ipv4/inet_fragment.c3
-rw-r--r--net/ipv4/route.c47
-rw-r--r--net/ipv4/tcp.c1
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c3
-rw-r--r--net/ipv4/xfrm4_policy.c5
-rw-r--r--net/ipv6/datagram.c21
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/route.c76
-rw-r--r--net/ipv6/seg6_iptunnel.c7
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c3
-rw-r--r--net/ipv6/xfrm6_policy.c5
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/kcm/kcmsock.c33
-rw-r--r--net/l2tp/l2tp_core.c46
-rw-r--r--net/l2tp/l2tp_core.h3
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/netfilter/nf_tables_api.c1
-rw-r--r--net/netfilter/nft_set_hash.c2
-rw-r--r--net/netfilter/x_tables.c30
-rw-r--r--net/netfilter/xt_hashlimit.c16
-rw-r--r--net/netfilter/xt_recent.c6
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/openvswitch/meter.c12
-rw-r--r--net/sched/act_bpf.c2
-rw-r--r--net/sched/act_csum.c5
-rw-r--r--net/sched/act_ipt.c9
-rw-r--r--net/sched/act_pedit.c2
-rw-r--r--net/sched/act_police.c2
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/act_simple.c2
-rw-r--r--net/sched/act_skbmod.c5
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/act_vlan.c5
-rw-r--r--net/sched/sch_generic.c22
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sctp/input.c8
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/offload.c2
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc_close.c25
-rw-r--r--net/socket.c5
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_policy.c13
-rw-r--r--net/xfrm/xfrm_replay.c2
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c21
69 files changed, 489 insertions, 267 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 64aa9f755e1d..45c9bf5ff3a0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
48 * original position later 48 * original position later
49 */ 49 */
50 skb_push(skb, offset); 50 skb_push(skb, offset);
51 skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, 51 skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
52 skb->vlan_tci); 52 skb->vlan_tci, skb->mac_len);
53 if (!skb) 53 if (!skb)
54 return false; 54 return false;
55 skb_pull(skb, offset + VLAN_HLEN); 55 skb_pull(skb, offset + VLAN_HLEN);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 75dda9454ccf..a60bacf7120b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -400,7 +400,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
400 batadv_arp_hw_src(skb, hdr_size), &ip_src, 400 batadv_arp_hw_src(skb, hdr_size), &ip_src,
401 batadv_arp_hw_dst(skb, hdr_size), &ip_dst); 401 batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
402 402
403 if (hdr_size == 0) 403 if (hdr_size < sizeof(struct batadv_unicast_packet))
404 return; 404 return;
405 405
406 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 406 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 7d5e9abb7a65..55c358ad3331 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -24,6 +24,7 @@
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27#include <linux/eventpoll.h>
27#include <linux/export.h> 28#include <linux/export.h>
28#include <linux/fcntl.h> 29#include <linux/fcntl.h>
29#include <linux/fs.h> 30#include <linux/fs.h>
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 52d8a4b848c0..853773e45f79 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -22,6 +22,7 @@
22#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/eventpoll.h>
25#include <linux/export.h> 26#include <linux/export.h>
26#include <linux/fcntl.h> 27#include <linux/fcntl.h>
27#include <linux/fs.h> 28#include <linux/fs.h>
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 5615b6abea6f..de3a055f7dd8 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -598,8 +598,8 @@ update:
598 bat_priv->mcast.enabled = true; 598 bat_priv->mcast.enabled = true;
599 } 599 }
600 600
601 return !(mcast_data.flags & 601 return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
602 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6)); 602 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
603} 603}
604 604
605/** 605/**
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 289df027ecdd..cc3ed93a6d51 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -759,6 +759,7 @@ free_skb:
759/** 759/**
760 * batadv_reroute_unicast_packet() - update the unicast header for re-routing 760 * batadv_reroute_unicast_packet() - update the unicast header for re-routing
761 * @bat_priv: the bat priv with all the soft interface information 761 * @bat_priv: the bat priv with all the soft interface information
762 * @skb: unicast packet to process
762 * @unicast_packet: the unicast header to be updated 763 * @unicast_packet: the unicast header to be updated
763 * @dst_addr: the payload destination 764 * @dst_addr: the payload destination
764 * @vid: VLAN identifier 765 * @vid: VLAN identifier
@@ -770,7 +771,7 @@ free_skb:
770 * Return: true if the packet header has been updated, false otherwise 771 * Return: true if the packet header has been updated, false otherwise
771 */ 772 */
772static bool 773static bool
773batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, 774batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
774 struct batadv_unicast_packet *unicast_packet, 775 struct batadv_unicast_packet *unicast_packet,
775 u8 *dst_addr, unsigned short vid) 776 u8 *dst_addr, unsigned short vid)
776{ 777{
@@ -799,8 +800,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
799 } 800 }
800 801
801 /* update the packet header */ 802 /* update the packet header */
803 skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
802 ether_addr_copy(unicast_packet->dest, orig_addr); 804 ether_addr_copy(unicast_packet->dest, orig_addr);
803 unicast_packet->ttvn = orig_ttvn; 805 unicast_packet->ttvn = orig_ttvn;
806 skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
804 807
805 ret = true; 808 ret = true;
806out: 809out:
@@ -841,7 +844,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
841 * the packet to 844 * the packet to
842 */ 845 */
843 if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) { 846 if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
844 if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, 847 if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
845 ethhdr->h_dest, vid)) 848 ethhdr->h_dest, vid))
846 batadv_dbg_ratelimited(BATADV_DBG_TT, 849 batadv_dbg_ratelimited(BATADV_DBG_TT,
847 bat_priv, 850 bat_priv,
@@ -887,7 +890,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
887 * destination can possibly be updated and forwarded towards the new 890 * destination can possibly be updated and forwarded towards the new
888 * target host 891 * target host
889 */ 892 */
890 if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, 893 if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
891 ethhdr->h_dest, vid)) { 894 ethhdr->h_dest, vid)) {
892 batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, 895 batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
893 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", 896 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -910,12 +913,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
910 if (!primary_if) 913 if (!primary_if)
911 return false; 914 return false;
912 915
916 /* update the packet header */
917 skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
913 ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); 918 ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
919 unicast_packet->ttvn = curr_ttvn;
920 skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
914 921
915 batadv_hardif_put(primary_if); 922 batadv_hardif_put(primary_if);
916 923
917 unicast_packet->ttvn = curr_ttvn;
918
919 return true; 924 return true;
920} 925}
921 926
@@ -968,14 +973,10 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
968 struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL; 973 struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
969 int check, hdr_size = sizeof(*unicast_packet); 974 int check, hdr_size = sizeof(*unicast_packet);
970 enum batadv_subtype subtype; 975 enum batadv_subtype subtype;
971 struct ethhdr *ethhdr;
972 int ret = NET_RX_DROP; 976 int ret = NET_RX_DROP;
973 bool is4addr, is_gw; 977 bool is4addr, is_gw;
974 978
975 unicast_packet = (struct batadv_unicast_packet *)skb->data; 979 unicast_packet = (struct batadv_unicast_packet *)skb->data;
976 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
977 ethhdr = eth_hdr(skb);
978
979 is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; 980 is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
980 /* the caller function should have already pulled 2 bytes */ 981 /* the caller function should have already pulled 2 bytes */
981 if (is4addr) 982 if (is4addr)
@@ -995,12 +996,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
995 if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size)) 996 if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
996 goto free_skb; 997 goto free_skb;
997 998
999 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1000
998 /* packet for me */ 1001 /* packet for me */
999 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { 1002 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
1000 /* If this is a unicast packet from another backgone gw, 1003 /* If this is a unicast packet from another backgone gw,
1001 * drop it. 1004 * drop it.
1002 */ 1005 */
1003 orig_addr_gw = ethhdr->h_source; 1006 orig_addr_gw = eth_hdr(skb)->h_source;
1004 orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw); 1007 orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
1005 if (orig_node_gw) { 1008 if (orig_node_gw) {
1006 is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw, 1009 is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
@@ -1015,6 +1018,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1015 } 1018 }
1016 1019
1017 if (is4addr) { 1020 if (is4addr) {
1021 unicast_4addr_packet =
1022 (struct batadv_unicast_4addr_packet *)skb->data;
1018 subtype = unicast_4addr_packet->subtype; 1023 subtype = unicast_4addr_packet->subtype;
1019 batadv_dat_inc_counter(bat_priv, subtype); 1024 batadv_dat_inc_counter(bat_priv, subtype);
1020 1025
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 01117ae84f1d..a2ddae2f37d7 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2296,8 +2296,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
2296 else 2296 else
2297 sec_level = authreq_to_seclevel(auth); 2297 sec_level = authreq_to_seclevel(auth);
2298 2298
2299 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) 2299 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
2300 /* If link is already encrypted with sufficient security we
2301 * still need refresh encryption as per Core Spec 5.0 Vol 3,
2302 * Part H 2.4.6
2303 */
2304 smp_ltk_encrypt(conn, hcon->sec_level);
2300 return 0; 2305 return 0;
2306 }
2301 2307
2302 if (sec_level > hcon->pending_sec_level) 2308 if (sec_level > hcon->pending_sec_level)
2303 hcon->pending_sec_level = sec_level; 2309 hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index c5afb4232ecb..620e54f08296 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -177,6 +177,28 @@ static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
177 return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); 177 return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
178} 178}
179 179
180static bool wormhash_offset_invalid(int off, unsigned int len)
181{
182 if (off == 0) /* not present */
183 return false;
184
185 if (off < (int)sizeof(struct ebt_among_info) ||
186 off % __alignof__(struct ebt_mac_wormhash))
187 return true;
188
189 off += sizeof(struct ebt_mac_wormhash);
190
191 return off > len;
192}
193
194static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
195{
196 if (a == 0)
197 a = sizeof(struct ebt_among_info);
198
199 return ebt_mac_wormhash_size(wh) + a == b;
200}
201
180static int ebt_among_mt_check(const struct xt_mtchk_param *par) 202static int ebt_among_mt_check(const struct xt_mtchk_param *par)
181{ 203{
182 const struct ebt_among_info *info = par->matchinfo; 204 const struct ebt_among_info *info = par->matchinfo;
@@ -189,6 +211,10 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
189 if (expected_length > em->match_size) 211 if (expected_length > em->match_size)
190 return -EINVAL; 212 return -EINVAL;
191 213
214 if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
215 wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
216 return -EINVAL;
217
192 wh_dst = ebt_among_wh_dst(info); 218 wh_dst = ebt_among_wh_dst(info);
193 if (poolsize_invalid(wh_dst)) 219 if (poolsize_invalid(wh_dst))
194 return -EINVAL; 220 return -EINVAL;
@@ -201,6 +227,14 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
201 if (poolsize_invalid(wh_src)) 227 if (poolsize_invalid(wh_src))
202 return -EINVAL; 228 return -EINVAL;
203 229
230 if (info->wh_src_ofs < info->wh_dst_ofs) {
231 if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
232 return -EINVAL;
233 } else {
234 if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
235 return -EINVAL;
236 }
237
204 expected_length += ebt_mac_wormhash_size(wh_src); 238 expected_length += ebt_mac_wormhash_size(wh_src);
205 239
206 if (em->match_size != EBT_ALIGN(expected_length)) { 240 if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 254ef9f49567..a94d23b0a9af 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2119,8 +2119,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2119 * offsets are relative to beginning of struct ebt_entry (i.e., 0). 2119 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2120 */ 2120 */
2121 for (i = 0; i < 4 ; ++i) { 2121 for (i = 0; i < 4 ; ++i) {
2122 if (offsets[i] >= *total) 2122 if (offsets[i] > *total)
2123 return -EINVAL; 2123 return -EINVAL;
2124
2125 if (i < 3 && offsets[i] == *total)
2126 return -EINVAL;
2127
2124 if (i == 0) 2128 if (i == 0)
2125 continue; 2129 continue;
2126 if (offsets[i-1] > offsets[i]) 2130 if (offsets[i-1] > offsets[i])
diff --git a/net/core/dev.c b/net/core/dev.c
index d8887cc38e7b..f9c28f44286c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3278,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3278#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3278#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3279static void skb_update_prio(struct sk_buff *skb) 3279static void skb_update_prio(struct sk_buff *skb)
3280{ 3280{
3281 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 3281 const struct netprio_map *map;
3282 const struct sock *sk;
3283 unsigned int prioidx;
3282 3284
3283 if (!skb->priority && skb->sk && map) { 3285 if (skb->priority)
3284 unsigned int prioidx = 3286 return;
3285 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data); 3287 map = rcu_dereference_bh(skb->dev->priomap);
3288 if (!map)
3289 return;
3290 sk = skb_to_full_sk(skb);
3291 if (!sk)
3292 return;
3286 3293
3287 if (prioidx < map->priomap_len) 3294 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3288 skb->priority = map->priomap[prioidx]; 3295
3289 } 3296 if (prioidx < map->priomap_len)
3297 skb->priority = map->priomap[prioidx];
3290} 3298}
3291#else 3299#else
3292#define skb_update_prio(skb) 3300#define skb_update_prio(skb)
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296c..a04e1e88bf3a 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
402 if (colon) 402 if (colon)
403 *colon = 0; 403 *colon = 0;
404 404
405 dev_load(net, ifr->ifr_name);
406
407 /* 405 /*
408 * See which interface the caller is talking about. 406 * See which interface the caller is talking about.
409 */ 407 */
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
423 case SIOCGIFMAP: 421 case SIOCGIFMAP:
424 case SIOCGIFINDEX: 422 case SIOCGIFINDEX:
425 case SIOCGIFTXQLEN: 423 case SIOCGIFTXQLEN:
424 dev_load(net, ifr->ifr_name);
426 rcu_read_lock(); 425 rcu_read_lock();
427 ret = dev_ifsioc_locked(net, ifr, cmd); 426 ret = dev_ifsioc_locked(net, ifr, cmd);
428 rcu_read_unlock(); 427 rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
431 return ret; 430 return ret;
432 431
433 case SIOCETHTOOL: 432 case SIOCETHTOOL:
433 dev_load(net, ifr->ifr_name);
434 rtnl_lock(); 434 rtnl_lock();
435 ret = dev_ethtool(net, ifr); 435 ret = dev_ethtool(net, ifr);
436 rtnl_unlock(); 436 rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
447 case SIOCGMIIPHY: 447 case SIOCGMIIPHY:
448 case SIOCGMIIREG: 448 case SIOCGMIIREG:
449 case SIOCSIFNAME: 449 case SIOCSIFNAME:
450 dev_load(net, ifr->ifr_name);
450 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 451 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
451 return -EPERM; 452 return -EPERM;
452 rtnl_lock(); 453 rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
494 /* fall through */ 495 /* fall through */
495 case SIOCBONDSLAVEINFOQUERY: 496 case SIOCBONDSLAVEINFOQUERY:
496 case SIOCBONDINFOQUERY: 497 case SIOCBONDINFOQUERY:
498 dev_load(net, ifr->ifr_name);
497 rtnl_lock(); 499 rtnl_lock();
498 ret = dev_ifsioc(net, ifr, cmd); 500 ret = dev_ifsioc(net, ifr, cmd);
499 rtnl_unlock(); 501 rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
518 cmd == SIOCGHWTSTAMP || 520 cmd == SIOCGHWTSTAMP ||
519 (cmd >= SIOCDEVPRIVATE && 521 (cmd >= SIOCDEVPRIVATE &&
520 cmd <= SIOCDEVPRIVATE + 15)) { 522 cmd <= SIOCDEVPRIVATE + 15)) {
523 dev_load(net, ifr->ifr_name);
521 rtnl_lock(); 524 rtnl_lock();
522 ret = dev_ifsioc(net, ifr, cmd); 525 ret = dev_ifsioc(net, ifr, cmd);
523 rtnl_unlock(); 526 rtnl_unlock();
diff --git a/net/core/devlink.c b/net/core/devlink.c
index d03b96f87c25..9236e421bd62 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1798,7 +1798,7 @@ send_done:
1798 if (!nlh) { 1798 if (!nlh) {
1799 err = devlink_dpipe_send_and_alloc_skb(&skb, info); 1799 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
1800 if (err) 1800 if (err)
1801 goto err_skb_send_alloc; 1801 return err;
1802 goto send_done; 1802 goto send_done;
1803 } 1803 }
1804 1804
@@ -1807,7 +1807,6 @@ send_done:
1807nla_put_failure: 1807nla_put_failure:
1808 err = -EMSGSIZE; 1808 err = -EMSGSIZE;
1809err_table_put: 1809err_table_put:
1810err_skb_send_alloc:
1811 genlmsg_cancel(skb, hdr); 1810 genlmsg_cancel(skb, hdr);
1812 nlmsg_free(skb); 1811 nlmsg_free(skb);
1813 return err; 1812 return err;
@@ -2073,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
2073 table->counters_enabled, 2072 table->counters_enabled,
2074 &dump_ctx); 2073 &dump_ctx);
2075 if (err) 2074 if (err)
2076 goto err_entries_dump; 2075 return err;
2077 2076
2078send_done: 2077send_done:
2079 nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, 2078 nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2081,16 +2080,10 @@ send_done:
2081 if (!nlh) { 2080 if (!nlh) {
2082 err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); 2081 err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
2083 if (err) 2082 if (err)
2084 goto err_skb_send_alloc; 2083 return err;
2085 goto send_done; 2084 goto send_done;
2086 } 2085 }
2087 return genlmsg_reply(dump_ctx.skb, info); 2086 return genlmsg_reply(dump_ctx.skb, info);
2088
2089err_entries_dump:
2090err_skb_send_alloc:
2091 genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
2092 nlmsg_free(dump_ctx.skb);
2093 return err;
2094} 2087}
2095 2088
2096static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, 2089static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2229,7 +2222,7 @@ send_done:
2229 if (!nlh) { 2222 if (!nlh) {
2230 err = devlink_dpipe_send_and_alloc_skb(&skb, info); 2223 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
2231 if (err) 2224 if (err)
2232 goto err_skb_send_alloc; 2225 return err;
2233 goto send_done; 2226 goto send_done;
2234 } 2227 }
2235 return genlmsg_reply(skb, info); 2228 return genlmsg_reply(skb, info);
@@ -2237,7 +2230,6 @@ send_done:
2237nla_put_failure: 2230nla_put_failure:
2238 err = -EMSGSIZE; 2231 err = -EMSGSIZE;
2239err_table_put: 2232err_table_put:
2240err_skb_send_alloc:
2241 genlmsg_cancel(skb, hdr); 2233 genlmsg_cancel(skb, hdr);
2242 nlmsg_free(skb); 2234 nlmsg_free(skb);
2243 return err; 2235 return err;
diff --git a/net/core/filter.c b/net/core/filter.c
index c86f03fd9ea5..00c711c5f1a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2283,6 +2283,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2283 u32 off = skb_mac_header_len(skb); 2283 u32 off = skb_mac_header_len(skb);
2284 int ret; 2284 int ret;
2285 2285
2286 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2287 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2288 return -ENOTSUPP;
2289
2286 ret = skb_cow(skb, len_diff); 2290 ret = skb_cow(skb, len_diff);
2287 if (unlikely(ret < 0)) 2291 if (unlikely(ret < 0))
2288 return ret; 2292 return ret;
@@ -2292,19 +2296,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2292 return ret; 2296 return ret;
2293 2297
2294 if (skb_is_gso(skb)) { 2298 if (skb_is_gso(skb)) {
2299 struct skb_shared_info *shinfo = skb_shinfo(skb);
2300
2295 /* SKB_GSO_TCPV4 needs to be changed into 2301 /* SKB_GSO_TCPV4 needs to be changed into
2296 * SKB_GSO_TCPV6. 2302 * SKB_GSO_TCPV6.
2297 */ 2303 */
2298 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 2304 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2299 skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; 2305 shinfo->gso_type &= ~SKB_GSO_TCPV4;
2300 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 2306 shinfo->gso_type |= SKB_GSO_TCPV6;
2301 } 2307 }
2302 2308
2303 /* Due to IPv6 header, MSS needs to be downgraded. */ 2309 /* Due to IPv6 header, MSS needs to be downgraded. */
2304 skb_shinfo(skb)->gso_size -= len_diff; 2310 skb_decrease_gso_size(shinfo, len_diff);
2305 /* Header must be checked, and gso_segs recomputed. */ 2311 /* Header must be checked, and gso_segs recomputed. */
2306 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2312 shinfo->gso_type |= SKB_GSO_DODGY;
2307 skb_shinfo(skb)->gso_segs = 0; 2313 shinfo->gso_segs = 0;
2308 } 2314 }
2309 2315
2310 skb->protocol = htons(ETH_P_IPV6); 2316 skb->protocol = htons(ETH_P_IPV6);
@@ -2319,6 +2325,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2319 u32 off = skb_mac_header_len(skb); 2325 u32 off = skb_mac_header_len(skb);
2320 int ret; 2326 int ret;
2321 2327
2328 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2329 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2330 return -ENOTSUPP;
2331
2322 ret = skb_unclone(skb, GFP_ATOMIC); 2332 ret = skb_unclone(skb, GFP_ATOMIC);
2323 if (unlikely(ret < 0)) 2333 if (unlikely(ret < 0))
2324 return ret; 2334 return ret;
@@ -2328,19 +2338,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2328 return ret; 2338 return ret;
2329 2339
2330 if (skb_is_gso(skb)) { 2340 if (skb_is_gso(skb)) {
2341 struct skb_shared_info *shinfo = skb_shinfo(skb);
2342
2331 /* SKB_GSO_TCPV6 needs to be changed into 2343 /* SKB_GSO_TCPV6 needs to be changed into
2332 * SKB_GSO_TCPV4. 2344 * SKB_GSO_TCPV4.
2333 */ 2345 */
2334 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 2346 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2335 skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; 2347 shinfo->gso_type &= ~SKB_GSO_TCPV6;
2336 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 2348 shinfo->gso_type |= SKB_GSO_TCPV4;
2337 } 2349 }
2338 2350
2339 /* Due to IPv4 header, MSS can be upgraded. */ 2351 /* Due to IPv4 header, MSS can be upgraded. */
2340 skb_shinfo(skb)->gso_size += len_diff; 2352 skb_increase_gso_size(shinfo, len_diff);
2341 /* Header must be checked, and gso_segs recomputed. */ 2353 /* Header must be checked, and gso_segs recomputed. */
2342 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2354 shinfo->gso_type |= SKB_GSO_DODGY;
2343 skb_shinfo(skb)->gso_segs = 0; 2355 shinfo->gso_segs = 0;
2344 } 2356 }
2345 2357
2346 skb->protocol = htons(ETH_P_IP); 2358 skb->protocol = htons(ETH_P_IP);
@@ -2439,6 +2451,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2439 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2451 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2440 int ret; 2452 int ret;
2441 2453
2454 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2455 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2456 return -ENOTSUPP;
2457
2442 ret = skb_cow(skb, len_diff); 2458 ret = skb_cow(skb, len_diff);
2443 if (unlikely(ret < 0)) 2459 if (unlikely(ret < 0))
2444 return ret; 2460 return ret;
@@ -2448,11 +2464,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2448 return ret; 2464 return ret;
2449 2465
2450 if (skb_is_gso(skb)) { 2466 if (skb_is_gso(skb)) {
2467 struct skb_shared_info *shinfo = skb_shinfo(skb);
2468
2451 /* Due to header grow, MSS needs to be downgraded. */ 2469 /* Due to header grow, MSS needs to be downgraded. */
2452 skb_shinfo(skb)->gso_size -= len_diff; 2470 skb_decrease_gso_size(shinfo, len_diff);
2453 /* Header must be checked, and gso_segs recomputed. */ 2471 /* Header must be checked, and gso_segs recomputed. */
2454 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2472 shinfo->gso_type |= SKB_GSO_DODGY;
2455 skb_shinfo(skb)->gso_segs = 0; 2473 shinfo->gso_segs = 0;
2456 } 2474 }
2457 2475
2458 return 0; 2476 return 0;
@@ -2463,6 +2481,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2463 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2481 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2464 int ret; 2482 int ret;
2465 2483
2484 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2485 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2486 return -ENOTSUPP;
2487
2466 ret = skb_unclone(skb, GFP_ATOMIC); 2488 ret = skb_unclone(skb, GFP_ATOMIC);
2467 if (unlikely(ret < 0)) 2489 if (unlikely(ret < 0))
2468 return ret; 2490 return ret;
@@ -2472,11 +2494,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2472 return ret; 2494 return ret;
2473 2495
2474 if (skb_is_gso(skb)) { 2496 if (skb_is_gso(skb)) {
2497 struct skb_shared_info *shinfo = skb_shinfo(skb);
2498
2475 /* Due to header shrink, MSS can be upgraded. */ 2499 /* Due to header shrink, MSS can be upgraded. */
2476 skb_shinfo(skb)->gso_size += len_diff; 2500 skb_increase_gso_size(shinfo, len_diff);
2477 /* Header must be checked, and gso_segs recomputed. */ 2501 /* Header must be checked, and gso_segs recomputed. */
2478 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2502 shinfo->gso_type |= SKB_GSO_DODGY;
2479 skb_shinfo(skb)->gso_segs = 0; 2503 shinfo->gso_segs = 0;
2480 } 2504 }
2481 2505
2482 return 0; 2506 return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 715c13495ba6..46cb22215ff4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4181,7 +4181,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4181 4181
4182 skb_queue_tail(&sk->sk_error_queue, skb); 4182 skb_queue_tail(&sk->sk_error_queue, skb);
4183 if (!sock_flag(sk, SOCK_DEAD)) 4183 if (!sock_flag(sk, SOCK_DEAD))
4184 sk->sk_data_ready(sk); 4184 sk->sk_error_report(sk);
4185 return 0; 4185 return 0;
4186} 4186}
4187EXPORT_SYMBOL(sock_queue_err_skb); 4187EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4906,7 +4906,7 @@ static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4906 thlen += inner_tcp_hdrlen(skb); 4906 thlen += inner_tcp_hdrlen(skb);
4907 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4907 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4908 thlen = tcp_hdrlen(skb); 4908 thlen = tcp_hdrlen(skb);
4909 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) { 4909 } else if (unlikely(skb_is_gso_sctp(skb))) {
4910 thlen = sizeof(struct sctphdr); 4910 thlen = sizeof(struct sctphdr);
4911 } 4911 }
4912 /* UFO sets gso_size to the size of the fragmentation 4912 /* UFO sets gso_size to the size of the fragmentation
@@ -5022,13 +5022,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5022 5022
5023static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5023static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5024{ 5024{
5025 int mac_len;
5026
5025 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5027 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5026 kfree_skb(skb); 5028 kfree_skb(skb);
5027 return NULL; 5029 return NULL;
5028 } 5030 }
5029 5031
5030 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 5032 mac_len = skb->data - skb_mac_header(skb);
5031 2 * ETH_ALEN); 5033 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5034 mac_len - VLAN_HLEN - ETH_TLEN);
5032 skb->mac_header += VLAN_HLEN; 5035 skb->mac_header += VLAN_HLEN;
5033 return skb; 5036 return skb;
5034} 5037}
diff --git a/net/core/sock.c b/net/core/sock.c
index f704324d1219..e689496dfd8a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3326,6 +3326,27 @@ void proto_unregister(struct proto *prot)
3326} 3326}
3327EXPORT_SYMBOL(proto_unregister); 3327EXPORT_SYMBOL(proto_unregister);
3328 3328
3329int sock_load_diag_module(int family, int protocol)
3330{
3331 if (!protocol) {
3332 if (!sock_is_registered(family))
3333 return -ENOENT;
3334
3335 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3336 NETLINK_SOCK_DIAG, family);
3337 }
3338
3339#ifdef CONFIG_INET
3340 if (family == AF_INET &&
3341 !rcu_access_pointer(inet_protos[protocol]))
3342 return -ENOENT;
3343#endif
3344
3345 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3346 NETLINK_SOCK_DIAG, family, protocol);
3347}
3348EXPORT_SYMBOL(sock_load_diag_module);
3349
3329#ifdef CONFIG_PROC_FS 3350#ifdef CONFIG_PROC_FS
3330static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 3351static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3331 __acquires(proto_list_mutex) 3352 __acquires(proto_list_mutex)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index aee5642affd9..a3392a8f9276 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 if (sock_diag_handlers[req->sdiag_family] == NULL) 222 if (sock_diag_handlers[req->sdiag_family] == NULL)
223 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 223 sock_load_diag_module(req->sdiag_family, 0);
224 NETLINK_SOCK_DIAG, req->sdiag_family);
225 224
226 mutex_lock(&sock_diag_table_mutex); 225 mutex_lock(&sock_diag_table_mutex);
227 hndl = sock_diag_handlers[req->sdiag_family]; 226 hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
247 case TCPDIAG_GETSOCK: 246 case TCPDIAG_GETSOCK:
248 case DCCPDIAG_GETSOCK: 247 case DCCPDIAG_GETSOCK:
249 if (inet_rcv_compat == NULL) 248 if (inet_rcv_compat == NULL)
250 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 249 sock_load_diag_module(AF_INET, 0);
251 NETLINK_SOCK_DIAG, AF_INET);
252 250
253 mutex_lock(&sock_diag_table_mutex); 251 mutex_lock(&sock_diag_table_mutex);
254 if (inet_rcv_compat != NULL) 252 if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
281 case SKNLGRP_INET_TCP_DESTROY: 279 case SKNLGRP_INET_TCP_DESTROY:
282 case SKNLGRP_INET_UDP_DESTROY: 280 case SKNLGRP_INET_UDP_DESTROY:
283 if (!sock_diag_handlers[AF_INET]) 281 if (!sock_diag_handlers[AF_INET])
284 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 282 sock_load_diag_module(AF_INET, 0);
285 NETLINK_SOCK_DIAG, AF_INET);
286 break; 283 break;
287 case SKNLGRP_INET6_TCP_DESTROY: 284 case SKNLGRP_INET6_TCP_DESTROY:
288 case SKNLGRP_INET6_UDP_DESTROY: 285 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 286 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 287 sock_load_diag_module(AF_INET6, 0);
291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 288 break;
293 } 289 }
294 return 0; 290 return 0;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 15bdc002d90c..84cd4e3fd01b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -794,6 +794,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
794 if (skb == NULL) 794 if (skb == NULL)
795 goto out_release; 795 goto out_release;
796 796
797 if (sk->sk_state == DCCP_CLOSED) {
798 rc = -ENOTCONN;
799 goto out_discard;
800 }
801
797 skb_reserve(skb, sk->sk_prot->max_header); 802 skb_reserve(skb, sk->sk_prot->max_header);
798 rc = memcpy_from_msg(skb_put(skb, len), msg, len); 803 rc = memcpy_from_msg(skb_put(skb, len), msg, len);
799 if (rc != 0) 804 if (rc != 0)
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index cb54b81d0bd9..42a7b85b84e1 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds,
194 ds->ports[i].dn = cd->port_dn[i]; 194 ds->ports[i].dn = cd->port_dn[i];
195 ds->ports[i].cpu_dp = dst->cpu_dp; 195 ds->ports[i].cpu_dp = dst->cpu_dp;
196 196
197 if (dsa_is_user_port(ds, i)) 197 if (!dsa_is_user_port(ds, i))
198 continue; 198 continue;
199 199
200 ret = dsa_slave_create(&ds->ports[i]); 200 ret = dsa_slave_create(&ds->ports[i]);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index e4f305320519..275449b0d633 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -207,9 +207,13 @@ static inline void lowpan_netlink_fini(void)
207static int lowpan_device_event(struct notifier_block *unused, 207static int lowpan_device_event(struct notifier_block *unused,
208 unsigned long event, void *ptr) 208 unsigned long event, void *ptr)
209{ 209{
210 struct net_device *wdev = netdev_notifier_info_to_dev(ptr); 210 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
211 struct wpan_dev *wpan_dev;
211 212
212 if (wdev->type != ARPHRD_IEEE802154) 213 if (ndev->type != ARPHRD_IEEE802154)
214 return NOTIFY_DONE;
215 wpan_dev = ndev->ieee802154_ptr;
216 if (!wpan_dev)
213 return NOTIFY_DONE; 217 return NOTIFY_DONE;
214 218
215 switch (event) { 219 switch (event) {
@@ -218,8 +222,8 @@ static int lowpan_device_event(struct notifier_block *unused,
218 * also delete possible lowpan interfaces which belongs 222 * also delete possible lowpan interfaces which belongs
219 * to the wpan interface. 223 * to the wpan interface.
220 */ 224 */
221 if (wdev->ieee802154_ptr->lowpan_dev) 225 if (wpan_dev->lowpan_dev)
222 lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); 226 lowpan_dellink(wpan_dev->lowpan_dev, NULL);
223 break; 227 break;
224 default: 228 default:
225 return NOTIFY_DONE; 229 return NOTIFY_DONE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a383f299ce24..4e5bc4b2f14e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -53,8 +53,7 @@ static DEFINE_MUTEX(inet_diag_table_mutex);
53static const struct inet_diag_handler *inet_diag_lock_handler(int proto) 53static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
54{ 54{
55 if (!inet_diag_table[proto]) 55 if (!inet_diag_table[proto])
56 request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 56 sock_load_diag_module(AF_INET, proto);
57 NETLINK_SOCK_DIAG, AF_INET, proto);
58 57
59 mutex_lock(&inet_diag_table_mutex); 58 mutex_lock(&inet_diag_table_mutex);
60 if (!inet_diag_table[proto]) 59 if (!inet_diag_table[proto])
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 26a3d0315728..e8ec28999f5c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
119 119
120static bool inet_fragq_should_evict(const struct inet_frag_queue *q) 120static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
121{ 121{
122 if (!hlist_unhashed(&q->list_evictor))
123 return false;
124
122 return q->net->low_thresh == 0 || 125 return q->net->low_thresh == 0 ||
123 frag_mem_limit(q->net) >= q->net->low_thresh; 126 frag_mem_limit(q->net) >= q->net->low_thresh;
124} 127}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e74ee837b300..4ac5728689f5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -635,6 +635,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
635static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) 635static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
636{ 636{
637 rt->rt_pmtu = fnhe->fnhe_pmtu; 637 rt->rt_pmtu = fnhe->fnhe_pmtu;
638 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
638 rt->dst.expires = fnhe->fnhe_expires; 639 rt->dst.expires = fnhe->fnhe_expires;
639 640
640 if (fnhe->fnhe_gw) { 641 if (fnhe->fnhe_gw) {
@@ -645,7 +646,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
645} 646}
646 647
647static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, 648static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
648 u32 pmtu, unsigned long expires) 649 u32 pmtu, bool lock, unsigned long expires)
649{ 650{
650 struct fnhe_hash_bucket *hash; 651 struct fnhe_hash_bucket *hash;
651 struct fib_nh_exception *fnhe; 652 struct fib_nh_exception *fnhe;
@@ -682,8 +683,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
682 fnhe->fnhe_genid = genid; 683 fnhe->fnhe_genid = genid;
683 if (gw) 684 if (gw)
684 fnhe->fnhe_gw = gw; 685 fnhe->fnhe_gw = gw;
685 if (pmtu) 686 if (pmtu) {
686 fnhe->fnhe_pmtu = pmtu; 687 fnhe->fnhe_pmtu = pmtu;
688 fnhe->fnhe_mtu_locked = lock;
689 }
687 fnhe->fnhe_expires = max(1UL, expires); 690 fnhe->fnhe_expires = max(1UL, expires);
688 /* Update all cached dsts too */ 691 /* Update all cached dsts too */
689 rt = rcu_dereference(fnhe->fnhe_rth_input); 692 rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -707,6 +710,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
707 fnhe->fnhe_daddr = daddr; 710 fnhe->fnhe_daddr = daddr;
708 fnhe->fnhe_gw = gw; 711 fnhe->fnhe_gw = gw;
709 fnhe->fnhe_pmtu = pmtu; 712 fnhe->fnhe_pmtu = pmtu;
713 fnhe->fnhe_mtu_locked = lock;
710 fnhe->fnhe_expires = expires; 714 fnhe->fnhe_expires = expires;
711 715
712 /* Exception created; mark the cached routes for the nexthop 716 /* Exception created; mark the cached routes for the nexthop
@@ -788,7 +792,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
788 struct fib_nh *nh = &FIB_RES_NH(res); 792 struct fib_nh *nh = &FIB_RES_NH(res);
789 793
790 update_or_create_fnhe(nh, fl4->daddr, new_gw, 794 update_or_create_fnhe(nh, fl4->daddr, new_gw,
791 0, jiffies + ip_rt_gc_timeout); 795 0, false,
796 jiffies + ip_rt_gc_timeout);
792 } 797 }
793 if (kill_route) 798 if (kill_route)
794 rt->dst.obsolete = DST_OBSOLETE_KILL; 799 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1010,15 +1015,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1010{ 1015{
1011 struct dst_entry *dst = &rt->dst; 1016 struct dst_entry *dst = &rt->dst;
1012 struct fib_result res; 1017 struct fib_result res;
1018 bool lock = false;
1013 1019
1014 if (dst_metric_locked(dst, RTAX_MTU)) 1020 if (ip_mtu_locked(dst))
1015 return; 1021 return;
1016 1022
1017 if (ipv4_mtu(dst) < mtu) 1023 if (ipv4_mtu(dst) < mtu)
1018 return; 1024 return;
1019 1025
1020 if (mtu < ip_rt_min_pmtu) 1026 if (mtu < ip_rt_min_pmtu) {
1027 lock = true;
1021 mtu = ip_rt_min_pmtu; 1028 mtu = ip_rt_min_pmtu;
1029 }
1022 1030
1023 if (rt->rt_pmtu == mtu && 1031 if (rt->rt_pmtu == mtu &&
1024 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 1032 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1028,7 +1036,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1028 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { 1036 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1029 struct fib_nh *nh = &FIB_RES_NH(res); 1037 struct fib_nh *nh = &FIB_RES_NH(res);
1030 1038
1031 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 1039 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
1032 jiffies + ip_rt_mtu_expires); 1040 jiffies + ip_rt_mtu_expires);
1033 } 1041 }
1034 rcu_read_unlock(); 1042 rcu_read_unlock();
@@ -1281,7 +1289,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1281 1289
1282 mtu = READ_ONCE(dst->dev->mtu); 1290 mtu = READ_ONCE(dst->dev->mtu);
1283 1291
1284 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1292 if (unlikely(ip_mtu_locked(dst))) {
1285 if (rt->rt_uses_gateway && mtu > 576) 1293 if (rt->rt_uses_gateway && mtu > 576)
1286 mtu = 576; 1294 mtu = 576;
1287 } 1295 }
@@ -1394,7 +1402,7 @@ struct uncached_list {
1394 1402
1395static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); 1403static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1396 1404
1397static void rt_add_uncached_list(struct rtable *rt) 1405void rt_add_uncached_list(struct rtable *rt)
1398{ 1406{
1399 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); 1407 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1400 1408
@@ -1405,14 +1413,8 @@ static void rt_add_uncached_list(struct rtable *rt)
1405 spin_unlock_bh(&ul->lock); 1413 spin_unlock_bh(&ul->lock);
1406} 1414}
1407 1415
1408static void ipv4_dst_destroy(struct dst_entry *dst) 1416void rt_del_uncached_list(struct rtable *rt)
1409{ 1417{
1410 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1411 struct rtable *rt = (struct rtable *) dst;
1412
1413 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1414 kfree(p);
1415
1416 if (!list_empty(&rt->rt_uncached)) { 1418 if (!list_empty(&rt->rt_uncached)) {
1417 struct uncached_list *ul = rt->rt_uncached_list; 1419 struct uncached_list *ul = rt->rt_uncached_list;
1418 1420
@@ -1422,6 +1424,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1422 } 1424 }
1423} 1425}
1424 1426
1427static void ipv4_dst_destroy(struct dst_entry *dst)
1428{
1429 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1430 struct rtable *rt = (struct rtable *)dst;
1431
1432 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1433 kfree(p);
1434
1435 rt_del_uncached_list(rt);
1436}
1437
1425void rt_flush_dev(struct net_device *dev) 1438void rt_flush_dev(struct net_device *dev)
1426{ 1439{
1427 struct net *net = dev_net(dev); 1440 struct net *net = dev_net(dev);
@@ -1517,6 +1530,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
1517 rt->rt_is_input = 0; 1530 rt->rt_is_input = 0;
1518 rt->rt_iif = 0; 1531 rt->rt_iif = 0;
1519 rt->rt_pmtu = 0; 1532 rt->rt_pmtu = 0;
1533 rt->rt_mtu_locked = 0;
1520 rt->rt_gateway = 0; 1534 rt->rt_gateway = 0;
1521 rt->rt_uses_gateway = 0; 1535 rt->rt_uses_gateway = 0;
1522 INIT_LIST_HEAD(&rt->rt_uncached); 1536 INIT_LIST_HEAD(&rt->rt_uncached);
@@ -2533,6 +2547,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2533 rt->rt_is_input = ort->rt_is_input; 2547 rt->rt_is_input = ort->rt_is_input;
2534 rt->rt_iif = ort->rt_iif; 2548 rt->rt_iif = ort->rt_iif;
2535 rt->rt_pmtu = ort->rt_pmtu; 2549 rt->rt_pmtu = ort->rt_pmtu;
2550 rt->rt_mtu_locked = ort->rt_mtu_locked;
2536 2551
2537 rt->rt_genid = rt_genid_ipv4(net); 2552 rt->rt_genid = rt_genid_ipv4(net);
2538 rt->rt_flags = ort->rt_flags; 2553 rt->rt_flags = ort->rt_flags;
@@ -2635,6 +2650,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2635 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2650 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2636 if (rt->rt_pmtu && expires) 2651 if (rt->rt_pmtu && expires)
2637 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2652 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2653 if (rt->rt_mtu_locked && expires)
2654 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2638 if (rtnetlink_put_metrics(skb, metrics) < 0) 2655 if (rtnetlink_put_metrics(skb, metrics) < 0)
2639 goto nla_put_failure; 2656 goto nla_put_failure;
2640 2657
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d763fae1b574..0c31be306572 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3545,6 +3545,7 @@ int tcp_abort(struct sock *sk, int err)
3545 3545
3546 bh_unlock_sock(sk); 3546 bh_unlock_sock(sk);
3547 local_bh_enable(); 3547 local_bh_enable();
3548 tcp_write_queue_purge(sk);
3548 release_sock(sk); 3549 release_sock(sk);
3549 return 0; 3550 return 0;
3550} 3551}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 71fc60f1b326..f7d944855f8e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk)
34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
35 sk->sk_error_report(sk); 35 sk->sk_error_report(sk);
36 36
37 tcp_write_queue_purge(sk);
37 tcp_done(sk); 38 tcp_done(sk);
38 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); 39 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
39} 40}
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 63faeee989a9..2a9764bd1719 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0c752dc3f93b..6c76a757fa4a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -100,7 +100,9 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
100 xdst->u.rt.rt_gateway = rt->rt_gateway; 100 xdst->u.rt.rt_gateway = rt->rt_gateway;
101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; 101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
102 xdst->u.rt.rt_pmtu = rt->rt_pmtu; 102 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
103 xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
103 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); 104 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
105 rt_add_uncached_list(&xdst->u.rt);
104 106
105 return 0; 107 return 0;
106} 108}
@@ -240,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
240 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 242 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
241 243
242 dst_destroy_metrics_generic(dst); 244 dst_destroy_metrics_generic(dst);
243 245 if (xdst->u.rt.rt_uncached_list)
246 rt_del_uncached_list(&xdst->u.rt);
244 xfrm_dst_destroy(xdst); 247 xfrm_dst_destroy(xdst);
245} 248}
246 249
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b27333d7b099..88bc2ef7c7a8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
147 struct inet_sock *inet = inet_sk(sk); 147 struct inet_sock *inet = inet_sk(sk);
148 struct ipv6_pinfo *np = inet6_sk(sk); 148 struct ipv6_pinfo *np = inet6_sk(sk);
149 struct in6_addr *daddr; 149 struct in6_addr *daddr, old_daddr;
150 __be32 fl6_flowlabel = 0;
151 __be32 old_fl6_flowlabel;
152 __be16 old_dport;
150 int addr_type; 153 int addr_type;
151 int err; 154 int err;
152 __be32 fl6_flowlabel = 0;
153 155
154 if (usin->sin6_family == AF_INET) { 156 if (usin->sin6_family == AF_INET) {
155 if (__ipv6_only_sock(sk)) 157 if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
238 } 240 }
239 } 241 }
240 242
243 /* save the current peer information before updating it */
244 old_daddr = sk->sk_v6_daddr;
245 old_fl6_flowlabel = np->flow_label;
246 old_dport = inet->inet_dport;
247
241 sk->sk_v6_daddr = *daddr; 248 sk->sk_v6_daddr = *daddr;
242 np->flow_label = fl6_flowlabel; 249 np->flow_label = fl6_flowlabel;
243
244 inet->inet_dport = usin->sin6_port; 250 inet->inet_dport = usin->sin6_port;
245 251
246 /* 252 /*
@@ -250,11 +256,12 @@ ipv4_connected:
250 256
251 err = ip6_datagram_dst_update(sk, true); 257 err = ip6_datagram_dst_update(sk, true);
252 if (err) { 258 if (err) {
253 /* Reset daddr and dport so that udp_v6_early_demux() 259 /* Restore the socket peer info, to keep it consistent with
254 * fails to find this socket 260 * the old socket state
255 */ 261 */
256 memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 262 sk->sk_v6_daddr = old_daddr;
257 inet->inet_dport = 0; 263 np->flow_label = old_fl6_flowlabel;
264 inet->inet_dport = old_dport;
258 goto out; 265 goto out;
259 } 266 }
260 267
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6adbcf40cf8c..3a98c694da5f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
126 struct ip6_tnl *t, *cand = NULL; 126 struct ip6_tnl *t, *cand = NULL;
127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
128 int dev_type = (gre_proto == htons(ETH_P_TEB) || 128 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
129 gre_proto == htons(ETH_P_ERSPAN)) ? 129 gre_proto == htons(ETH_P_ERSPAN) ||
130 gre_proto == htons(ETH_P_ERSPAN2)) ?
130 ARPHRD_ETHER : ARPHRD_IP6GRE; 131 ARPHRD_ETHER : ARPHRD_IP6GRE;
131 int score, cand_score = 4; 132 int score, cand_score = 4;
132 133
@@ -905,6 +906,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
905 truncate = true; 906 truncate = true;
906 } 907 }
907 908
909 if (skb_cow_head(skb, dev->needed_headroom))
910 goto tx_err;
911
908 t->parms.o_flags &= ~TUNNEL_KEY; 912 t->parms.o_flags &= ~TUNNEL_KEY;
909 IPCB(skb)->flags = 0; 913 IPCB(skb)->flags = 0;
910 914
@@ -947,6 +951,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
947 md->u.md2.dir, 951 md->u.md2.dir,
948 get_hwid(&md->u.md2), 952 get_hwid(&md->u.md2),
949 truncate, false); 953 truncate, false);
954 } else {
955 goto tx_err;
950 } 956 }
951 } else { 957 } else {
952 switch (skb->protocol) { 958 switch (skb->protocol) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 10024eb0c521..d1d0b2fa7a07 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1554 *(opt++) = (rd_len >> 3); 1554 *(opt++) = (rd_len >> 3);
1555 opt += 6; 1555 opt += 6;
1556 1556
1557 memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); 1557 skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
1558 rd_len - 8);
1558} 1559}
1559 1560
1560void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1561void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 939d122e71b4..a2ed9fdd58d4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
128 128
129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); 129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
130 130
131static void rt6_uncached_list_add(struct rt6_info *rt) 131void rt6_uncached_list_add(struct rt6_info *rt)
132{ 132{
133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); 133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
134 134
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
139 spin_unlock_bh(&ul->lock); 139 spin_unlock_bh(&ul->lock);
140} 140}
141 141
142static void rt6_uncached_list_del(struct rt6_info *rt) 142void rt6_uncached_list_del(struct rt6_info *rt)
143{ 143{
144 if (!list_empty(&rt->rt6i_uncached)) { 144 if (!list_empty(&rt->rt6i_uncached)) {
145 struct uncached_list *ul = rt->rt6i_uncached_list; 145 struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1514 } 1514 }
1515} 1515}
1516 1516
1517static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) 1517static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1518 struct rt6_info *rt, int mtu)
1519{
1520 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1521 * lowest MTU in the path: always allow updating the route PMTU to
1522 * reflect PMTU decreases.
1523 *
1524 * If the new MTU is higher, and the route PMTU is equal to the local
1525 * MTU, this means the old MTU is the lowest in the path, so allow
1526 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1527 * handle this.
1528 */
1529
1530 if (dst_mtu(&rt->dst) >= mtu)
1531 return true;
1532
1533 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1534 return true;
1535
1536 return false;
1537}
1538
1539static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1540 struct rt6_info *rt, int mtu)
1518{ 1541{
1519 struct rt6_exception_bucket *bucket; 1542 struct rt6_exception_bucket *bucket;
1520 struct rt6_exception *rt6_ex; 1543 struct rt6_exception *rt6_ex;
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1523 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1546 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1524 lockdep_is_held(&rt6_exception_lock)); 1547 lockdep_is_held(&rt6_exception_lock));
1525 1548
1526 if (bucket) { 1549 if (!bucket)
1527 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1550 return;
1528 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1551
1529 struct rt6_info *entry = rt6_ex->rt6i; 1552 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1530 /* For RTF_CACHE with rt6i_pmtu == 0 1553 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1531 * (i.e. a redirected route), 1554 struct rt6_info *entry = rt6_ex->rt6i;
1532 * the metrics of its rt->dst.from has already 1555
1533 * been updated. 1556 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1534 */ 1557 * route), the metrics of its rt->dst.from have already
1535 if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) 1558 * been updated.
1536 entry->rt6i_pmtu = mtu; 1559 */
1537 } 1560 if (entry->rt6i_pmtu &&
1538 bucket++; 1561 rt6_mtu_change_route_allowed(idev, entry, mtu))
1562 entry->rt6i_pmtu = mtu;
1539 } 1563 }
1564 bucket++;
1540 } 1565 }
1541} 1566}
1542 1567
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3899 Since RFC 1981 doesn't include administrative MTU increase 3924 Since RFC 1981 doesn't include administrative MTU increase
3900 update PMTU increase is a MUST. (i.e. jumbo frame) 3925 update PMTU increase is a MUST. (i.e. jumbo frame)
3901 */ 3926 */
3902 /*
3903 If new MTU is less than route PMTU, this new MTU will be the
3904 lowest MTU in the path, update the route PMTU to reflect PMTU
3905 decreases; if new MTU is greater than route PMTU, and the
3906 old MTU is the lowest MTU in the path, update the route PMTU
3907 to reflect the increase. In this case if the other nodes' MTU
3908 also have the lowest MTU, TOO BIG MESSAGE will be lead to
3909 PMTU discovery.
3910 */
3911 if (rt->dst.dev == arg->dev && 3927 if (rt->dst.dev == arg->dev &&
3912 dst_metric_raw(&rt->dst, RTAX_MTU) &&
3913 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 3928 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3914 spin_lock_bh(&rt6_exception_lock); 3929 spin_lock_bh(&rt6_exception_lock);
3915 if (dst_mtu(&rt->dst) >= arg->mtu || 3930 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3916 (dst_mtu(&rt->dst) < arg->mtu && 3931 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3917 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
3918 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); 3932 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3919 } 3933 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3920 rt6_exceptions_update_pmtu(rt, arg->mtu);
3921 spin_unlock_bh(&rt6_exception_lock); 3934 spin_unlock_bh(&rt6_exception_lock);
3922 } 3935 }
3923 return 0; 3936 return 0;
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4189 r_cfg.fc_encap_type = nla_get_u16(nla); 4202 r_cfg.fc_encap_type = nla_get_u16(nla);
4190 } 4203 }
4191 4204
4205 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4192 rt = ip6_route_info_create(&r_cfg, extack); 4206 rt = ip6_route_info_create(&r_cfg, extack);
4193 if (IS_ERR(rt)) { 4207 if (IS_ERR(rt)) {
4194 err = PTR_ERR(rt); 4208 err = PTR_ERR(rt);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bd6cc688bd19..7a78dcfda68a 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ 93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) 94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
95{ 95{
96 struct net *net = dev_net(skb_dst(skb)->dev); 96 struct dst_entry *dst = skb_dst(skb);
97 struct net *net = dev_net(dst->dev);
97 struct ipv6hdr *hdr, *inner_hdr; 98 struct ipv6hdr *hdr, *inner_hdr;
98 struct ipv6_sr_hdr *isrh; 99 struct ipv6_sr_hdr *isrh;
99 int hdrlen, tot_len, err; 100 int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
134 isrh->nexthdr = proto; 135 isrh->nexthdr = proto;
135 136
136 hdr->daddr = isrh->segments[isrh->first_segment]; 137 hdr->daddr = isrh->segments[isrh->first_segment];
137 set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr); 138 set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
138 139
139#ifdef CONFIG_IPV6_SEG6_HMAC 140#ifdef CONFIG_IPV6_SEG6_HMAC
140 if (sr_has_hmac(isrh)) { 141 if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
418 419
419 slwt = seg6_lwt_lwtunnel(newts); 420 slwt = seg6_lwt_lwtunnel(newts);
420 421
421 err = dst_cache_init(&slwt->cache, GFP_KERNEL); 422 err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
422 if (err) { 423 if (err) {
423 kfree(newts); 424 kfree(newts);
424 return err; 425 return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7fea..de1b0b8c53b0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 88cd0c90fa81..cbb270bd81b0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; 113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst; 114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
115 xdst->u.rt6.rt6i_src = rt->rt6i_src; 115 xdst->u.rt6.rt6i_src = rt->rt6i_src;
116 INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
117 rt6_uncached_list_add(&xdst->u.rt6);
118 atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
116 119
117 return 0; 120 return 0;
118} 121}
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
244 if (likely(xdst->u.rt6.rt6i_idev)) 247 if (likely(xdst->u.rt6.rt6i_idev))
245 in6_dev_put(xdst->u.rt6.rt6i_idev); 248 in6_dev_put(xdst->u.rt6.rt6i_idev);
246 dst_destroy_metrics_generic(dst); 249 dst_destroy_metrics_generic(dst);
250 if (xdst->u.rt6.rt6i_uncached_list)
251 rt6_uncached_list_del(&xdst->u.rt6);
247 xfrm_dst_destroy(xdst); 252 xfrm_dst_destroy(xdst);
248} 253}
249 254
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 81ce15ffb878..893a022f9620 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2432,9 +2432,11 @@ static int afiucv_iucv_init(void)
2432 af_iucv_dev->driver = &af_iucv_driver; 2432 af_iucv_dev->driver = &af_iucv_driver;
2433 err = device_register(af_iucv_dev); 2433 err = device_register(af_iucv_dev);
2434 if (err) 2434 if (err)
2435 goto out_driver; 2435 goto out_iucv_dev;
2436 return 0; 2436 return 0;
2437 2437
2438out_iucv_dev:
2439 put_device(af_iucv_dev);
2438out_driver: 2440out_driver:
2439 driver_unregister(&af_iucv_driver); 2441 driver_unregister(&af_iucv_driver);
2440out_iucv: 2442out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a6cd0712e063..516cfad71b85 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1381 .parse_msg = kcm_parse_func_strparser, 1381 .parse_msg = kcm_parse_func_strparser,
1382 .read_sock_done = kcm_read_sock_done, 1382 .read_sock_done = kcm_read_sock_done,
1383 }; 1383 };
1384 int err; 1384 int err = 0;
1385 1385
1386 csk = csock->sk; 1386 csk = csock->sk;
1387 if (!csk) 1387 if (!csk)
1388 return -EINVAL; 1388 return -EINVAL;
1389 1389
1390 lock_sock(csk);
1391
1390 /* Only allow TCP sockets to be attached for now */ 1392 /* Only allow TCP sockets to be attached for now */
1391 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || 1393 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1392 csk->sk_protocol != IPPROTO_TCP) 1394 csk->sk_protocol != IPPROTO_TCP) {
1393 return -EOPNOTSUPP; 1395 err = -EOPNOTSUPP;
1396 goto out;
1397 }
1394 1398
1395 /* Don't allow listeners or closed sockets */ 1399 /* Don't allow listeners or closed sockets */
1396 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) 1400 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1397 return -EOPNOTSUPP; 1401 err = -EOPNOTSUPP;
1402 goto out;
1403 }
1398 1404
1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); 1405 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1400 if (!psock) 1406 if (!psock) {
1401 return -ENOMEM; 1407 err = -ENOMEM;
1408 goto out;
1409 }
1402 1410
1403 psock->mux = mux; 1411 psock->mux = mux;
1404 psock->sk = csk; 1412 psock->sk = csk;
@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1407 err = strp_init(&psock->strp, csk, &cb); 1415 err = strp_init(&psock->strp, csk, &cb);
1408 if (err) { 1416 if (err) {
1409 kmem_cache_free(kcm_psockp, psock); 1417 kmem_cache_free(kcm_psockp, psock);
1410 return err; 1418 goto out;
1411 } 1419 }
1412 1420
1413 write_lock_bh(&csk->sk_callback_lock); 1421 write_lock_bh(&csk->sk_callback_lock);
@@ -1420,7 +1428,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1420 strp_stop(&psock->strp); 1428 strp_stop(&psock->strp);
1421 strp_done(&psock->strp); 1429 strp_done(&psock->strp);
1422 kmem_cache_free(kcm_psockp, psock); 1430 kmem_cache_free(kcm_psockp, psock);
1423 return -EALREADY; 1431 err = -EALREADY;
1432 goto out;
1424 } 1433 }
1425 1434
1426 psock->save_data_ready = csk->sk_data_ready; 1435 psock->save_data_ready = csk->sk_data_ready;
@@ -1456,7 +1465,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1456 /* Schedule RX work in case there are already bytes queued */ 1465 /* Schedule RX work in case there are already bytes queued */
1457 strp_check_rcv(&psock->strp); 1466 strp_check_rcv(&psock->strp);
1458 1467
1459 return 0; 1468out:
1469 release_sock(csk);
1470
1471 return err;
1460} 1472}
1461 1473
1462static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) 1474static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1508,6 +1520,7 @@ static void kcm_unattach(struct kcm_psock *psock)
1508 1520
1509 if (WARN_ON(psock->rx_kcm)) { 1521 if (WARN_ON(psock->rx_kcm)) {
1510 write_unlock_bh(&csk->sk_callback_lock); 1522 write_unlock_bh(&csk->sk_callback_lock);
1523 release_sock(csk);
1511 return; 1524 return;
1512 } 1525 }
1513 1526
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 189a12a5e4ac..b86868da50d4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,13 @@ struct l2tp_net {
111 spinlock_t l2tp_session_hlist_lock; 111 spinlock_t l2tp_session_hlist_lock;
112}; 112};
113 113
114#if IS_ENABLED(CONFIG_IPV6)
115static bool l2tp_sk_is_v6(struct sock *sk)
116{
117 return sk->sk_family == PF_INET6 &&
118 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
119}
120#endif
114 121
115static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) 122static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
116{ 123{
@@ -1049,7 +1056,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1049 /* Queue the packet to IP for output */ 1056 /* Queue the packet to IP for output */
1050 skb->ignore_df = 1; 1057 skb->ignore_df = 1;
1051#if IS_ENABLED(CONFIG_IPV6) 1058#if IS_ENABLED(CONFIG_IPV6)
1052 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1059 if (l2tp_sk_is_v6(tunnel->sock))
1053 error = inet6_csk_xmit(tunnel->sock, skb, NULL); 1060 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1054 else 1061 else
1055#endif 1062#endif
@@ -1112,6 +1119,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1112 goto out_unlock; 1119 goto out_unlock;
1113 } 1120 }
1114 1121
1122 /* The user-space may change the connection status for the user-space
1123 * provided socket at run time: we must check it under the socket lock
1124 */
1125 if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1126 kfree_skb(skb);
1127 ret = NET_XMIT_DROP;
1128 goto out_unlock;
1129 }
1130
1115 /* Get routing info from the tunnel socket */ 1131 /* Get routing info from the tunnel socket */
1116 skb_dst_drop(skb); 1132 skb_dst_drop(skb);
1117 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); 1133 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
@@ -1131,7 +1147,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1131 1147
1132 /* Calculate UDP checksum if configured to do so */ 1148 /* Calculate UDP checksum if configured to do so */
1133#if IS_ENABLED(CONFIG_IPV6) 1149#if IS_ENABLED(CONFIG_IPV6)
1134 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1150 if (l2tp_sk_is_v6(sk))
1135 udp6_set_csum(udp_get_no_check6_tx(sk), 1151 udp6_set_csum(udp_get_no_check6_tx(sk),
1136 skb, &inet6_sk(sk)->saddr, 1152 skb, &inet6_sk(sk)->saddr,
1137 &sk->sk_v6_daddr, udp_len); 1153 &sk->sk_v6_daddr, udp_len);
@@ -1457,9 +1473,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1457 encap = cfg->encap; 1473 encap = cfg->encap;
1458 1474
1459 /* Quick sanity checks */ 1475 /* Quick sanity checks */
1476 err = -EPROTONOSUPPORT;
1477 if (sk->sk_type != SOCK_DGRAM) {
1478 pr_debug("tunl %hu: fd %d wrong socket type\n",
1479 tunnel_id, fd);
1480 goto err;
1481 }
1460 switch (encap) { 1482 switch (encap) {
1461 case L2TP_ENCAPTYPE_UDP: 1483 case L2TP_ENCAPTYPE_UDP:
1462 err = -EPROTONOSUPPORT;
1463 if (sk->sk_protocol != IPPROTO_UDP) { 1484 if (sk->sk_protocol != IPPROTO_UDP) {
1464 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1485 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1465 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); 1486 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1467,7 +1488,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1467 } 1488 }
1468 break; 1489 break;
1469 case L2TP_ENCAPTYPE_IP: 1490 case L2TP_ENCAPTYPE_IP:
1470 err = -EPROTONOSUPPORT;
1471 if (sk->sk_protocol != IPPROTO_L2TP) { 1491 if (sk->sk_protocol != IPPROTO_L2TP) {
1472 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1492 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1473 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); 1493 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
@@ -1507,24 +1527,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1507 if (cfg != NULL) 1527 if (cfg != NULL)
1508 tunnel->debug = cfg->debug; 1528 tunnel->debug = cfg->debug;
1509 1529
1510#if IS_ENABLED(CONFIG_IPV6)
1511 if (sk->sk_family == PF_INET6) {
1512 struct ipv6_pinfo *np = inet6_sk(sk);
1513
1514 if (ipv6_addr_v4mapped(&np->saddr) &&
1515 ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1516 struct inet_sock *inet = inet_sk(sk);
1517
1518 tunnel->v4mapped = true;
1519 inet->inet_saddr = np->saddr.s6_addr32[3];
1520 inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1521 inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1522 } else {
1523 tunnel->v4mapped = false;
1524 }
1525 }
1526#endif
1527
1528 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1530 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1529 tunnel->encap = encap; 1531 tunnel->encap = encap;
1530 if (encap == L2TP_ENCAPTYPE_UDP) { 1532 if (encap == L2TP_ENCAPTYPE_UDP) {
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a1aa9550f04e..2718d0b284d0 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,9 +188,6 @@ struct l2tp_tunnel {
188 struct sock *sock; /* Parent socket */ 188 struct sock *sock; /* Parent socket */
189 int fd; /* Parent fd, if tunnel socket 189 int fd; /* Parent fd, if tunnel socket
190 * was created by userspace */ 190 * was created by userspace */
191#if IS_ENABLED(CONFIG_IPV6)
192 bool v4mapped;
193#endif
194 191
195 struct work_struct del_work; 192 struct work_struct del_work;
196 193
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a75653affbf7..b5adf3625d16 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -213,6 +213,7 @@ static const char *hw_flag_names[] = {
213 FLAG(SUPPORTS_TX_FRAG), 213 FLAG(SUPPORTS_TX_FRAG),
214 FLAG(SUPPORTS_TDLS_BUFFER_STA), 214 FLAG(SUPPORTS_TDLS_BUFFER_STA),
215 FLAG(DEAUTH_NEED_MGD_TX_PREP), 215 FLAG(DEAUTH_NEED_MGD_TX_PREP),
216 FLAG(DOESNT_SUPPORT_QOS_NDP),
216#undef FLAG 217#undef FLAG
217}; 218};
218 219
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0024eff9bb84..fe4aefb06d9f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -897,7 +897,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
897 struct ieee80211_hdr_3addr *nullfunc; 897 struct ieee80211_hdr_3addr *nullfunc;
898 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 898 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
899 899
900 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); 900 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
901 !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
901 if (!skb) 902 if (!skb)
902 return; 903 return;
903 904
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8e19c86d1aa6..fd13d28e4ca7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5423,6 +5423,7 @@ err:
5423static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) 5423static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5424{ 5424{
5425 cancel_delayed_work_sync(&flowtable->data.gc_work); 5425 cancel_delayed_work_sync(&flowtable->data.gc_work);
5426 kfree(flowtable->ops);
5426 kfree(flowtable->name); 5427 kfree(flowtable->name);
5427 flowtable->data.type->free(&flowtable->data); 5428 flowtable->data.type->free(&flowtable->data);
5428 rhashtable_destroy(&flowtable->data.rhashtable); 5429 rhashtable_destroy(&flowtable->data.rhashtable);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3f1624ee056f..d40591fe1b2f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -674,7 +674,7 @@ static const struct nft_set_ops *
674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, 674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
675 u32 flags) 675 u32 flags)
676{ 676{
677 if (desc->size) { 677 if (desc->size && !(flags & NFT_SET_TIMEOUT)) {
678 switch (desc->klen) { 678 switch (desc->klen) {
679 case 4: 679 case 4:
680 return &nft_hash_fast_ops; 680 return &nft_hash_fast_ops;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d9deebe599ec..6de1f6a4cb80 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
423 return buf; 423 return buf;
424} 424}
425 425
426/**
427 * xt_check_proc_name - check that name is suitable for /proc file creation
428 *
429 * @name: file name candidate
430 * @size: length of buffer
431 *
432 * some x_tables modules wish to create a file in /proc.
433 * This function makes sure that the name is suitable for this
434 * purpose, it checks that name is NUL terminated and isn't a 'special'
435 * name, like "..".
436 *
437 * returns negative number on error or 0 if name is useable.
438 */
439int xt_check_proc_name(const char *name, unsigned int size)
440{
441 if (name[0] == '\0')
442 return -EINVAL;
443
444 if (strnlen(name, size) == size)
445 return -ENAMETOOLONG;
446
447 if (strcmp(name, ".") == 0 ||
448 strcmp(name, "..") == 0 ||
449 strchr(name, '/'))
450 return -EINVAL;
451
452 return 0;
453}
454EXPORT_SYMBOL(xt_check_proc_name);
455
426int xt_check_match(struct xt_mtchk_param *par, 456int xt_check_match(struct xt_mtchk_param *par,
427 unsigned int size, u_int8_t proto, bool inv_proto) 457 unsigned int size, u_int8_t proto, bool inv_proto)
428{ 458{
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index db2fe0911740..ef65b7a9173e 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -917,8 +917,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
917 struct hashlimit_cfg3 cfg = {}; 917 struct hashlimit_cfg3 cfg = {};
918 int ret; 918 int ret;
919 919
920 if (info->name[sizeof(info->name) - 1] != '\0') 920 ret = xt_check_proc_name(info->name, sizeof(info->name));
921 return -EINVAL; 921 if (ret)
922 return ret;
922 923
923 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 924 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
924 925
@@ -935,8 +936,9 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
935 struct hashlimit_cfg3 cfg = {}; 936 struct hashlimit_cfg3 cfg = {};
936 int ret; 937 int ret;
937 938
938 if (info->name[sizeof(info->name) - 1] != '\0') 939 ret = xt_check_proc_name(info->name, sizeof(info->name));
939 return -EINVAL; 940 if (ret)
941 return ret;
940 942
941 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 943 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
942 944
@@ -950,9 +952,11 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
950static int hashlimit_mt_check(const struct xt_mtchk_param *par) 952static int hashlimit_mt_check(const struct xt_mtchk_param *par)
951{ 953{
952 struct xt_hashlimit_mtinfo3 *info = par->matchinfo; 954 struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
955 int ret;
953 956
954 if (info->name[sizeof(info->name) - 1] != '\0') 957 ret = xt_check_proc_name(info->name, sizeof(info->name));
955 return -EINVAL; 958 if (ret)
959 return ret;
956 960
957 return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg, 961 return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
958 info->name, 3); 962 info->name, 3);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 19efdb757944..486dd24da78b 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); 361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
362 return -EINVAL; 362 return -EINVAL;
363 } 363 }
364 if (info->name[0] == '\0' || 364 ret = xt_check_proc_name(info->name, sizeof(info->name));
365 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 365 if (ret)
366 return -EINVAL; 366 return ret;
367 367
368 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) 368 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
369 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; 369 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a6f63a5faee7..af51b8c0a2cb 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1107,7 +1107,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1107 if (!err) 1107 if (!err)
1108 delivered = true; 1108 delivered = true;
1109 else if (err != -ESRCH) 1109 else if (err != -ESRCH)
1110 goto error; 1110 return err;
1111 return delivered ? 0 : -ESRCH; 1111 return delivered ? 0 : -ESRCH;
1112 error: 1112 error:
1113 kfree_skb(skb); 1113 kfree_skb(skb);
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 04b94281a30b..b891a91577f8 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
242 242
243 band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]); 243 band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
244 band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]); 244 band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
245 if (band->rate == 0) {
246 err = -EINVAL;
247 goto exit_free_meter;
248 }
249
245 band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]); 250 band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
246 /* Figure out max delta_t that is enough to fill any bucket. 251 /* Figure out max delta_t that is enough to fill any bucket.
247 * Keep max_delta_t size to the bucket units: 252 * Keep max_delta_t size to the bucket units:
248 * pkts => 1/1000 packets, kilobits => bits. 253 * pkts => 1/1000 packets, kilobits => bits.
254 *
255 * Start with a full bucket.
249 */ 256 */
250 band_max_delta_t = (band->burst_size + band->rate) * 1000; 257 band->bucket = (band->burst_size + band->rate) * 1000;
251 /* Start with a full bucket. */ 258 band_max_delta_t = band->bucket / band->rate;
252 band->bucket = band_max_delta_t;
253 if (band_max_delta_t > meter->max_delta_t) 259 if (band_max_delta_t > meter->max_delta_t)
254 meter->max_delta_t = band_max_delta_t; 260 meter->max_delta_t = band_max_delta_t;
255 band++; 261 band++;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index da72e0cf2b1f..5cb9b268e8ff 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
352 return res; 352 return res;
353out: 353out:
354 if (res == ACT_P_CREATED) 354 if (res == ACT_P_CREATED)
355 tcf_idr_cleanup(*act, est); 355 tcf_idr_release(*act, bind);
356 356
357 return ret; 357 return ret;
358} 358}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 1fb1f1f6a555..a527e287c086 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
350{ 350{
351 struct sctphdr *sctph; 351 struct sctphdr *sctph;
352 352
353 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) 353 if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
354 return 1; 354 return 1;
355 355
356 sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); 356 sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a)
626 struct tcf_csum_params *params; 626 struct tcf_csum_params *params;
627 627
628 params = rcu_dereference_protected(p->params, 1); 628 params = rcu_dereference_protected(p->params, 1);
629 kfree_rcu(params, rcu); 629 if (params)
630 kfree_rcu(params, rcu);
630} 631}
631 632
632static int tcf_csum_walker(struct net *net, struct sk_buff *skb, 633static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 10866717f88e..b5e8565b89c7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
80static void tcf_ipt_release(struct tc_action *a) 80static void tcf_ipt_release(struct tc_action *a)
81{ 81{
82 struct tcf_ipt *ipt = to_ipt(a); 82 struct tcf_ipt *ipt = to_ipt(a);
83 ipt_destroy_target(ipt->tcfi_t); 83
84 if (ipt->tcfi_t) {
85 ipt_destroy_target(ipt->tcfi_t);
86 kfree(ipt->tcfi_t);
87 }
84 kfree(ipt->tcfi_tname); 88 kfree(ipt->tcfi_tname);
85 kfree(ipt->tcfi_t);
86} 89}
87 90
88static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { 91static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -187,7 +190,7 @@ err2:
187 kfree(tname); 190 kfree(tname);
188err1: 191err1:
189 if (ret == ACT_P_CREATED) 192 if (ret == ACT_P_CREATED)
190 tcf_idr_cleanup(*a, est); 193 tcf_idr_release(*a, bind);
191 return err; 194 return err;
192} 195}
193 196
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 5e8cc8f63acd..f392ccaaa0d8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
176 p = to_pedit(*a); 176 p = to_pedit(*a);
177 keys = kmalloc(ksize, GFP_KERNEL); 177 keys = kmalloc(ksize, GFP_KERNEL);
178 if (keys == NULL) { 178 if (keys == NULL) {
179 tcf_idr_cleanup(*a, est); 179 tcf_idr_release(*a, bind);
180 kfree(keys_ex); 180 kfree(keys_ex);
181 return -ENOMEM; 181 return -ENOMEM;
182 } 182 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 51fe4fe343f7..7081ec75e696 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,7 +196,7 @@ failure:
196 qdisc_put_rtab(P_tab); 196 qdisc_put_rtab(P_tab);
197 qdisc_put_rtab(R_tab); 197 qdisc_put_rtab(R_tab);
198 if (ret == ACT_P_CREATED) 198 if (ret == ACT_P_CREATED)
199 tcf_idr_cleanup(*a, est); 199 tcf_idr_release(*a, bind);
200 return err; 200 return err;
201} 201}
202 202
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 238dfd27e995..3a89f98f17e6 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
103 103
104 psample_group = rtnl_dereference(s->psample_group); 104 psample_group = rtnl_dereference(s->psample_group);
105 RCU_INIT_POINTER(s->psample_group, NULL); 105 RCU_INIT_POINTER(s->psample_group, NULL);
106 psample_group_put(psample_group); 106 if (psample_group)
107 psample_group_put(psample_group);
107} 108}
108 109
109static bool tcf_sample_dev_ok_push(struct net_device *dev) 110static bool tcf_sample_dev_ok_push(struct net_device *dev)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 91816d73f3f3..e84768ae610a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
121 d = to_defact(*a); 121 d = to_defact(*a);
122 ret = alloc_defdata(d, defdata); 122 ret = alloc_defdata(d, defdata);
123 if (ret < 0) { 123 if (ret < 0) {
124 tcf_idr_cleanup(*a, est); 124 tcf_idr_release(*a, bind);
125 return ret; 125 return ret;
126 } 126 }
127 d->tcf_action = parm->action; 127 d->tcf_action = parm->action;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index febec75f4f7a..142a996ac776 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
152 ASSERT_RTNL(); 152 ASSERT_RTNL();
153 p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); 153 p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
154 if (unlikely(!p)) { 154 if (unlikely(!p)) {
155 if (ovr) 155 if (ret == ACT_P_CREATED)
156 tcf_idr_release(*a, bind); 156 tcf_idr_release(*a, bind);
157 return -ENOMEM; 157 return -ENOMEM;
158 } 158 }
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a)
190 struct tcf_skbmod_params *p; 190 struct tcf_skbmod_params *p;
191 191
192 p = rcu_dereference_protected(d->skbmod_p, 1); 192 p = rcu_dereference_protected(d->skbmod_p, 1);
193 kfree_rcu(p, rcu); 193 if (p)
194 kfree_rcu(p, rcu);
194} 195}
195 196
196static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, 197static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 9169b7e78ada..a1c8dd406a04 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
153 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; 153 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
154 break; 154 break;
155 default: 155 default:
156 ret = -EINVAL;
156 goto err_out; 157 goto err_out;
157 } 158 }
158 159
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a)
207 struct tcf_tunnel_key_params *params; 208 struct tcf_tunnel_key_params *params;
208 209
209 params = rcu_dereference_protected(t->params, 1); 210 params = rcu_dereference_protected(t->params, 1);
211 if (params) {
212 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
213 dst_release(&params->tcft_enc_metadata->dst);
210 214
211 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) 215 kfree_rcu(params, rcu);
212 dst_release(&params->tcft_enc_metadata->dst); 216 }
213
214 kfree_rcu(params, rcu);
215} 217}
216 218
217static int tunnel_key_dump_addresses(struct sk_buff *skb, 219static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index c2ee7fd51cc9..4595391c2129 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
195 ASSERT_RTNL(); 195 ASSERT_RTNL();
196 p = kzalloc(sizeof(*p), GFP_KERNEL); 196 p = kzalloc(sizeof(*p), GFP_KERNEL);
197 if (!p) { 197 if (!p) {
198 if (ovr) 198 if (ret == ACT_P_CREATED)
199 tcf_idr_release(*a, bind); 199 tcf_idr_release(*a, bind);
200 return -ENOMEM; 200 return -ENOMEM;
201 } 201 }
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a)
225 struct tcf_vlan_params *p; 225 struct tcf_vlan_params *p;
226 226
227 p = rcu_dereference_protected(v->vlan_p, 1); 227 p = rcu_dereference_protected(v->vlan_p, 1);
228 kfree_rcu(p, rcu); 228 if (p)
229 kfree_rcu(p, rcu);
229} 230}
230 231
231static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, 232static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 190570f21b20..7e3fbe9cc936 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
106 106
107 __skb_queue_tail(&q->skb_bad_txq, skb); 107 __skb_queue_tail(&q->skb_bad_txq, skb);
108 108
109 if (qdisc_is_percpu_stats(q)) {
110 qdisc_qstats_cpu_backlog_inc(q, skb);
111 qdisc_qstats_cpu_qlen_inc(q);
112 } else {
113 qdisc_qstats_backlog_inc(q, skb);
114 q->q.qlen++;
115 }
116
109 if (lock) 117 if (lock)
110 spin_unlock(lock); 118 spin_unlock(lock);
111} 119}
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
196 break; 204 break;
197 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { 205 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
198 qdisc_enqueue_skb_bad_txq(q, nskb); 206 qdisc_enqueue_skb_bad_txq(q, nskb);
199
200 if (qdisc_is_percpu_stats(q)) {
201 qdisc_qstats_cpu_backlog_inc(q, nskb);
202 qdisc_qstats_cpu_qlen_inc(q);
203 } else {
204 qdisc_qstats_backlog_inc(q, nskb);
205 q->q.qlen++;
206 }
207 break; 207 break;
208 } 208 }
209 skb->next = nskb; 209 skb->next = nskb;
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
628 int band = prio2band[skb->priority & TC_PRIO_MAX]; 628 int band = prio2band[skb->priority & TC_PRIO_MAX];
629 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 629 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
630 struct skb_array *q = band2list(priv, band); 630 struct skb_array *q = band2list(priv, band);
631 unsigned int pkt_len = qdisc_pkt_len(skb);
631 int err; 632 int err;
632 633
633 err = skb_array_produce(q, skb); 634 err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
636 return qdisc_drop_cpu(skb, qdisc, to_free); 637 return qdisc_drop_cpu(skb, qdisc, to_free);
637 638
638 qdisc_qstats_cpu_qlen_inc(qdisc); 639 qdisc_qstats_cpu_qlen_inc(qdisc);
639 qdisc_qstats_cpu_backlog_inc(qdisc, skb); 640 /* Note: skb can not be used after skb_array_produce(),
641 * so we better not use qdisc_qstats_cpu_backlog_inc()
642 */
643 this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
640 return NET_XMIT_SUCCESS; 644 return NET_XMIT_SUCCESS;
641} 645}
642 646
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7c179addebcd..7d6801fc5340 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
509 } 509 }
510 510
511 if (unlikely(sch->q.qlen >= sch->limit)) 511 if (unlikely(sch->q.qlen >= sch->limit))
512 return qdisc_drop(skb, sch, to_free); 512 return qdisc_drop_all(skb, sch, to_free);
513 513
514 qdisc_qstats_backlog_inc(sch, skb); 514 qdisc_qstats_backlog_inc(sch, skb);
515 515
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0247cc432e02..b381d78548ac 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -106,6 +106,7 @@ int sctp_rcv(struct sk_buff *skb)
106 int family; 106 int family;
107 struct sctp_af *af; 107 struct sctp_af *af;
108 struct net *net = dev_net(skb->dev); 108 struct net *net = dev_net(skb->dev);
109 bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
109 110
110 if (skb->pkt_type != PACKET_HOST) 111 if (skb->pkt_type != PACKET_HOST)
111 goto discard_it; 112 goto discard_it;
@@ -123,8 +124,7 @@ int sctp_rcv(struct sk_buff *skb)
123 * it's better to just linearize it otherwise crc computing 124 * it's better to just linearize it otherwise crc computing
124 * takes longer. 125 * takes longer.
125 */ 126 */
126 if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) && 127 if ((!is_gso && skb_linearize(skb)) ||
127 skb_linearize(skb)) ||
128 !pskb_may_pull(skb, sizeof(struct sctphdr))) 128 !pskb_may_pull(skb, sizeof(struct sctphdr)))
129 goto discard_it; 129 goto discard_it;
130 130
@@ -135,7 +135,7 @@ int sctp_rcv(struct sk_buff *skb)
135 if (skb_csum_unnecessary(skb)) 135 if (skb_csum_unnecessary(skb))
136 __skb_decr_checksum_unnecessary(skb); 136 __skb_decr_checksum_unnecessary(skb);
137 else if (!sctp_checksum_disable && 137 else if (!sctp_checksum_disable &&
138 !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) && 138 !is_gso &&
139 sctp_rcv_checksum(net, skb) < 0) 139 sctp_rcv_checksum(net, skb) < 0)
140 goto discard_it; 140 goto discard_it;
141 skb->csum_valid = 1; 141 skb->csum_valid = 1;
@@ -1218,7 +1218,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1218 * issue as packets hitting this are mostly INIT or INIT-ACK and 1218 * issue as packets hitting this are mostly INIT or INIT-ACK and
1219 * those cannot be on GSO-style anyway. 1219 * those cannot be on GSO-style anyway.
1220 */ 1220 */
1221 if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) 1221 if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
1222 return NULL; 1222 return NULL;
1223 1223
1224 ch = (struct sctp_chunkhdr *)skb->data; 1224 ch = (struct sctp_chunkhdr *)skb->data;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 48392552ee7c..23ebc5318edc 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -170,7 +170,7 @@ next_chunk:
170 170
171 chunk = list_entry(entry, struct sctp_chunk, list); 171 chunk = list_entry(entry, struct sctp_chunk, list);
172 172
173 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) { 173 if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
174 /* GSO-marked skbs but without frags, handle 174 /* GSO-marked skbs but without frags, handle
175 * them normally 175 * them normally
176 */ 176 */
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 35bc7106d182..123e9f2dc226 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,7 +45,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
45 struct sk_buff *segs = ERR_PTR(-EINVAL); 45 struct sk_buff *segs = ERR_PTR(-EINVAL);
46 struct sctphdr *sh; 46 struct sctphdr *sh;
47 47
48 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)) 48 if (!skb_is_gso_sctp(skb))
49 goto out; 49 goto out;
50 50
51 sh = sctp_hdr(skb); 51 sh = sctp_hdr(skb);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 86913eb5cfa0..5f8046c62d90 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -983,10 +983,6 @@ out:
983 lsmc->clcsock = NULL; 983 lsmc->clcsock = NULL;
984 } 984 }
985 release_sock(lsk); 985 release_sock(lsk);
986 /* no more listening, wake up smc_close_wait_listen_clcsock and
987 * accept
988 */
989 lsk->sk_state_change(lsk);
990 sock_put(&lsmc->sk); /* sock_hold in smc_listen */ 986 sock_put(&lsmc->sk); /* sock_hold in smc_listen */
991} 987}
992 988
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index e339c0186dcf..fa41d9881741 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -30,27 +30,6 @@ static void smc_close_cleanup_listen(struct sock *parent)
30 smc_close_non_accepted(sk); 30 smc_close_non_accepted(sk);
31} 31}
32 32
33static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
34{
35 DEFINE_WAIT_FUNC(wait, woken_wake_function);
36 struct sock *sk = &smc->sk;
37 signed long timeout;
38
39 timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
40 add_wait_queue(sk_sleep(sk), &wait);
41 do {
42 release_sock(sk);
43 if (smc->clcsock)
44 timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
45 timeout);
46 sched_annotate_sleep();
47 lock_sock(sk);
48 if (!smc->clcsock)
49 break;
50 } while (timeout);
51 remove_wait_queue(sk_sleep(sk), &wait);
52}
53
54/* wait for sndbuf data being transmitted */ 33/* wait for sndbuf data being transmitted */
55static void smc_close_stream_wait(struct smc_sock *smc, long timeout) 34static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
56{ 35{
@@ -204,9 +183,11 @@ again:
204 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); 183 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
205 /* wake up kernel_accept of smc_tcp_listen_worker */ 184 /* wake up kernel_accept of smc_tcp_listen_worker */
206 smc->clcsock->sk->sk_data_ready(smc->clcsock->sk); 185 smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
207 smc_close_wait_listen_clcsock(smc);
208 } 186 }
209 smc_close_cleanup_listen(sk); 187 smc_close_cleanup_listen(sk);
188 release_sock(sk);
189 flush_work(&smc->tcp_listen_work);
190 lock_sock(sk);
210 break; 191 break;
211 case SMC_ACTIVE: 192 case SMC_ACTIVE:
212 smc_close_stream_wait(smc, timeout); 193 smc_close_stream_wait(smc, timeout);
diff --git a/net/socket.c b/net/socket.c
index d9a1ac233b35..3d1948d27a25 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2590,6 +2590,11 @@ void sock_unregister(int family)
2590} 2590}
2591EXPORT_SYMBOL(sock_unregister); 2591EXPORT_SYMBOL(sock_unregister);
2592 2592
2593bool sock_is_registered(int family)
2594{
2595 return family < NPROTO && rcu_access_pointer(net_families[family]);
2596}
2597
2593static int __init sock_init(void) 2598static int __init sock_init(void)
2594{ 2599{
2595 int err; 2600 int err;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
283 struct crypto_comp *tfm; 283 struct crypto_comp *tfm;
284 284
285 /* This can be any valid CPU ID so we don't need locking. */ 285 /* This can be any valid CPU ID so we don't need locking. */
286 tfm = __this_cpu_read(*pos->tfms); 286 tfm = this_cpu_read(*pos->tfms);
287 287
288 if (!strcmp(crypto_comp_name(tfm), alg_name)) { 288 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
289 pos->users++; 289 pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 77d9d1ab05ce..cb3bb9ae4407 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1458static int xfrm_get_tos(const struct flowi *fl, int family) 1458static int xfrm_get_tos(const struct flowi *fl, int family)
1459{ 1459{
1460 const struct xfrm_policy_afinfo *afinfo; 1460 const struct xfrm_policy_afinfo *afinfo;
1461 int tos = 0; 1461 int tos;
1462 1462
1463 afinfo = xfrm_policy_get_afinfo(family); 1463 afinfo = xfrm_policy_get_afinfo(family);
1464 tos = afinfo ? afinfo->get_tos(fl) : 0; 1464 if (!afinfo)
1465 return 0;
1466
1467 tos = afinfo->get_tos(fl);
1465 1468
1466 rcu_read_unlock(); 1469 rcu_read_unlock();
1467 1470
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
1891 spin_unlock(&pq->hold_queue.lock); 1894 spin_unlock(&pq->hold_queue.lock);
1892 1895
1893 dst_hold(xfrm_dst_path(dst)); 1896 dst_hold(xfrm_dst_path(dst));
1894 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0); 1897 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
1895 if (IS_ERR(dst)) 1898 if (IS_ERR(dst))
1896 goto purge_queue; 1899 goto purge_queue;
1897 1900
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2729 while (dst->xfrm) { 2732 while (dst->xfrm) {
2730 const struct xfrm_state *xfrm = dst->xfrm; 2733 const struct xfrm_state *xfrm = dst->xfrm;
2731 2734
2735 dst = xfrm_dst_child(dst);
2736
2732 if (xfrm->props.mode == XFRM_MODE_TRANSPORT) 2737 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2733 continue; 2738 continue;
2734 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) 2739 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2735 daddr = xfrm->coaddr; 2740 daddr = xfrm->coaddr;
2736 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) 2741 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2737 daddr = &xfrm->id.daddr; 2742 daddr = &xfrm->id.daddr;
2738
2739 dst = xfrm_dst_child(dst);
2740 } 2743 }
2741 return daddr; 2744 return daddr;
2742} 2745}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 1d38c6acf8af..9e3a5e85f828 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
660 } else { 660 } else {
661 XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; 661 XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
662 XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; 662 XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
663 xo->seq.low = oseq = oseq + 1; 663 xo->seq.low = oseq + 1;
664 xo->seq.hi = oseq_hi; 664 xo->seq.hi = oseq_hi;
665 oseq += skb_shinfo(skb)->gso_segs; 665 oseq += skb_shinfo(skb)->gso_segs;
666 } 666 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54e21f19d722..f9d2f2233f09 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
2056 struct xfrm_mgr *km; 2056 struct xfrm_mgr *km;
2057 struct xfrm_policy *pol = NULL; 2057 struct xfrm_policy *pol = NULL;
2058 2058
2059#ifdef CONFIG_COMPAT
2060 if (in_compat_syscall())
2061 return -EOPNOTSUPP;
2062#endif
2063
2059 if (!optval && !optlen) { 2064 if (!optval && !optlen) {
2060 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); 2065 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2061 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); 2066 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index aff2e84ec761..e92b8c019c88 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
121 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 121 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
122 struct xfrm_replay_state_esn *rs; 122 struct xfrm_replay_state_esn *rs;
123 123
124 if (p->flags & XFRM_STATE_ESN) { 124 if (!rt)
125 if (!rt) 125 return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
126 return -EINVAL;
127 126
128 rs = nla_data(rt); 127 rs = nla_data(rt);
129 128
130 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) 129 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
131 return -EINVAL; 130 return -EINVAL;
132
133 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
134 nla_len(rt) != sizeof(*rs))
135 return -EINVAL;
136 }
137 131
138 if (!rt) 132 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
139 return 0; 133 nla_len(rt) != sizeof(*rs))
134 return -EINVAL;
140 135
141 /* As only ESP and AH support ESN feature. */ 136 /* As only ESP and AH support ESN feature. */
142 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) 137 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))