author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 06:27:35 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 06:27:35 -0400
commit     b24d0d5b12a678b96676348976982686fbe222b4 (patch)
tree       565ce37d2d971cb94436241bc2ac48028b6b66d0 /net
parent     4ac0d3fb13d5acc138d8be7c45715567c2e2ec47 (diff)
parent     3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff)
Merge 4.16-rc7 into char-misc-next
We want the hyperv fix in here for merging and testing.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 26
-rw-r--r--  net/batman-adv/bat_v.c | 2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 22
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 2
-rw-r--r--  net/batman-adv/fragmentation.c | 3
-rw-r--r--  net/batman-adv/hard-interface.c | 9
-rw-r--r--  net/batman-adv/icmp_socket.c | 1
-rw-r--r--  net/batman-adv/log.c | 1
-rw-r--r--  net/batman-adv/multicast.c | 4
-rw-r--r--  net/batman-adv/originator.c | 4
-rw-r--r--  net/batman-adv/originator.h | 4
-rw-r--r--  net/batman-adv/routing.c | 25
-rw-r--r--  net/batman-adv/soft-interface.c | 8
-rw-r--r--  net/batman-adv/types.h | 11
-rw-r--r--  net/bluetooth/smp.c | 8
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 4
-rw-r--r--  net/bridge/br_vlan.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_among.c | 55
-rw-r--r--  net/bridge/netfilter/ebtables.c | 44
-rw-r--r--  net/core/dev.c | 36
-rw-r--r--  net/core/dev_ioctl.c | 7
-rw-r--r--  net/core/devlink.c | 62
-rw-r--r--  net/core/ethtool.c | 5
-rw-r--r--  net/core/filter.c | 60
-rw-r--r--  net/core/skbuff.c | 59
-rw-r--r--  net/core/sock.c | 21
-rw-r--r--  net/core/sock_diag.c | 12
-rw-r--r--  net/dccp/proto.c | 5
-rw-r--r--  net/dsa/legacy.c | 2
-rw-r--r--  net/ieee802154/6lowpan/core.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 3
-rw-r--r--  net/ipv4/inet_fragment.c | 3
-rw-r--r--  net/ipv4/ip_forward.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 5
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ip_sockglue.c | 6
-rw-r--r--  net/ipv4/ip_tunnel.c | 13
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 15
-rw-r--r--  net/ipv4/netfilter/nf_flow_table_ipv4.c | 3
-rw-r--r--  net/ipv4/route.c | 65
-rw-r--r--  net/ipv4/tcp.c | 1
-rw-r--r--  net/ipv4/tcp_illinois.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 24
-rw-r--r--  net/ipv4/tcp_timer.c | 1
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 3
-rw-r--r--  net/ipv4/xfrm4_output.c | 3
-rw-r--r--  net/ipv4/xfrm4_policy.c | 5
-rw-r--r--  net/ipv6/datagram.c | 21
-rw-r--r--  net/ipv6/ip6_gre.c | 8
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 12
-rw-r--r--  net/ipv6/ndisc.c | 3
-rw-r--r--  net/ipv6/netfilter.c | 9
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_flow_table_ipv6.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | 4
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c | 12
-rw-r--r--  net/ipv6/route.c | 76
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 7
-rw-r--r--  net/ipv6/sit.c | 7
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 3
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 4
-rw-r--r--  net/kcm/kcmsock.c | 33
-rw-r--r--  net/l2tp/l2tp_core.c | 188
-rw-r--r--  net/l2tp/l2tp_core.h | 26
-rw-r--r--  net/l2tp/l2tp_ip.c | 10
-rw-r--r--  net/l2tp/l2tp_ip6.c | 8
-rw-r--r--  net/l2tp/l2tp_ppp.c | 60
-rw-r--r--  net/mac80211/debugfs.c | 1
-rw-r--r--  net/mac80211/mlme.c | 3
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/tx.c | 8
-rw-r--r--  net/mpls/af_mpls.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 26
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 30
-rw-r--r--  net/netfilter/xt_hashlimit.c | 16
-rw-r--r--  net/netfilter/xt_recent.c | 6
-rw-r--r--  net/netlink/genetlink.c | 2
-rw-r--r--  net/openvswitch/meter.c | 12
-rw-r--r--  net/qrtr/smd.c | 1
-rw-r--r--  net/rds/tcp_listen.c | 14
-rw-r--r--  net/sched/act_bpf.c | 2
-rw-r--r--  net/sched/act_csum.c | 5
-rw-r--r--  net/sched/act_ipt.c | 9
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/act_police.c | 2
-rw-r--r--  net/sched/act_sample.c | 3
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/act_skbmod.c | 5
-rw-r--r--  net/sched/act_tunnel_key.c | 10
-rw-r--r--  net/sched/act_vlan.c | 5
-rw-r--r--  net/sched/sch_generic.c | 22
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sched/sch_tbf.c | 3
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/inqueue.c | 2
-rw-r--r--  net/sctp/offload.c | 2
-rw-r--r--  net/smc/af_smc.c | 8
-rw-r--r--  net/smc/smc_cdc.c | 2
-rw-r--r--  net/smc/smc_close.c | 25
-rw-r--r--  net/smc/smc_core.c | 3
-rw-r--r--  net/smc/smc_llc.c | 2
-rw-r--r--  net/socket.c | 5
-rw-r--r--  net/tipc/group.c | 1
-rw-r--r--  net/tipc/socket.c | 1
-rw-r--r--  net/tls/tls_main.c | 52
-rw-r--r--  net/wireless/Kconfig | 13
-rw-r--r--  net/xfrm/xfrm_device.c | 2
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 13
-rw-r--r--  net/xfrm/xfrm_replay.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 5
-rw-r--r--  net/xfrm/xfrm_user.c | 21
118 files changed, 925 insertions, 625 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 64aa9f755e1d..45c9bf5ff3a0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
          * original position later
          */
         skb_push(skb, offset);
-        skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
-                                      skb->vlan_tci);
+        skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
+                                            skb->vlan_tci, skb->mac_len);
         if (!skb)
                 return false;
         skb_pull(skb, offset + VLAN_HLEN);
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 79e326383726..99abeadf416e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
-                                     int max_if_num)
+                                     unsigned int max_if_num)
 {
         void *data_ptr;
         size_t old_size;
@@ -201,7 +201,8 @@ unlock:
  */
 static void
 batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
-                                   int max_if_num, int del_if_num)
+                                   unsigned int max_if_num,
+                                   unsigned int del_if_num)
 {
         size_t chunk_size;
         size_t if_offset;
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
  */
 static void
 batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
-                                       int max_if_num, int del_if_num)
+                                       unsigned int max_if_num,
+                                       unsigned int del_if_num)
 {
         size_t if_offset;
         void *data_ptr;
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
-                                     int max_if_num, int del_if_num)
+                                     unsigned int max_if_num,
+                                     unsigned int del_if_num)
 {
         spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
@@ -311,7 +314,8 @@ static struct batadv_orig_node *
 batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
         struct batadv_orig_node *orig_node;
-        int size, hash_added;
+        int hash_added;
+        size_t size;
 
         orig_node = batadv_orig_hash_find(bat_priv, addr);
         if (orig_node)
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
         u32 i;
         size_t word_index;
         u8 *w;
-        int if_num;
+        unsigned int if_num;
 
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
         struct batadv_neigh_node *tmp_neigh_node = NULL;
         struct batadv_neigh_node *router = NULL;
         struct batadv_orig_node *orig_node_tmp;
-        int if_num;
+        unsigned int if_num;
         u8 sum_orig, sum_neigh;
         u8 *neigh_addr;
         u8 tq_avg;
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
         u8 total_count;
         u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
         unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-        int if_num;
+        unsigned int if_num;
         unsigned int tq_asym_penalty, inv_asym_penalty;
         unsigned int combined_tq;
         unsigned int tq_iface_penalty;
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
 
         if (is_my_orig) {
                 unsigned long *word;
-                int offset;
+                size_t offset;
                 s32 bit_pos;
-                s16 if_num;
+                unsigned int if_num;
                 u8 *weight;
 
                 orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
         struct batadv_neigh_ifinfo *router_ifinfo = NULL;
         struct batadv_neigh_node *router;
         struct batadv_gw_node *curr_gw;
-        int ret = -EINVAL;
+        int ret = 0;
         void *hdr;
 
         router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 27e165ac9302..c74f81341dab 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
         struct batadv_neigh_ifinfo *router_ifinfo = NULL;
         struct batadv_neigh_node *router;
         struct batadv_gw_node *curr_gw;
-        int ret = -EINVAL;
+        int ret = 0;
         void *hdr;
 
         router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index fad47853ad3c..b1a08374088b 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
         struct batadv_bla_claim *claim;
         int idx = 0;
+        int ret = 0;
 
         rcu_read_lock();
         hlist_for_each_entry_rcu(claim, head, hash_entry) {
                 if (idx++ < *idx_skip)
                         continue;
-                if (batadv_bla_claim_dump_entry(msg, portid, seq,
-                                                primary_if, claim)) {
+
+                ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+                                                  primary_if, claim);
+                if (ret) {
                         *idx_skip = idx - 1;
                         goto unlock;
                 }
         }
 
-        *idx_skip = idx;
+        *idx_skip = 0;
 unlock:
         rcu_read_unlock();
-        return 0;
+        return ret;
 }
 
 /**
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
         struct batadv_bla_backbone_gw *backbone_gw;
         int idx = 0;
+        int ret = 0;
 
         rcu_read_lock();
         hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                 if (idx++ < *idx_skip)
                         continue;
-                if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-                                                   primary_if, backbone_gw)) {
+
+                ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+                                                     primary_if, backbone_gw);
+                if (ret) {
                         *idx_skip = idx - 1;
                         goto unlock;
                 }
         }
 
-        *idx_skip = idx;
+        *idx_skip = 0;
 unlock:
         rcu_read_unlock();
-        return 0;
+        return ret;
 }
 
 /**
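
[Editor's note] The two bridge-loop-avoidance hunks above restore the netlink dump-resume contract: the bucket walker now hands the per-entry error code back to its caller (so a full message aborts the dump instead of being silently treated as success) and resets the skip index once a bucket has been fully emitted. A minimal userspace sketch of that pattern, with hypothetical entry/dump_entry stand-ins for the batman-adv types:

#include <errno.h>
#include <stddef.h>

struct entry { struct entry *next; int id; };

static int dump_entry(const struct entry *e)
{
        (void)e;
        return 0; /* a real implementation may fail with -EMSGSIZE */
}

static int dump_bucket(const struct entry *head, int *idx_skip)
{
        const struct entry *e;
        int idx = 0, ret = 0;

        for (e = head; e; e = e->next) {
                if (idx++ < *idx_skip)
                        continue; /* already emitted in a previous pass */
                ret = dump_entry(e);
                if (ret) {
                        *idx_skip = idx - 1; /* resume at this entry next time */
                        return ret;          /* propagate the real error */
                }
        }
        *idx_skip = 0; /* bucket complete: the next bucket starts from zero */
        return 0;
}

int main(void)
{
        struct entry b = { NULL, 2 }, a = { &b, 1 };
        int skip = 0;

        return dump_bucket(&a, &skip);
}
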
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 9703c791ffc5..87cd962d28d5 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -393,7 +393,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
                    batadv_arp_hw_src(skb, hdr_size), &ip_src,
                    batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
 
-        if (hdr_size == 0)
+        if (hdr_size < sizeof(struct batadv_unicast_packet))
                 return;
 
         unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 22dde42fd80e..5afe641ee4b0 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
         /* Move the existing MAC header to just before the payload. (Override
          * the fragment header.)
          */
-        skb_pull_rcsum(skb_out, hdr_size);
+        skb_pull(skb_out, hdr_size);
+        skb_out->ip_summed = CHECKSUM_NONE;
         memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
         skb_set_mac_header(skb_out, -ETH_HLEN);
         skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 5f186bff284a..68b54a39c51d 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
         hard_iface->soft_iface = soft_iface;
         bat_priv = netdev_priv(hard_iface->soft_iface);
 
+        if (bat_priv->num_ifaces >= UINT_MAX) {
+                ret = -ENOSPC;
+                goto err_dev;
+        }
+
         ret = netdev_master_upper_dev_link(hard_iface->net_dev,
                                            soft_iface, NULL, NULL, NULL);
         if (ret)
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
         batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
 
         /* nobody uses this interface anymore */
-        if (!bat_priv->num_ifaces) {
+        if (bat_priv->num_ifaces == 0) {
                 batadv_gw_check_client_stop(bat_priv);
 
                 if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
         if (ret)
                 goto free_if;
 
-        hard_iface->if_num = -1;
+        hard_iface->if_num = 0;
         hard_iface->net_dev = net_dev;
         hard_iface->soft_iface = NULL;
         hard_iface->if_status = BATADV_IF_NOT_IN_USE;
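
[Editor's note] Together with the types.h hunks further down, this change widens batman-adv's interface counters from char/s16 to unsigned int and adds an explicit -ENOSPC guard before the counter could wrap. A small standalone C illustration of why a plain char counter is fragile (values are illustrative, not taken from the patch):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* old counter type: wraps (implementation-defined) once CHAR_MAX is passed */
        char old_count = CHAR_MAX;
        old_count++;
        printf("char counter past CHAR_MAX: %d\n", (int)old_count);

        /* new scheme: unsigned counter plus an explicit upper-bound check */
        unsigned int new_count = UINT_MAX;
        if (new_count >= UINT_MAX) {
                puts("refusing to add another interface: -ENOSPC");
                return 1;
        }
        return 0;
}
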
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index e91f29c7c638..5daa3d50da17 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
+#include <linux/eventpoll.h>
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index dc9fa37ddd14..cdbe0e5e208b 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -22,6 +22,7 @@
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
 #include <linux/errno.h>
+#include <linux/eventpoll.h>
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index cbdeb47ec3f6..d70640135e3a 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -543,8 +543,8 @@ update:
                 bat_priv->mcast.enabled = true;
         }
 
-        return !(mcast_data.flags &
-                 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
+        return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+                 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
 }
 
 /**
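
[Editor's note] The multicast hunk fixes inverted flag logic: the old expression returned false as soon as either WANT_ALL flag was set, while the intent is to report that multicast optimization is possible unless both flags are set. A tiny self-contained check (hypothetical flag values) showing where the two expressions disagree:

#include <assert.h>
#include <stdbool.h>

#define WANT_ALL_IPV4 0x01 /* hypothetical values, for illustration only */
#define WANT_ALL_IPV6 0x02

static bool old_check(unsigned int flags)
{
        /* false whenever either flag is set */
        return !(flags & (WANT_ALL_IPV4 | WANT_ALL_IPV6));
}

static bool new_check(unsigned int flags)
{
        /* false only when both flags are set */
        return !(flags & WANT_ALL_IPV4 && flags & WANT_ALL_IPV6);
}

int main(void)
{
        unsigned int only_v4 = WANT_ALL_IPV4;

        assert(old_check(only_v4) == false); /* old code: wrongly pessimistic */
        assert(new_check(only_v4) == true);  /* fixed code */
        assert(old_check(0) && new_check(0));
        assert(!new_check(WANT_ALL_IPV4 | WANT_ALL_IPV6));
        return 0;
}
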
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 58a7d9274435..74782426bb77 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-                            int max_if_num)
+                            unsigned int max_if_num)
 {
         struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
         struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@ -1611,7 +1611,7 @@ err:
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-                            int max_if_num)
+                            unsigned int max_if_num)
 {
         struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
         struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 8e543a3cdc6c..15d896b2de6f 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-                            int max_if_num);
+                            unsigned int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-                            int max_if_num);
+                            unsigned int max_if_num);
 struct batadv_orig_node_vlan *
 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                           unsigned short vid);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index b6891e8b741c..e61dc1293bb5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -759,6 +759,7 @@ free_skb:
 /**
  * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
  * @vid: VLAN identifier
@@ -770,7 +771,7 @@ free_skb:
  * Return: true if the packet header has been updated, false otherwise
  */
 static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                               struct batadv_unicast_packet *unicast_packet,
                               u8 *dst_addr, unsigned short vid)
 {
@@ -799,8 +800,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
         }
 
         /* update the packet header */
+        skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
         ether_addr_copy(unicast_packet->dest, orig_addr);
         unicast_packet->ttvn = orig_ttvn;
+        skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
         ret = true;
 out:
@@ -841,7 +844,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
          * the packet to
          */
         if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
-                if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+                if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
                                                   ethhdr->h_dest, vid))
                         batadv_dbg_ratelimited(BATADV_DBG_TT,
                                                bat_priv,
@@ -887,7 +890,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
          * destination can possibly be updated and forwarded towards the new
          * target host
          */
-        if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+        if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
                                           ethhdr->h_dest, vid)) {
                 batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
                                        "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -910,12 +913,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         if (!primary_if)
                 return false;
 
+        /* update the packet header */
+        skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
         ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+        unicast_packet->ttvn = curr_ttvn;
+        skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
         batadv_hardif_put(primary_if);
 
-        unicast_packet->ttvn = curr_ttvn;
-
         return true;
 }
 
@@ -968,14 +973,10 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
         int check, hdr_size = sizeof(*unicast_packet);
         enum batadv_subtype subtype;
-        struct ethhdr *ethhdr;
         int ret = NET_RX_DROP;
         bool is4addr, is_gw;
 
         unicast_packet = (struct batadv_unicast_packet *)skb->data;
-        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-        ethhdr = eth_hdr(skb);
-
         is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
         /* the caller function should have already pulled 2 bytes */
         if (is4addr)
@@ -995,12 +996,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
                 goto free_skb;
 
+        unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
         /* packet for me */
         if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                 /* If this is a unicast packet from another backgone gw,
                  * drop it.
                  */
-                orig_addr_gw = ethhdr->h_source;
+                orig_addr_gw = eth_hdr(skb)->h_source;
                 orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
                 if (orig_node_gw) {
                         is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
@@ -1015,6 +1018,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         }
 
         if (is4addr) {
+                unicast_4addr_packet =
+                        (struct batadv_unicast_4addr_packet *)skb->data;
                 subtype = unicast_4addr_packet->subtype;
                 batadv_dat_inc_counter(bat_priv, subtype);
 
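
[Editor's note] The skb_postpull_rcsum()/skb_postpush_rcsum() pairs added above keep skb->csum coherent on CHECKSUM_COMPLETE hardware when the unicast header is rewritten in place: subtract the header's contribution before editing it, add the new bytes back afterwards. A toy userspace model of the same incremental bookkeeping, using a plain byte sum instead of the kernel's ones-complement checksum:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy stand-in for a packet checksum: a plain additive byte sum */
static uint32_t sum_bytes(const uint8_t *p, size_t n)
{
        uint32_t s = 0;

        while (n--)
                s += *p++;
        return s;
}

int main(void)
{
        uint8_t pkt[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
        uint32_t csum = sum_bytes(pkt, sizeof(pkt));

        csum -= sum_bytes(pkt, 4);   /* "postpull": retire the old header bytes */
        memset(pkt, 0xab, 4);        /* rewrite the 4-byte header in place */
        csum += sum_bytes(pkt, 4);   /* "postpush": account for the new bytes */

        printf("%s\n", csum == sum_bytes(pkt, sizeof(pkt))
                               ? "checksum still coherent"
                               : "checksum drifted");
        return 0;
}
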
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 900c5ce21cd4..367a81fb785f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
         /* skb->dev & skb->pkt_type are set here */
         skb->protocol = eth_type_trans(skb, soft_iface);
-
-        /* should not be necessary anymore as we use skb_pull_rcsum()
-         * TODO: please verify this and remove this TODO
-         * -- Dec 21st 2009, Simon Wunderlich
-         */
-
-        /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
         batadv_inc_counter(bat_priv, BATADV_CNT_RX);
         batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index bb1578410e0c..a5aa6d61f4e2 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -167,7 +167,7 @@ struct batadv_hard_iface {
         struct list_head list;
 
         /** @if_num: identificator of the interface */
-        s16 if_num;
+        unsigned int if_num;
 
         /** @if_status: status of the interface for batman-adv */
         char if_status;
@@ -1596,7 +1596,7 @@ struct batadv_priv {
         atomic_t batman_queue_left;
 
         /** @num_ifaces: number of interfaces assigned to this mesh interface */
-        char num_ifaces;
+        unsigned int num_ifaces;
 
         /** @mesh_obj: kobject for sysfs mesh subdirectory */
         struct kobject *mesh_obj;
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops {
          * orig_node due to a new hard-interface being added into the mesh
          * (optional)
          */
-        int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+        int (*add_if)(struct batadv_orig_node *orig_node,
+                      unsigned int max_if_num);
 
         /**
          * @del_if: ask the routing algorithm to apply the needed changes to the
          * orig_node due to an hard-interface being removed from the mesh
          * (optional)
          */
-        int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
-                      int del_if_num);
+        int (*del_if)(struct batadv_orig_node *orig_node,
+                      unsigned int max_if_num, unsigned int del_if_num);
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
         /** @print: print the originator table (optional) */
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 01117ae84f1d..a2ddae2f37d7 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2296,8 +2296,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
         else
                 sec_level = authreq_to_seclevel(auth);
 
-        if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
+        if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
+                /* If link is already encrypted with sufficient security we
+                 * still need refresh encryption as per Core Spec 5.0 Vol 3,
+                 * Part H 2.4.6
+                 */
+                smp_ltk_encrypt(conn, hcon->sec_level);
                 return 0;
+        }
 
         if (sec_level > hcon->pending_sec_level)
                 hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 27f1d4f2114a..9b16eaf33819 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
         iph = ip_hdr(skb);
         if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-                goto inhdr_error;
+                goto csum_error;
 
         len = ntohs(iph->tot_len);
         if (skb->len < len) {
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
          */
         return 0;
 
+csum_error:
+        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
         __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 51935270c651..9896f4975353 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
                 masterv = br_vlan_find(vg, vid);
                 if (WARN_ON(!masterv))
                         return NULL;
+                refcount_set(&masterv->refcnt, 1);
+                return masterv;
         }
         refcount_inc(&masterv->refcnt);
 
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index ce7152a12bd8..620e54f08296 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -172,18 +172,69 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
         return true;
 }
 
+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+        return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+        if (off == 0) /* not present */
+                return false;
+
+        if (off < (int)sizeof(struct ebt_among_info) ||
+            off % __alignof__(struct ebt_mac_wormhash))
+                return true;
+
+        off += sizeof(struct ebt_mac_wormhash);
+
+        return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+        if (a == 0)
+                a = sizeof(struct ebt_among_info);
+
+        return ebt_mac_wormhash_size(wh) + a == b;
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
         const struct ebt_among_info *info = par->matchinfo;
         const struct ebt_entry_match *em =
                 container_of(par->matchinfo, const struct ebt_entry_match, data);
-        int expected_length = sizeof(struct ebt_among_info);
+        unsigned int expected_length = sizeof(struct ebt_among_info);
         const struct ebt_mac_wormhash *wh_dst, *wh_src;
         int err;
 
+        if (expected_length > em->match_size)
+                return -EINVAL;
+
+        if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+            wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+                return -EINVAL;
+
         wh_dst = ebt_among_wh_dst(info);
-        wh_src = ebt_among_wh_src(info);
+        if (poolsize_invalid(wh_dst))
+                return -EINVAL;
+
         expected_length += ebt_mac_wormhash_size(wh_dst);
+        if (expected_length > em->match_size)
+                return -EINVAL;
+
+        wh_src = ebt_among_wh_src(info);
+        if (poolsize_invalid(wh_src))
+                return -EINVAL;
+
+        if (info->wh_src_ofs < info->wh_dst_ofs) {
+                if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+                        return -EINVAL;
+        } else {
+                if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+                        return -EINVAL;
+        }
+
         expected_length += ebt_mac_wormhash_size(wh_src);
 
         if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 02c4b409d317..a94d23b0a9af 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
         int off = ebt_compat_match_offset(match, m->match_size);
         compat_uint_t msize = m->match_size - off;
 
-        BUG_ON(off >= m->match_size);
+        if (WARN_ON(off >= m->match_size))
+                return -EINVAL;
 
         if (copy_to_user(cm->u.name, match->name,
             strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
         int off = xt_compat_target_offset(target);
         compat_uint_t tsize = t->target_size - off;
 
-        BUG_ON(off >= t->target_size);
+        if (WARN_ON(off >= t->target_size))
+                return -EINVAL;
 
         if (copy_to_user(cm->u.name, target->name,
             strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
         if (state->buf_kern_start == NULL)
                 goto count_only;
 
-        BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+        if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+                return -EINVAL;
 
         memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
 
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
 {
         char *b = state->buf_kern_start;
 
-        BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+        if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+                return -EINVAL;
 
         if (b != NULL && sz > 0)
                 memset(b + state->buf_kern_offset, 0, sz);
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
         pad = XT_ALIGN(size_kern) - size_kern;
 
         if (pad > 0 && dst) {
-                BUG_ON(state->buf_kern_len <= pad);
-                BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+                if (WARN_ON(state->buf_kern_len <= pad))
+                        return -EINVAL;
+                if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+                        return -EINVAL;
                 memset(dst + size_kern, 0, pad);
         }
         return off + match_size;
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                 if (ret < 0)
                         return ret;
 
-                BUG_ON(ret < match32->match_size);
+                if (WARN_ON(ret < match32->match_size))
+                        return -EINVAL;
                 growth += ret - match32->match_size;
                 growth += ebt_compat_entry_padsize();
 
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                 if (match_kern)
                         match_kern->match_size = ret;
 
-                WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+                if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+                        return -EINVAL;
+
                 match32 = (struct compat_ebt_entry_mwt *) buf;
         }
 
@@ -2109,6 +2118,19 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
          *
          * offsets are relative to beginning of struct ebt_entry (i.e., 0).
          */
+        for (i = 0; i < 4 ; ++i) {
+                if (offsets[i] > *total)
+                        return -EINVAL;
+
+                if (i < 3 && offsets[i] == *total)
+                        return -EINVAL;
+
+                if (i == 0)
+                        continue;
+                if (offsets[i-1] > offsets[i])
+                        return -EINVAL;
+        }
+
         for (i = 0, j = 1 ; j < 4 ; j++, i++) {
                 struct compat_ebt_entry_mwt *match32;
                 unsigned int size;
@@ -2140,7 +2162,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 
         startoff = state->buf_user_offset - startoff;
 
-        BUG_ON(*total < startoff);
+        if (WARN_ON(*total < startoff))
+                return -EINVAL;
         *total -= startoff;
         return 0;
 }
@@ -2267,7 +2290,8 @@ static int compat_do_replace(struct net *net, void __user *user,
         state.buf_kern_len = size64;
 
         ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-        BUG_ON(ret < 0); /* parses same data again */
+        if (WARN_ON(ret < 0))
+                goto out_unlock;
 
         vfree(entries_tmp);
         tmp.entries_size = size64;
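
[Editor's note] Every ebtables hunk applies the same hardening: a BUG_ON(cond) that would crash the kernel on malformed (and potentially attacker-supplied) ruleset data becomes "if (WARN_ON(cond)) return -EINVAL;", turning the condition into a logged, recoverable syscall failure. A userspace analog of the before/after, with a hypothetical parser guard:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* before: abort the whole process on bad input (kernel: BUG_ON) */
static int check_offset_fatal(unsigned int off, unsigned int size)
{
        assert(off < size);
        return 0;
}

/* after: warn and fail the single request (kernel: WARN_ON + -EINVAL) */
static int check_offset(unsigned int off, unsigned int size)
{
        if (off >= size) {
                fprintf(stderr, "warning: offset %u out of bounds (size %u)\n",
                        off, size);
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        check_offset_fatal(1, 4); /* valid input: passes */

        /* malformed input is rejected instead of killing the caller */
        return check_offset(8, 4) == -EINVAL ? 0 : 1;
}
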
diff --git a/net/core/dev.c b/net/core/dev.c
index d4362befe7e2..12be20535714 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3278,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 static void skb_update_prio(struct sk_buff *skb)
 {
-        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+        const struct netprio_map *map;
+        const struct sock *sk;
+        unsigned int prioidx;
 
-        if (!skb->priority && skb->sk && map) {
-                unsigned int prioidx =
-                        sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+        if (skb->priority)
+                return;
+        map = rcu_dereference_bh(skb->dev->priomap);
+        if (!map)
+                return;
+        sk = skb_to_full_sk(skb);
+        if (!sk)
+                return;
 
-                if (prioidx < map->priomap_len)
-                        skb->priority = map->priomap[prioidx];
-        }
+        prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+        if (prioidx < map->priomap_len)
+                skb->priority = map->priomap[prioidx];
 }
 #else
 #define skb_update_prio(skb)
@@ -6396,6 +6404,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                 .linking = true,
                 .upper_info = upper_info,
         };
+        struct net_device *master_dev;
         int ret = 0;
 
         ASSERT_RTNL();
@@ -6407,11 +6416,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
         if (netdev_has_upper_dev(upper_dev, dev))
                 return -EBUSY;
 
-        if (netdev_has_upper_dev(dev, upper_dev))
-                return -EEXIST;
-
-        if (master && netdev_master_upper_dev_get(dev))
-                return -EBUSY;
+        if (!master) {
+                if (netdev_has_upper_dev(dev, upper_dev))
+                        return -EEXIST;
+        } else {
+                master_dev = netdev_master_upper_dev_get(dev);
+                if (master_dev)
+                        return master_dev == upper_dev ? -EEXIST : -EBUSY;
+        }
 
         ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
                                             &changeupper_info.info);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296c..a04e1e88bf3a 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
         if (colon)
                 *colon = 0;
 
-        dev_load(net, ifr->ifr_name);
-
         /*
          * See which interface the caller is talking about.
          */
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
         case SIOCGIFMAP:
         case SIOCGIFINDEX:
         case SIOCGIFTXQLEN:
+                dev_load(net, ifr->ifr_name);
                 rcu_read_lock();
                 ret = dev_ifsioc_locked(net, ifr, cmd);
                 rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
                 return ret;
 
         case SIOCETHTOOL:
+                dev_load(net, ifr->ifr_name);
                 rtnl_lock();
                 ret = dev_ethtool(net, ifr);
                 rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
         case SIOCGMIIPHY:
         case SIOCGMIIREG:
         case SIOCSIFNAME:
+                dev_load(net, ifr->ifr_name);
                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                         return -EPERM;
                 rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
                 /* fall through */
         case SIOCBONDSLAVEINFOQUERY:
         case SIOCBONDINFOQUERY:
+                dev_load(net, ifr->ifr_name);
                 rtnl_lock();
                 ret = dev_ifsioc(net, ifr, cmd);
                 rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
                     cmd == SIOCGHWTSTAMP ||
                     (cmd >= SIOCDEVPRIVATE &&
                      cmd <= SIOCDEVPRIVATE + 15)) {
+                        dev_load(net, ifr->ifr_name);
                         rtnl_lock();
                         ret = dev_ifsioc(net, ifr, cmd);
                         rtnl_unlock();
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 18d385ed8237..effd4848c2b4 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
                 goto nla_put_failure;
 
         if (table->resource_valid) {
-                nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
-                                  table->resource_id, DEVLINK_ATTR_PAD);
-                nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
-                                  table->resource_units, DEVLINK_ATTR_PAD);
+                if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+                                      table->resource_id, DEVLINK_ATTR_PAD) ||
+                    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+                                      table->resource_units, DEVLINK_ATTR_PAD))
+                        goto nla_put_failure;
         }
         if (devlink_dpipe_matches_put(table, skb))
                 goto nla_put_failure;
@@ -1797,7 +1798,7 @@ send_done:
         if (!nlh) {
                 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
                 if (err)
-                        goto err_skb_send_alloc;
+                        return err;
                 goto send_done;
         }
 
@@ -1806,7 +1807,6 @@ send_done:
 nla_put_failure:
         err = -EMSGSIZE;
 err_table_put:
-err_skb_send_alloc:
         genlmsg_cancel(skb, hdr);
         nlmsg_free(skb);
         return err;
@@ -2072,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
                                              table->counters_enabled,
                                              &dump_ctx);
         if (err)
-                goto err_entries_dump;
+                return err;
 
 send_done:
         nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2080,16 +2080,10 @@ send_done:
         if (!nlh) {
                 err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
                 if (err)
-                        goto err_skb_send_alloc;
+                        return err;
                 goto send_done;
         }
         return genlmsg_reply(dump_ctx.skb, info);
-
-err_entries_dump:
-err_skb_send_alloc:
-        genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
-        nlmsg_free(dump_ctx.skb);
-        return err;
 }
 
 static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2228,7 +2222,7 @@ send_done:
         if (!nlh) {
                 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
                 if (err)
-                        goto err_skb_send_alloc;
+                        return err;
                 goto send_done;
         }
         return genlmsg_reply(skb, info);
@@ -2236,7 +2230,6 @@ send_done:
 nla_put_failure:
         err = -EMSGSIZE;
 err_table_put:
-err_skb_send_alloc:
         genlmsg_cancel(skb, hdr);
         nlmsg_free(skb);
         return err;
@@ -2332,7 +2325,7 @@ devlink_resource_validate_children(struct devlink_resource *resource)
         list_for_each_entry(child_resource, &resource->resource_list, list)
                 parts_size += child_resource->size_new;
 
-        if (parts_size > resource->size)
+        if (parts_size > resource->size_new)
                 size_valid = false;
 out:
         resource->size_valid = size_valid;
@@ -2372,20 +2365,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
         return 0;
 }
 
-static void
+static int
 devlink_resource_size_params_put(struct devlink_resource *resource,
                                  struct sk_buff *skb)
 {
         struct devlink_resource_size_params *size_params;
 
-        size_params = resource->size_params;
-        nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
-                          size_params->size_granularity, DEVLINK_ATTR_PAD);
-        nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
-                          size_params->size_max, DEVLINK_ATTR_PAD);
-        nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
-                          size_params->size_min, DEVLINK_ATTR_PAD);
-        nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+        size_params = &resource->size_params;
+        if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+                              size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+            nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+                              size_params->size_max, DEVLINK_ATTR_PAD) ||
+            nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+                              size_params->size_min, DEVLINK_ATTR_PAD) ||
+            nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+                return -EMSGSIZE;
+        return 0;
 }
 
 static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -2409,10 +2404,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
         nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
                           resource->size_new, DEVLINK_ATTR_PAD);
         if (resource->resource_ops && resource->resource_ops->occ_get)
-                nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
-                                  resource->resource_ops->occ_get(devlink),
-                                  DEVLINK_ATTR_PAD);
-        devlink_resource_size_params_put(resource, skb);
+                if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+                                      resource->resource_ops->occ_get(devlink),
+                                      DEVLINK_ATTR_PAD))
+                        goto nla_put_failure;
+        if (devlink_resource_size_params_put(resource, skb))
+                goto nla_put_failure;
         if (list_empty(&resource->resource_list))
                 goto out;
 
@@ -3151,7 +3148,7 @@ int devlink_resource_register(struct devlink *devlink,
                               u64 resource_size,
                               u64 resource_id,
                               u64 parent_resource_id,
-                              struct devlink_resource_size_params *size_params,
+                              const struct devlink_resource_size_params *size_params,
                               const struct devlink_resource_ops *resource_ops)
 {
         struct devlink_resource *resource;
@@ -3194,7 +3191,8 @@ int devlink_resource_register(struct devlink *devlink,
         resource->id = resource_id;
         resource->resource_ops = resource_ops;
         resource->size_valid = true;
-        resource->size_params = size_params;
+        memcpy(&resource->size_params, size_params,
+               sizeof(resource->size_params));
         INIT_LIST_HEAD(&resource->resource_list);
         list_add_tail(&resource->list, resource_list);
 out:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 494e6a5d7306..3f89c76d5c24 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
 {
         struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+        int rc;
 
         if (!dev->ethtool_ops->get_fecparam)
                 return -EOPNOTSUPP;
 
-        dev->ethtool_ops->get_fecparam(dev, &fecparam);
+        rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
+        if (rc)
+                return rc;
 
         if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
                 return -EFAULT;
diff --git a/net/core/filter.c b/net/core/filter.c
index 0c121adbdbaa..48aa7c7320db 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
         u32 off = skb_mac_header_len(skb);
         int ret;
 
+        /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+        if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+                return -ENOTSUPP;
+
         ret = skb_cow(skb, len_diff);
         if (unlikely(ret < 0))
                 return ret;
@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
                 return ret;
 
         if (skb_is_gso(skb)) {
+                struct skb_shared_info *shinfo = skb_shinfo(skb);
+
                 /* SKB_GSO_TCPV4 needs to be changed into
                  * SKB_GSO_TCPV6.
                  */
-                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
-                        skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
-                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+                if (shinfo->gso_type & SKB_GSO_TCPV4) {
+                        shinfo->gso_type &= ~SKB_GSO_TCPV4;
+                        shinfo->gso_type |= SKB_GSO_TCPV6;
                 }
 
                 /* Due to IPv6 header, MSS needs to be downgraded. */
-                skb_shinfo(skb)->gso_size -= len_diff;
+                skb_decrease_gso_size(shinfo, len_diff);
                 /* Header must be checked, and gso_segs recomputed. */
-                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-                skb_shinfo(skb)->gso_segs = 0;
+                shinfo->gso_type |= SKB_GSO_DODGY;
+                shinfo->gso_segs = 0;
         }
 
         skb->protocol = htons(ETH_P_IPV6);
@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
         u32 off = skb_mac_header_len(skb);
         int ret;
 
+        /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+        if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+                return -ENOTSUPP;
+
         ret = skb_unclone(skb, GFP_ATOMIC);
         if (unlikely(ret < 0))
                 return ret;
@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
                 return ret;
 
         if (skb_is_gso(skb)) {
+                struct skb_shared_info *shinfo = skb_shinfo(skb);
+
                 /* SKB_GSO_TCPV6 needs to be changed into
                  * SKB_GSO_TCPV4.
                  */
-                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
-                        skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
-                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+                if (shinfo->gso_type & SKB_GSO_TCPV6) {
+                        shinfo->gso_type &= ~SKB_GSO_TCPV6;
+                        shinfo->gso_type |= SKB_GSO_TCPV4;
                 }
 
                 /* Due to IPv4 header, MSS can be upgraded. */
-                skb_shinfo(skb)->gso_size += len_diff;
+                skb_increase_gso_size(shinfo, len_diff);
                 /* Header must be checked, and gso_segs recomputed. */
-                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-                skb_shinfo(skb)->gso_segs = 0;
+                shinfo->gso_type |= SKB_GSO_DODGY;
+                shinfo->gso_segs = 0;
         }
 
         skb->protocol = htons(ETH_P_IP);
@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
         u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
         int ret;
 
+        /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2259 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2260 return -ENOTSUPP;
2261
2246 ret = skb_cow(skb, len_diff); 2262 ret = skb_cow(skb, len_diff);
2247 if (unlikely(ret < 0)) 2263 if (unlikely(ret < 0))
2248 return ret; 2264 return ret;
@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2252 return ret; 2268 return ret;
2253 2269
2254 if (skb_is_gso(skb)) { 2270 if (skb_is_gso(skb)) {
2271 struct skb_shared_info *shinfo = skb_shinfo(skb);
2272
2255 /* Due to header grow, MSS needs to be downgraded. */ 2273 /* Due to header grow, MSS needs to be downgraded. */
2256 skb_shinfo(skb)->gso_size -= len_diff; 2274 skb_decrease_gso_size(shinfo, len_diff);
2257 /* Header must be checked, and gso_segs recomputed. */ 2275 /* Header must be checked, and gso_segs recomputed. */
2258 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2276 shinfo->gso_type |= SKB_GSO_DODGY;
2259 skb_shinfo(skb)->gso_segs = 0; 2277 shinfo->gso_segs = 0;
2260 } 2278 }
2261 2279
2262 return 0; 2280 return 0;
@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2267 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2285 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2268 int ret; 2286 int ret;
2269 2287
2288 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2289 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2290 return -ENOTSUPP;
2291
2270 ret = skb_unclone(skb, GFP_ATOMIC); 2292 ret = skb_unclone(skb, GFP_ATOMIC);
2271 if (unlikely(ret < 0)) 2293 if (unlikely(ret < 0))
2272 return ret; 2294 return ret;
@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2276 return ret; 2298 return ret;
2277 2299
2278 if (skb_is_gso(skb)) { 2300 if (skb_is_gso(skb)) {
2301 struct skb_shared_info *shinfo = skb_shinfo(skb);
2302
2279 /* Due to header shrink, MSS can be upgraded. */ 2303 /* Due to header shrink, MSS can be upgraded. */
2280 skb_shinfo(skb)->gso_size += len_diff; 2304 skb_increase_gso_size(shinfo, len_diff);
2281 /* Header must be checked, and gso_segs recomputed. */ 2305 /* Header must be checked, and gso_segs recomputed. */
2282 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2306 shinfo->gso_type |= SKB_GSO_DODGY;
2283 skb_shinfo(skb)->gso_segs = 0; 2307 shinfo->gso_segs = 0;
2284 } 2308 }
2285 2309
2286 return 0; 2310 return 0;
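All four bpf_skb_* helpers above gain the same two-part fix: bail out early for SCTP GSO packets, whose gso_size is the GSO_BY_FRAGS sentinel rather than a real MSS, and route MSS arithmetic through the new skb_decrease_gso_size()/skb_increase_gso_size() helpers. A condensed sketch of that shared shape, with an illustrative function name:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_adjust_gso(struct sk_buff *skb, u32 len_diff, bool grow)
{
        /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
        if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
                return -ENOTSUPP;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                if (grow)       /* bigger headers leave less room for payload */
                        skb_decrease_gso_size(shinfo, len_diff);
                else
                        skb_increase_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }
        return 0;
}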
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 09bd89c90a71..1e7acdc30732 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4179,7 +4179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4179 4179
4180 skb_queue_tail(&sk->sk_error_queue, skb); 4180 skb_queue_tail(&sk->sk_error_queue, skb);
4181 if (!sock_flag(sk, SOCK_DEAD)) 4181 if (!sock_flag(sk, SOCK_DEAD))
4182 sk->sk_data_ready(sk); 4182 sk->sk_error_report(sk);
4183 return 0; 4183 return 0;
4184} 4184}
4185EXPORT_SYMBOL(sock_queue_err_skb); 4185EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
4891 * 4891 *
4892 * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 4892 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4893 */ 4893 */
4894unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 4894static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4895{ 4895{
4896 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4896 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4897 unsigned int thlen = 0; 4897 unsigned int thlen = 0;
@@ -4904,7 +4904,7 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4904 thlen += inner_tcp_hdrlen(skb); 4904 thlen += inner_tcp_hdrlen(skb);
4905 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4905 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4906 thlen = tcp_hdrlen(skb); 4906 thlen = tcp_hdrlen(skb);
4907 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) { 4907 } else if (unlikely(skb_is_gso_sctp(skb))) {
4908 thlen = sizeof(struct sctphdr); 4908 thlen = sizeof(struct sctphdr);
4909 } 4909 }
4910 /* UFO sets gso_size to the size of the fragmentation 4910 /* UFO sets gso_size to the size of the fragmentation
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4913 */ 4913 */
4914 return thlen + shinfo->gso_size; 4914 return thlen + shinfo->gso_size;
4915} 4915}
4916EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 4916
4917/**
4918 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4919 *
4920 * @skb: GSO skb
4921 *
4922 * skb_gso_network_seglen is used to determine the real size of the
4923 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4924 *
4925 * The MAC/L2 header is not accounted for.
4926 */
4927static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4928{
4929 unsigned int hdr_len = skb_transport_header(skb) -
4930 skb_network_header(skb);
4931
4932 return hdr_len + skb_gso_transport_seglen(skb);
4933}
4934
4935/**
4936 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4937 *
4938 * @skb: GSO skb
4939 *
4940 * skb_gso_mac_seglen is used to determine the real size of the
4941 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4942 * headers (TCP/UDP).
4943 */
4944static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4945{
4946 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4947
4948 return hdr_len + skb_gso_transport_seglen(skb);
4949}
4917 4950
4918/** 4951/**
4919 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 4952 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
4955} 4988}
4956 4989
4957/** 4990/**
4958 * skb_gso_validate_mtu - Return in case such skb fits a given MTU 4991 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
4959 * 4992 *
4960 * @skb: GSO skb 4993 * @skb: GSO skb
4961 * @mtu: MTU to validate against 4994 * @mtu: MTU to validate against
4962 * 4995 *
4963 * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU 4996 * skb_gso_validate_network_len validates if a given skb will fit a
4964 * once split. 4997 * wanted MTU once split. It considers L3 headers, L4 headers, and the
4998 * payload.
4965 */ 4999 */
4966bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) 5000bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
4967{ 5001{
4968 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5002 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
4969} 5003}
4970EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); 5004EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
4971 5005
4972/** 5006/**
4973 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 5007 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
@@ -4986,13 +5020,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
4986 5020
4987static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5021static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4988{ 5022{
5023 int mac_len;
5024
4989 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5025 if (skb_cow(skb, skb_headroom(skb)) < 0) {
4990 kfree_skb(skb); 5026 kfree_skb(skb);
4991 return NULL; 5027 return NULL;
4992 } 5028 }
4993 5029
4994 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 5030 mac_len = skb->data - skb_mac_header(skb);
4995 2 * ETH_ALEN); 5031 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5032 mac_len - VLAN_HLEN - ETH_TLEN);
4996 skb->mac_header += VLAN_HLEN; 5033 skb->mac_header += VLAN_HLEN;
4997 return skb; 5034 return skb;
4998} 5035}
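Three things happen in the skbuff.c hunks: sock_queue_err_skb() now wakes pollers through sk_error_report(), so error-queue events surface as EPOLLERR instead of fake readability; skb_gso_transport_seglen() becomes static, with the network- and MAC-level seglen helpers moved in beside it; and skb_gso_validate_mtu() is renamed skb_gso_validate_network_len(), turning the exported API into a yes/no question. A caller-side sketch of the renamed API (the wrapper function is illustrative):

#include <linux/skbuff.h>

/* "Will each resegmented packet, L3 headers included, fit this MTU?"
 * Callers no longer open-code skb_gso_network_seglen() comparisons.
 */
static bool example_gso_fits(const struct sk_buff *skb, unsigned int mtu)
{
        if (!skb_is_gso(skb))
                return skb->len <= mtu;

        return skb_gso_validate_network_len(skb, mtu);
}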
diff --git a/net/core/sock.c b/net/core/sock.c
index c501499a04fe..85b0b64e7f9d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3261,6 +3261,27 @@ void proto_unregister(struct proto *prot)
3261} 3261}
3262EXPORT_SYMBOL(proto_unregister); 3262EXPORT_SYMBOL(proto_unregister);
3263 3263
3264int sock_load_diag_module(int family, int protocol)
3265{
3266 if (!protocol) {
3267 if (!sock_is_registered(family))
3268 return -ENOENT;
3269
3270 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3271 NETLINK_SOCK_DIAG, family);
3272 }
3273
3274#ifdef CONFIG_INET
3275 if (family == AF_INET &&
3276 !rcu_access_pointer(inet_protos[protocol]))
3277 return -ENOENT;
3278#endif
3279
3280 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3281 NETLINK_SOCK_DIAG, family, protocol);
3282}
3283EXPORT_SYMBOL(sock_load_diag_module);
3284
3264#ifdef CONFIG_PROC_FS 3285#ifdef CONFIG_PROC_FS
3265static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 3286static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3266 __acquires(proto_list_mutex) 3287 __acquires(proto_list_mutex)
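sock_load_diag_module() centralizes the "net-pf-%d-proto-%d-type-%d[-%d]" request_module() dance and, crucially, refuses to load a diag module for a family or protocol that was never registered. Illustrative callers of the new helper; the alias strings noted in the comments are the ones the format strings expand to:

#include <linux/in.h>
#include <linux/sock_diag.h>
#include <linux/socket.h>

static void example_load_diag(void)
{
        /* Family only: expands to "net-pf-16-proto-4-type-10"
         * (PF_NETLINK/NETLINK_SOCK_DIAG + AF_INET6); returns -ENOENT
         * if AF_INET6 itself isn't registered.
         */
        sock_load_diag_module(AF_INET6, 0);

        /* Family + protocol: "net-pf-16-proto-4-type-2-6", i.e. the
         * tcp_diag module for AF_INET/IPPROTO_TCP; refused if no
         * inet protocol handler exists for that slot.
         */
        sock_load_diag_module(AF_INET, IPPROTO_TCP);
}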
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 146b50e30659..c37b5be7c5e4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 if (sock_diag_handlers[req->sdiag_family] == NULL) 222 if (sock_diag_handlers[req->sdiag_family] == NULL)
223 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 223 sock_load_diag_module(req->sdiag_family, 0);
224 NETLINK_SOCK_DIAG, req->sdiag_family);
225 224
226 mutex_lock(&sock_diag_table_mutex); 225 mutex_lock(&sock_diag_table_mutex);
227 hndl = sock_diag_handlers[req->sdiag_family]; 226 hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
247 case TCPDIAG_GETSOCK: 246 case TCPDIAG_GETSOCK:
248 case DCCPDIAG_GETSOCK: 247 case DCCPDIAG_GETSOCK:
249 if (inet_rcv_compat == NULL) 248 if (inet_rcv_compat == NULL)
250 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 249 sock_load_diag_module(AF_INET, 0);
251 NETLINK_SOCK_DIAG, AF_INET);
252 250
253 mutex_lock(&sock_diag_table_mutex); 251 mutex_lock(&sock_diag_table_mutex);
254 if (inet_rcv_compat != NULL) 252 if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
281 case SKNLGRP_INET_TCP_DESTROY: 279 case SKNLGRP_INET_TCP_DESTROY:
282 case SKNLGRP_INET_UDP_DESTROY: 280 case SKNLGRP_INET_UDP_DESTROY:
283 if (!sock_diag_handlers[AF_INET]) 281 if (!sock_diag_handlers[AF_INET])
284 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 282 sock_load_diag_module(AF_INET, 0);
285 NETLINK_SOCK_DIAG, AF_INET);
286 break; 283 break;
287 case SKNLGRP_INET6_TCP_DESTROY: 284 case SKNLGRP_INET6_TCP_DESTROY:
288 case SKNLGRP_INET6_UDP_DESTROY: 285 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 286 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 287 sock_load_diag_module(AF_INET6, 0);
291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 288 break;
293 } 289 }
294 return 0; 290 return 0;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 15bdc002d90c..84cd4e3fd01b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -794,6 +794,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
794 if (skb == NULL) 794 if (skb == NULL)
795 goto out_release; 795 goto out_release;
796 796
797 if (sk->sk_state == DCCP_CLOSED) {
798 rc = -ENOTCONN;
799 goto out_discard;
800 }
801
797 skb_reserve(skb, sk->sk_prot->max_header); 802 skb_reserve(skb, sk->sk_prot->max_header);
798 rc = memcpy_from_msg(skb_put(skb, len), msg, len); 803 rc = memcpy_from_msg(skb_put(skb, len), msg, len);
799 if (rc != 0) 804 if (rc != 0)
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index cb54b81d0bd9..42a7b85b84e1 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds,
194 ds->ports[i].dn = cd->port_dn[i]; 194 ds->ports[i].dn = cd->port_dn[i];
195 ds->ports[i].cpu_dp = dst->cpu_dp; 195 ds->ports[i].cpu_dp = dst->cpu_dp;
196 196
197 if (dsa_is_user_port(ds, i)) 197 if (!dsa_is_user_port(ds, i))
198 continue; 198 continue;
199 199
200 ret = dsa_slave_create(&ds->ports[i]); 200 ret = dsa_slave_create(&ds->ports[i]);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 974765b7d92a..e9f0489e4229 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void)
206static int lowpan_device_event(struct notifier_block *unused, 206static int lowpan_device_event(struct notifier_block *unused,
207 unsigned long event, void *ptr) 207 unsigned long event, void *ptr)
208{ 208{
209 struct net_device *wdev = netdev_notifier_info_to_dev(ptr); 209 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
210 struct wpan_dev *wpan_dev;
210 211
211 if (wdev->type != ARPHRD_IEEE802154) 212 if (ndev->type != ARPHRD_IEEE802154)
213 return NOTIFY_DONE;
214 wpan_dev = ndev->ieee802154_ptr;
215 if (!wpan_dev)
212 return NOTIFY_DONE; 216 return NOTIFY_DONE;
213 217
214 switch (event) { 218 switch (event) {
@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused,
217 * also delete possible lowpan interfaces which belongs 221 * also delete possible lowpan interfaces which belongs
218 * to the wpan interface. 222 * to the wpan interface.
219 */ 223 */
220 if (wdev->ieee802154_ptr->lowpan_dev) 224 if (wpan_dev->lowpan_dev)
221 lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); 225 lowpan_dellink(wpan_dev->lowpan_dev, NULL);
222 break; 226 break;
223 default: 227 default:
224 return NOTIFY_DONE; 228 return NOTIFY_DONE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a383f299ce24..4e5bc4b2f14e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -53,8 +53,7 @@ static DEFINE_MUTEX(inet_diag_table_mutex);
53static const struct inet_diag_handler *inet_diag_lock_handler(int proto) 53static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
54{ 54{
55 if (!inet_diag_table[proto]) 55 if (!inet_diag_table[proto])
56 request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 56 sock_load_diag_module(AF_INET, proto);
57 NETLINK_SOCK_DIAG, AF_INET, proto);
58 57
59 mutex_lock(&inet_diag_table_mutex); 58 mutex_lock(&inet_diag_table_mutex);
60 if (!inet_diag_table[proto]) 59 if (!inet_diag_table[proto])
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 26a3d0315728..e8ec28999f5c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
119 119
120static bool inet_fragq_should_evict(const struct inet_frag_queue *q) 120static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
121{ 121{
122 if (!hlist_unhashed(&q->list_evictor))
123 return false;
124
122 return q->net->low_thresh == 0 || 125 return q->net->low_thresh == 0 ||
123 frag_mem_limit(q->net) >= q->net->low_thresh; 126 frag_mem_limit(q->net) >= q->net->low_thresh;
124} 127}
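The fragment-queue change is a small guard: a queue whose list_evictor node is already hashed is in the middle of being evicted and must not be picked again. A stripped-down sketch of the idiom, with illustrative types:

#include <linux/list.h>

struct victim {
        struct hlist_node evict_node;   /* hashed == already queued */
};

static bool should_evict(const struct victim *v, unsigned long low_thresh,
                         unsigned long mem_used)
{
        if (!hlist_unhashed(&v->evict_node))
                return false;   /* eviction already in progress */

        return low_thresh == 0 || mem_used >= low_thresh;
}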
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 2dd21c3281a1..b54b948b0596 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
55 if (skb->ignore_df) 55 if (skb->ignore_df)
56 return false; 56 return false;
57 57
58 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 58 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
59 return false; 59 return false;
60 60
61 return true; 61 return true;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 45d97e9b2759..0901de42ed85 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -970,9 +970,6 @@ static void __gre_tunnel_init(struct net_device *dev)
970 970
971 t_hlen = tunnel->hlen + sizeof(struct iphdr); 971 t_hlen = tunnel->hlen + sizeof(struct iphdr);
972 972
973 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
974 dev->mtu = ETH_DATA_LEN - t_hlen - 4;
975
976 dev->features |= GRE_FEATURES; 973 dev->features |= GRE_FEATURES;
977 dev->hw_features |= GRE_FEATURES; 974 dev->hw_features |= GRE_FEATURES;
978 975
@@ -1290,8 +1287,6 @@ static int erspan_tunnel_init(struct net_device *dev)
1290 erspan_hdr_len(tunnel->erspan_ver); 1287 erspan_hdr_len(tunnel->erspan_ver);
1291 t_hlen = tunnel->hlen + sizeof(struct iphdr); 1288 t_hlen = tunnel->hlen + sizeof(struct iphdr);
1292 1289
1293 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
1294 dev->mtu = ETH_DATA_LEN - t_hlen - 4;
1295 dev->features |= GRE_FEATURES; 1290 dev->features |= GRE_FEATURES;
1296 dev->hw_features |= GRE_FEATURES; 1291 dev->hw_features |= GRE_FEATURES;
1297 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1292 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e8e675be60ec..66340ab750e6 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
248 248
249 /* common case: seglen is <= mtu 249 /* common case: seglen is <= mtu
250 */ 250 */
251 if (skb_gso_validate_mtu(skb, mtu)) 251 if (skb_gso_validate_network_len(skb, mtu))
252 return ip_finish_output2(net, sk, skb); 252 return ip_finish_output2(net, sk, skb);
253 253
254 /* Slowpath - GSO segment length exceeds the egress MTU. 254 /* Slowpath - GSO segment length exceeds the egress MTU.
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9c41a0cef1a5..74c962b9b09c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
258 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); 258 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
259 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) 259 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
260 return -EINVAL; 260 return -EINVAL;
261 ipc->oif = src_info->ipi6_ifindex; 261 if (src_info->ipi6_ifindex)
262 ipc->oif = src_info->ipi6_ifindex;
262 ipc->addr = src_info->ipi6_addr.s6_addr32[3]; 263 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
263 continue; 264 continue;
264 } 265 }
@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
288 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) 289 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
289 return -EINVAL; 290 return -EINVAL;
290 info = (struct in_pktinfo *)CMSG_DATA(cmsg); 291 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
291 ipc->oif = info->ipi_ifindex; 292 if (info->ipi_ifindex)
293 ipc->oif = info->ipi_ifindex;
292 ipc->addr = info->ipi_spec_dst.s_addr; 294 ipc->addr = info->ipi_spec_dst.s_addr;
293 break; 295 break;
294 } 296 }
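Both pktinfo paths now treat an ifindex of 0 as "no opinion", so a control message without an interface no longer clobbers an output interface chosen earlier. A userspace sketch of attaching an IP_PKTINFO cmsg (the helper name is local to this example; cbuf must be at least CMSG_SPACE(sizeof(struct in_pktinfo)) bytes):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static void fill_pktinfo(struct msghdr *msg, void *cbuf, size_t cbuflen,
                         int ifindex, struct in_addr src)
{
        struct in_pktinfo pi = { .ipi_ifindex = ifindex,        /* 0 == keep oif */
                                 .ipi_spec_dst = src };
        struct cmsghdr *cm;

        msg->msg_control = cbuf;
        msg->msg_controllen = cbuflen;
        cm = CMSG_FIRSTHDR(msg);
        cm->cmsg_level = IPPROTO_IP;
        cm->cmsg_type = IP_PKTINFO;
        cm->cmsg_len = CMSG_LEN(sizeof(pi));
        memcpy(CMSG_DATA(cm), &pi, sizeof(pi));
        msg->msg_controllen = cm->cmsg_len;
}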
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index d786a8441bce..6d21068f9b55 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -710,16 +710,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
710 } 710 }
711 } 711 }
712 712
713 if (tunnel->fwmark) { 713 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
714 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, 714 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
715 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, 715 tunnel->fwmark);
716 tunnel->fwmark);
717 }
718 else {
719 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
720 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
721 skb->mark);
722 }
723 716
724 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) 717 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
725 goto tx_error; 718 goto tx_error;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 4b02ab39ebc5..8a8ae61cea71 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
232 c->hash_mode = i->hash_mode; 232 c->hash_mode = i->hash_mode;
233 c->hash_initval = i->hash_initval; 233 c->hash_initval = i->hash_initval;
234 refcount_set(&c->refcount, 1); 234 refcount_set(&c->refcount, 1);
235 refcount_set(&c->entries, 1);
236 235
237 spin_lock_bh(&cn->lock); 236 spin_lock_bh(&cn->lock);
238 if (__clusterip_config_find(net, ip)) { 237 if (__clusterip_config_find(net, ip)) {
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
263 262
264 c->notifier.notifier_call = clusterip_netdev_event; 263 c->notifier.notifier_call = clusterip_netdev_event;
265 err = register_netdevice_notifier(&c->notifier); 264 err = register_netdevice_notifier(&c->notifier);
266 if (!err) 265 if (!err) {
266 refcount_set(&c->entries, 1);
267 return c; 267 return c;
268 }
268 269
269#ifdef CONFIG_PROC_FS 270#ifdef CONFIG_PROC_FS
270 proc_remove(c->pde); 271 proc_remove(c->pde);
@@ -273,7 +274,7 @@ err:
273 spin_lock_bh(&cn->lock); 274 spin_lock_bh(&cn->lock);
274 list_del_rcu(&c->list); 275 list_del_rcu(&c->list);
275 spin_unlock_bh(&cn->lock); 276 spin_unlock_bh(&cn->lock);
276 kfree(c); 277 clusterip_config_put(c);
277 278
278 return ERR_PTR(err); 279 return ERR_PTR(err);
279} 280}
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
496 return PTR_ERR(config); 497 return PTR_ERR(config);
497 } 498 }
498 } 499 }
499 cipinfo->config = config;
500 500
501 ret = nf_ct_netns_get(par->net, par->family); 501 ret = nf_ct_netns_get(par->net, par->family);
502 if (ret < 0) 502 if (ret < 0) {
503 pr_info("cannot load conntrack support for proto=%u\n", 503 pr_info("cannot load conntrack support for proto=%u\n",
504 par->family); 504 par->family);
505 clusterip_config_entry_put(par->net, config);
506 clusterip_config_put(config);
507 return ret;
508 }
505 509
506 if (!par->net->xt.clusterip_deprecated_warning) { 510 if (!par->net->xt.clusterip_deprecated_warning) {
507 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " 511 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
509 par->net->xt.clusterip_deprecated_warning = true; 513 par->net->xt.clusterip_deprecated_warning = true;
510 } 514 }
511 515
516 cipinfo->config = config;
512 return ret; 517 return ret;
513} 518}
514 519
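The CLUSTERIP fix is about construction order: the entries refcount is armed only after the netdevice notifier registers, earlier failure paths unwind through the regular put instead of a raw kfree(), and cipinfo->config is published only once everything (including conntrack netns references) succeeded. A generic sketch of that rule, with illustrative types and a stand-in for the last registration step:

#include <linux/refcount.h>
#include <linux/slab.h>

struct cfg {
        refcount_t refcount;    /* memory lifetime */
        refcount_t entries;     /* armed last, once fully registered */
};

static void cfg_put(struct cfg *c)
{
        if (refcount_dec_and_test(&c->refcount))
                kfree(c);
}

static struct cfg *cfg_create(int (*register_last)(struct cfg *))
{
        struct cfg *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;
        refcount_set(&c->refcount, 1);

        if (!register_last(c)) {
                refcount_set(&c->entries, 1);   /* go live only now */
                return c;
        }

        cfg_put(c);     /* same path regular teardown uses */
        return NULL;
}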
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index 25d2975da156..0cd46bffa469 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
111 default: 111 default:
112 return -1; 112 return -1;
113 } 113 }
114 csum_replace4(&iph->check, addr, new_addr);
114 115
115 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); 116 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
116} 117}
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
185 if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) 186 if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
186 return false; 187 return false;
187 188
188 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 189 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
189 return false; 190 return false;
190 191
191 return true; 192 return true;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a4f44d815a61..299e247b2032 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
128static int ip_rt_error_cost __read_mostly = HZ; 128static int ip_rt_error_cost __read_mostly = HZ;
129static int ip_rt_error_burst __read_mostly = 5 * HZ; 129static int ip_rt_error_burst __read_mostly = 5 * HZ;
130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 131static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132static int ip_rt_min_advmss __read_mostly = 256; 132static int ip_rt_min_advmss __read_mostly = 256;
133 133
134static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 134static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
135
135/* 136/*
136 * Interface to generic destination cache. 137 * Interface to generic destination cache.
137 */ 138 */
@@ -633,6 +634,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
633static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) 634static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
634{ 635{
635 rt->rt_pmtu = fnhe->fnhe_pmtu; 636 rt->rt_pmtu = fnhe->fnhe_pmtu;
637 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
636 rt->dst.expires = fnhe->fnhe_expires; 638 rt->dst.expires = fnhe->fnhe_expires;
637 639
638 if (fnhe->fnhe_gw) { 640 if (fnhe->fnhe_gw) {
@@ -643,7 +645,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
643} 645}
644 646
645static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, 647static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
646 u32 pmtu, unsigned long expires) 648 u32 pmtu, bool lock, unsigned long expires)
647{ 649{
648 struct fnhe_hash_bucket *hash; 650 struct fnhe_hash_bucket *hash;
649 struct fib_nh_exception *fnhe; 651 struct fib_nh_exception *fnhe;
@@ -680,8 +682,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
680 fnhe->fnhe_genid = genid; 682 fnhe->fnhe_genid = genid;
681 if (gw) 683 if (gw)
682 fnhe->fnhe_gw = gw; 684 fnhe->fnhe_gw = gw;
683 if (pmtu) 685 if (pmtu) {
684 fnhe->fnhe_pmtu = pmtu; 686 fnhe->fnhe_pmtu = pmtu;
687 fnhe->fnhe_mtu_locked = lock;
688 }
685 fnhe->fnhe_expires = max(1UL, expires); 689 fnhe->fnhe_expires = max(1UL, expires);
686 /* Update all cached dsts too */ 690 /* Update all cached dsts too */
687 rt = rcu_dereference(fnhe->fnhe_rth_input); 691 rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -705,6 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
705 fnhe->fnhe_daddr = daddr; 709 fnhe->fnhe_daddr = daddr;
706 fnhe->fnhe_gw = gw; 710 fnhe->fnhe_gw = gw;
707 fnhe->fnhe_pmtu = pmtu; 711 fnhe->fnhe_pmtu = pmtu;
712 fnhe->fnhe_mtu_locked = lock;
708 fnhe->fnhe_expires = expires; 713 fnhe->fnhe_expires = expires;
709 714
710 /* Exception created; mark the cached routes for the nexthop 715 /* Exception created; mark the cached routes for the nexthop
@@ -786,7 +791,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
786 struct fib_nh *nh = &FIB_RES_NH(res); 791 struct fib_nh *nh = &FIB_RES_NH(res);
787 792
788 update_or_create_fnhe(nh, fl4->daddr, new_gw, 793 update_or_create_fnhe(nh, fl4->daddr, new_gw,
789 0, jiffies + ip_rt_gc_timeout); 794 0, false,
795 jiffies + ip_rt_gc_timeout);
790 } 796 }
791 if (kill_route) 797 if (kill_route)
792 rt->dst.obsolete = DST_OBSOLETE_KILL; 798 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -930,14 +936,23 @@ out_put_peer:
930 936
931static int ip_error(struct sk_buff *skb) 937static int ip_error(struct sk_buff *skb)
932{ 938{
933 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
934 struct rtable *rt = skb_rtable(skb); 939 struct rtable *rt = skb_rtable(skb);
940 struct net_device *dev = skb->dev;
941 struct in_device *in_dev;
935 struct inet_peer *peer; 942 struct inet_peer *peer;
936 unsigned long now; 943 unsigned long now;
937 struct net *net; 944 struct net *net;
938 bool send; 945 bool send;
939 int code; 946 int code;
940 947
948 if (netif_is_l3_master(skb->dev)) {
949 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
950 if (!dev)
951 goto out;
952 }
953
954 in_dev = __in_dev_get_rcu(dev);
955
941 /* IP on this device is disabled. */ 956 /* IP on this device is disabled. */
942 if (!in_dev) 957 if (!in_dev)
943 goto out; 958 goto out;
@@ -999,15 +1014,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
999{ 1014{
1000 struct dst_entry *dst = &rt->dst; 1015 struct dst_entry *dst = &rt->dst;
1001 struct fib_result res; 1016 struct fib_result res;
1017 bool lock = false;
1002 1018
1003 if (dst_metric_locked(dst, RTAX_MTU)) 1019 if (ip_mtu_locked(dst))
1004 return; 1020 return;
1005 1021
1006 if (ipv4_mtu(dst) < mtu) 1022 if (ipv4_mtu(dst) < mtu)
1007 return; 1023 return;
1008 1024
1009 if (mtu < ip_rt_min_pmtu) 1025 if (mtu < ip_rt_min_pmtu) {
1026 lock = true;
1010 mtu = ip_rt_min_pmtu; 1027 mtu = ip_rt_min_pmtu;
1028 }
1011 1029
1012 if (rt->rt_pmtu == mtu && 1030 if (rt->rt_pmtu == mtu &&
1013 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 1031 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1017,7 +1035,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1017 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { 1035 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1018 struct fib_nh *nh = &FIB_RES_NH(res); 1036 struct fib_nh *nh = &FIB_RES_NH(res);
1019 1037
1020 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 1038 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
1021 jiffies + ip_rt_mtu_expires); 1039 jiffies + ip_rt_mtu_expires);
1022 } 1040 }
1023 rcu_read_unlock(); 1041 rcu_read_unlock();
@@ -1270,7 +1288,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1270 1288
1271 mtu = READ_ONCE(dst->dev->mtu); 1289 mtu = READ_ONCE(dst->dev->mtu);
1272 1290
1273 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1291 if (unlikely(ip_mtu_locked(dst))) {
1274 if (rt->rt_uses_gateway && mtu > 576) 1292 if (rt->rt_uses_gateway && mtu > 576)
1275 mtu = 576; 1293 mtu = 576;
1276 } 1294 }
@@ -1383,7 +1401,7 @@ struct uncached_list {
1383 1401
1384static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); 1402static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1385 1403
1386static void rt_add_uncached_list(struct rtable *rt) 1404void rt_add_uncached_list(struct rtable *rt)
1387{ 1405{
1388 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); 1406 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1389 1407
@@ -1394,14 +1412,8 @@ static void rt_add_uncached_list(struct rtable *rt)
1394 spin_unlock_bh(&ul->lock); 1412 spin_unlock_bh(&ul->lock);
1395} 1413}
1396 1414
1397static void ipv4_dst_destroy(struct dst_entry *dst) 1415void rt_del_uncached_list(struct rtable *rt)
1398{ 1416{
1399 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1400 struct rtable *rt = (struct rtable *) dst;
1401
1402 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1403 kfree(p);
1404
1405 if (!list_empty(&rt->rt_uncached)) { 1417 if (!list_empty(&rt->rt_uncached)) {
1406 struct uncached_list *ul = rt->rt_uncached_list; 1418 struct uncached_list *ul = rt->rt_uncached_list;
1407 1419
@@ -1411,6 +1423,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1411 } 1423 }
1412} 1424}
1413 1425
1426static void ipv4_dst_destroy(struct dst_entry *dst)
1427{
1428 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1429 struct rtable *rt = (struct rtable *)dst;
1430
1431 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1432 kfree(p);
1433
1434 rt_del_uncached_list(rt);
1435}
1436
1414void rt_flush_dev(struct net_device *dev) 1437void rt_flush_dev(struct net_device *dev)
1415{ 1438{
1416 struct net *net = dev_net(dev); 1439 struct net *net = dev_net(dev);
@@ -1506,6 +1529,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
1506 rt->rt_is_input = 0; 1529 rt->rt_is_input = 0;
1507 rt->rt_iif = 0; 1530 rt->rt_iif = 0;
1508 rt->rt_pmtu = 0; 1531 rt->rt_pmtu = 0;
1532 rt->rt_mtu_locked = 0;
1509 rt->rt_gateway = 0; 1533 rt->rt_gateway = 0;
1510 rt->rt_uses_gateway = 0; 1534 rt->rt_uses_gateway = 0;
1511 rt->rt_table_id = 0; 1535 rt->rt_table_id = 0;
@@ -2531,6 +2555,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2531 rt->rt_is_input = ort->rt_is_input; 2555 rt->rt_is_input = ort->rt_is_input;
2532 rt->rt_iif = ort->rt_iif; 2556 rt->rt_iif = ort->rt_iif;
2533 rt->rt_pmtu = ort->rt_pmtu; 2557 rt->rt_pmtu = ort->rt_pmtu;
2558 rt->rt_mtu_locked = ort->rt_mtu_locked;
2534 2559
2535 rt->rt_genid = rt_genid_ipv4(net); 2560 rt->rt_genid = rt_genid_ipv4(net);
2536 rt->rt_flags = ort->rt_flags; 2561 rt->rt_flags = ort->rt_flags;
@@ -2633,6 +2658,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2633 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2658 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2634 if (rt->rt_pmtu && expires) 2659 if (rt->rt_pmtu && expires)
2635 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2660 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2661 if (rt->rt_mtu_locked && expires)
2662 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2636 if (rtnetlink_put_metrics(skb, metrics) < 0) 2663 if (rtnetlink_put_metrics(skb, metrics) < 0)
2637 goto nla_put_failure; 2664 goto nla_put_failure;
2638 2665
@@ -2818,6 +2845,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
2818static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2845static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2819static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2846static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2820static int ip_rt_gc_elasticity __read_mostly = 8; 2847static int ip_rt_gc_elasticity __read_mostly = 8;
2848static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
2821 2849
2822static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 2850static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2823 void __user *buffer, 2851 void __user *buffer,
@@ -2933,7 +2961,8 @@ static struct ctl_table ipv4_route_table[] = {
2933 .data = &ip_rt_min_pmtu, 2961 .data = &ip_rt_min_pmtu,
2934 .maxlen = sizeof(int), 2962 .maxlen = sizeof(int),
2935 .mode = 0644, 2963 .mode = 0644,
2936 .proc_handler = proc_dointvec, 2964 .proc_handler = proc_dointvec_minmax,
2965 .extra1 = &ip_min_valid_pmtu,
2937 }, 2966 },
2938 { 2967 {
2939 .procname = "min_adv_mss", 2968 .procname = "min_adv_mss",
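The IPv4 route hunks thread one idea through fill_route_from_fnhe(), update_or_create_fnhe(), ipv4_mtu() and the sysctl table: when a learned PMTU is smaller than ip_rt_min_pmtu, the kernel raises it to the minimum and marks the exception locked (fnhe_mtu_locked/rt_mtu_locked) so the override is remembered, and min_pmtu itself can no longer be set below IPV4_MIN_MTU. A sketch of the clamp-and-lock step from __ip_rt_update_pmtu(), with plain parameters standing in for the sysctl and dst fields:

#include <linux/types.h>

static u32 clamp_learned_pmtu(u32 mtu, u32 min_pmtu, bool *lock)
{
        *lock = false;
        if (mtu < min_pmtu) {
                *lock = true;   /* remember we overrode the peer's value */
                mtu = min_pmtu;
        }
        return mtu;     /* stored in the fnhe together with *lock */
}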
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 48636aee23c3..8b8059b7af4d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3566,6 +3566,7 @@ int tcp_abort(struct sock *sk, int err)
3566 3566
3567 bh_unlock_sock(sk); 3567 bh_unlock_sock(sk);
3568 local_bh_enable(); 3568 local_bh_enable();
3569 tcp_write_queue_purge(sk);
3569 release_sock(sk); 3570 release_sock(sk);
3570 return 0; 3571 return 0;
3571} 3572}
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 7c843578f233..faddf4f9a707 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
6 * The algorithm is described in: 6 * The algorithm is described in:
7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm 7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
8 * for High-Speed Networks" 8 * for High-Speed Networks"
9 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf 9 * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
10 * 10 *
11 * Implemented from description in paper and ns-2 simulation. 11 * Implemented from description in paper and ns-2 simulation.
12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> 12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 575d3c1fb6e8..9a1b3c1c1c14 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1971,11 +1971,6 @@ void tcp_enter_loss(struct sock *sk)
1971 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous 1971 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1972 * loss recovery is underway except recurring timeout(s) on 1972 * loss recovery is underway except recurring timeout(s) on
1973 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing 1973 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
1974 *
1975 * In theory F-RTO can be used repeatedly during loss recovery.
1976 * In practice this interacts badly with broken middle-boxes that
1977 * falsely raise the receive window, which results in repeated
1978 * timeouts and stop-and-go behavior.
1979 */ 1974 */
1980 tp->frto = net->ipv4.sysctl_tcp_frto && 1975 tp->frto = net->ipv4.sysctl_tcp_frto &&
1981 (new_recovery || icsk->icsk_retransmits) && 1976 (new_recovery || icsk->icsk_retransmits) &&
@@ -2631,18 +2626,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
2631 tcp_try_undo_loss(sk, false)) 2626 tcp_try_undo_loss(sk, false))
2632 return; 2627 return;
2633 2628
2634 /* The ACK (s)acks some never-retransmitted data meaning not all
2635 * the data packets before the timeout were lost. Therefore we
2636 * undo the congestion window and state. This is essentially
2637 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
2638 * a retransmitted skb is permantly marked, we can apply such an
2639 * operation even if F-RTO was not used.
2640 */
2641 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2642 tcp_try_undo_loss(sk, tp->undo_marker))
2643 return;
2644
2645 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2629 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2630 /* Step 3.b. A timeout is spurious if not all data are
2631 * lost, i.e., never-retransmitted data are (s)acked.
2632 */
2633 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2634 tcp_try_undo_loss(sk, true))
2635 return;
2636
2646 if (after(tp->snd_nxt, tp->high_seq)) { 2637 if (after(tp->snd_nxt, tp->high_seq)) {
2647 if (flag & FLAG_DATA_SACKED || is_dupack) 2638 if (flag & FLAG_DATA_SACKED || is_dupack)
2648 tp->frto = 0; /* Step 3.a. loss was real */ 2639 tp->frto = 0; /* Step 3.a. loss was real */
@@ -4001,6 +3992,7 @@ void tcp_reset(struct sock *sk)
4001 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3992 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4002 smp_wmb(); 3993 smp_wmb();
4003 3994
3995 tcp_write_queue_purge(sk);
4004 tcp_done(sk); 3996 tcp_done(sk);
4005 3997
4006 if (!sock_flag(sk, SOCK_DEAD)) 3998 if (!sock_flag(sk, SOCK_DEAD))
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 71fc60f1b326..f7d944855f8e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk)
34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
35 sk->sk_error_report(sk); 35 sk->sk_error_report(sk);
36 36
37 tcp_write_queue_purge(sk);
37 tcp_done(sk); 38 tcp_done(sk);
38 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); 39 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
39} 40}
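tcp_abort(), tcp_reset() and tcp_write_err() all gain the same line: purge the write queue before (or alongside) tcp_done(), so unsent skbs don't outlive the aborted connection. The shared ordering, as a minimal sketch:

#include <net/tcp.h>

static void example_abort(struct sock *sk)
{
        tcp_write_queue_purge(sk);      /* drop queued, unsent data */
        tcp_done(sk);                   /* then declare the socket dead */
}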
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 63faeee989a9..2a9764bd1719 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 94b8702603bc..be980c195fc5 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
30 30
31 mtu = dst_mtu(skb_dst(skb)); 31 mtu = dst_mtu(skb_dst(skb));
32 if ((!skb_is_gso(skb) && skb->len > mtu) || 32 if ((!skb_is_gso(skb) && skb->len > mtu) ||
33 (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) { 33 (skb_is_gso(skb) &&
34 !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
34 skb->protocol = htons(ETH_P_IP); 35 skb->protocol = htons(ETH_P_IP);
35 36
36 if (skb->sk) 37 if (skb->sk)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 05017e2c849c..fbebda67ac1b 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -100,8 +100,10 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
100 xdst->u.rt.rt_gateway = rt->rt_gateway; 100 xdst->u.rt.rt_gateway = rt->rt_gateway;
101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; 101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
102 xdst->u.rt.rt_pmtu = rt->rt_pmtu; 102 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
103 xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
103 xdst->u.rt.rt_table_id = rt->rt_table_id; 104 xdst->u.rt.rt_table_id = rt->rt_table_id;
104 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); 105 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
106 rt_add_uncached_list(&xdst->u.rt);
105 107
106 return 0; 108 return 0;
107} 109}
@@ -241,7 +243,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
241 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 243 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
242 244
243 dst_destroy_metrics_generic(dst); 245 dst_destroy_metrics_generic(dst);
244 246 if (xdst->u.rt.rt_uncached_list)
247 rt_del_uncached_list(&xdst->u.rt);
245 xfrm_dst_destroy(xdst); 248 xfrm_dst_destroy(xdst);
246} 249}
247 250
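xfrm bundles clone their fields from the inner rtable; with rt_add_uncached_list()/rt_del_uncached_list() now exported (see the route.c hunk above), xfrm4_fill_dst() also enrolls the copy on the per-CPU uncached list and xfrm4_dst_destroy() removes it, so rt_flush_dev() can orphan these dsts when their device unregisters. A stripped-down sketch of that bookkeeping, with illustrative type names; real callers INIT_LIST_HEAD the node at allocation and initialize the per-CPU lists at boot:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct example_uncached_list {
        spinlock_t lock;
        struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct example_uncached_list, example_uncached);

struct example_rt {
        struct list_head node;
        struct example_uncached_list *owner;
};

static void example_add_uncached(struct example_rt *rt)
{
        struct example_uncached_list *ul = raw_cpu_ptr(&example_uncached);

        rt->owner = ul;
        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->node, &ul->head);
        spin_unlock_bh(&ul->lock);
}

static void example_del_uncached(struct example_rt *rt)
{
        if (!list_empty(&rt->node)) {
                struct example_uncached_list *ul = rt->owner;

                spin_lock_bh(&ul->lock);
                list_del(&rt->node);
                spin_unlock_bh(&ul->lock);
        }
}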
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index fbf08ce3f5ab..a9f7eca0b6a3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
147 struct inet_sock *inet = inet_sk(sk); 147 struct inet_sock *inet = inet_sk(sk);
148 struct ipv6_pinfo *np = inet6_sk(sk); 148 struct ipv6_pinfo *np = inet6_sk(sk);
149 struct in6_addr *daddr; 149 struct in6_addr *daddr, old_daddr;
150 __be32 fl6_flowlabel = 0;
151 __be32 old_fl6_flowlabel;
152 __be16 old_dport;
150 int addr_type; 153 int addr_type;
151 int err; 154 int err;
152 __be32 fl6_flowlabel = 0;
153 155
154 if (usin->sin6_family == AF_INET) { 156 if (usin->sin6_family == AF_INET) {
155 if (__ipv6_only_sock(sk)) 157 if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
238 } 240 }
239 } 241 }
240 242
243 /* save the current peer information before updating it */
244 old_daddr = sk->sk_v6_daddr;
245 old_fl6_flowlabel = np->flow_label;
246 old_dport = inet->inet_dport;
247
241 sk->sk_v6_daddr = *daddr; 248 sk->sk_v6_daddr = *daddr;
242 np->flow_label = fl6_flowlabel; 249 np->flow_label = fl6_flowlabel;
243
244 inet->inet_dport = usin->sin6_port; 250 inet->inet_dport = usin->sin6_port;
245 251
246 /* 252 /*
@@ -250,11 +256,12 @@ ipv4_connected:
250 256
251 err = ip6_datagram_dst_update(sk, true); 257 err = ip6_datagram_dst_update(sk, true);
252 if (err) { 258 if (err) {
253 /* Reset daddr and dport so that udp_v6_early_demux() 259 /* Restore the socket peer info, to keep it consistent with
254 * fails to find this socket 260 * the old socket state
255 */ 261 */
256 memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 262 sk->sk_v6_daddr = old_daddr;
257 inet->inet_dport = 0; 263 np->flow_label = old_fl6_flowlabel;
264 inet->inet_dport = old_dport;
258 goto out; 265 goto out;
259 } 266 }
260 267
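The datagram connect fix replaces "zero out daddr/dport on failure" with a real transaction: snapshot the peer fields, update them, and restore the exact prior state if ip6_datagram_dst_update() fails, keeping the socket consistent for lookups like udp_v6_early_demux(). The generic shape, as a self-contained sketch with illustrative types:

struct peer {
        unsigned char addr[16];
        unsigned int flowlabel;
        unsigned short port;
};

static int connect_peer(struct peer *cur, const struct peer *next,
                        int (*dst_update)(const struct peer *))
{
        struct peer old = *cur;         /* snapshot before updating */
        int err;

        *cur = *next;
        err = dst_update(cur);
        if (err)
                *cur = old;             /* full rollback, not a wipe */
        return err;
}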
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3c353125546d..1bbd0930063e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
126 struct ip6_tnl *t, *cand = NULL; 126 struct ip6_tnl *t, *cand = NULL;
127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
128 int dev_type = (gre_proto == htons(ETH_P_TEB) || 128 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
129 gre_proto == htons(ETH_P_ERSPAN)) ? 129 gre_proto == htons(ETH_P_ERSPAN) ||
130 gre_proto == htons(ETH_P_ERSPAN2)) ?
130 ARPHRD_ETHER : ARPHRD_IP6GRE; 131 ARPHRD_ETHER : ARPHRD_IP6GRE;
131 int score, cand_score = 4; 132 int score, cand_score = 4;
132 133
@@ -902,6 +903,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
902 truncate = true; 903 truncate = true;
903 } 904 }
904 905
906 if (skb_cow_head(skb, dev->needed_headroom))
907 goto tx_err;
908
905 t->parms.o_flags &= ~TUNNEL_KEY; 909 t->parms.o_flags &= ~TUNNEL_KEY;
906 IPCB(skb)->flags = 0; 910 IPCB(skb)->flags = 0;
907 911
@@ -944,6 +948,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
944 md->u.md2.dir, 948 md->u.md2.dir,
945 get_hwid(&md->u.md2), 949 get_hwid(&md->u.md2),
946 truncate, false); 950 truncate, false);
951 } else {
952 goto tx_err;
947 } 953 }
948 } else { 954 } else {
949 switch (skb->protocol) { 955 switch (skb->protocol) {
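ip6erspan_tunnel_xmit() now calls skb_cow_head() before mangling the frame, guaranteeing a writable head with room for the tunnel headers, and an unknown ERSPAN version takes the tx_err path instead of transmitting a half-built packet. The headroom idiom, sketched with an illustrative xmit function:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_cow_head(skb, dev->needed_headroom))
                goto tx_err;    /* couldn't get writable headroom */

        /* ... build and push the tunnel header here ... */
        return NETDEV_TX_OK;

tx_err:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}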
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 997c7f19ad62..a8a919520090 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
412 if (skb->ignore_df) 412 if (skb->ignore_df)
413 return false; 413 return false;
414 414
415 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 415 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
416 return false; 416 return false;
417 417
418 return true; 418 return true;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4b15fe928278..6e0f21eed88a 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1982{ 1982{
1983 struct net *net = dev_net(dev); 1983 struct net *net = dev_net(dev);
1984 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1984 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1985 struct ip6_tnl *nt, *t;
1986 struct ip_tunnel_encap ipencap; 1985 struct ip_tunnel_encap ipencap;
1986 struct ip6_tnl *nt, *t;
1987 int err;
1987 1988
1988 nt = netdev_priv(dev); 1989 nt = netdev_priv(dev);
1989 1990
1990 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { 1991 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1991 int err = ip6_tnl_encap_setup(nt, &ipencap); 1992 err = ip6_tnl_encap_setup(nt, &ipencap);
1992
1993 if (err < 0) 1993 if (err < 0)
1994 return err; 1994 return err;
1995 } 1995 }
@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
2005 return -EEXIST; 2005 return -EEXIST;
2006 } 2006 }
2007 2007
2008 return ip6_tnl_create2(dev); 2008 err = ip6_tnl_create2(dev);
2009 if (!err && tb[IFLA_MTU])
2010 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2011
2012 return err;
2009} 2013}
2010 2014
2011static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], 2015static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index f61a5b613b52..ba5e04c6ae17 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1554 *(opt++) = (rd_len >> 3); 1554 *(opt++) = (rd_len >> 3);
1555 opt += 6; 1555 opt += 6;
1556 1556
1557 memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); 1557 skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
1558 rd_len - 8);
1558} 1559}
1559 1560
1560void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1561void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
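The ndisc change swaps memcpy() for skb_copy_bits() because the redirected packet's network header may sit in paged, non-linear skb data, where a raw memcpy from ipv6_hdr() would read past the linear head. The idiom, as a minimal wrapper:

#include <linux/skbuff.h>

/* skb_copy_bits() walks fragments; memcpy from skb->data is only valid
 * for bytes inside the linear head.
 */
static int copy_network_hdr(const struct sk_buff *skb, void *dst, int len)
{
        return skb_copy_bits(skb, skb_network_offset(skb), dst, len);
}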
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d95ceca7ff8f..531d6957af36 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -21,18 +21,19 @@
21int ip6_route_me_harder(struct net *net, struct sk_buff *skb) 21int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
22{ 22{
23 const struct ipv6hdr *iph = ipv6_hdr(skb); 23 const struct ipv6hdr *iph = ipv6_hdr(skb);
24 struct sock *sk = sk_to_full_sk(skb->sk);
24 unsigned int hh_len; 25 unsigned int hh_len;
25 struct dst_entry *dst; 26 struct dst_entry *dst;
26 struct flowi6 fl6 = { 27 struct flowi6 fl6 = {
27 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 28 .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
28 .flowi6_mark = skb->mark, 29 .flowi6_mark = skb->mark,
29 .flowi6_uid = sock_net_uid(net, skb->sk), 30 .flowi6_uid = sock_net_uid(net, sk),
30 .daddr = iph->daddr, 31 .daddr = iph->daddr,
31 .saddr = iph->saddr, 32 .saddr = iph->saddr,
32 }; 33 };
33 int err; 34 int err;
34 35
35 dst = ip6_route_output(net, skb->sk, &fl6); 36 dst = ip6_route_output(net, sk, &fl6);
36 err = dst->error; 37 err = dst->error;
37 if (err) { 38 if (err) {
38 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 39 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
50 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 51 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
51 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { 52 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
52 skb_dst_set(skb, NULL); 53 skb_dst_set(skb, NULL);
53 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); 54 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
54 if (IS_ERR(dst)) 55 if (IS_ERR(dst))
55 return PTR_ERR(dst); 56 return PTR_ERR(dst);
56 skb_dst_set(skb, dst); 57 skb_dst_set(skb, dst);
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 94deb69bbbda..91ed25a24b79 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
48 } 48 }
49 49
50 fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 50 fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
51 if ((flags & XT_RPFILTER_LOOSE) == 0) {
52 fl6.flowi6_oif = dev->ifindex;
53 lookup_flags |= RT6_LOOKUP_F_IFACE;
54 }
55 51
56 rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); 52 rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
57 if (rt->dst.error) 53 if (rt->dst.error)
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index d346705d6ee6..207cb35569b1 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
178 if (skb->len <= mtu) 178 if (skb->len <= mtu)
179 return false; 179 return false;
180 180
181 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 181 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
182 return false; 182 return false;
183 183
184 return true; 184 return true;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index bed57ee65f7b..6b7f075f811f 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, 99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
100 target, maniptype)) 100 target, maniptype))
101 return false; 101 return false;
102
103 /* must reload, offset might have changed */
104 ipv6h = (void *)skb->data + iphdroff;
105
102manip_addr: 106manip_addr:
103 if (maniptype == NF_NAT_MANIP_SRC) 107 if (maniptype == NF_NAT_MANIP_SRC)
104 ipv6h->saddr = target->src.u3.in6; 108 ipv6h->saddr = target->src.u3.in6;
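The NAT fix reloads ipv6h after l4proto->manip_pkt() because that call may COW or expand the skb, leaving the previously computed header pointer stale. The rule generalizes: recompute header pointers from skb->data after any helper that can reallocate it, as in this sketch:

#include <linux/ipv6.h>
#include <linux/skbuff.h>

static struct ipv6hdr *reload_ipv6h(struct sk_buff *skb,
                                    unsigned int iphdroff)
{
        /* must reload, offset might have changed */
        return (struct ipv6hdr *)(skb->data + iphdroff);
}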
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index cc5174c7254c..62fc84d7bdff 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
180 } 180 }
181 181
182 *dest = 0; 182 *dest = 0;
183 again:
184 rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); 183 rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
185 if (rt->dst.error) 184 if (rt->dst.error)
186 goto put_rt_err; 185 goto put_rt_err;
@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
189 if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) 188 if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
190 goto put_rt_err; 189 goto put_rt_err;
191 190
192 if (oif && oif != rt->rt6i_idev->dev) { 191 if (oif && oif != rt->rt6i_idev->dev)
193 /* multipath route? Try again with F_IFACE */ 192 goto put_rt_err;
194 if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
195 lookup_flags |= RT6_LOOKUP_F_IFACE;
196 fl6.flowi6_oif = oif->ifindex;
197 ip6_rt_put(rt);
198 goto again;
199 }
200 }
201 193
202 switch (priv->result) { 194 switch (priv->result) {
203 case NFT_FIB_RESULT_OIF: 195 case NFT_FIB_RESULT_OIF:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9dcfadddd800..b0d5c64e1978 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
128 128
129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); 129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
130 130
131static void rt6_uncached_list_add(struct rt6_info *rt) 131void rt6_uncached_list_add(struct rt6_info *rt)
132{ 132{
133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); 133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
134 134
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
139 spin_unlock_bh(&ul->lock); 139 spin_unlock_bh(&ul->lock);
140} 140}
141 141
142static void rt6_uncached_list_del(struct rt6_info *rt) 142void rt6_uncached_list_del(struct rt6_info *rt)
143{ 143{
144 if (!list_empty(&rt->rt6i_uncached)) { 144 if (!list_empty(&rt->rt6i_uncached)) {
145 struct uncached_list *ul = rt->rt6i_uncached_list; 145 struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1509,7 +1509,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1509 } 1509 }
1510} 1510}
1511 1511
1512static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) 1512static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1513 struct rt6_info *rt, int mtu)
1514{
1515 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1516 * lowest MTU in the path: always allow updating the route PMTU to
1517 * reflect PMTU decreases.
1518 *
1519 * If the new MTU is higher, and the route PMTU is equal to the local
1520 * MTU, this means the old MTU is the lowest in the path, so allow
1521 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1522 * handle this.
1523 */
1524
1525 if (dst_mtu(&rt->dst) >= mtu)
1526 return true;
1527
1528 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1529 return true;
1530
1531 return false;
1532}
1533
1534static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1535 struct rt6_info *rt, int mtu)
1513{ 1536{
1514 struct rt6_exception_bucket *bucket; 1537 struct rt6_exception_bucket *bucket;
1515 struct rt6_exception *rt6_ex; 1538 struct rt6_exception *rt6_ex;
@@ -1518,20 +1541,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1518 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1541 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1519 lockdep_is_held(&rt6_exception_lock)); 1542 lockdep_is_held(&rt6_exception_lock));
1520 1543
1521 if (bucket) { 1544 if (!bucket)
1522 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1545 return;
1523 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1546
1524 struct rt6_info *entry = rt6_ex->rt6i; 1547 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1525 /* For RTF_CACHE with rt6i_pmtu == 0 1548 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1526 * (i.e. a redirected route), 1549 struct rt6_info *entry = rt6_ex->rt6i;
1527 * the metrics of its rt->dst.from has already 1550
1528 * been updated. 1551 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1529 */ 1552 * route), the metrics of its rt->dst.from have already
1530 if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) 1553 * been updated.
1531 entry->rt6i_pmtu = mtu; 1554 */
1532 } 1555 if (entry->rt6i_pmtu &&
1533 bucket++; 1556 rt6_mtu_change_route_allowed(idev, entry, mtu))
1557 entry->rt6i_pmtu = mtu;
1534 } 1558 }
1559 bucket++;
1535 } 1560 }
1536} 1561}
1537 1562
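The new rt6_mtu_change_route_allowed() helper encodes exactly the two rules in its comment: a PMTU decrease is always accepted, and an increase is accepted only when the current route PMTU equals the local device MTU, i.e. the local link was the path bottleneck. A standalone restatement of that decision table:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of rt6_mtu_change_route_allowed()'s decision logic. */
    static bool mtu_update_allowed(int route_pmtu, int link_mtu, int new_mtu)
    {
        if (route_pmtu >= new_mtu)   /* PMTU decrease: always allowed */
            return true;
        if (route_pmtu == link_mtu)  /* local link was the bottleneck */
            return true;
        return false;                /* some other hop limits the path */
    }

    int main(void)
    {
        printf("%d\n", mtu_update_allowed(1500, 1500, 9000)); /* 1: raise */
        printf("%d\n", mtu_update_allowed(1280, 1500, 9000)); /* 0: a remote
                                                               * hop limits */
        printf("%d\n", mtu_update_allowed(1500, 1500, 1280)); /* 1: lower */
        return 0;
    }

The second hunk then applies the same rule to every cached exception, where the old loop only ever lowered entry->rt6i_pmtu.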
@@ -3809,25 +3834,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3809 Since RFC 1981 doesn't include administrative MTU increase 3834 Since RFC 1981 doesn't include administrative MTU increase
3810 update PMTU increase is a MUST. (i.e. jumbo frame) 3835 update PMTU increase is a MUST. (i.e. jumbo frame)
3811 */ 3836 */
3812 /*
3813 If new MTU is less than route PMTU, this new MTU will be the
3814 lowest MTU in the path, update the route PMTU to reflect PMTU
3815 decreases; if new MTU is greater than route PMTU, and the
3816 old MTU is the lowest MTU in the path, update the route PMTU
3817 to reflect the increase. In this case if the other nodes' MTU
3818 also have the lowest MTU, TOO BIG MESSAGE will be lead to
3819 PMTU discovery.
3820 */
3821 if (rt->dst.dev == arg->dev && 3837 if (rt->dst.dev == arg->dev &&
3822 dst_metric_raw(&rt->dst, RTAX_MTU) &&
3823 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 3838 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3824 spin_lock_bh(&rt6_exception_lock); 3839 spin_lock_bh(&rt6_exception_lock);
3825 if (dst_mtu(&rt->dst) >= arg->mtu || 3840 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3826 (dst_mtu(&rt->dst) < arg->mtu && 3841 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3827 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
3828 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); 3842 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3829 } 3843 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3830 rt6_exceptions_update_pmtu(rt, arg->mtu);
3831 spin_unlock_bh(&rt6_exception_lock); 3844 spin_unlock_bh(&rt6_exception_lock);
3832 } 3845 }
3833 return 0; 3846 return 0;
@@ -4099,6 +4112,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4099 r_cfg.fc_encap_type = nla_get_u16(nla); 4112 r_cfg.fc_encap_type = nla_get_u16(nla);
4100 } 4113 }
4101 4114
4115 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4102 rt = ip6_route_info_create(&r_cfg, extack); 4116 rt = ip6_route_info_create(&r_cfg, extack);
4103 if (IS_ERR(rt)) { 4117 if (IS_ERR(rt)) {
4104 err = PTR_ERR(rt); 4118 err = PTR_ERR(rt);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bd6cc688bd19..7a78dcfda68a 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ 93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) 94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
95{ 95{
96 struct net *net = dev_net(skb_dst(skb)->dev); 96 struct dst_entry *dst = skb_dst(skb);
97 struct net *net = dev_net(dst->dev);
97 struct ipv6hdr *hdr, *inner_hdr; 98 struct ipv6hdr *hdr, *inner_hdr;
98 struct ipv6_sr_hdr *isrh; 99 struct ipv6_sr_hdr *isrh;
99 int hdrlen, tot_len, err; 100 int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
134 isrh->nexthdr = proto; 135 isrh->nexthdr = proto;
135 136
136 hdr->daddr = isrh->segments[isrh->first_segment]; 137 hdr->daddr = isrh->segments[isrh->first_segment];
137 set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr); 138 set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
138 139
139#ifdef CONFIG_IPV6_SEG6_HMAC 140#ifdef CONFIG_IPV6_SEG6_HMAC
140 if (sr_has_hmac(isrh)) { 141 if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
418 419
419 slwt = seg6_lwt_lwtunnel(newts); 420 slwt = seg6_lwt_lwtunnel(newts);
420 421
421 err = dst_cache_init(&slwt->cache, GFP_KERNEL); 422 err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
422 if (err) { 423 if (err) {
423 kfree(newts); 424 kfree(newts);
424 return err; 425 return err;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3a1775a62973..0195598f7bb5 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1578 if (err < 0) 1578 if (err < 0)
1579 return err; 1579 return err;
1580 1580
1581 if (tb[IFLA_MTU]) {
1582 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
1583
1584 if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
1585 dev->mtu = mtu;
1586 }
1587
1581#ifdef CONFIG_IPV6_SIT_6RD 1588#ifdef CONFIG_IPV6_SIT_6RD
1582 if (ipip6_netlink_6rd_parms(data, &ip6rd)) 1589 if (ipip6_netlink_6rd_parms(data, &ip6rd))
1583 err = ipip6_tunnel_update_6rd(nt, &ip6rd); 1590 err = ipip6_tunnel_update_6rd(nt, &ip6rd);
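The new IFLA_MTU handling accepts a configured MTU only inside [IPV6_MIN_MTU, 0xFFF8 - dev->hard_header_len]; 1280 is the minimum MTU any IPv6 link must support, and 0xFFF8 is the usual tunnel upper bound (the 16-bit IP total-length limit rounded down to a multiple of 8). Out-of-range values are silently ignored and dev->mtu keeps its default. A sketch with hdr_len standing in for dev->hard_header_len:

    #include <stdbool.h>
    #include <stdio.h>

    #define IPV6_MIN_MTU 1280u

    /* Sketch of the IFLA_MTU validation above. */
    static bool sit_mtu_valid(unsigned int mtu, unsigned int hdr_len)
    {
        return mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - hdr_len;
    }

    int main(void)
    {
        printf("%d\n", sit_mtu_valid(1480, 14));  /* 1 */
        printf("%d\n", sit_mtu_valid(900, 14));   /* 0: below IPv6 minimum */
        printf("%d\n", sit_mtu_valid(65528, 14)); /* 0: above 0xFFF8 - 14 */
        return 0;
    }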
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7fea..de1b0b8c53b0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8ae87d4ec5ff..5959ce9620eb 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
82 82
83 if ((!skb_is_gso(skb) && skb->len > mtu) || 83 if ((!skb_is_gso(skb) && skb->len > mtu) ||
84 (skb_is_gso(skb) && 84 (skb_is_gso(skb) &&
85 skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { 85 !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
86 skb->dev = dst->dev; 86 skb->dev = dst->dev;
87 skb->protocol = htons(ETH_P_IPV6); 87 skb->protocol = htons(ETH_P_IPV6);
88 88
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 09fb44ee3b45..416fe67271a9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; 113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst; 114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
115 xdst->u.rt6.rt6i_src = rt->rt6i_src; 115 xdst->u.rt6.rt6i_src = rt->rt6i_src;
116 INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
117 rt6_uncached_list_add(&xdst->u.rt6);
118 atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
116 119
117 return 0; 120 return 0;
118} 121}
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
244 if (likely(xdst->u.rt6.rt6i_idev)) 247 if (likely(xdst->u.rt6.rt6i_idev))
245 in6_dev_put(xdst->u.rt6.rt6i_idev); 248 in6_dev_put(xdst->u.rt6.rt6i_idev);
246 dst_destroy_metrics_generic(dst); 249 dst_destroy_metrics_generic(dst);
250 if (xdst->u.rt6.rt6i_uncached_list)
251 rt6_uncached_list_del(&xdst->u.rt6);
247 xfrm_dst_destroy(xdst); 252 xfrm_dst_destroy(xdst);
248} 253}
249 254
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e8cc7bcbca3..9e2643ab4ccb 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void)
2433 af_iucv_dev->driver = &af_iucv_driver; 2433 af_iucv_dev->driver = &af_iucv_driver;
2434 err = device_register(af_iucv_dev); 2434 err = device_register(af_iucv_dev);
2435 if (err) 2435 if (err)
2436 goto out_driver; 2436 goto out_iucv_dev;
2437 return 0; 2437 return 0;
2438 2438
2439out_iucv_dev:
2440 put_device(af_iucv_dev);
2439out_driver: 2441out_driver:
2440 driver_unregister(&af_iucv_driver); 2442 driver_unregister(&af_iucv_driver);
2441out_iucv: 2443out_iucv:
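The relabelled error path reflects the driver-core rule that device_register() keeps a reference on the device even when it fails, so the caller must drop it with put_device() before undoing the earlier steps. The shape of the resulting unwind, with hypothetical stand-ins for the registration calls:

    #include <stdio.h>

    /* Hypothetical stand-ins for the registration steps in the hunk. */
    static int  register_driver(void)   { puts("driver registered"); return 0; }
    static int  register_device(void)   { return -1; /* force failure */ }
    static void put_device_ref(void)    { puts("put_device"); }
    static void unregister_driver(void) { puts("driver_unregister"); }

    int main(void)
    {
        int err;

        err = register_driver();
        if (err)
            return err;

        err = register_device();
        if (err)
            goto out_dev;        /* device_register() failed but still
                                  * holds a reference: drop it first */
        return 0;

    out_dev:
        put_device_ref();
        unregister_driver();
        return err;
    }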
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index f297d53a11aa..34355fd19f27 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1381 .parse_msg = kcm_parse_func_strparser, 1381 .parse_msg = kcm_parse_func_strparser,
1382 .read_sock_done = kcm_read_sock_done, 1382 .read_sock_done = kcm_read_sock_done,
1383 }; 1383 };
1384 int err; 1384 int err = 0;
1385 1385
1386 csk = csock->sk; 1386 csk = csock->sk;
1387 if (!csk) 1387 if (!csk)
1388 return -EINVAL; 1388 return -EINVAL;
1389 1389
1390 lock_sock(csk);
1391
1390 /* Only allow TCP sockets to be attached for now */ 1392 /* Only allow TCP sockets to be attached for now */
1391 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || 1393 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1392 csk->sk_protocol != IPPROTO_TCP) 1394 csk->sk_protocol != IPPROTO_TCP) {
1393 return -EOPNOTSUPP; 1395 err = -EOPNOTSUPP;
1396 goto out;
1397 }
1394 1398
1395 /* Don't allow listeners or closed sockets */ 1399 /* Don't allow listeners or closed sockets */
1396 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) 1400 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1397 return -EOPNOTSUPP; 1401 err = -EOPNOTSUPP;
1402 goto out;
1403 }
1398 1404
1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); 1405 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1400 if (!psock) 1406 if (!psock) {
1401 return -ENOMEM; 1407 err = -ENOMEM;
1408 goto out;
1409 }
1402 1410
1403 psock->mux = mux; 1411 psock->mux = mux;
1404 psock->sk = csk; 1412 psock->sk = csk;
@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1407 err = strp_init(&psock->strp, csk, &cb); 1415 err = strp_init(&psock->strp, csk, &cb);
1408 if (err) { 1416 if (err) {
1409 kmem_cache_free(kcm_psockp, psock); 1417 kmem_cache_free(kcm_psockp, psock);
1410 return err; 1418 goto out;
1411 } 1419 }
1412 1420
1413 write_lock_bh(&csk->sk_callback_lock); 1421 write_lock_bh(&csk->sk_callback_lock);
@@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1419 write_unlock_bh(&csk->sk_callback_lock); 1427 write_unlock_bh(&csk->sk_callback_lock);
1420 strp_done(&psock->strp); 1428 strp_done(&psock->strp);
1421 kmem_cache_free(kcm_psockp, psock); 1429 kmem_cache_free(kcm_psockp, psock);
1422 return -EALREADY; 1430 err = -EALREADY;
1431 goto out;
1423 } 1432 }
1424 1433
1425 psock->save_data_ready = csk->sk_data_ready; 1434 psock->save_data_ready = csk->sk_data_ready;
@@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1455 /* Schedule RX work in case there are already bytes queued */ 1464 /* Schedule RX work in case there are already bytes queued */
1456 strp_check_rcv(&psock->strp); 1465 strp_check_rcv(&psock->strp);
1457 1466
1458 return 0; 1467out:
1468 release_sock(csk);
1469
1470 return err;
1459} 1471}
1460 1472
1461static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) 1473static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock)
1507 1519
1508 if (WARN_ON(psock->rx_kcm)) { 1520 if (WARN_ON(psock->rx_kcm)) {
1509 write_unlock_bh(&csk->sk_callback_lock); 1521 write_unlock_bh(&csk->sk_callback_lock);
1522 release_sock(csk);
1510 return; 1523 return;
1511 } 1524 }
1512 1525
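The kcm_attach() rework above takes lock_sock(csk) once at entry and funnels every failure through a single release point, so the sk_state checks and the psock installation can no longer race with a concurrent close of csk. The generic shape of the pattern, with a pthread mutex standing in for the socket lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int state = 1;          /* hypothetical precondition */

    /* Single-exit locking pattern from the kcm_attach() rework: take
     * the lock once, funnel every failure through one unlock site. */
    static int attach(void)
    {
        int err = 0;

        pthread_mutex_lock(&lock);

        if (!state) {              /* checked under the lock, as above */
            err = -1;
            goto out;
        }

        /* ... set everything up while still holding the lock ... */

    out:
        pthread_mutex_unlock(&lock);
        return err;
    }

    int main(void)
    {
        printf("%d\n", attach());
        return 0;
    }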
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 194a7483bb93..14b67dfacc4b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,13 @@ struct l2tp_net {
111 spinlock_t l2tp_session_hlist_lock; 111 spinlock_t l2tp_session_hlist_lock;
112}; 112};
113 113
114#if IS_ENABLED(CONFIG_IPV6)
115static bool l2tp_sk_is_v6(struct sock *sk)
116{
117 return sk->sk_family == PF_INET6 &&
118 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
119}
120#endif
114 121
115static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) 122static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
116{ 123{
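The new l2tp_sk_is_v6() treats an AF_INET6 tunnel socket as genuinely IPv6 only when its peer address is not IPv4-mapped (::ffff:a.b.c.d); a mapped peer means the socket actually carries IPv4 and must take the IPv4 transmit and checksum paths, which is what the call sites below switch on. A sketch of the mapped-address test on a raw 16-byte address, mirroring what ipv6_addr_v4mapped() checks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* IPv4-mapped IPv6 addresses are ::ffff:a.b.c.d — 80 zero bits,
     * 16 one bits, then the IPv4 address. */
    static bool addr_is_v4mapped(const uint8_t a[16])
    {
        static const uint8_t prefix[12] = {
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
        };
        return memcmp(a, prefix, sizeof(prefix)) == 0;
    }

    int main(void)
    {
        uint8_t mapped[16] = { [10] = 0xff, [11] = 0xff,
                               [12] = 192, [13] = 0, [14] = 2, [15] = 1 };
        printf("%d\n", addr_is_v4mapped(mapped)); /* 1: ::ffff:192.0.2.1 */
        return 0;
    }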
@@ -136,51 +143,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
136 143
137} 144}
138 145
139/* Lookup the tunnel socket, possibly involving the fs code if the socket is
140 * owned by userspace. A struct sock returned from this function must be
141 * released using l2tp_tunnel_sock_put once you're done with it.
142 */
143static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
144{
145 int err = 0;
146 struct socket *sock = NULL;
147 struct sock *sk = NULL;
148
149 if (!tunnel)
150 goto out;
151
152 if (tunnel->fd >= 0) {
153 /* Socket is owned by userspace, who might be in the process
154 * of closing it. Look the socket up using the fd to ensure
155 * consistency.
156 */
157 sock = sockfd_lookup(tunnel->fd, &err);
158 if (sock)
159 sk = sock->sk;
160 } else {
161 /* Socket is owned by kernelspace */
162 sk = tunnel->sock;
163 sock_hold(sk);
164 }
165
166out:
167 return sk;
168}
169
170/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
171static void l2tp_tunnel_sock_put(struct sock *sk)
172{
173 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
174 if (tunnel) {
175 if (tunnel->fd >= 0) {
176 /* Socket is owned by userspace */
177 sockfd_put(sk->sk_socket);
178 }
179 sock_put(sk);
180 }
181 sock_put(sk);
182}
183
184/* Session hash list. 146/* Session hash list.
185 * The session_id SHOULD be random according to RFC2661, but several 147 * The session_id SHOULD be random according to RFC2661, but several
186 * L2TP implementations (Cisco and Microsoft) use incrementing 148 * L2TP implementations (Cisco and Microsoft) use incrementing
@@ -193,6 +155,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
193 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; 155 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
194} 156}
195 157
158void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
159{
160 sock_put(tunnel->sock);
161 /* the tunnel is freed in the socket destructor */
162}
163EXPORT_SYMBOL(l2tp_tunnel_free);
164
196/* Lookup a tunnel. A new reference is held on the returned tunnel. */ 165/* Lookup a tunnel. A new reference is held on the returned tunnel. */
197struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) 166struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
198{ 167{
@@ -345,13 +314,11 @@ int l2tp_session_register(struct l2tp_session *session,
345 } 314 }
346 315
347 l2tp_tunnel_inc_refcount(tunnel); 316 l2tp_tunnel_inc_refcount(tunnel);
348 sock_hold(tunnel->sock);
349 hlist_add_head_rcu(&session->global_hlist, g_head); 317 hlist_add_head_rcu(&session->global_hlist, g_head);
350 318
351 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 319 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
352 } else { 320 } else {
353 l2tp_tunnel_inc_refcount(tunnel); 321 l2tp_tunnel_inc_refcount(tunnel);
354 sock_hold(tunnel->sock);
355 } 322 }
356 323
357 hlist_add_head(&session->hlist, head); 324 hlist_add_head(&session->hlist, head);
@@ -969,7 +936,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
969{ 936{
970 struct l2tp_tunnel *tunnel; 937 struct l2tp_tunnel *tunnel;
971 938
972 tunnel = l2tp_sock_to_tunnel(sk); 939 tunnel = l2tp_tunnel(sk);
973 if (tunnel == NULL) 940 if (tunnel == NULL)
974 goto pass_up; 941 goto pass_up;
975 942
@@ -977,13 +944,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
977 tunnel->name, skb->len); 944 tunnel->name, skb->len);
978 945
979 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) 946 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
980 goto pass_up_put; 947 goto pass_up;
981 948
982 sock_put(sk);
983 return 0; 949 return 0;
984 950
985pass_up_put:
986 sock_put(sk);
987pass_up: 951pass_up:
988 return 1; 952 return 1;
989} 953}
@@ -1092,7 +1056,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1092 /* Queue the packet to IP for output */ 1056 /* Queue the packet to IP for output */
1093 skb->ignore_df = 1; 1057 skb->ignore_df = 1;
1094#if IS_ENABLED(CONFIG_IPV6) 1058#if IS_ENABLED(CONFIG_IPV6)
1095 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1059 if (l2tp_sk_is_v6(tunnel->sock))
1096 error = inet6_csk_xmit(tunnel->sock, skb, NULL); 1060 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1097 else 1061 else
1098#endif 1062#endif
@@ -1155,6 +1119,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1155 goto out_unlock; 1119 goto out_unlock;
1156 } 1120 }
1157 1121
1122 /* The user-space may change the connection status for the user-space
1123 * provided socket at run time: we must check it under the socket lock
1124 */
1125 if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1126 kfree_skb(skb);
1127 ret = NET_XMIT_DROP;
1128 goto out_unlock;
1129 }
1130
1158 /* Get routing info from the tunnel socket */ 1131 /* Get routing info from the tunnel socket */
1159 skb_dst_drop(skb); 1132 skb_dst_drop(skb);
1160 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); 1133 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
@@ -1174,7 +1147,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1174 1147
1175 /* Calculate UDP checksum if configured to do so */ 1148 /* Calculate UDP checksum if configured to do so */
1176#if IS_ENABLED(CONFIG_IPV6) 1149#if IS_ENABLED(CONFIG_IPV6)
1177 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1150 if (l2tp_sk_is_v6(sk))
1178 udp6_set_csum(udp_get_no_check6_tx(sk), 1151 udp6_set_csum(udp_get_no_check6_tx(sk),
1179 skb, &inet6_sk(sk)->saddr, 1152 skb, &inet6_sk(sk)->saddr,
1180 &sk->sk_v6_daddr, udp_len); 1153 &sk->sk_v6_daddr, udp_len);
@@ -1207,14 +1180,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1207static void l2tp_tunnel_destruct(struct sock *sk) 1180static void l2tp_tunnel_destruct(struct sock *sk)
1208{ 1181{
1209 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); 1182 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1210 struct l2tp_net *pn;
1211 1183
1212 if (tunnel == NULL) 1184 if (tunnel == NULL)
1213 goto end; 1185 goto end;
1214 1186
1215 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); 1187 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1216 1188
1217
1218 /* Disable udp encapsulation */ 1189 /* Disable udp encapsulation */
1219 switch (tunnel->encap) { 1190 switch (tunnel->encap) {
1220 case L2TP_ENCAPTYPE_UDP: 1191 case L2TP_ENCAPTYPE_UDP:
@@ -1231,18 +1202,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1231 sk->sk_destruct = tunnel->old_sk_destruct; 1202 sk->sk_destruct = tunnel->old_sk_destruct;
1232 sk->sk_user_data = NULL; 1203 sk->sk_user_data = NULL;
1233 1204
1234 /* Remove the tunnel struct from the tunnel list */
1235 pn = l2tp_pernet(tunnel->l2tp_net);
1236 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1237 list_del_rcu(&tunnel->list);
1238 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1239
1240 tunnel->sock = NULL;
1241 l2tp_tunnel_dec_refcount(tunnel);
1242
1243 /* Call the original destructor */ 1205 /* Call the original destructor */
1244 if (sk->sk_destruct) 1206 if (sk->sk_destruct)
1245 (*sk->sk_destruct)(sk); 1207 (*sk->sk_destruct)(sk);
1208
1209 kfree_rcu(tunnel, rcu);
1246end: 1210end:
1247 return; 1211 return;
1248} 1212}
@@ -1303,49 +1267,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1303/* Tunnel socket destroy hook for UDP encapsulation */ 1267/* Tunnel socket destroy hook for UDP encapsulation */
1304static void l2tp_udp_encap_destroy(struct sock *sk) 1268static void l2tp_udp_encap_destroy(struct sock *sk)
1305{ 1269{
1306 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 1270 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1307 if (tunnel) { 1271
1308 l2tp_tunnel_closeall(tunnel); 1272 if (tunnel)
1309 sock_put(sk); 1273 l2tp_tunnel_delete(tunnel);
1310 }
1311} 1274}
1312 1275
1313/* Workqueue tunnel deletion function */ 1276/* Workqueue tunnel deletion function */
1314static void l2tp_tunnel_del_work(struct work_struct *work) 1277static void l2tp_tunnel_del_work(struct work_struct *work)
1315{ 1278{
1316 struct l2tp_tunnel *tunnel = NULL; 1279 struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1317 struct socket *sock = NULL; 1280 del_work);
1318 struct sock *sk = NULL; 1281 struct sock *sk = tunnel->sock;
1319 1282 struct socket *sock = sk->sk_socket;
1320 tunnel = container_of(work, struct l2tp_tunnel, del_work); 1283 struct l2tp_net *pn;
1321 1284
1322 l2tp_tunnel_closeall(tunnel); 1285 l2tp_tunnel_closeall(tunnel);
1323 1286
1324 sk = l2tp_tunnel_sock_lookup(tunnel); 1287 /* If the tunnel socket was created within the kernel, use
1325 if (!sk)
1326 goto out;
1327
1328 sock = sk->sk_socket;
1329
1330 /* If the tunnel socket was created by userspace, then go through the
1331 * inet layer to shut the socket down, and let userspace close it.
1332 * Otherwise, if we created the socket directly within the kernel, use
1333 * the sk API to release it here. 1288 * the sk API to release it here.
1334 * In either case the tunnel resources are freed in the socket
1335 * destructor when the tunnel socket goes away.
1336 */ 1289 */
1337 if (tunnel->fd >= 0) { 1290 if (tunnel->fd < 0) {
1338 if (sock)
1339 inet_shutdown(sock, 2);
1340 } else {
1341 if (sock) { 1291 if (sock) {
1342 kernel_sock_shutdown(sock, SHUT_RDWR); 1292 kernel_sock_shutdown(sock, SHUT_RDWR);
1343 sock_release(sock); 1293 sock_release(sock);
1344 } 1294 }
1345 } 1295 }
1346 1296
1347 l2tp_tunnel_sock_put(sk); 1297 /* Remove the tunnel struct from the tunnel list */
1348out: 1298 pn = l2tp_pernet(tunnel->l2tp_net);
1299 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1300 list_del_rcu(&tunnel->list);
1301 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1302
1303 /* drop initial ref */
1304 l2tp_tunnel_dec_refcount(tunnel);
1305
1306 /* drop workqueue ref */
1349 l2tp_tunnel_dec_refcount(tunnel); 1307 l2tp_tunnel_dec_refcount(tunnel);
1350} 1308}
1351 1309
@@ -1515,9 +1473,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1515 encap = cfg->encap; 1473 encap = cfg->encap;
1516 1474
1517 /* Quick sanity checks */ 1475 /* Quick sanity checks */
1476 err = -EPROTONOSUPPORT;
1477 if (sk->sk_type != SOCK_DGRAM) {
1478 pr_debug("tunl %hu: fd %d wrong socket type\n",
1479 tunnel_id, fd);
1480 goto err;
1481 }
1518 switch (encap) { 1482 switch (encap) {
1519 case L2TP_ENCAPTYPE_UDP: 1483 case L2TP_ENCAPTYPE_UDP:
1520 err = -EPROTONOSUPPORT;
1521 if (sk->sk_protocol != IPPROTO_UDP) { 1484 if (sk->sk_protocol != IPPROTO_UDP) {
1522 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1485 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1523 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); 1486 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1525,7 +1488,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1525 } 1488 }
1526 break; 1489 break;
1527 case L2TP_ENCAPTYPE_IP: 1490 case L2TP_ENCAPTYPE_IP:
1528 err = -EPROTONOSUPPORT;
1529 if (sk->sk_protocol != IPPROTO_L2TP) { 1491 if (sk->sk_protocol != IPPROTO_L2TP) {
1530 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1492 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1531 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); 1493 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
@@ -1565,24 +1527,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1565 if (cfg != NULL) 1527 if (cfg != NULL)
1566 tunnel->debug = cfg->debug; 1528 tunnel->debug = cfg->debug;
1567 1529
1568#if IS_ENABLED(CONFIG_IPV6)
1569 if (sk->sk_family == PF_INET6) {
1570 struct ipv6_pinfo *np = inet6_sk(sk);
1571
1572 if (ipv6_addr_v4mapped(&np->saddr) &&
1573 ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1574 struct inet_sock *inet = inet_sk(sk);
1575
1576 tunnel->v4mapped = true;
1577 inet->inet_saddr = np->saddr.s6_addr32[3];
1578 inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1579 inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1580 } else {
1581 tunnel->v4mapped = false;
1582 }
1583 }
1584#endif
1585
1586 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1530 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1587 tunnel->encap = encap; 1531 tunnel->encap = encap;
1588 if (encap == L2TP_ENCAPTYPE_UDP) { 1532 if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1598,13 +1542,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1598 sk->sk_user_data = tunnel; 1542 sk->sk_user_data = tunnel;
1599 } 1543 }
1600 1544
1545 /* Bump the reference count. The tunnel context is deleted
1546 * only when this drops to zero. A reference is also held on
1547 * the tunnel socket to ensure that it is not released while
1548 * the tunnel is extant. Must be done before sk_destruct is
1549 * set.
1550 */
1551 refcount_set(&tunnel->ref_count, 1);
1552 sock_hold(sk);
1553 tunnel->sock = sk;
1554 tunnel->fd = fd;
1555
1601 /* Hook on the tunnel socket destructor so that we can cleanup 1556 /* Hook on the tunnel socket destructor so that we can cleanup
1602 * if the tunnel socket goes away. 1557 * if the tunnel socket goes away.
1603 */ 1558 */
1604 tunnel->old_sk_destruct = sk->sk_destruct; 1559 tunnel->old_sk_destruct = sk->sk_destruct;
1605 sk->sk_destruct = &l2tp_tunnel_destruct; 1560 sk->sk_destruct = &l2tp_tunnel_destruct;
1606 tunnel->sock = sk;
1607 tunnel->fd = fd;
1608 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1561 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1609 1562
1610 sk->sk_allocation = GFP_ATOMIC; 1563 sk->sk_allocation = GFP_ATOMIC;
@@ -1614,11 +1567,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1614 1567
1615 /* Add tunnel to our list */ 1568 /* Add tunnel to our list */
1616 INIT_LIST_HEAD(&tunnel->list); 1569 INIT_LIST_HEAD(&tunnel->list);
1617
1618 /* Bump the reference count. The tunnel context is deleted
1619 * only when this drops to zero. Must be done before list insertion
1620 */
1621 refcount_set(&tunnel->ref_count, 1);
1622 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1570 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1623 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1571 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1624 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1572 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1659,8 +1607,6 @@ void l2tp_session_free(struct l2tp_session *session)
1659 1607
1660 if (tunnel) { 1608 if (tunnel) {
1661 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1609 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1662 sock_put(tunnel->sock);
1663 session->tunnel = NULL;
1664 l2tp_tunnel_dec_refcount(tunnel); 1610 l2tp_tunnel_dec_refcount(tunnel);
1665 } 1611 }
1666 1612
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9bbee90e9963..2718d0b284d0 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,9 +188,6 @@ struct l2tp_tunnel {
188 struct sock *sock; /* Parent socket */ 188 struct sock *sock; /* Parent socket */
189 int fd; /* Parent fd, if tunnel socket 189 int fd; /* Parent fd, if tunnel socket
190 * was created by userspace */ 190 * was created by userspace */
191#if IS_ENABLED(CONFIG_IPV6)
192 bool v4mapped;
193#endif
194 191
195 struct work_struct del_work; 192 struct work_struct del_work;
196 193
@@ -214,27 +211,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
214 return &session->priv[0]; 211 return &session->priv[0];
215} 212}
216 213
217static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
218{
219 struct l2tp_tunnel *tunnel;
220
221 if (sk == NULL)
222 return NULL;
223
224 sock_hold(sk);
225 tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
226 if (tunnel == NULL) {
227 sock_put(sk);
228 goto out;
229 }
230
231 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
232
233out:
234 return tunnel;
235}
236
237struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); 214struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
215void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
238 216
239struct l2tp_session *l2tp_session_get(const struct net *net, 217struct l2tp_session *l2tp_session_get(const struct net *net,
240 struct l2tp_tunnel *tunnel, 218 struct l2tp_tunnel *tunnel,
@@ -283,7 +261,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
283static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) 261static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
284{ 262{
285 if (refcount_dec_and_test(&tunnel->ref_count)) 263 if (refcount_dec_and_test(&tunnel->ref_count))
286 kfree_rcu(tunnel, rcu); 264 l2tp_tunnel_free(tunnel);
287} 265}
288 266
289/* Session reference counts. Incremented when code obtains a reference 267/* Session reference counts. Incremented when code obtains a reference
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ff61124fdf59..3428fba6f2b7 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
234static void l2tp_ip_destroy_sock(struct sock *sk) 234static void l2tp_ip_destroy_sock(struct sock *sk)
235{ 235{
236 struct sk_buff *skb; 236 struct sk_buff *skb;
237 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 237 struct l2tp_tunnel *tunnel = sk->sk_user_data;
238 238
239 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 239 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
240 kfree_skb(skb); 240 kfree_skb(skb);
241 241
242 if (tunnel) { 242 if (tunnel)
243 l2tp_tunnel_closeall(tunnel); 243 l2tp_tunnel_delete(tunnel);
244 sock_put(sk);
245 }
246
247 sk_refcnt_debug_dec(sk);
248} 244}
249 245
250static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 246static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 192344688c06..6f009eaa5fbe 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
248 248
249static void l2tp_ip6_destroy_sock(struct sock *sk) 249static void l2tp_ip6_destroy_sock(struct sock *sk)
250{ 250{
251 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 251 struct l2tp_tunnel *tunnel = sk->sk_user_data;
252 252
253 lock_sock(sk); 253 lock_sock(sk);
254 ip6_flush_pending_frames(sk); 254 ip6_flush_pending_frames(sk);
255 release_sock(sk); 255 release_sock(sk);
256 256
257 if (tunnel) { 257 if (tunnel)
258 l2tp_tunnel_closeall(tunnel); 258 l2tp_tunnel_delete(tunnel);
259 sock_put(sk);
260 }
261 259
262 inet6_destroy_sock(sk); 260 inet6_destroy_sock(sk);
263} 261}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 59f246d7b290..3b02f24ea9ec 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -416,20 +416,28 @@ abort:
416 * Session (and tunnel control) socket create/destroy. 416 * Session (and tunnel control) socket create/destroy.
417 *****************************************************************************/ 417 *****************************************************************************/
418 418
419static void pppol2tp_put_sk(struct rcu_head *head)
420{
421 struct pppol2tp_session *ps;
422
423 ps = container_of(head, typeof(*ps), rcu);
424 sock_put(ps->__sk);
425}
426
419/* Called by l2tp_core when a session socket is being closed. 427/* Called by l2tp_core when a session socket is being closed.
420 */ 428 */
421static void pppol2tp_session_close(struct l2tp_session *session) 429static void pppol2tp_session_close(struct l2tp_session *session)
422{ 430{
423 struct sock *sk; 431 struct pppol2tp_session *ps;
424
425 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
426 432
427 sk = pppol2tp_session_get_sock(session); 433 ps = l2tp_session_priv(session);
428 if (sk) { 434 mutex_lock(&ps->sk_lock);
429 if (sk->sk_socket) 435 ps->__sk = rcu_dereference_protected(ps->sk,
430 inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); 436 lockdep_is_held(&ps->sk_lock));
431 sock_put(sk); 437 RCU_INIT_POINTER(ps->sk, NULL);
432 } 438 if (ps->__sk)
439 call_rcu(&ps->rcu, pppol2tp_put_sk);
440 mutex_unlock(&ps->sk_lock);
433} 441}
434 442
435/* Really kill the session socket. (Called from sock_put() if 443/* Really kill the session socket. (Called from sock_put() if
@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
449 } 457 }
450} 458}
451 459
452static void pppol2tp_put_sk(struct rcu_head *head)
453{
454 struct pppol2tp_session *ps;
455
456 ps = container_of(head, typeof(*ps), rcu);
457 sock_put(ps->__sk);
458}
459
460/* Called when the PPPoX socket (session) is closed. 460/* Called when the PPPoX socket (session) is closed.
461 */ 461 */
462static int pppol2tp_release(struct socket *sock) 462static int pppol2tp_release(struct socket *sock)
@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
480 sock_orphan(sk); 480 sock_orphan(sk);
481 sock->sk = NULL; 481 sock->sk = NULL;
482 482
483 /* If the socket is associated with a session,
484 * l2tp_session_delete will call pppol2tp_session_close which
485 * will drop the session's ref on the socket.
486 */
483 session = pppol2tp_sock_to_session(sk); 487 session = pppol2tp_sock_to_session(sk);
484 488 if (session) {
485 if (session != NULL) {
486 struct pppol2tp_session *ps;
487
488 l2tp_session_delete(session); 489 l2tp_session_delete(session);
489 490 /* drop the ref obtained by pppol2tp_sock_to_session */
490 ps = l2tp_session_priv(session); 491 sock_put(sk);
491 mutex_lock(&ps->sk_lock);
492 ps->__sk = rcu_dereference_protected(ps->sk,
493 lockdep_is_held(&ps->sk_lock));
494 RCU_INIT_POINTER(ps->sk, NULL);
495 mutex_unlock(&ps->sk_lock);
496 call_rcu(&ps->rcu, pppol2tp_put_sk);
497
498 /* Rely on the sock_put() call at the end of the function for
499 * dropping the reference held by pppol2tp_sock_to_session().
500 * The last reference will be dropped by pppol2tp_put_sk().
501 */
502 } 492 }
493
503 release_sock(sk); 494 release_sock(sk);
504 495
505 /* This will delete the session context via 496 /* This will delete the session context via
@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
796 787
797out_no_ppp: 788out_no_ppp:
798 /* This is how we get the session context from the socket. */ 789 /* This is how we get the session context from the socket. */
790 sock_hold(sk);
799 sk->sk_user_data = session; 791 sk->sk_user_data = session;
800 rcu_assign_pointer(ps->sk, sk); 792 rcu_assign_pointer(ps->sk, sk);
801 mutex_unlock(&ps->sk_lock); 793 mutex_unlock(&ps->sk_lock);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1f466d12a6bc..94c7ee9df33b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -212,6 +212,7 @@ static const char *hw_flag_names[] = {
212 FLAG(REPORTS_LOW_ACK), 212 FLAG(REPORTS_LOW_ACK),
213 FLAG(SUPPORTS_TX_FRAG), 213 FLAG(SUPPORTS_TX_FRAG),
214 FLAG(SUPPORTS_TDLS_BUFFER_STA), 214 FLAG(SUPPORTS_TDLS_BUFFER_STA),
215 FLAG(DOESNT_SUPPORT_QOS_NDP),
215#undef FLAG 216#undef FLAG
216}; 217};
217 218
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 39b660b9a908..5f303abac5ad 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -896,7 +896,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
896 struct ieee80211_hdr_3addr *nullfunc; 896 struct ieee80211_hdr_3addr *nullfunc;
897 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 897 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
898 898
899 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); 899 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
900 !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
900 if (!skb) 901 if (!skb)
901 return; 902 return;
902 903
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fd580614085b..56fe16b07538 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3921,7 +3921,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3921 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 3921 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
3922 IEEE80211_FCTL_TODS)) != 3922 IEEE80211_FCTL_TODS)) !=
3923 fast_rx->expected_ds_bits) 3923 fast_rx->expected_ds_bits)
3924 goto drop; 3924 return false;
3925 3925
3926 /* assign the key to drop unencrypted frames (later) 3926 /* assign the key to drop unencrypted frames (later)
3927 * and strip the IV/MIC if necessary 3927 * and strip the IV/MIC if necessary
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 25904af38839..69722504e3e1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3574,6 +3574,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
3574 if (!IS_ERR_OR_NULL(sta)) { 3574 if (!IS_ERR_OR_NULL(sta)) {
3575 struct ieee80211_fast_tx *fast_tx; 3575 struct ieee80211_fast_tx *fast_tx;
3576 3576
3577 /* We need a bit of data queued to build aggregates properly, so
3578 * instruct the TCP stack to allow more than a single ms of data
3579 * to be queued in the stack. The value is a bit-shift of 1
3580 * second, so 8 is ~4ms of queued data. Only affects local TCP
3581 * sockets.
3582 */
3583 sk_pacing_shift_update(skb->sk, 8);
3584
3577 fast_tx = rcu_dereference(sta->fast_tx); 3585 fast_tx = rcu_dereference(sta->fast_tx);
3578 3586
3579 if (fast_tx && 3587 if (fast_tx &&
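The arithmetic behind the added comment: the pacing shift expresses the per-socket queueing budget as a power-of-two fraction of one second, so a shift of 8 permits 1/256 s, roughly 3.9 ms, of data in flight. A quick check:

    #include <stdio.h>

    int main(void)
    {
        /* sk_pacing_shift budget: one second >> shift. */
        int shift = 8;
        printf("%.2f ms\n", 1000.0 / (1 << shift)); /* 3.91 ms */
        return 0;
    }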
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index e545a3c9365f..7a4de6d618b1 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
122 if (skb->len <= mtu) 122 if (skb->len <= mtu)
123 return false; 123 return false;
124 124
125 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 125 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
126 return false; 126 return false;
127 127
128 return true; 128 return true;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 3e17d32b629d..58d5d05aec24 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
260 buf_len = strlen(buf); 260 buf_len = strlen(buf);
261 261
262 ct = nf_ct_get(skb, &ctinfo); 262 ct = nf_ct_get(skb, &ctinfo);
263 if (ct && (ct->status & IPS_NAT_MASK)) { 263 if (ct) {
264 bool mangled; 264 bool mangled;
265 265
266 /* If mangling fails this function will return 0 266 /* If mangling fails this function will return 0
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8b9fe30de0cd..c4acc7340eb1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5037{ 5037{
5038 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 5038 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
5039 const struct nf_flowtable_type *type; 5039 const struct nf_flowtable_type *type;
5040 struct nft_flowtable *flowtable, *ft;
5040 u8 genmask = nft_genmask_next(net); 5041 u8 genmask = nft_genmask_next(net);
5041 int family = nfmsg->nfgen_family; 5042 int family = nfmsg->nfgen_family;
5042 struct nft_flowtable *flowtable;
5043 struct nft_table *table; 5043 struct nft_table *table;
5044 struct nft_ctx ctx; 5044 struct nft_ctx ctx;
5045 int err, i, k; 5045 int err, i, k;
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5099 goto err3; 5099 goto err3;
5100 5100
5101 for (i = 0; i < flowtable->ops_len; i++) { 5101 for (i = 0; i < flowtable->ops_len; i++) {
5102 if (!flowtable->ops[i].dev)
5103 continue;
5104
5105 list_for_each_entry(ft, &table->flowtables, list) {
5106 for (k = 0; k < ft->ops_len; k++) {
5107 if (!ft->ops[k].dev)
5108 continue;
5109
5110 if (flowtable->ops[i].dev == ft->ops[k].dev &&
5111 flowtable->ops[i].pf == ft->ops[k].pf) {
5112 err = -EBUSY;
5113 goto err4;
5114 }
5115 }
5116 }
5117
5102 err = nf_register_net_hook(net, &flowtable->ops[i]); 5118 err = nf_register_net_hook(net, &flowtable->ops[i]);
5103 if (err < 0) 5119 if (err < 0)
5104 goto err4; 5120 goto err4;
@@ -5120,7 +5136,7 @@ err5:
5120 i = flowtable->ops_len; 5136 i = flowtable->ops_len;
5121err4: 5137err4:
5122 for (k = i - 1; k >= 0; k--) 5138 for (k = i - 1; k >= 0; k--)
5123 nf_unregister_net_hook(net, &flowtable->ops[i]); 5139 nf_unregister_net_hook(net, &flowtable->ops[k]);
5124 5140
5125 kfree(flowtable->ops); 5141 kfree(flowtable->ops);
5126err3: 5142err3:
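Two fixes land in this function: the new loop rejects a flowtable whose (device, family) pairs collide with one already in the table, and the err4 unwind now unregisters ops[k] instead of ops[i], where the old code repeatedly poked the slot that had just failed while leaking every hook actually registered. The register-all-or-unwind shape, with reg()/unreg() as hypothetical stand-ins:

    #include <stdio.h>

    #define N 4

    static int  reg(int i)   { return i == 2 ? -1 : 0; } /* 3rd one fails */
    static void unreg(int i) { printf("unreg %d\n", i); }

    int main(void)
    {
        int i, k, err = 0;

        for (i = 0; i < N; i++) {
            err = reg(i);
            if (err)
                goto unwind;
        }
        return 0;

    unwind:
        for (k = i - 1; k >= 0; k--)
            unreg(k);       /* must index with k, not i, as in the fix */
        return err;
    }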
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
5145 struct nft_table *table; 5161 struct nft_table *table;
5146 struct nft_ctx ctx; 5162 struct nft_ctx ctx;
5147 5163
5164 if (!nla[NFTA_FLOWTABLE_TABLE] ||
5165 (!nla[NFTA_FLOWTABLE_NAME] &&
5166 !nla[NFTA_FLOWTABLE_HANDLE]))
5167 return -EINVAL;
5168
5148 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], 5169 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
5149 family, genmask); 5170 family, genmask);
5150 if (IS_ERR(table)) 5171 if (IS_ERR(table))
@@ -5402,6 +5423,7 @@ err:
5402static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) 5423static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5403{ 5424{
5404 cancel_delayed_work_sync(&flowtable->data.gc_work); 5425 cancel_delayed_work_sync(&flowtable->data.gc_work);
5426 kfree(flowtable->ops);
5405 kfree(flowtable->name); 5427 kfree(flowtable->name);
5406 flowtable->data.type->free(&flowtable->data); 5428 flowtable->data.type->free(&flowtable->data);
5407 rhashtable_destroy(&flowtable->data.rhashtable); 5429 rhashtable_destroy(&flowtable->data.rhashtable);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3f1624ee056f..d40591fe1b2f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -674,7 +674,7 @@ static const struct nft_set_ops *
674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, 674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
675 u32 flags) 675 u32 flags)
676{ 676{
677 if (desc->size) { 677 if (desc->size && !(flags & NFT_SET_TIMEOUT)) {
678 switch (desc->klen) { 678 switch (desc->klen) {
679 case 4: 679 case 4:
680 return &nft_hash_fast_ops; 680 return &nft_hash_fast_ops;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index fa1655aff8d3..4aa01c90e9d1 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
423 return buf; 423 return buf;
424} 424}
425 425
426/**
427 * xt_check_proc_name - check that name is suitable for /proc file creation
428 *
429 * @name: file name candidate
430 * @size: length of buffer
431 *
432 * some x_tables modules wish to create a file in /proc.
433 * This function makes sure that the name is suitable for this
434 * purpose, it checks that name is NUL terminated and isn't a 'special'
435 * name, like "..".
436 *
437 * returns negative number on error or 0 if name is useable.
438 */
439int xt_check_proc_name(const char *name, unsigned int size)
440{
441 if (name[0] == '\0')
442 return -EINVAL;
443
444 if (strnlen(name, size) == size)
445 return -ENAMETOOLONG;
446
447 if (strcmp(name, ".") == 0 ||
448 strcmp(name, "..") == 0 ||
449 strchr(name, '/'))
450 return -EINVAL;
451
452 return 0;
453}
454EXPORT_SYMBOL(xt_check_proc_name);
455
426int xt_check_match(struct xt_mtchk_param *par, 456int xt_check_match(struct xt_mtchk_param *par,
427 unsigned int size, u_int8_t proto, bool inv_proto) 457 unsigned int size, u_int8_t proto, bool inv_proto)
428{ 458{
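xt_check_proc_name() centralises a check that the three hashlimit revisions and xt_recent previously open-coded (see the hunks below): a proc entry name must be non-empty, NUL-terminated within its buffer, not "." or "..", and free of '/'. A userspace restatement with a few probes:

    #include <stdio.h>
    #include <string.h>

    /* Userspace restatement of xt_check_proc_name() above. */
    static int check_proc_name(const char *name, unsigned int size)
    {
        if (name[0] == '\0')
            return -1;                      /* -EINVAL */
        if (strnlen(name, size) == size)
            return -2;                      /* -ENAMETOOLONG */
        if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0 ||
            strchr(name, '/'))
            return -1;                      /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        char unterminated[4] = { 'a', 'b', 'c', 'd' };

        printf("%d\n", check_proc_name("mylist", 16));    /*  0 */
        printf("%d\n", check_proc_name("..", 16));        /* -1 */
        printf("%d\n", check_proc_name("a/b", 16));       /* -1 */
        printf("%d\n", check_proc_name(unterminated, 4)); /* -2 */
        return 0;
    }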
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 66f5aca62a08..3360f13dc208 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -917,8 +917,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
917 struct hashlimit_cfg3 cfg = {}; 917 struct hashlimit_cfg3 cfg = {};
918 int ret; 918 int ret;
919 919
920 if (info->name[sizeof(info->name) - 1] != '\0') 920 ret = xt_check_proc_name(info->name, sizeof(info->name));
921 return -EINVAL; 921 if (ret)
922 return ret;
922 923
923 ret = cfg_copy(&cfg, (void *)&info->cfg, 1); 924 ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
924 925
@@ -935,8 +936,9 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
935 struct hashlimit_cfg3 cfg = {}; 936 struct hashlimit_cfg3 cfg = {};
936 int ret; 937 int ret;
937 938
938 if (info->name[sizeof(info->name) - 1] != '\0') 939 ret = xt_check_proc_name(info->name, sizeof(info->name));
939 return -EINVAL; 940 if (ret)
941 return ret;
940 942
941 ret = cfg_copy(&cfg, (void *)&info->cfg, 2); 943 ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
942 944
@@ -950,9 +952,11 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
950static int hashlimit_mt_check(const struct xt_mtchk_param *par) 952static int hashlimit_mt_check(const struct xt_mtchk_param *par)
951{ 953{
952 struct xt_hashlimit_mtinfo3 *info = par->matchinfo; 954 struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
955 int ret;
953 956
954 if (info->name[sizeof(info->name) - 1] != '\0') 957 ret = xt_check_proc_name(info->name, sizeof(info->name));
955 return -EINVAL; 958 if (ret)
959 return ret;
956 960
957 return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg, 961 return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
958 info->name, 3); 962 info->name, 3);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 6d232d18faff..81ee1d6543b2 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); 361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
362 return -EINVAL; 362 return -EINVAL;
363 } 363 }
364 if (info->name[0] == '\0' || 364 ret = xt_check_proc_name(info->name, sizeof(info->name));
365 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 365 if (ret)
366 return -EINVAL; 366 return ret;
367 367
368 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) 368 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
369 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; 369 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 6f02499ef007..b9ce82c9440f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1106 if (!err) 1106 if (!err)
1107 delivered = true; 1107 delivered = true;
1108 else if (err != -ESRCH) 1108 else if (err != -ESRCH)
1109 goto error; 1109 return err;
1110 return delivered ? 0 : -ESRCH; 1110 return delivered ? 0 : -ESRCH;
1111 error: 1111 error:
1112 kfree_skb(skb); 1112 kfree_skb(skb);
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 04b94281a30b..b891a91577f8 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
242 242
243 band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]); 243 band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
244 band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]); 244 band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
245 if (band->rate == 0) {
246 err = -EINVAL;
247 goto exit_free_meter;
248 }
249
245 band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]); 250 band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
246 /* Figure out max delta_t that is enough to fill any bucket. 251 /* Figure out max delta_t that is enough to fill any bucket.
247 * Keep max_delta_t size to the bucket units: 252 * Keep max_delta_t size to the bucket units:
248 * pkts => 1/1000 packets, kilobits => bits. 253 * pkts => 1/1000 packets, kilobits => bits.
254 *
255 * Start with a full bucket.
249 */ 256 */
250 band_max_delta_t = (band->burst_size + band->rate) * 1000; 257 band->bucket = (band->burst_size + band->rate) * 1000;
251 /* Start with a full bucket. */ 258 band_max_delta_t = band->bucket / band->rate;
252 band->bucket = band_max_delta_t;
253 if (band_max_delta_t > meter->max_delta_t) 259 if (band_max_delta_t > meter->max_delta_t)
254 meter->max_delta_t = band_max_delta_t; 260 meter->max_delta_t = band_max_delta_t;
255 band++; 261 band++;
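The reworked initialisation starts each band with a full bucket of (burst_size + rate) * 1000 sub-units (1/1000 packets for packet bands, bits for kilobit bands) and derives max_delta_t as the time to fill that bucket at the configured rate, where the old code used the bucket size itself as max_delta_t. Worked numbers, purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical kilobit band: 1000 kbit/s rate, 500 kbit burst. */
        unsigned int rate = 1000, burst = 500;

        unsigned int bucket = (burst + rate) * 1000; /* 1500000 bits    */
        unsigned int max_delta_t = bucket / rate;    /* 1500 (time to
                                                      * refill the full
                                                      * bucket at rate) */
        printf("bucket=%u max_delta_t=%u\n", bucket, max_delta_t);
        return 0;
    }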
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 50615d5efac1..9cf089b9754e 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = {
114 114
115module_rpmsg_driver(qcom_smd_qrtr_driver); 115module_rpmsg_driver(qcom_smd_qrtr_driver);
116 116
117MODULE_ALIAS("rpmsg:IPCRTR");
117MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); 118MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
118MODULE_LICENSE("GPL v2"); 119MODULE_LICENSE("GPL v2");
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index c061d6eb465d..22571189f21e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 Oracle. All rights reserved. 2 * Copyright (c) 2006, 2018 Oracle. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock)
142 if (ret) 142 if (ret)
143 goto out; 143 goto out;
144 144
145 new_sock->type = sock->type;
146 new_sock->ops = sock->ops;
147 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); 145 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
148 if (ret < 0) 146 if (ret < 0)
149 goto out; 147 goto out;
150 148
149 /* sock_create_lite() does not get a hold on the owner module so we
150 * need to do it here. Note that sock_release() uses sock->ops to
151 * determine if it needs to decrement the reference count. So set
152 * sock->ops after calling accept() in case that fails. And there's
153 * no need to do try_module_get() as the listener should have a hold
154 * already.
155 */
156 new_sock->ops = sock->ops;
157 __module_get(new_sock->ops->owner);
158
151 ret = rds_tcp_keepalive(new_sock); 159 ret = rds_tcp_keepalive(new_sock);
152 if (ret < 0) 160 if (ret < 0)
153 goto out; 161 goto out;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index b3f2c15affa7..9d2cabf1dc7e 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
352 return res; 352 return res;
353out: 353out:
354 if (res == ACT_P_CREATED) 354 if (res == ACT_P_CREATED)
355 tcf_idr_cleanup(*act, est); 355 tcf_idr_release(*act, bind);
356 356
357 return ret; 357 return ret;
358} 358}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b7ba9b06b147..2a5c8fd860cf 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
350{ 350{
351 struct sctphdr *sctph; 351 struct sctphdr *sctph;
352 352
353 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) 353 if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
354 return 1; 354 return 1;
355 355
356 sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); 356 sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a)
626 struct tcf_csum_params *params; 626 struct tcf_csum_params *params;
627 627
628 params = rcu_dereference_protected(p->params, 1); 628 params = rcu_dereference_protected(p->params, 1);
629 kfree_rcu(params, rcu); 629 if (params)
630 kfree_rcu(params, rcu);
630} 631}
631 632
632static int tcf_csum_walker(struct net *net, struct sk_buff *skb, 633static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
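The NULL guard here (and the analogous ones in act_sample and act_skbmod below) protects the cleanup path when init fails before the params pointer is assigned: with the error paths now going through tcf_idr_release(), ->cleanup() can run on a partially initialised action, and kfree_rcu() would compute an rcu_head address from the NULL pointer. A trivial sketch of NULL-tolerant teardown:

    #include <stdio.h>
    #include <stdlib.h>

    struct act {
        int *params;        /* may never have been allocated */
    };

    /* Cleanup must tolerate a partially-initialised object, as the
     * tcf_csum_cleanup() hunk above now does. In userspace free(NULL)
     * is already a no-op; the guard mirrors the kernel fix, where
     * kfree_rcu() on a NULL pointer is not safe. */
    static void act_cleanup(struct act *a)
    {
        if (a->params)
            free(a->params);
        a->params = NULL;
    }

    int main(void)
    {
        struct act a = { 0 };
        act_cleanup(&a);    /* safe even though init never ran */
        puts("ok");
        return 0;
    }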
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 06e380ae0928..7e06b9b62613 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
80static void tcf_ipt_release(struct tc_action *a) 80static void tcf_ipt_release(struct tc_action *a)
81{ 81{
82 struct tcf_ipt *ipt = to_ipt(a); 82 struct tcf_ipt *ipt = to_ipt(a);
83 ipt_destroy_target(ipt->tcfi_t); 83
84 if (ipt->tcfi_t) {
85 ipt_destroy_target(ipt->tcfi_t);
86 kfree(ipt->tcfi_t);
87 }
84 kfree(ipt->tcfi_tname); 88 kfree(ipt->tcfi_tname);
85 kfree(ipt->tcfi_t);
86} 89}
87 90
88static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { 91static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -187,7 +190,7 @@ err2:
187 kfree(tname); 190 kfree(tname);
188err1: 191err1:
189 if (ret == ACT_P_CREATED) 192 if (ret == ACT_P_CREATED)
190 tcf_idr_cleanup(*a, est); 193 tcf_idr_release(*a, bind);
191 return err; 194 return err;
192} 195}
193 196
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 349beaffb29e..fef08835f26d 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
176 p = to_pedit(*a); 176 p = to_pedit(*a);
177 keys = kmalloc(ksize, GFP_KERNEL); 177 keys = kmalloc(ksize, GFP_KERNEL);
178 if (keys == NULL) { 178 if (keys == NULL) {
179 tcf_idr_cleanup(*a, est); 179 tcf_idr_release(*a, bind);
180 kfree(keys_ex); 180 kfree(keys_ex);
181 return -ENOMEM; 181 return -ENOMEM;
182 } 182 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 95d3c9097b25..faebf82b99f1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -194,7 +194,7 @@ failure:
         qdisc_put_rtab(P_tab);
         qdisc_put_rtab(R_tab);
         if (ret == ACT_P_CREATED)
-                tcf_idr_cleanup(*a, est);
+                tcf_idr_release(*a, bind);
         return err;
 }
 
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 1ba0df238756..74c5d7e6a0fa 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
 
         psample_group = rtnl_dereference(s->psample_group);
         RCU_INIT_POINTER(s->psample_group, NULL);
-        psample_group_put(psample_group);
+        if (psample_group)
+                psample_group_put(psample_group);
 }
 
 static bool tcf_sample_dev_ok_push(struct net_device *dev)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 425eac11f6da..b1f38063ada0 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
         d = to_defact(*a);
         ret = alloc_defdata(d, defdata);
         if (ret < 0) {
-                tcf_idr_cleanup(*a, est);
+                tcf_idr_release(*a, bind);
                 return ret;
         }
         d->tcf_action = parm->action;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index fa975262dbac..7b0700f52b50 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
         ASSERT_RTNL();
         p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
         if (unlikely(!p)) {
-                if (ovr)
+                if (ret == ACT_P_CREATED)
                         tcf_idr_release(*a, bind);
                 return -ENOMEM;
         }
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a)
         struct tcf_skbmod_params *p;
 
         p = rcu_dereference_protected(d->skbmod_p, 1);
-        kfree_rcu(p, rcu);
+        if (p)
+                kfree_rcu(p, rcu);
 }
 
 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 0e23aac09ad6..1281ca463727 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
                 break;
         default:
+                ret = -EINVAL;
                 goto err_out;
         }
 
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a)
         struct tcf_tunnel_key_params *params;
 
         params = rcu_dereference_protected(t->params, 1);
+        if (params) {
+                if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+                        dst_release(&params->tcft_enc_metadata->dst);
 
-        if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-                dst_release(&params->tcft_enc_metadata->dst);
-
-        kfree_rcu(params, rcu);
+                kfree_rcu(params, rcu);
+        }
 }
 
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index e1a1b3f3983a..c49cb61adedf 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
         ASSERT_RTNL();
         p = kzalloc(sizeof(*p), GFP_KERNEL);
         if (!p) {
-                if (ovr)
+                if (ret == ACT_P_CREATED)
                         tcf_idr_release(*a, bind);
                 return -ENOMEM;
         }
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a)
         struct tcf_vlan_params *p;
 
         p = rcu_dereference_protected(v->vlan_p, 1);
-        kfree_rcu(p, rcu);
+        if (p)
+                kfree_rcu(p, rcu);
 }
 
 static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 190570f21b20..7e3fbe9cc936 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
         __skb_queue_tail(&q->skb_bad_txq, skb);
 
+        if (qdisc_is_percpu_stats(q)) {
+                qdisc_qstats_cpu_backlog_inc(q, skb);
+                qdisc_qstats_cpu_qlen_inc(q);
+        } else {
+                qdisc_qstats_backlog_inc(q, skb);
+                q->q.qlen++;
+        }
+
         if (lock)
                 spin_unlock(lock);
 }
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
                         break;
                 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
                         qdisc_enqueue_skb_bad_txq(q, nskb);
-
-                        if (qdisc_is_percpu_stats(q)) {
-                                qdisc_qstats_cpu_backlog_inc(q, nskb);
-                                qdisc_qstats_cpu_qlen_inc(q);
-                        } else {
-                                qdisc_qstats_backlog_inc(q, nskb);
-                                q->q.qlen++;
-                        }
                         break;
                 }
                 skb->next = nskb;
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
         int band = prio2band[skb->priority & TC_PRIO_MAX];
         struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
         struct skb_array *q = band2list(priv, band);
+        unsigned int pkt_len = qdisc_pkt_len(skb);
         int err;
 
         err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                 return qdisc_drop_cpu(skb, qdisc, to_free);
 
         qdisc_qstats_cpu_qlen_inc(qdisc);
-        qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+        /* Note: skb can not be used after skb_array_produce(),
+         * so we better not use qdisc_qstats_cpu_backlog_inc()
+         */
+        this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
         return NET_XMIT_SUCCESS;
 }
 
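
The pfifo_fast hunk above caches qdisc_pkt_len(skb) before calling skb_array_produce(): as the added comment notes, once the array owns the skb a consumer on another CPU may free it, so any value needed for the backlog statistics has to be read first. A user-space sketch of the same read-before-handoff rule, with a toy single-slot queue standing in for the skb array (all names invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pkt { size_t len; char data[64]; };

    /* Toy queue that takes ownership: after enqueue() returns 0 the
     * caller must not touch 'p' again; a real consumer could free it. */
    static struct pkt *slot;

    static int enqueue(struct pkt *p)
    {
            if (slot)
                    return -1;
            slot = p;
            return 0;
    }

    int main(void)
    {
            struct pkt *p = calloc(1, sizeof(*p));
            size_t backlog = 0;

            p->len = strlen(strcpy(p->data, "hello"));

            size_t pkt_len = p->len;        /* read BEFORE handing off ownership */
            if (enqueue(p) == 0)
                    backlog += pkt_len;     /* must not read p->len here */

            printf("backlog=%zu\n", backlog);
            free(slot);
            return 0;
    }
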
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7c179addebcd..7d6801fc5340 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         }
 
         if (unlikely(sch->q.qlen >= sch->limit))
-                return qdisc_drop(skb, sch, to_free);
+                return qdisc_drop_all(skb, sch, to_free);
 
         qdisc_qstats_backlog_inc(sch, skb);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 229172d509cc..03225a8df973 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         int ret;
 
         if (qdisc_pkt_len(skb) > q->max_size) {
-                if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+                if (skb_is_gso(skb) &&
+                    skb_gso_validate_mac_len(skb, q->max_size))
                         return tbf_segment(skb, sch, to_free);
                 return qdisc_drop(skb, sch, to_free);
         }
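
The tbf hunk swaps a check of one computed segment size (skb_gso_mac_seglen()) for skb_gso_validate_mac_len(), which asks whether the GSO packet as a whole respects the cap. As a rough illustration of the difference, a predicate that only passes when every segment fits; the segment lengths and function name are made up:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true only if every segment fits under max_len. */
    static bool validate_seg_lens(const unsigned int *seg, int n,
                                  unsigned int max_len)
    {
            for (int i = 0; i < n; i++)
                    if (seg[i] > max_len)
                            return false;
            return true;
    }

    int main(void)
    {
            unsigned int segs[] = { 1400, 1400, 1514 };     /* last is oversized */

            printf("all fit: %s\n",
                   validate_seg_lens(segs, 3, 1500) ? "yes" : "no");
            return 0;
    }
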
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0247cc432e02..b381d78548ac 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -106,6 +106,7 @@ int sctp_rcv(struct sk_buff *skb)
         int family;
         struct sctp_af *af;
         struct net *net = dev_net(skb->dev);
+        bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
 
         if (skb->pkt_type != PACKET_HOST)
                 goto discard_it;
@@ -123,8 +124,7 @@ int sctp_rcv(struct sk_buff *skb)
          * it's better to just linearize it otherwise crc computing
          * takes longer.
          */
-        if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
-             skb_linearize(skb)) ||
+        if ((!is_gso && skb_linearize(skb)) ||
             !pskb_may_pull(skb, sizeof(struct sctphdr)))
                 goto discard_it;
 
@@ -135,7 +135,7 @@ int sctp_rcv(struct sk_buff *skb)
         if (skb_csum_unnecessary(skb))
                 __skb_decr_checksum_unnecessary(skb);
         else if (!sctp_checksum_disable &&
-                 !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+                 !is_gso &&
                  sctp_rcv_checksum(net, skb) < 0)
                 goto discard_it;
         skb->csum_valid = 1;
@@ -1218,7 +1218,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
          * issue as packets hitting this are mostly INIT or INIT-ACK and
          * those cannot be on GSO-style anyway.
          */
-        if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
+        if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
                 return NULL;
 
         ch = (struct sctp_chunkhdr *)skb->data;
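
The sctp_rcv() hunks hoist the GSO test into a single is_gso local computed on entry, so the linearize and checksum branches below reuse one consistently evaluated predicate instead of re-deriving it from skb state. A trivial sketch of the hoisting pattern; struct buf and its fields are placeholders:

    #include <stdbool.h>
    #include <stdio.h>

    struct buf { bool gso; bool gso_sctp; };

    int main(void)
    {
            struct buf b = { .gso = true, .gso_sctp = true };

            /* Evaluate the compound predicate once, up front, then reuse
             * the cached result in every later branch. */
            bool is_gso = b.gso && b.gso_sctp;

            if (!is_gso)
                    puts("would linearize");
            if (!is_gso)
                    puts("would verify checksum in software");
            printf("is_gso=%d\n", is_gso);
            return 0;
    }
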
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 48392552ee7c..23ebc5318edc 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -170,7 +170,7 @@ next_chunk:
 
         chunk = list_entry(entry, struct sctp_chunk, list);
 
-        if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
+        if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
                 /* GSO-marked skbs but without frags, handle
                  * them normally
                  */
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 35bc7106d182..123e9f2dc226 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,7 +45,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
         struct sk_buff *segs = ERR_PTR(-EINVAL);
         struct sctphdr *sh;
 
-        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+        if (!skb_is_gso_sctp(skb))
                 goto out;
 
         sh = sctp_hdr(skb);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index da1a5cdefd13..1e0d780855c3 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -978,10 +978,6 @@ out:
                 lsmc->clcsock = NULL;
         }
         release_sock(lsk);
-        /* no more listening, wake up smc_close_wait_listen_clcsock and
-         * accept
-         */
-        lsk->sk_state_change(lsk);
         sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
987 983
@@ -1406,8 +1402,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
         smc->use_fallback = false; /* assume rdma capability first */
         rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
                               IPPROTO_TCP, &smc->clcsock);
-        if (rc)
+        if (rc) {
                 sk_common_release(sk);
+                goto out;
+        }
         smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
         smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
 
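
The smc_create() hunk is an error-path fix: sk_common_release() tears the socket down, so falling through to the sndbuf/rcvbuf assignments would dereference freed state; the added goto jumps past them. A small sketch of that goto-unwind convention, with invented helper names:

    #include <stdio.h>
    #include <stdlib.h>

    struct inner_sock { int sndbuf; };

    static int create_inner(struct inner_sock **out)
    {
            *out = NULL;            /* simulate a creation failure */
            return -1;
    }

    static int smc_like_create(void)
    {
            struct inner_sock *inner;
            int rc = create_inner(&inner);

            if (rc) {
                    /* resources already torn down; falling through
                     * to the assignment below would crash */
                    goto out;
            }
            inner->sndbuf = 4096;   /* only reached on success */
            free(inner);
    out:
            return rc;
    }

    int main(void)
    {
            printf("rc=%d\n", smc_like_create());
            return 0;
    }
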
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 3cd086e5bd28..b42395d24cba 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
 
         if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
                 return; /* short message */
-        if (cdc->len != sizeof(*cdc))
+        if (cdc->len != SMC_WR_TX_SIZE)
                 return; /* invalid message */
         smc_cdc_msg_recv(cdc, link, wc->wr_id);
 }
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index e339c0186dcf..fa41d9881741 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -30,27 +30,6 @@ static void smc_close_cleanup_listen(struct sock *parent)
                 smc_close_non_accepted(sk);
 }
 
-static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
-{
-        DEFINE_WAIT_FUNC(wait, woken_wake_function);
-        struct sock *sk = &smc->sk;
-        signed long timeout;
-
-        timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
-        add_wait_queue(sk_sleep(sk), &wait);
-        do {
-                release_sock(sk);
-                if (smc->clcsock)
-                        timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
-                                             timeout);
-                sched_annotate_sleep();
-                lock_sock(sk);
-                if (!smc->clcsock)
-                        break;
-        } while (timeout);
-        remove_wait_queue(sk_sleep(sk), &wait);
-}
-
 /* wait for sndbuf data being transmitted */
 static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
 {
@@ -204,9 +183,11 @@ again:
                         rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
                         /* wake up kernel_accept of smc_tcp_listen_worker */
                         smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
-                        smc_close_wait_listen_clcsock(smc);
                 }
                 smc_close_cleanup_listen(sk);
+                release_sock(sk);
+                flush_work(&smc->tcp_listen_work);
+                lock_sock(sk);
                 break;
         case SMC_ACTIVE:
                 smc_close_stream_wait(smc, timeout);
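
The smc_close changes delete a hand-rolled wait loop and instead drop the sock lock and call flush_work() on tcp_listen_work, letting the workqueue API supply the wait-until-the-worker-finished semantics. A rough pthread analogue of that move: joining the worker directly rather than polling shared state on a timeout (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *listen_worker(void *arg)
    {
            (void)arg;
            usleep(10000);          /* pretend to tear down a listen socket */
            return NULL;
    }

    int main(void)
    {
            pthread_t worker;

            pthread_create(&worker, NULL, listen_worker, NULL);

            /* Wait directly for the worker to finish -- the pthread
             * analogue of flush_work() -- instead of a timed poll loop. */
            pthread_join(worker, NULL);
            puts("worker finished");
            return 0;
    }
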
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2424c7100aaf..645dd226177b 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
 
         lnk = &lgr->lnk[SMC_SINGLE_LINK];
         /* initialize link */
+        lnk->link_id = SMC_SINGLE_LINK;
         lnk->smcibdev = smcibdev;
         lnk->ibport = ibport;
         lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@ -465,7 +466,7 @@ create:
                 rc = smc_link_determine_gid(conn->lgr);
         }
         conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
-        conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 #ifndef KERNEL_HAS_ATOMIC64
         spin_lock_init(&conn->acurs_lock);
 #endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 92fe4cc8c82c..b4aa4fcedb96 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
         memcpy(confllc->sender_mac, mac, ETH_ALEN);
         memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
         hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
-        /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+        confllc->link_num = link->link_id;
         memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
         confllc->max_links = SMC_LINKS_PER_LGR_MAX;
         /* send llc message */
diff --git a/net/socket.c b/net/socket.c
index a93c99b518ca..08847c3b8c39 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2587,6 +2587,11 @@ void sock_unregister(int family)
 }
 EXPORT_SYMBOL(sock_unregister);
 
+bool sock_is_registered(int family)
+{
+        return family < NPROTO && rcu_access_pointer(net_families[family]);
+}
+
 static int __init sock_init(void)
 {
         int err;
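
The new sock_is_registered() helper pairs a bounds check with the registration lookup, so a caller holding an untrusted family number cannot index past the net_families table. The same shape in a standalone sketch; the table contents here are invented:

    #include <stdbool.h>
    #include <stdio.h>

    #define NPROTO 4

    static const char *families[NPROTO] = { "unspec", "unix", NULL, "inet" };

    /* Reject out-of-range indices before touching the table, then
     * treat an empty slot as "not registered". */
    static bool family_is_registered(int family)
    {
            return family >= 0 && family < NPROTO && families[family];
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   family_is_registered(1),     /* registered */
                   family_is_registered(2),     /* empty slot */
                   family_is_registered(42));   /* out of range */
            return 0;
    }
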
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 122162a31816..04e516d18054 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
         grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
         grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
         grp->open = group_is_open;
+        *grp->open = false;
         filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
         if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
                                     filter, &grp->subid))
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b0323ec7971e..7dfa9fc99ec3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
         sk->sk_write_space = tipc_write_space;
         sk->sk_destruct = tipc_sock_destruct;
         tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+        tsk->group_is_open = true;
         atomic_set(&tsk->dupl_rcvcnt, 0);
 
         /* Start out with safe limits until we receive an advertised window */
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e9b4b53ab53e..d824d548447e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -46,16 +46,26 @@ MODULE_DESCRIPTION("Transport Layer Security Support");
 MODULE_LICENSE("Dual BSD/GPL");
 
 enum {
+        TLSV4,
+        TLSV6,
+        TLS_NUM_PROTS,
+};
+
+enum {
         TLS_BASE_TX,
         TLS_SW_TX,
         TLS_NUM_CONFIG,
 };
 
-static struct proto tls_prots[TLS_NUM_CONFIG];
+static struct proto *saved_tcpv6_prot;
+static DEFINE_MUTEX(tcpv6_prot_mutex);
+static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
 
 static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
-        sk->sk_prot = &tls_prots[ctx->tx_conf];
+        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+        sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -453,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
         return do_tls_setsockopt(sk, optname, optval, optlen);
 }
 
+static void build_protos(struct proto *prot, struct proto *base)
+{
+        prot[TLS_BASE_TX] = *base;
+        prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
+        prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
+        prot[TLS_BASE_TX].close = tls_sk_proto_close;
+
+        prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+        prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
+        prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
+}
+
 static int tls_init(struct sock *sk)
 {
+        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tls_context *ctx;
         int rc = 0;
@@ -479,6 +502,17 @@ static int tls_init(struct sock *sk)
         ctx->getsockopt = sk->sk_prot->getsockopt;
         ctx->sk_proto_close = sk->sk_prot->close;
 
+        /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+        if (ip_ver == TLSV6 &&
+            unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+                mutex_lock(&tcpv6_prot_mutex);
+                if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                        build_protos(tls_prots[TLSV6], sk->sk_prot);
+                        smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+                }
+                mutex_unlock(&tcpv6_prot_mutex);
+        }
+
         ctx->tx_conf = TLS_BASE_TX;
         update_sk_prot(sk, ctx);
 out:
@@ -493,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
         .init                   = tls_init,
 };
 
-static void build_protos(struct proto *prot, struct proto *base)
-{
-        prot[TLS_BASE_TX] = *base;
-        prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
-        prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
-        prot[TLS_BASE_TX].close = tls_sk_proto_close;
-
-        prot[TLS_SW_TX] = prot[TLS_BASE_TX];
-        prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
-        prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
-}
-
 static int __init tls_register(void)
 {
-        build_protos(tls_prots, &tcp_prot);
+        build_protos(tls_prots[TLSV4], &tcp_prot);
 
         tcp_register_ulp(&tcp_tls_ulp_ops);
 
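
The tls_main rework builds the IPv6 proto table lazily under tcpv6_prot_mutex and publishes it with smp_store_release(), paired with the lockless smp_load_acquire() fast path in tls_init() -- double-checked locking with explicit ordering. A user-space rendering of the same pattern with C11 atomics and a pthread mutex; ensure_built() and the other names are illustrative only (compile with -lpthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t prot_mutex = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic(const char *) saved_base;    /* publication marker */
    static char built_table[32];

    static void build_protos(const char *base)
    {
            snprintf(built_table, sizeof(built_table), "protos(%s)", base);
    }

    static void ensure_built(const char *base)
    {
            /* Fast path: acquire-load pairs with the release-store below,
             * so a reader that sees 'base' also sees the built table. */
            if (atomic_load_explicit(&saved_base, memory_order_acquire) == base)
                    return;

            pthread_mutex_lock(&prot_mutex);
            if (saved_base != base) {           /* re-check under the lock */
                    build_protos(base);
                    atomic_store_explicit(&saved_base, base,
                                          memory_order_release);
            }
            pthread_mutex_unlock(&prot_mutex);
    }

    int main(void)
    {
            static const char tcpv6[] = "tcpv6_prot";

            ensure_built(tcpv6);
            ensure_built(tcpv6);                /* second call: fast path */
            puts(built_table);
            return 0;
    }
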
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1abcc4fc4df1..41722046b937 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -34,9 +34,10 @@ config CFG80211
 
           When built as a module it will be called cfg80211.
 
+if CFG80211
+
 config NL80211_TESTMODE
         bool "nl80211 testmode command"
-        depends on CFG80211
         help
           The nl80211 testmode command helps implementing things like
           factory calibration or validation tools for wireless chips.
@@ -51,7 +52,6 @@ config NL80211_TESTMODE
 
 config CFG80211_DEVELOPER_WARNINGS
         bool "enable developer warnings"
-        depends on CFG80211
         default n
         help
           This option enables some additional warnings that help
@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS
 
 config CFG80211_CERTIFICATION_ONUS
         bool "cfg80211 certification onus"
-        depends on CFG80211 && EXPERT
+        depends on EXPERT
         default n
         ---help---
           You should disable this option unless you are both capable
@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR
 
 config CFG80211_DEFAULT_PS
         bool "enable powersave by default"
-        depends on CFG80211
         default y
         help
           This option enables powersave mode by default.
@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS
 
 config CFG80211_DEBUGFS
         bool "cfg80211 DebugFS entries"
-        depends on CFG80211
         depends on DEBUG_FS
         ---help---
           You can enable this if you want debugfs entries for cfg80211.
@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS
 config CFG80211_CRDA_SUPPORT
         bool "support CRDA" if EXPERT
         default y
-        depends on CFG80211
         help
           You should enable this option unless you know for sure you have no
           need for it, for example when using internal regdb (above) or the
@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT
 
 config CFG80211_WEXT
         bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
-        depends on CFG80211
         select WEXT_CORE
         default y if CFG80211_WEXT_EXPORT
         help
@@ -199,11 +195,12 @@ config CFG80211_WEXT
 
 config CFG80211_WEXT_EXPORT
         bool
-        depends on CFG80211
         help
           Drivers should select this option if they require cfg80211's
           wext compatibility symbols to be exported.
 
+endif # CFG80211
+
 config LIB80211
         tristate
         default n
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8e70291e586a..e87d6c4dd5b6 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
                 if (skb->len <= mtu)
                         goto ok;
 
-                if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                         goto ok;
         }
 
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
                 struct crypto_comp *tfm;
 
                 /* This can be any valid CPU ID so we don't need locking. */
-                tfm = __this_cpu_read(*pos->tfms);
+                tfm = this_cpu_read(*pos->tfms);
 
                 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
                         pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7a23078132cf..625b3fca5704 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
 static int xfrm_get_tos(const struct flowi *fl, int family)
 {
         const struct xfrm_policy_afinfo *afinfo;
-        int tos = 0;
+        int tos;
 
         afinfo = xfrm_policy_get_afinfo(family);
-        tos = afinfo ? afinfo->get_tos(fl) : 0;
+        if (!afinfo)
+                return 0;
+
+        tos = afinfo->get_tos(fl);
 
         rcu_read_unlock();
 
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
         spin_unlock(&pq->hold_queue.lock);
 
         dst_hold(xfrm_dst_path(dst));
-        dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
+        dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
         if (IS_ERR(dst))
                 goto purge_queue;
 
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
         while (dst->xfrm) {
                 const struct xfrm_state *xfrm = dst->xfrm;
 
+                dst = xfrm_dst_child(dst);
+
                 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
                         continue;
                 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
                         daddr = xfrm->coaddr;
                 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
                         daddr = &xfrm->id.daddr;
-
-                dst = xfrm_dst_child(dst);
         }
         return daddr;
 }
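
The xfrm_get_tos() hunk returns early when no afinfo is found instead of funneling the failure case into code that ends with rcu_read_unlock(); the lookup helper appears to keep the RCU read lock held only when it returns a non-NULL pointer, so every path must release exactly what it acquired. A sketch of that lock-balanced early-return discipline with a pthread mutex and invented names (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t afinfo_lock = PTHREAD_MUTEX_INITIALIZER;
    static int have_afinfo = 1;

    /* Returns nonzero WITH the lock held, or zero without taking it --
     * the caller must unlock only on success. */
    static int get_afinfo_locked(void)
    {
            pthread_mutex_lock(&afinfo_lock);
            if (!have_afinfo) {
                    pthread_mutex_unlock(&afinfo_lock);
                    return 0;
            }
            return 1;
    }

    static int get_tos(void)
    {
            int tos;

            if (!get_afinfo_locked())
                    return 0;       /* early exit: the lock was never kept */
            tos = 42;               /* use the protected data */
            pthread_mutex_unlock(&afinfo_lock);
            return tos;
    }

    int main(void)
    {
            printf("tos=%d\n", get_tos());
            have_afinfo = 0;
            printf("tos=%d\n", get_tos());
            return 0;
    }
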
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 1d38c6acf8af..9e3a5e85f828 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
         } else {
                 XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
                 XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
-                xo->seq.low = oseq = oseq + 1;
+                xo->seq.low = oseq + 1;
                 xo->seq.hi = oseq_hi;
                 oseq += skb_shinfo(skb)->gso_segs;
         }
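
The xfrm_replay one-liner stops pre-incrementing oseq while storing xo->seq.low: the old code advanced the counter by one and then added gso_segs on top, consuming one more sequence number than the packet's segments actually use. The two accounting schemes, checked by hand:

    #include <stdio.h>

    int main(void)
    {
            unsigned int start = 100, gso_segs = 3;

            /* old: pre-increment, then add gso_segs -> off by one */
            unsigned int oseq_old = start + 1 + gso_segs;

            /* new: segments use start+1 .. start+gso_segs, so the counter
             * ends exactly at the last consumed sequence number */
            unsigned int oseq_new = start + gso_segs;

            printf("old=%u new=%u (new matches the last consumed seq)\n",
                   oseq_old, oseq_new);
            return 0;
    }
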
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54e21f19d722..f9d2f2233f09 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
         struct xfrm_mgr *km;
         struct xfrm_policy *pol = NULL;
 
+#ifdef CONFIG_COMPAT
+        if (in_compat_syscall())
+                return -EOPNOTSUPP;
+#endif
+
         if (!optval && !optlen) {
                 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
                 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7f52b8eb177d..080035f056d9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
         struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
         struct xfrm_replay_state_esn *rs;
 
-        if (p->flags & XFRM_STATE_ESN) {
-                if (!rt)
-                        return -EINVAL;
+        if (!rt)
+                return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
 
-                rs = nla_data(rt);
+        rs = nla_data(rt);
 
-                if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
-                        return -EINVAL;
-
-                if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
-                    nla_len(rt) != sizeof(*rs))
-                        return -EINVAL;
-        }
+        if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+                return -EINVAL;
 
-        if (!rt)
-                return 0;
+        if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
+            nla_len(rt) != sizeof(*rs))
+                return -EINVAL;
 
         /* As only ESP and AH support ESN feature. */
         if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))