summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/bridge/br.c5
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/bridge/br_vlan.c29
-rw-r--r--net/bridge/netfilter/Kconfig6
-rw-r--r--net/bridge/netfilter/ebtables.c32
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c10
-rw-r--r--net/can/gw.c48
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/flow_offload.c22
-rw-r--r--net/core/skmsg.c4
-rw-r--r--net/core/sock_map.c19
-rw-r--r--net/dsa/slave.c6
-rw-r--r--net/dsa/tag_sja1105.c12
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c4
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c2
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c12
-rw-r--r--net/ipv4/tcp_output.c13
-rw-r--r--net/ipv4/tcp_ulp.c13
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c2
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c8
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/iucv/af_iucv.c14
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/cfg.c8
-rw-r--r--net/mac80211/driver-ops.c13
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c2
-rw-r--r--net/netfilter/nf_conntrack_amanda.c2
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/nf_conntrack_expect.c26
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c5
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c18
-rw-r--r--net/netfilter/nf_conntrack_irc.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c4
-rw-r--r--net/netfilter/nf_conntrack_pptp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c8
-rw-r--r--net/netfilter/nf_conntrack_sane.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c10
-rw-r--r--net/netfilter/nf_conntrack_tftp.c2
-rw-r--r--net/netfilter/nf_nat_amanda.c2
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nf_nat_ftp.c2
-rw-r--r--net/netfilter/nf_nat_irc.c2
-rw-r--r--net/netfilter/nf_nat_sip.c8
-rw-r--r--net/netfilter/nf_nat_tftp.c2
-rw-r--r--net/netfilter/nf_synproxy_core.c8
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nf_tables_offload.c5
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nft_chain_filter.c2
-rw-r--r--net/netfilter/nft_chain_nat.c3
-rw-r--r--net/netfilter/nft_ct.c2
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/nft_meta.c18
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_synproxy.c2
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/openvswitch/flow.h4
-rw-r--r--net/openvswitch/flow_table.c8
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/peer_event.c2
-rw-r--r--net/rxrpc/peer_object.c18
-rw-r--r--net/rxrpc/sendmsg.c1
-rw-r--r--net/sched/act_bpf.c9
-rw-r--r--net/sched/act_connmark.c9
-rw-r--r--net/sched/act_csum.c9
-rw-r--r--net/sched/act_ct.c9
-rw-r--r--net/sched/act_ctinfo.c9
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c13
-rw-r--r--net/sched/act_mirred.c13
-rw-r--r--net/sched/act_mpls.c8
-rw-r--r--net/sched/act_nat.c9
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/act_sample.c10
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c11
-rw-r--r--net/sched/act_skbmod.c11
-rw-r--r--net/sched/act_tunnel_key.c8
-rw-r--r--net/sched/act_vlan.c25
-rw-r--r--net/sched/cls_api.c16
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/cls_matchall.c2
-rw-r--r--net/sched/cls_u32.c6
-rw-r--r--net/sched/sch_codel.c6
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/tipc/netlink_compat.c11
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/tls/tls_main.c97
-rw-r--r--net/tls/tls_sw.c83
-rw-r--r--net/vmw_vsock/hyperv_transport.c8
-rw-r--r--net/wireless/core.c6
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/util.c27
118 files changed, 673 insertions, 409 deletions
diff --git a/net/bridge/br.c b/net/bridge/br.c
index d164f63a4345..8a8f9e5f264f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -37,12 +37,15 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
37 int err; 37 int err;
38 38
39 if (dev->priv_flags & IFF_EBRIDGE) { 39 if (dev->priv_flags & IFF_EBRIDGE) {
40 err = br_vlan_bridge_event(dev, event, ptr);
41 if (err)
42 return notifier_from_errno(err);
43
40 if (event == NETDEV_REGISTER) { 44 if (event == NETDEV_REGISTER) {
41 /* register of bridge completed, add sysfs entries */ 45 /* register of bridge completed, add sysfs entries */
42 br_sysfs_addbr(dev); 46 br_sysfs_addbr(dev);
43 return NOTIFY_DONE; 47 return NOTIFY_DONE;
44 } 48 }
45 br_vlan_bridge_event(dev, event, ptr);
46 } 49 }
47 50
48 /* not a port of a bridge */ 51 /* not a port of a bridge */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3d8deac2353d..f8cac3702712 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1388,6 +1388,9 @@ br_multicast_leave_group(struct net_bridge *br,
1388 if (!br_port_group_equal(p, port, src)) 1388 if (!br_port_group_equal(p, port, src))
1389 continue; 1389 continue;
1390 1390
1391 if (p->flags & MDB_PG_FLAGS_PERMANENT)
1392 break;
1393
1391 rcu_assign_pointer(*pp, p->next); 1394 rcu_assign_pointer(*pp, p->next);
1392 hlist_del_init(&p->mglist); 1395 hlist_del_init(&p->mglist);
1393 del_timer(&p->timer); 1396 del_timer(&p->timer);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e8cf03b43b7d..646504db0220 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -894,8 +894,8 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
894void br_vlan_get_stats(const struct net_bridge_vlan *v, 894void br_vlan_get_stats(const struct net_bridge_vlan *v,
895 struct br_vlan_stats *stats); 895 struct br_vlan_stats *stats);
896void br_vlan_port_event(struct net_bridge_port *p, unsigned long event); 896void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
897void br_vlan_bridge_event(struct net_device *dev, unsigned long event, 897int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
898 void *ptr); 898 void *ptr);
899 899
900static inline struct net_bridge_vlan_group *br_vlan_group( 900static inline struct net_bridge_vlan_group *br_vlan_group(
901 const struct net_bridge *br) 901 const struct net_bridge *br)
@@ -1085,9 +1085,10 @@ static inline void br_vlan_port_event(struct net_bridge_port *p,
1085{ 1085{
1086} 1086}
1087 1087
1088static inline void br_vlan_bridge_event(struct net_device *dev, 1088static inline int br_vlan_bridge_event(struct net_device *dev,
1089 unsigned long event, void *ptr) 1089 unsigned long event, void *ptr)
1090{ 1090{
1091 return 0;
1091} 1092}
1092#endif 1093#endif
1093 1094
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 021cc9f66804..f5b2aeebbfe9 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1053,7 +1053,6 @@ int br_vlan_init(struct net_bridge *br)
1053{ 1053{
1054 struct net_bridge_vlan_group *vg; 1054 struct net_bridge_vlan_group *vg;
1055 int ret = -ENOMEM; 1055 int ret = -ENOMEM;
1056 bool changed;
1057 1056
1058 vg = kzalloc(sizeof(*vg), GFP_KERNEL); 1057 vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1059 if (!vg) 1058 if (!vg)
@@ -1068,17 +1067,10 @@ int br_vlan_init(struct net_bridge *br)
1068 br->vlan_proto = htons(ETH_P_8021Q); 1067 br->vlan_proto = htons(ETH_P_8021Q);
1069 br->default_pvid = 1; 1068 br->default_pvid = 1;
1070 rcu_assign_pointer(br->vlgrp, vg); 1069 rcu_assign_pointer(br->vlgrp, vg);
1071 ret = br_vlan_add(br, 1,
1072 BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
1073 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1074 if (ret)
1075 goto err_vlan_add;
1076 1070
1077out: 1071out:
1078 return ret; 1072 return ret;
1079 1073
1080err_vlan_add:
1081 vlan_tunnel_deinit(vg);
1082err_tunnel_init: 1074err_tunnel_init:
1083 rhashtable_destroy(&vg->vlan_hash); 1075 rhashtable_destroy(&vg->vlan_hash);
1084err_rhtbl: 1076err_rhtbl:
@@ -1464,13 +1456,23 @@ static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1464} 1456}
1465 1457
1466/* Must be protected by RTNL. */ 1458/* Must be protected by RTNL. */
1467void br_vlan_bridge_event(struct net_device *dev, unsigned long event, 1459int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1468 void *ptr)
1469{ 1460{
1470 struct netdev_notifier_changeupper_info *info; 1461 struct netdev_notifier_changeupper_info *info;
1471 struct net_bridge *br; 1462 struct net_bridge *br = netdev_priv(dev);
1463 bool changed;
1464 int ret = 0;
1472 1465
1473 switch (event) { 1466 switch (event) {
1467 case NETDEV_REGISTER:
1468 ret = br_vlan_add(br, br->default_pvid,
1469 BRIDGE_VLAN_INFO_PVID |
1470 BRIDGE_VLAN_INFO_UNTAGGED |
1471 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1472 break;
1473 case NETDEV_UNREGISTER:
1474 br_vlan_delete(br, br->default_pvid);
1475 break;
1474 case NETDEV_CHANGEUPPER: 1476 case NETDEV_CHANGEUPPER:
1475 info = ptr; 1477 info = ptr;
1476 br_vlan_upper_change(dev, info->upper_dev, info->linking); 1478 br_vlan_upper_change(dev, info->upper_dev, info->linking);
@@ -1478,12 +1480,13 @@ void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
1478 1480
1479 case NETDEV_CHANGE: 1481 case NETDEV_CHANGE:
1480 case NETDEV_UP: 1482 case NETDEV_UP:
1481 br = netdev_priv(dev);
1482 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING)) 1483 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1483 return; 1484 break;
1484 br_vlan_link_state_change(dev, br); 1485 br_vlan_link_state_change(dev, br);
1485 break; 1486 break;
1486 } 1487 }
1488
1489 return ret;
1487} 1490}
1488 1491
1489/* Must be protected by RTNL. */ 1492/* Must be protected by RTNL. */
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 154fa558bb90..5040fe43f4b4 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -6,7 +6,7 @@
6menuconfig NF_TABLES_BRIDGE 6menuconfig NF_TABLES_BRIDGE
7 depends on BRIDGE && NETFILTER && NF_TABLES 7 depends on BRIDGE && NETFILTER && NF_TABLES
8 select NETFILTER_FAMILY_BRIDGE 8 select NETFILTER_FAMILY_BRIDGE
9 bool "Ethernet Bridge nf_tables support" 9 tristate "Ethernet Bridge nf_tables support"
10 10
11if NF_TABLES_BRIDGE 11if NF_TABLES_BRIDGE
12 12
@@ -25,6 +25,8 @@ config NF_LOG_BRIDGE
25 tristate "Bridge packet logging" 25 tristate "Bridge packet logging"
26 select NF_LOG_COMMON 26 select NF_LOG_COMMON
27 27
28endif # NF_TABLES_BRIDGE
29
28config NF_CONNTRACK_BRIDGE 30config NF_CONNTRACK_BRIDGE
29 tristate "IPv4/IPV6 bridge connection tracking support" 31 tristate "IPv4/IPV6 bridge connection tracking support"
30 depends on NF_CONNTRACK 32 depends on NF_CONNTRACK
@@ -39,8 +41,6 @@ config NF_CONNTRACK_BRIDGE
39 41
40 To compile it as a module, choose M here. If unsure, say N. 42 To compile it as a module, choose M here. If unsure, say N.
41 43
42endif # NF_TABLES_BRIDGE
43
44menuconfig BRIDGE_NF_EBTABLES 44menuconfig BRIDGE_NF_EBTABLES
45 tristate "Ethernet Bridge tables (ebtables) support" 45 tristate "Ethernet Bridge tables (ebtables) support"
46 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES 46 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 963dfdc14827..c8177a89f52c 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
1770 return 0; 1770 return 0;
1771} 1771}
1772 1772
1773static int ebt_compat_init_offsets(unsigned int number)
1774{
1775 if (number > INT_MAX)
1776 return -EINVAL;
1777
1778 /* also count the base chain policies */
1779 number += NF_BR_NUMHOOKS;
1780
1781 return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
1782}
1773 1783
1774static int compat_table_info(const struct ebt_table_info *info, 1784static int compat_table_info(const struct ebt_table_info *info,
1775 struct compat_ebt_replace *newinfo) 1785 struct compat_ebt_replace *newinfo)
1776{ 1786{
1777 unsigned int size = info->entries_size; 1787 unsigned int size = info->entries_size;
1778 const void *entries = info->entries; 1788 const void *entries = info->entries;
1789 int ret;
1779 1790
1780 newinfo->entries_size = size; 1791 newinfo->entries_size = size;
1781 if (info->nentries) { 1792 ret = ebt_compat_init_offsets(info->nentries);
1782 int ret = xt_compat_init_offsets(NFPROTO_BRIDGE, 1793 if (ret)
1783 info->nentries); 1794 return ret;
1784 if (ret)
1785 return ret;
1786 }
1787 1795
1788 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1796 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1789 entries, newinfo); 1797 entries, newinfo);
@@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user,
2234 2242
2235 xt_compat_lock(NFPROTO_BRIDGE); 2243 xt_compat_lock(NFPROTO_BRIDGE);
2236 2244
2237 if (tmp.nentries) { 2245 ret = ebt_compat_init_offsets(tmp.nentries);
2238 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2246 if (ret < 0)
2239 if (ret < 0) 2247 goto out_unlock;
2240 goto out_unlock;
2241 }
2242 2248
2243 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2249 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2244 if (ret < 0) 2250 if (ret < 0)
@@ -2261,8 +2267,10 @@ static int compat_do_replace(struct net *net, void __user *user,
2261 state.buf_kern_len = size64; 2267 state.buf_kern_len = size64;
2262 2268
2263 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2269 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2264 if (WARN_ON(ret < 0)) 2270 if (WARN_ON(ret < 0)) {
2271 vfree(entries_tmp);
2265 goto out_unlock; 2272 goto out_unlock;
2273 }
2266 2274
2267 vfree(entries_tmp); 2275 vfree(entries_tmp);
2268 tmp.entries_size = size64; 2276 tmp.entries_size = size64;
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index bed66f536b34..1804e867f715 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -30,13 +30,9 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
30 switch (priv->key) { 30 switch (priv->key) {
31 case NFT_META_BRI_IIFNAME: 31 case NFT_META_BRI_IIFNAME:
32 br_dev = nft_meta_get_bridge(in); 32 br_dev = nft_meta_get_bridge(in);
33 if (!br_dev)
34 goto err;
35 break; 33 break;
36 case NFT_META_BRI_OIFNAME: 34 case NFT_META_BRI_OIFNAME:
37 br_dev = nft_meta_get_bridge(out); 35 br_dev = nft_meta_get_bridge(out);
38 if (!br_dev)
39 goto err;
40 break; 36 break;
41 case NFT_META_BRI_IIFPVID: { 37 case NFT_META_BRI_IIFPVID: {
42 u16 p_pvid; 38 u16 p_pvid;
@@ -61,13 +57,11 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
61 return; 57 return;
62 } 58 }
63 default: 59 default:
64 goto out; 60 return nft_meta_get_eval(expr, regs, pkt);
65 } 61 }
66 62
67 strncpy((char *)dest, br_dev->name, IFNAMSIZ); 63 strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
68 return; 64 return;
69out:
70 return nft_meta_get_eval(expr, regs, pkt);
71err: 65err:
72 regs->verdict.code = NFT_BREAK; 66 regs->verdict.code = NFT_BREAK;
73} 67}
diff --git a/net/can/gw.c b/net/can/gw.c
index 5275ddf580bc..72711053ebe6 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
1046 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n", 1046 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
1047 max_hops); 1047 max_hops);
1048 1048
1049 register_pernet_subsys(&cangw_pernet_ops); 1049 ret = register_pernet_subsys(&cangw_pernet_ops);
1050 if (ret)
1051 return ret;
1052
1053 ret = -ENOMEM;
1050 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 1054 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
1051 0, 0, NULL); 1055 0, 0, NULL);
1052
1053 if (!cgw_cache) 1056 if (!cgw_cache)
1054 return -ENOMEM; 1057 goto out_cache_create;
1055 1058
1056 /* set notifier */ 1059 /* set notifier */
1057 notifier.notifier_call = cgw_notifier; 1060 notifier.notifier_call = cgw_notifier;
1058 register_netdevice_notifier(&notifier); 1061 ret = register_netdevice_notifier(&notifier);
1062 if (ret)
1063 goto out_register_notifier;
1059 1064
1060 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE, 1065 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
1061 NULL, cgw_dump_jobs, 0); 1066 NULL, cgw_dump_jobs, 0);
1062 if (ret) { 1067 if (ret)
1063 unregister_netdevice_notifier(&notifier); 1068 goto out_rtnl_register1;
1064 kmem_cache_destroy(cgw_cache); 1069
1065 return -ENOBUFS; 1070 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
1066 } 1071 cgw_create_job, NULL, 0);
1067 1072 if (ret)
1068 /* Only the first call to rtnl_register_module can fail */ 1073 goto out_rtnl_register2;
1069 rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE, 1074 ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
1070 cgw_create_job, NULL, 0); 1075 cgw_remove_job, NULL, 0);
1071 rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, 1076 if (ret)
1072 cgw_remove_job, NULL, 0); 1077 goto out_rtnl_register3;
1073 1078
1074 return 0; 1079 return 0;
1080
1081out_rtnl_register3:
1082 rtnl_unregister(PF_CAN, RTM_NEWROUTE);
1083out_rtnl_register2:
1084 rtnl_unregister(PF_CAN, RTM_GETROUTE);
1085out_rtnl_register1:
1086 unregister_netdevice_notifier(&notifier);
1087out_register_notifier:
1088 kmem_cache_destroy(cgw_cache);
1089out_cache_create:
1090 unregister_pernet_subsys(&cangw_pernet_ops);
1091
1092 return ret;
1075} 1093}
1076 1094
1077static __exit void cgw_module_exit(void) 1095static __exit void cgw_module_exit(void)
diff --git a/net/core/dev.c b/net/core/dev.c
index fc676b2610e3..0891f499c1bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4374,12 +4374,17 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4374 4374
4375 act = bpf_prog_run_xdp(xdp_prog, xdp); 4375 act = bpf_prog_run_xdp(xdp_prog, xdp);
4376 4376
4377 /* check if bpf_xdp_adjust_head was used */
4377 off = xdp->data - orig_data; 4378 off = xdp->data - orig_data;
4378 if (off > 0) 4379 if (off) {
4379 __skb_pull(skb, off); 4380 if (off > 0)
4380 else if (off < 0) 4381 __skb_pull(skb, off);
4381 __skb_push(skb, -off); 4382 else if (off < 0)
4382 skb->mac_header += off; 4383 __skb_push(skb, -off);
4384
4385 skb->mac_header += off;
4386 skb_reset_network_header(skb);
4387 }
4383 4388
4384 /* check if bpf_xdp_adjust_tail was used. it can only "shrink" 4389 /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
4385 * pckt. 4390 * pckt.
@@ -9701,6 +9706,8 @@ static void __net_exit default_device_exit(struct net *net)
9701 9706
9702 /* Push remaining network devices to init_net */ 9707 /* Push remaining network devices to init_net */
9703 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 9708 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9709 if (__dev_get_by_name(&init_net, fb_name))
9710 snprintf(fb_name, IFNAMSIZ, "dev%%d");
9704 err = dev_change_net_namespace(dev, &init_net, fb_name); 9711 err = dev_change_net_namespace(dev, &init_net, fb_name);
9705 if (err) { 9712 if (err) {
9706 pr_emerg("%s: failed to move %s to init_net: %d\n", 9713 pr_emerg("%s: failed to move %s to init_net: %d\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 4e2a79b2fd77..7878f918b8c0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7455,12 +7455,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
7455 case offsetof(struct __sk_buff, gso_segs): 7455 case offsetof(struct __sk_buff, gso_segs):
7456 /* si->dst_reg = skb_shinfo(SKB); */ 7456 /* si->dst_reg = skb_shinfo(SKB); */
7457#ifdef NET_SKBUFF_DATA_USES_OFFSET 7457#ifdef NET_SKBUFF_DATA_USES_OFFSET
7458 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7459 si->dst_reg, si->src_reg,
7460 offsetof(struct sk_buff, head));
7461 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 7458 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7462 BPF_REG_AX, si->src_reg, 7459 BPF_REG_AX, si->src_reg,
7463 offsetof(struct sk_buff, end)); 7460 offsetof(struct sk_buff, end));
7461 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7462 si->dst_reg, si->src_reg,
7463 offsetof(struct sk_buff, head));
7464 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); 7464 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7465#else 7465#else
7466 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 7466 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 76f8db3841d7..d63b970784dc 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
165} 165}
166EXPORT_SYMBOL(flow_rule_match_enc_opts); 166EXPORT_SYMBOL(flow_rule_match_enc_opts);
167 167
168struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, 168struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
169 void *cb_ident, void *cb_priv, 169 void *cb_ident, void *cb_priv,
170 void (*release)(void *cb_priv)) 170 void (*release)(void *cb_priv))
171{ 171{
@@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
175 if (!block_cb) 175 if (!block_cb)
176 return ERR_PTR(-ENOMEM); 176 return ERR_PTR(-ENOMEM);
177 177
178 block_cb->net = net;
179 block_cb->cb = cb; 178 block_cb->cb = cb;
180 block_cb->cb_ident = cb_ident; 179 block_cb->cb_ident = cb_ident;
181 block_cb->cb_priv = cb_priv; 180 block_cb->cb_priv = cb_priv;
@@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb)
194} 193}
195EXPORT_SYMBOL(flow_block_cb_free); 194EXPORT_SYMBOL(flow_block_cb_free);
196 195
197struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, 196struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
198 tc_setup_cb_t *cb, void *cb_ident) 197 flow_setup_cb_t *cb, void *cb_ident)
199{ 198{
200 struct flow_block_cb *block_cb; 199 struct flow_block_cb *block_cb;
201 200
202 list_for_each_entry(block_cb, f->driver_block_list, driver_list) { 201 list_for_each_entry(block_cb, &block->cb_list, list) {
203 if (block_cb->net == f->net && 202 if (block_cb->cb == cb &&
204 block_cb->cb == cb &&
205 block_cb->cb_ident == cb_ident) 203 block_cb->cb_ident == cb_ident)
206 return block_cb; 204 return block_cb;
207 } 205 }
@@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
228} 226}
229EXPORT_SYMBOL(flow_block_cb_decref); 227EXPORT_SYMBOL(flow_block_cb_decref);
230 228
231bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident, 229bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
232 struct list_head *driver_block_list) 230 struct list_head *driver_block_list)
233{ 231{
234 struct flow_block_cb *block_cb; 232 struct flow_block_cb *block_cb;
@@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy);
245 243
246int flow_block_cb_setup_simple(struct flow_block_offload *f, 244int flow_block_cb_setup_simple(struct flow_block_offload *f,
247 struct list_head *driver_block_list, 245 struct list_head *driver_block_list,
248 tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, 246 flow_setup_cb_t *cb,
247 void *cb_ident, void *cb_priv,
249 bool ingress_only) 248 bool ingress_only)
250{ 249{
251 struct flow_block_cb *block_cb; 250 struct flow_block_cb *block_cb;
@@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
261 if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) 260 if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
262 return -EBUSY; 261 return -EBUSY;
263 262
264 block_cb = flow_block_cb_alloc(f->net, cb, cb_ident, 263 block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
265 cb_priv, NULL);
266 if (IS_ERR(block_cb)) 264 if (IS_ERR(block_cb))
267 return PTR_ERR(block_cb); 265 return PTR_ERR(block_cb);
268 266
@@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
270 list_add_tail(&block_cb->driver_list, driver_block_list); 268 list_add_tail(&block_cb->driver_list, driver_block_list);
271 return 0; 269 return 0;
272 case FLOW_BLOCK_UNBIND: 270 case FLOW_BLOCK_UNBIND:
273 block_cb = flow_block_cb_lookup(f, cb, cb_ident); 271 block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
274 if (!block_cb) 272 if (!block_cb)
275 return -ENOENT; 273 return -ENOENT;
276 274
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 93bffaad2135..6832eeb4b785 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -585,12 +585,12 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
585 585
586void sk_psock_drop(struct sock *sk, struct sk_psock *psock) 586void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
587{ 587{
588 rcu_assign_sk_user_data(sk, NULL);
589 sk_psock_cork_free(psock); 588 sk_psock_cork_free(psock);
590 sk_psock_zap_ingress(psock); 589 sk_psock_zap_ingress(psock);
591 sk_psock_restore_proto(sk, psock);
592 590
593 write_lock_bh(&sk->sk_callback_lock); 591 write_lock_bh(&sk->sk_callback_lock);
592 sk_psock_restore_proto(sk, psock);
593 rcu_assign_sk_user_data(sk, NULL);
594 if (psock->progs.skb_parser) 594 if (psock->progs.skb_parser)
595 sk_psock_stop_strp(sk, psock); 595 sk_psock_stop_strp(sk, psock);
596 write_unlock_bh(&sk->sk_callback_lock); 596 write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 52d4faeee18b..1330a7442e5b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -247,6 +247,8 @@ static void sock_map_free(struct bpf_map *map)
247 raw_spin_unlock_bh(&stab->lock); 247 raw_spin_unlock_bh(&stab->lock);
248 rcu_read_unlock(); 248 rcu_read_unlock();
249 249
250 synchronize_rcu();
251
250 bpf_map_area_free(stab->sks); 252 bpf_map_area_free(stab->sks);
251 kfree(stab); 253 kfree(stab);
252} 254}
@@ -276,16 +278,20 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
276 struct sock **psk) 278 struct sock **psk)
277{ 279{
278 struct sock *sk; 280 struct sock *sk;
281 int err = 0;
279 282
280 raw_spin_lock_bh(&stab->lock); 283 raw_spin_lock_bh(&stab->lock);
281 sk = *psk; 284 sk = *psk;
282 if (!sk_test || sk_test == sk) 285 if (!sk_test || sk_test == sk)
283 *psk = NULL; 286 sk = xchg(psk, NULL);
287
288 if (likely(sk))
289 sock_map_unref(sk, psk);
290 else
291 err = -EINVAL;
292
284 raw_spin_unlock_bh(&stab->lock); 293 raw_spin_unlock_bh(&stab->lock);
285 if (unlikely(!sk)) 294 return err;
286 return -EINVAL;
287 sock_map_unref(sk, psk);
288 return 0;
289} 295}
290 296
291static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk, 297static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
@@ -328,6 +334,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
328 struct sock *sk, u64 flags) 334 struct sock *sk, u64 flags)
329{ 335{
330 struct bpf_stab *stab = container_of(map, struct bpf_stab, map); 336 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
337 struct inet_connection_sock *icsk = inet_csk(sk);
331 struct sk_psock_link *link; 338 struct sk_psock_link *link;
332 struct sk_psock *psock; 339 struct sk_psock *psock;
333 struct sock *osk; 340 struct sock *osk;
@@ -338,6 +345,8 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
338 return -EINVAL; 345 return -EINVAL;
339 if (unlikely(idx >= map->max_entries)) 346 if (unlikely(idx >= map->max_entries))
340 return -E2BIG; 347 return -E2BIG;
348 if (unlikely(icsk->icsk_ulp_data))
349 return -EINVAL;
341 350
342 link = sk_psock_init_link(); 351 link = sk_psock_init_link();
343 if (!link) 352 if (!link)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 614c38ece104..33f41178afcc 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
951 struct flow_block_offload *f) 951 struct flow_block_offload *f)
952{ 952{
953 struct flow_block_cb *block_cb; 953 struct flow_block_cb *block_cb;
954 tc_setup_cb_t *cb; 954 flow_setup_cb_t *cb;
955 955
956 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 956 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
957 cb = dsa_slave_setup_tc_block_cb_ig; 957 cb = dsa_slave_setup_tc_block_cb_ig;
@@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
967 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) 967 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
968 return -EBUSY; 968 return -EBUSY;
969 969
970 block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL); 970 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
971 if (IS_ERR(block_cb)) 971 if (IS_ERR(block_cb))
972 return PTR_ERR(block_cb); 972 return PTR_ERR(block_cb);
973 973
@@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
975 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); 975 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
976 return 0; 976 return 0;
977 case FLOW_BLOCK_UNBIND: 977 case FLOW_BLOCK_UNBIND:
978 block_cb = flow_block_cb_lookup(f, cb, dev); 978 block_cb = flow_block_cb_lookup(f->block, cb, dev);
979 if (!block_cb) 979 if (!block_cb)
980 return -ENOENT; 980 return -ENOENT;
981 981
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 26363d72d25b..47ee88163a9d 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -165,6 +165,7 @@ static struct sk_buff
165 "Expected meta frame, is %12llx " 165 "Expected meta frame, is %12llx "
166 "in the DSA master multicast filter?\n", 166 "in the DSA master multicast filter?\n",
167 SJA1105_META_DMAC); 167 SJA1105_META_DMAC);
168 kfree_skb(sp->data->stampable_skb);
168 } 169 }
169 170
170 /* Hold a reference to avoid dsa_switch_rcv 171 /* Hold a reference to avoid dsa_switch_rcv
@@ -211,17 +212,8 @@ static struct sk_buff
211 * for further processing up the network stack. 212 * for further processing up the network stack.
212 */ 213 */
213 kfree_skb(skb); 214 kfree_skb(skb);
214 215 skb = stampable_skb;
215 skb = skb_copy(stampable_skb, GFP_ATOMIC);
216 if (!skb) {
217 dev_err_ratelimited(dp->ds->dev,
218 "Failed to copy stampable skb\n");
219 spin_unlock(&sp->data->meta_lock);
220 return NULL;
221 }
222 sja1105_transfer_meta(skb, meta); 216 sja1105_transfer_meta(skb, meta);
223 /* The cached copy will be freed now */
224 skb_unref(stampable_skb);
225 217
226 spin_unlock(&sp->data->meta_lock); 218 spin_unlock(&sp->data->meta_lock);
227 } 219 }
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index d666756be5f1..a999451345f9 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -331,7 +331,7 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
331 prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params); 331 prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
332 if (!prev) 332 if (!prev)
333 fq = inet_frag_create(fqdir, key, &prev); 333 fq = inet_frag_create(fqdir, key, &prev);
334 if (prev && !IS_ERR(prev)) { 334 if (!IS_ERR_OR_NULL(prev)) {
335 fq = prev; 335 fq = prev;
336 if (!refcount_inc_not_zero(&fq->refcnt)) 336 if (!refcount_inc_not_zero(&fq->refcnt))
337 fq = NULL; 337 fq = NULL;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 43adfc1641ba..2f01cf6fa0de 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -275,6 +275,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
275 const struct iphdr *tiph = &tunnel->parms.iph; 275 const struct iphdr *tiph = &tunnel->parms.iph;
276 u8 ipproto; 276 u8 ipproto;
277 277
278 if (!pskb_inet_may_pull(skb))
279 goto tx_error;
280
278 switch (skb->protocol) { 281 switch (skb->protocol) {
279 case htons(ETH_P_IP): 282 case htons(ETH_P_IP):
280 ipproto = IPPROTO_IPIP; 283 ipproto = IPPROTO_IPIP;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 4d6bf7ac0792..6bdb1ab8af61 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -416,8 +416,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
416 ctinfo == IP_CT_RELATED_REPLY)) 416 ctinfo == IP_CT_RELATED_REPLY))
417 return XT_CONTINUE; 417 return XT_CONTINUE;
418 418
419 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 419 /* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO,
420 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here 420 * TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here
421 * on, which all have an ID field [relevant for hashing]. */ 421 * on, which all have an ID field [relevant for hashing]. */
422 422
423 hash = clusterip_hashfn(skb, cipinfo->config); 423 hash = clusterip_hashfn(skb, cipinfo->config);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 8e7f84ec783d..0e70f3f65f6f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -36,6 +36,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
36 opts.options |= XT_SYNPROXY_OPT_ECN; 36 opts.options |= XT_SYNPROXY_OPT_ECN;
37 37
38 opts.options &= info->options; 38 opts.options &= info->options;
39 opts.mss_encode = opts.mss;
40 opts.mss = info->mss;
39 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 41 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
40 synproxy_init_timestamp_cookie(info, &opts); 42 synproxy_init_timestamp_cookie(info, &opts);
41 else 43 else
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 59031670b16a..cc23f1ce239c 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
78 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 78 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
79 flow.flowi4_tos = RT_TOS(iph->tos); 79 flow.flowi4_tos = RT_TOS(iph->tos);
80 flow.flowi4_scope = RT_SCOPE_UNIVERSE; 80 flow.flowi4_scope = RT_SCOPE_UNIVERSE;
81 flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
81 82
82 return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; 83 return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
83} 84}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 87b711fd5a44..3e2685c120c7 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -221,11 +221,11 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
221 int ret; 221 int ret;
222 222
223 rtp_exp->tuple.dst.u.udp.port = htons(nated_port); 223 rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
224 ret = nf_ct_expect_related(rtp_exp); 224 ret = nf_ct_expect_related(rtp_exp, 0);
225 if (ret == 0) { 225 if (ret == 0) {
226 rtcp_exp->tuple.dst.u.udp.port = 226 rtcp_exp->tuple.dst.u.udp.port =
227 htons(nated_port + 1); 227 htons(nated_port + 1);
228 ret = nf_ct_expect_related(rtcp_exp); 228 ret = nf_ct_expect_related(rtcp_exp, 0);
229 if (ret == 0) 229 if (ret == 0)
230 break; 230 break;
231 else if (ret == -EBUSY) { 231 else if (ret == -EBUSY) {
@@ -296,7 +296,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
296 int ret; 296 int ret;
297 297
298 exp->tuple.dst.u.tcp.port = htons(nated_port); 298 exp->tuple.dst.u.tcp.port = htons(nated_port);
299 ret = nf_ct_expect_related(exp); 299 ret = nf_ct_expect_related(exp, 0);
300 if (ret == 0) 300 if (ret == 0)
301 break; 301 break;
302 else if (ret != -EBUSY) { 302 else if (ret != -EBUSY) {
@@ -352,7 +352,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
352 int ret; 352 int ret;
353 353
354 exp->tuple.dst.u.tcp.port = htons(nated_port); 354 exp->tuple.dst.u.tcp.port = htons(nated_port);
355 ret = nf_ct_expect_related(exp); 355 ret = nf_ct_expect_related(exp, 0);
356 if (ret == 0) 356 if (ret == 0)
357 break; 357 break;
358 else if (ret != -EBUSY) { 358 else if (ret != -EBUSY) {
@@ -444,7 +444,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
444 int ret; 444 int ret;
445 445
446 exp->tuple.dst.u.tcp.port = htons(nated_port); 446 exp->tuple.dst.u.tcp.port = htons(nated_port);
447 ret = nf_ct_expect_related(exp); 447 ret = nf_ct_expect_related(exp, 0);
448 if (ret == 0) 448 if (ret == 0)
449 break; 449 break;
450 else if (ret != -EBUSY) { 450 else if (ret != -EBUSY) {
@@ -537,7 +537,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
537 int ret; 537 int ret;
538 538
539 exp->tuple.dst.u.tcp.port = htons(nated_port); 539 exp->tuple.dst.u.tcp.port = htons(nated_port);
540 ret = nf_ct_expect_related(exp); 540 ret = nf_ct_expect_related(exp, 0);
541 if (ret == 0) 541 if (ret == 0)
542 break; 542 break;
543 else if (ret != -EBUSY) { 543 else if (ret != -EBUSY) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4af1f5dae9d3..6e4afc48d7bb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1288,6 +1288,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1288 struct tcp_sock *tp = tcp_sk(sk); 1288 struct tcp_sock *tp = tcp_sk(sk);
1289 struct sk_buff *buff; 1289 struct sk_buff *buff;
1290 int nsize, old_factor; 1290 int nsize, old_factor;
1291 long limit;
1291 int nlen; 1292 int nlen;
1292 u8 flags; 1293 u8 flags;
1293 1294
@@ -1298,8 +1299,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1298 if (nsize < 0) 1299 if (nsize < 0)
1299 nsize = 0; 1300 nsize = 0;
1300 1301
1301 if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf && 1302 /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1302 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) { 1303 * We need some allowance to not penalize applications setting small
1304 * SO_SNDBUF values.
1305 * Also allow first and last skb in retransmit queue to be split.
1306 */
1307 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1308 if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1309 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1310 skb != tcp_rtx_queue_head(sk) &&
1311 skb != tcp_rtx_queue_tail(sk))) {
1303 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); 1312 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1304 return -ENOMEM; 1313 return -ENOMEM;
1305 } 1314 }
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 3d8a1d835471..4849edb62d52 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -96,6 +96,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
96 rcu_read_unlock(); 96 rcu_read_unlock();
97} 97}
98 98
99void tcp_update_ulp(struct sock *sk, struct proto *proto)
100{
101 struct inet_connection_sock *icsk = inet_csk(sk);
102
103 if (!icsk->icsk_ulp_ops) {
104 sk->sk_prot = proto;
105 return;
106 }
107
108 if (icsk->icsk_ulp_ops->update)
109 icsk->icsk_ulp_ops->update(sk, proto);
110}
111
99void tcp_cleanup_ulp(struct sock *sk) 112void tcp_cleanup_ulp(struct sock *sk)
100{ 113{
101 struct inet_connection_sock *icsk = inet_csk(sk); 114 struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c2049c72f3e5..dd2d0b963260 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -660,12 +660,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
660 struct flowi6 *fl6, __u8 *dsfield, 660 struct flowi6 *fl6, __u8 *dsfield,
661 int *encap_limit) 661 int *encap_limit)
662{ 662{
663 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 663 struct ipv6hdr *ipv6h;
664 struct ip6_tnl *t = netdev_priv(dev); 664 struct ip6_tnl *t = netdev_priv(dev);
665 __u16 offset; 665 __u16 offset;
666 666
667 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 667 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
668 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 668 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
669 ipv6h = ipv6_hdr(skb);
669 670
670 if (offset > 0) { 671 if (offset > 0) {
671 struct ipv6_tlv_tnl_enc_lim *tel; 672 struct ipv6_tlv_tnl_enc_lim *tel;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3134fbb65d7f..754a484d35df 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1278,12 +1278,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1278 } 1278 }
1279 1279
1280 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1280 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1281 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1281 1282
1282 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1283 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1283 return -1; 1284 return -1;
1284 1285
1285 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1286
1287 skb_set_inner_ipproto(skb, IPPROTO_IPIP); 1286 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1288 1287
1289 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1288 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1367,12 +1366,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1367 } 1366 }
1368 1367
1369 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1368 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1369 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1370 1370
1371 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1371 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1372 return -1; 1372 return -1;
1373 1373
1374 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1375
1376 skb_set_inner_ipproto(skb, IPPROTO_IPV6); 1374 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1377 1375
1378 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1376 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index e77ea1ed5edd..5cdb4a69d277 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -36,6 +36,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
36 opts.options |= XT_SYNPROXY_OPT_ECN; 36 opts.options |= XT_SYNPROXY_OPT_ECN;
37 37
38 opts.options &= info->options; 38 opts.options &= info->options;
39 opts.mss_encode = opts.mss;
40 opts.mss = info->mss;
39 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 41 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
40 synproxy_init_timestamp_cookie(info, &opts); 42 synproxy_init_timestamp_cookie(info, &opts);
41 else 43 else
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 6bcaf7357183..d800801a5dd2 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
55 if (rpfilter_addr_linklocal(&iph->saddr)) { 55 if (rpfilter_addr_linklocal(&iph->saddr)) {
56 lookup_flags |= RT6_LOOKUP_F_IFACE; 56 lookup_flags |= RT6_LOOKUP_F_IFACE;
57 fl6.flowi6_oif = dev->ifindex; 57 fl6.flowi6_oif = dev->ifindex;
58 } else if ((flags & XT_RPFILTER_LOOSE) == 0) 58 /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
59 } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
60 (flags & XT_RPFILTER_LOOSE) == 0)
59 fl6.flowi6_oif = dev->ifindex; 61 fl6.flowi6_oif = dev->ifindex;
60 62
61 rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags); 63 rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
70 goto out; 72 goto out;
71 } 73 }
72 74
73 if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) 75 if (rt->rt6i_idev->dev == dev ||
76 l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
77 (flags & XT_RPFILTER_LOOSE))
74 ret = true; 78 ret = true;
75 out: 79 out:
76 ip6_rt_put(rt); 80 ip6_rt_put(rt);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e49fec767a10..fd059e08785a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1951,7 +1951,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg); 1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1952 1952
1953 if (!arg.match) 1953 if (!arg.match)
1954 return; 1954 goto unlock;
1955 fib6_nh = arg.match; 1955 fib6_nh = arg.match;
1956 } else { 1956 } else {
1957 fib6_nh = from->fib6_nh; 1957 fib6_nh = from->fib6_nh;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 09e1694b6d34..ebb62a4ebe30 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -512,7 +512,9 @@ static void iucv_sock_close(struct sock *sk)
512 sk->sk_state = IUCV_DISCONN; 512 sk->sk_state = IUCV_DISCONN;
513 sk->sk_state_change(sk); 513 sk->sk_state_change(sk);
514 } 514 }
515 case IUCV_DISCONN: /* fall through */ 515 /* fall through */
516
517 case IUCV_DISCONN:
516 sk->sk_state = IUCV_CLOSING; 518 sk->sk_state = IUCV_CLOSING;
517 sk->sk_state_change(sk); 519 sk->sk_state_change(sk);
518 520
@@ -525,8 +527,9 @@ static void iucv_sock_close(struct sock *sk)
525 iucv_sock_in_state(sk, IUCV_CLOSED, 0), 527 iucv_sock_in_state(sk, IUCV_CLOSED, 0),
526 timeo); 528 timeo);
527 } 529 }
530 /* fall through */
528 531
529 case IUCV_CLOSING: /* fall through */ 532 case IUCV_CLOSING:
530 sk->sk_state = IUCV_CLOSED; 533 sk->sk_state = IUCV_CLOSED;
531 sk->sk_state_change(sk); 534 sk->sk_state_change(sk);
532 535
@@ -535,8 +538,9 @@ static void iucv_sock_close(struct sock *sk)
535 538
536 skb_queue_purge(&iucv->send_skb_q); 539 skb_queue_purge(&iucv->send_skb_q);
537 skb_queue_purge(&iucv->backlog_skb_q); 540 skb_queue_purge(&iucv->backlog_skb_q);
541 /* fall through */
538 542
539 default: /* fall through */ 543 default:
540 iucv_sever_path(sk, 1); 544 iucv_sever_path(sk, 1);
541 } 545 }
542 546
@@ -2247,10 +2251,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2247 kfree_skb(skb); 2251 kfree_skb(skb);
2248 break; 2252 break;
2249 } 2253 }
2250 /* fall through and receive non-zero length data */ 2254 /* fall through - and receive non-zero length data */
2251 case (AF_IUCV_FLAG_SHT): 2255 case (AF_IUCV_FLAG_SHT):
2252 /* shutdown request */ 2256 /* shutdown request */
2253 /* fall through and receive zero length data */ 2257 /* fall through - and receive zero length data */
2254 case 0: 2258 case 0:
2255 /* plain data frame */ 2259 /* plain data frame */
2256 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; 2260 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1d0e5904dedf..c54cb59593ef 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1681,6 +1681,9 @@ static const struct proto_ops pppol2tp_ops = {
1681 .recvmsg = pppol2tp_recvmsg, 1681 .recvmsg = pppol2tp_recvmsg,
1682 .mmap = sock_no_mmap, 1682 .mmap = sock_no_mmap,
1683 .ioctl = pppox_ioctl, 1683 .ioctl = pppox_ioctl,
1684#ifdef CONFIG_COMPAT
1685 .compat_ioctl = pppox_compat_ioctl,
1686#endif
1684}; 1687};
1685 1688
1686static const struct pppox_proto pppol2tp_proto = { 1689static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 76cc9e967fa6..4d458067d80d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -936,8 +936,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
936 936
937 err = ieee80211_set_probe_resp(sdata, params->probe_resp, 937 err = ieee80211_set_probe_resp(sdata, params->probe_resp,
938 params->probe_resp_len, csa); 938 params->probe_resp_len, csa);
939 if (err < 0) 939 if (err < 0) {
940 kfree(new);
940 return err; 941 return err;
942 }
941 if (err == 0) 943 if (err == 0)
942 changed |= BSS_CHANGED_AP_PROBE_RESP; 944 changed |= BSS_CHANGED_AP_PROBE_RESP;
943 945
@@ -949,8 +951,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
949 params->civicloc, 951 params->civicloc,
950 params->civicloc_len); 952 params->civicloc_len);
951 953
952 if (err < 0) 954 if (err < 0) {
955 kfree(new);
953 return err; 956 return err;
957 }
954 958
955 changed |= BSS_CHANGED_FTM_RESPONDER; 959 changed |= BSS_CHANGED_FTM_RESPONDER;
956 } 960 }
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index acd4afb4944b..c9a8a2433e8a 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
187 if (!check_sdata_in_driver(sdata)) 187 if (!check_sdata_in_driver(sdata))
188 return -EIO; 188 return -EIO;
189 189
190 if (WARN_ONCE(params->cw_min == 0 || 190 if (params->cw_min == 0 || params->cw_min > params->cw_max) {
191 params->cw_min > params->cw_max, 191 /*
192 "%s: invalid CW_min/CW_max: %d/%d\n", 192 * If we can't configure hardware anyway, don't warn. We may
193 sdata->name, params->cw_min, params->cw_max)) 193 * never have initialized the CW parameters.
194 */
195 WARN_ONCE(local->ops->conf_tx,
196 "%s: invalid CW_min/CW_max: %d/%d\n",
197 sdata->name, params->cw_min, params->cw_max);
194 return -EINVAL; 198 return -EINVAL;
199 }
195 200
196 trace_drv_conf_tx(local, sdata, ac, params); 201 trace_drv_conf_tx(local, sdata, ac, params);
197 if (local->ops->conf_tx) 202 if (local->ops->conf_tx)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06aac0aaae64..8dc6580e1787 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1222,7 +1222,6 @@ static void ieee80211_if_setup(struct net_device *dev)
1222static void ieee80211_if_setup_no_queue(struct net_device *dev) 1222static void ieee80211_if_setup_no_queue(struct net_device *dev)
1223{ 1223{
1224 ieee80211_if_setup(dev); 1224 ieee80211_if_setup(dev);
1225 dev->features |= NETIF_F_LLTX;
1226 dev->priv_flags |= IFF_NO_QUEUE; 1225 dev->priv_flags |= IFF_NO_QUEUE;
1227} 1226}
1228 1227
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a99ad0325309..4c888dc9bd81 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2042,6 +2042,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
2042 ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac); 2042 ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
2043 } 2043 }
2044 2044
2045 /* WMM specification requires all 4 ACIs. */
2046 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2047 if (params[ac].cw_min == 0) {
2048 sdata_info(sdata,
2049 "AP has invalid WMM params (missing AC %d), using defaults\n",
2050 ac);
2051 return false;
2052 }
2053 }
2054
2045 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 2055 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2046 mlme_dbg(sdata, 2056 mlme_dbg(sdata,
2047 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", 2057 "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1b224fa27367..ad1e58184c4e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3796,9 +3796,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3796 } 3796 }
3797 3797
3798 /* Always allow software iftypes */ 3798 /* Always allow software iftypes */
3799 if (local->hw.wiphy->software_iftypes & BIT(iftype) || 3799 if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
3800 (iftype == NL80211_IFTYPE_AP_VLAN &&
3801 local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
3802 if (radar_detect) 3800 if (radar_detect)
3803 return -EINVAL; 3801 return -EINVAL;
3804 return 0; 3802 return 0;
@@ -3833,7 +3831,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
3833 3831
3834 if (sdata_iter == sdata || 3832 if (sdata_iter == sdata ||
3835 !ieee80211_sdata_running(sdata_iter) || 3833 !ieee80211_sdata_running(sdata_iter) ||
3836 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) 3834 cfg80211_iftype_allowed(local->hw.wiphy,
3835 wdev_iter->iftype, 0, 1))
3837 continue; 3836 continue;
3838 3837
3839 params.iftype_num[wdev_iter->iftype]++; 3838 params.iftype_num[wdev_iter->iftype]++;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32a45c03786e..0d65f4d39494 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -223,8 +223,6 @@ config NF_CONNTRACK_FTP
223 of Network Address Translation on them. 223 of Network Address Translation on them.
224 224
225 This is FTP support on Layer 3 independent connection tracking. 225 This is FTP support on Layer 3 independent connection tracking.
226 Layer 3 independent connection tracking is experimental scheme
227 which generalize ip_conntrack to support other layer 3 protocols.
228 226
229 To compile it as a module, choose M here. If unsure, say N. 227 To compile it as a module, choose M here. If unsure, say N.
230 228
@@ -338,7 +336,7 @@ config NF_CONNTRACK_SIP
338 help 336 help
339 SIP is an application-layer control protocol that can establish, 337 SIP is an application-layer control protocol that can establish,
340 modify, and terminate multimedia sessions (conferences) such as 338 modify, and terminate multimedia sessions (conferences) such as
341 Internet telephony calls. With the ip_conntrack_sip and 339 Internet telephony calls. With the nf_conntrack_sip and
342 the nf_nat_sip modules you can support the protocol on a connection 340 the nf_nat_sip modules you can support the protocol on a connection
343 tracking/NATing firewall. 341 tracking/NATing firewall.
344 342
@@ -1313,7 +1311,7 @@ config NETFILTER_XT_MATCH_HELPER
1313 depends on NETFILTER_ADVANCED 1311 depends on NETFILTER_ADVANCED
1314 help 1312 help
1315 Helper matching allows you to match packets in dynamic connections 1313 Helper matching allows you to match packets in dynamic connections
1316 tracked by a conntrack-helper, ie. ip_conntrack_ftp 1314 tracked by a conntrack-helper, ie. nf_conntrack_ftp
1317 1315
1318 To compile it as a module, choose M here. If unsure, say Y. 1316 To compile it as a module, choose M here. If unsure, say Y.
1319 1317
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index ca7ac4a25ada..1d4e63326e68 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -226,7 +226,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
226 226
227 e.id = ip_to_id(map, ip); 227 e.id = ip_to_id(map, ip);
228 228
229 if (opt->flags & IPSET_DIM_ONE_SRC) 229 if (opt->flags & IPSET_DIM_TWO_SRC)
230 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 230 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
231 else 231 else
232 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 232 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2e151856ad99..e64d5f9a89dd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1161,7 +1161,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
1161 return -ENOENT; 1161 return -ENOENT;
1162 1162
1163 write_lock_bh(&ip_set_ref_lock); 1163 write_lock_bh(&ip_set_ref_lock);
1164 if (set->ref != 0) { 1164 if (set->ref != 0 || set->ref_netlink != 0) {
1165 ret = -IPSET_ERR_REFERENCED; 1165 ret = -IPSET_ERR_REFERENCED;
1166 goto out; 1166 goto out;
1167 } 1167 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index faf59b6a998f..24d8f4df4230 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -89,15 +89,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
89 struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } }; 89 struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
90 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); 90 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
91 91
92 /* MAC can be src only */
93 if (!(opt->flags & IPSET_DIM_TWO_SRC))
94 return 0;
95
96 if (skb_mac_header(skb) < skb->head || 92 if (skb_mac_header(skb) < skb->head ||
97 (skb_mac_header(skb) + ETH_HLEN) > skb->data) 93 (skb_mac_header(skb) + ETH_HLEN) > skb->data)
98 return -EINVAL; 94 return -EINVAL;
99 95
100 if (opt->flags & IPSET_DIM_ONE_SRC) 96 if (opt->flags & IPSET_DIM_TWO_SRC)
101 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 97 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
102 else 98 else
103 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 99 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 403541996952..08adcb222986 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -231,7 +231,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
231 231
232 IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n", 232 IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n",
233 __func__, ct, ARG_TUPLE(&exp->tuple)); 233 __func__, ct, ARG_TUPLE(&exp->tuple));
234 nf_ct_expect_related(exp); 234 nf_ct_expect_related(exp, 0);
235 nf_ct_expect_put(exp); 235 nf_ct_expect_put(exp);
236} 236}
237EXPORT_SYMBOL(ip_vs_nfct_expect_related); 237EXPORT_SYMBOL(ip_vs_nfct_expect_related);
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 42ee659d0d1e..d011d2eb0848 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -159,7 +159,7 @@ static int amanda_help(struct sk_buff *skb,
159 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) 159 if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
160 ret = nf_nat_amanda(skb, ctinfo, protoff, 160 ret = nf_nat_amanda(skb, ctinfo, protoff,
161 off - dataoff, len, exp); 161 off - dataoff, len, exp);
162 else if (nf_ct_expect_related(exp) != 0) { 162 else if (nf_ct_expect_related(exp, 0) != 0) {
163 nf_ct_helper_log(skb, ct, "cannot add expectation"); 163 nf_ct_helper_log(skb, ct, "cannot add expectation");
164 ret = NF_DROP; 164 ret = NF_DROP;
165 } 165 }
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index 921a7b95be68..1ba6becc3079 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -68,7 +68,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
68 exp->class = NF_CT_EXPECT_CLASS_DEFAULT; 68 exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
69 exp->helper = NULL; 69 exp->helper = NULL;
70 70
71 nf_ct_expect_related(exp); 71 nf_ct_expect_related(exp, 0);
72 nf_ct_expect_put(exp); 72 nf_ct_expect_put(exp);
73 73
74 nf_ct_refresh(ct, skb, timeout * HZ); 74 nf_ct_refresh(ct, skb, timeout * HZ);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index bdfeacee0817..a542761e90d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1817,9 +1817,7 @@ EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1817#include <linux/netfilter/nfnetlink_conntrack.h> 1817#include <linux/netfilter/nfnetlink_conntrack.h>
1818#include <linux/mutex.h> 1818#include <linux/mutex.h>
1819 1819
1820/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be 1820/* Generic function for tcp/udp/sctp/dccp and alike. */
1821 * in ip_conntrack_core, since we don't want the protocols to autoload
1822 * or depend on ctnetlink */
1823int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 1821int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1824 const struct nf_conntrack_tuple *tuple) 1822 const struct nf_conntrack_tuple *tuple)
1825{ 1823{
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index ffd1f4906c4f..65364de915d1 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -249,13 +249,22 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
249static inline int expect_matches(const struct nf_conntrack_expect *a, 249static inline int expect_matches(const struct nf_conntrack_expect *a,
250 const struct nf_conntrack_expect *b) 250 const struct nf_conntrack_expect *b)
251{ 251{
252 return a->master == b->master && 252 return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
253 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
254 nf_ct_tuple_mask_equal(&a->mask, &b->mask) && 253 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
255 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && 254 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
256 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master)); 255 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
257} 256}
258 257
258static bool master_matches(const struct nf_conntrack_expect *a,
259 const struct nf_conntrack_expect *b,
260 unsigned int flags)
261{
262 if (flags & NF_CT_EXP_F_SKIP_MASTER)
263 return true;
264
265 return a->master == b->master;
266}
267
259/* Generally a bad idea to call this: could have matched already. */ 268/* Generally a bad idea to call this: could have matched already. */
260void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) 269void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
261{ 270{
@@ -399,7 +408,8 @@ static void evict_oldest_expect(struct nf_conn *master,
399 nf_ct_remove_expect(last); 408 nf_ct_remove_expect(last);
400} 409}
401 410
402static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) 411static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
412 unsigned int flags)
403{ 413{
404 const struct nf_conntrack_expect_policy *p; 414 const struct nf_conntrack_expect_policy *p;
405 struct nf_conntrack_expect *i; 415 struct nf_conntrack_expect *i;
@@ -417,8 +427,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
417 } 427 }
418 h = nf_ct_expect_dst_hash(net, &expect->tuple); 428 h = nf_ct_expect_dst_hash(net, &expect->tuple);
419 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { 429 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
420 if (expect_matches(i, expect)) { 430 if (master_matches(i, expect, flags) &&
421 if (i->class != expect->class) 431 expect_matches(i, expect)) {
432 if (i->class != expect->class ||
433 i->master != expect->master)
422 return -EALREADY; 434 return -EALREADY;
423 435
424 if (nf_ct_remove_expect(i)) 436 if (nf_ct_remove_expect(i))
@@ -453,12 +465,12 @@ out:
453} 465}
454 466
455int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 467int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
456 u32 portid, int report) 468 u32 portid, int report, unsigned int flags)
457{ 469{
458 int ret; 470 int ret;
459 471
460 spin_lock_bh(&nf_conntrack_expect_lock); 472 spin_lock_bh(&nf_conntrack_expect_lock);
461 ret = __nf_ct_expect_check(expect); 473 ret = __nf_ct_expect_check(expect, flags);
462 if (ret < 0) 474 if (ret < 0)
463 goto out; 475 goto out;
464 476
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c6c11bab5b6..0ecb3e289ef2 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -525,7 +525,7 @@ skip_nl_seq:
525 protoff, matchoff, matchlen, exp); 525 protoff, matchoff, matchlen, exp);
526 else { 526 else {
527 /* Can't expect this? Best to drop packet now. */ 527 /* Can't expect this? Best to drop packet now. */
528 if (nf_ct_expect_related(exp) != 0) { 528 if (nf_ct_expect_related(exp, 0) != 0) {
529 nf_ct_helper_log(skb, ct, "cannot add expectation"); 529 nf_ct_helper_log(skb, ct, "cannot add expectation");
530 ret = NF_DROP; 530 ret = NF_DROP;
531 } else 531 } else
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 8f6ba8162f0b..573cb4481481 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -1,11 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 3 * BER and PER decoding library for H.323 conntrack/NAT module.
4 * conntrack/NAT module.
5 * 4 *
6 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> 5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
7 * 6 *
8 * See ip_conntrack_helper_h323_asn1.h for details. 7 * See nf_conntrack_helper_h323_asn1.h for details.
9 */ 8 */
10 9
11#ifdef __KERNEL__ 10#ifdef __KERNEL__
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 6497e5fc0871..8ba037b76ad3 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -305,8 +305,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
305 ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, 305 ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
306 taddr, port, rtp_port, rtp_exp, rtcp_exp); 306 taddr, port, rtp_port, rtp_exp, rtcp_exp);
307 } else { /* Conntrack only */ 307 } else { /* Conntrack only */
308 if (nf_ct_expect_related(rtp_exp) == 0) { 308 if (nf_ct_expect_related(rtp_exp, 0) == 0) {
309 if (nf_ct_expect_related(rtcp_exp) == 0) { 309 if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
310 pr_debug("nf_ct_h323: expect RTP "); 310 pr_debug("nf_ct_h323: expect RTP ");
311 nf_ct_dump_tuple(&rtp_exp->tuple); 311 nf_ct_dump_tuple(&rtp_exp->tuple);
312 pr_debug("nf_ct_h323: expect RTCP "); 312 pr_debug("nf_ct_h323: expect RTCP ");
@@ -364,7 +364,7 @@ static int expect_t120(struct sk_buff *skb,
364 ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr, 364 ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
365 port, exp); 365 port, exp);
366 } else { /* Conntrack only */ 366 } else { /* Conntrack only */
367 if (nf_ct_expect_related(exp) == 0) { 367 if (nf_ct_expect_related(exp, 0) == 0) {
368 pr_debug("nf_ct_h323: expect T.120 "); 368 pr_debug("nf_ct_h323: expect T.120 ");
369 nf_ct_dump_tuple(&exp->tuple); 369 nf_ct_dump_tuple(&exp->tuple);
370 } else 370 } else
@@ -701,7 +701,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
701 ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr, 701 ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
702 port, exp); 702 port, exp);
703 } else { /* Conntrack only */ 703 } else { /* Conntrack only */
704 if (nf_ct_expect_related(exp) == 0) { 704 if (nf_ct_expect_related(exp, 0) == 0) {
705 pr_debug("nf_ct_q931: expect H.245 "); 705 pr_debug("nf_ct_q931: expect H.245 ");
706 nf_ct_dump_tuple(&exp->tuple); 706 nf_ct_dump_tuple(&exp->tuple);
707 } else 707 } else
@@ -825,7 +825,7 @@ static int expect_callforwarding(struct sk_buff *skb,
825 protoff, data, dataoff, 825 protoff, data, dataoff,
826 taddr, port, exp); 826 taddr, port, exp);
827 } else { /* Conntrack only */ 827 } else { /* Conntrack only */
828 if (nf_ct_expect_related(exp) == 0) { 828 if (nf_ct_expect_related(exp, 0) == 0) {
829 pr_debug("nf_ct_q931: expect Call Forwarding "); 829 pr_debug("nf_ct_q931: expect Call Forwarding ");
830 nf_ct_dump_tuple(&exp->tuple); 830 nf_ct_dump_tuple(&exp->tuple);
831 } else 831 } else
@@ -1284,7 +1284,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1284 ret = nat_q931(skb, ct, ctinfo, protoff, data, 1284 ret = nat_q931(skb, ct, ctinfo, protoff, data,
1285 taddr, i, port, exp); 1285 taddr, i, port, exp);
1286 } else { /* Conntrack only */ 1286 } else { /* Conntrack only */
1287 if (nf_ct_expect_related(exp) == 0) { 1287 if (nf_ct_expect_related(exp, 0) == 0) {
1288 pr_debug("nf_ct_ras: expect Q.931 "); 1288 pr_debug("nf_ct_ras: expect Q.931 ");
1289 nf_ct_dump_tuple(&exp->tuple); 1289 nf_ct_dump_tuple(&exp->tuple);
1290 1290
@@ -1349,7 +1349,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
1349 IPPROTO_UDP, NULL, &port); 1349 IPPROTO_UDP, NULL, &port);
1350 exp->helper = nf_conntrack_helper_ras; 1350 exp->helper = nf_conntrack_helper_ras;
1351 1351
1352 if (nf_ct_expect_related(exp) == 0) { 1352 if (nf_ct_expect_related(exp, 0) == 0) {
1353 pr_debug("nf_ct_ras: expect RAS "); 1353 pr_debug("nf_ct_ras: expect RAS ");
1354 nf_ct_dump_tuple(&exp->tuple); 1354 nf_ct_dump_tuple(&exp->tuple);
1355 } else 1355 } else
@@ -1561,7 +1561,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1561 exp->flags = NF_CT_EXPECT_PERMANENT; 1561 exp->flags = NF_CT_EXPECT_PERMANENT;
1562 exp->helper = nf_conntrack_helper_q931; 1562 exp->helper = nf_conntrack_helper_q931;
1563 1563
1564 if (nf_ct_expect_related(exp) == 0) { 1564 if (nf_ct_expect_related(exp, 0) == 0) {
1565 pr_debug("nf_ct_ras: expect Q.931 "); 1565 pr_debug("nf_ct_ras: expect Q.931 ");
1566 nf_ct_dump_tuple(&exp->tuple); 1566 nf_ct_dump_tuple(&exp->tuple);
1567 } else 1567 } else
@@ -1615,7 +1615,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1615 exp->flags = NF_CT_EXPECT_PERMANENT; 1615 exp->flags = NF_CT_EXPECT_PERMANENT;
1616 exp->helper = nf_conntrack_helper_q931; 1616 exp->helper = nf_conntrack_helper_q931;
1617 1617
1618 if (nf_ct_expect_related(exp) == 0) { 1618 if (nf_ct_expect_related(exp, 0) == 0) {
1619 pr_debug("nf_ct_ras: expect Q.931 "); 1619 pr_debug("nf_ct_ras: expect Q.931 ");
1620 nf_ct_dump_tuple(&exp->tuple); 1620 nf_ct_dump_tuple(&exp->tuple);
1621 } else 1621 } else
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 7ac156f1f3bc..e40988a2f22f 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -213,7 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
213 addr_beg_p - ib_ptr, 213 addr_beg_p - ib_ptr,
214 addr_end_p - addr_beg_p, 214 addr_end_p - addr_beg_p,
215 exp); 215 exp);
216 else if (nf_ct_expect_related(exp) != 0) { 216 else if (nf_ct_expect_related(exp, 0) != 0) {
217 nf_ct_helper_log(skb, ct, 217 nf_ct_helper_log(skb, ct,
218 "cannot add expectation"); 218 "cannot add expectation");
219 ret = NF_DROP; 219 ret = NF_DROP;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1b77444d5b52..6aa01eb6fe99 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2616,7 +2616,7 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2616 if (IS_ERR(exp)) 2616 if (IS_ERR(exp))
2617 return PTR_ERR(exp); 2617 return PTR_ERR(exp);
2618 2618
2619 err = nf_ct_expect_related_report(exp, portid, report); 2619 err = nf_ct_expect_related_report(exp, portid, report, 0);
2620 nf_ct_expect_put(exp); 2620 nf_ct_expect_put(exp);
2621 return err; 2621 return err;
2622} 2622}
@@ -3367,7 +3367,7 @@ ctnetlink_create_expect(struct net *net,
3367 goto err_rcu; 3367 goto err_rcu;
3368 } 3368 }
3369 3369
3370 err = nf_ct_expect_related_report(exp, portid, report); 3370 err = nf_ct_expect_related_report(exp, portid, report, 0);
3371 nf_ct_expect_put(exp); 3371 nf_ct_expect_put(exp);
3372err_rcu: 3372err_rcu:
3373 rcu_read_unlock(); 3373 rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index b22042ad0fca..a971183f11af 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -234,9 +234,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
234 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); 234 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
235 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) 235 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
236 nf_nat_pptp_exp_gre(exp_orig, exp_reply); 236 nf_nat_pptp_exp_gre(exp_orig, exp_reply);
237 if (nf_ct_expect_related(exp_orig) != 0) 237 if (nf_ct_expect_related(exp_orig, 0) != 0)
238 goto out_put_both; 238 goto out_put_both;
239 if (nf_ct_expect_related(exp_reply) != 0) 239 if (nf_ct_expect_related(exp_reply, 0) != 0)
240 goto out_unexpect_orig; 240 goto out_unexpect_orig;
241 241
242 /* Add GRE keymap entries */ 242 /* Add GRE keymap entries */
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c2eb365f1723..5b05487a60d2 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -1,7 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * ip_conntrack_proto_gre.c - Version 3.0
4 *
5 * Connection tracking protocol helper module for GRE. 3 * Connection tracking protocol helper module for GRE.
6 * 4 *
7 * GRE is a generic encapsulation protocol, which is generally not very 5 * GRE is a generic encapsulation protocol, which is generally not very
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index dd53e2b20f6b..097deba7441a 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -215,7 +215,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
215 return -NF_ACCEPT; 215 return -NF_ACCEPT;
216 } 216 }
217 217
218 /* See ip_conntrack_proto_tcp.c */ 218 /* See nf_conntrack_proto_tcp.c */
219 if (state->net->ct.sysctl_checksum && 219 if (state->net->ct.sysctl_checksum &&
220 state->hook == NF_INET_PRE_ROUTING && 220 state->hook == NF_INET_PRE_ROUTING &&
221 nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) { 221 nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index d5fdfa00d683..85c1f8c213b0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
472 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 472 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
473 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 473 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
474 __u32 seq, ack, sack, end, win, swin; 474 __u32 seq, ack, sack, end, win, swin;
475 u16 win_raw;
475 s32 receiver_offset; 476 s32 receiver_offset;
476 bool res, in_recv_win; 477 bool res, in_recv_win;
477 478
@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
480 */ 481 */
481 seq = ntohl(tcph->seq); 482 seq = ntohl(tcph->seq);
482 ack = sack = ntohl(tcph->ack_seq); 483 ack = sack = ntohl(tcph->ack_seq);
483 win = ntohs(tcph->window); 484 win_raw = ntohs(tcph->window);
485 win = win_raw;
484 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph); 486 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
485 487
486 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) 488 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
655 && state->last_seq == seq 657 && state->last_seq == seq
656 && state->last_ack == ack 658 && state->last_ack == ack
657 && state->last_end == end 659 && state->last_end == end
658 && state->last_win == win) 660 && state->last_win == win_raw)
659 state->retrans++; 661 state->retrans++;
660 else { 662 else {
661 state->last_dir = dir; 663 state->last_dir = dir;
662 state->last_seq = seq; 664 state->last_seq = seq;
663 state->last_ack = ack; 665 state->last_ack = ack;
664 state->last_end = end; 666 state->last_end = end;
665 state->last_win = win; 667 state->last_win = win_raw;
666 state->retrans = 0; 668 state->retrans = 0;
667 } 669 }
668 } 670 }
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 81448c3db661..1aebd6569d4e 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -153,7 +153,7 @@ static int help(struct sk_buff *skb,
153 nf_ct_dump_tuple(&exp->tuple); 153 nf_ct_dump_tuple(&exp->tuple);
154 154
155 /* Can't expect this? Best to drop packet now. */ 155 /* Can't expect this? Best to drop packet now. */
156 if (nf_ct_expect_related(exp) != 0) { 156 if (nf_ct_expect_related(exp, 0) != 0) {
157 nf_ct_helper_log(skb, ct, "cannot add expectation"); 157 nf_ct_helper_log(skb, ct, "cannot add expectation");
158 ret = NF_DROP; 158 ret = NF_DROP;
159 } 159 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 107251731809..b83dc9bf0a5d 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -977,11 +977,15 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
977 /* -EALREADY handling works around end-points that send 977 /* -EALREADY handling works around end-points that send
978 * SDP messages with identical port but different media type, 978 * SDP messages with identical port but different media type,
979 * we pretend expectation was set up. 979 * we pretend expectation was set up.
980 * It also works in the case that SDP messages are sent with
981 * identical expect tuples but for different master conntracks.
980 */ 982 */
981 int errp = nf_ct_expect_related(rtp_exp); 983 int errp = nf_ct_expect_related(rtp_exp,
984 NF_CT_EXP_F_SKIP_MASTER);
982 985
983 if (errp == 0 || errp == -EALREADY) { 986 if (errp == 0 || errp == -EALREADY) {
984 int errcp = nf_ct_expect_related(rtcp_exp); 987 int errcp = nf_ct_expect_related(rtcp_exp,
988 NF_CT_EXP_F_SKIP_MASTER);
985 989
986 if (errcp == 0 || errcp == -EALREADY) 990 if (errcp == 0 || errcp == -EALREADY)
987 ret = NF_ACCEPT; 991 ret = NF_ACCEPT;
@@ -1296,7 +1300,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1296 ret = hooks->expect(skb, protoff, dataoff, dptr, datalen, 1300 ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
1297 exp, matchoff, matchlen); 1301 exp, matchoff, matchlen);
1298 else { 1302 else {
1299 if (nf_ct_expect_related(exp) != 0) { 1303 if (nf_ct_expect_related(exp, 0) != 0) {
1300 nf_ct_helper_log(skb, ct, "cannot add expectation"); 1304 nf_ct_helper_log(skb, ct, "cannot add expectation");
1301 ret = NF_DROP; 1305 ret = NF_DROP;
1302 } else 1306 } else
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index df6d6d61bd58..80ee53f29f68 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -78,7 +78,7 @@ static int tftp_help(struct sk_buff *skb,
78 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); 78 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
79 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) 79 if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
80 ret = nf_nat_tftp(skb, ctinfo, exp); 80 ret = nf_nat_tftp(skb, ctinfo, exp);
81 else if (nf_ct_expect_related(exp) != 0) { 81 else if (nf_ct_expect_related(exp, 0) != 0) {
82 nf_ct_helper_log(skb, ct, "cannot add expectation"); 82 nf_ct_helper_log(skb, ct, "cannot add expectation");
83 ret = NF_DROP; 83 ret = NF_DROP;
84 } 84 }
diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c
index a352604d6186..3bc7e0854efe 100644
--- a/net/netfilter/nf_nat_amanda.c
+++ b/net/netfilter/nf_nat_amanda.c
@@ -48,7 +48,7 @@ static unsigned int help(struct sk_buff *skb,
48 int res; 48 int res;
49 49
50 exp->tuple.dst.u.tcp.port = htons(port); 50 exp->tuple.dst.u.tcp.port = htons(port);
51 res = nf_ct_expect_related(exp); 51 res = nf_ct_expect_related(exp, 0);
52 if (res == 0) 52 if (res == 0)
53 break; 53 break;
54 else if (res != -EBUSY) { 54 else if (res != -EBUSY) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 9ab410455992..3f6023ed4966 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -519,7 +519,7 @@ another_round:
519 * and NF_INET_LOCAL_OUT, we change the destination to map into the 519 * and NF_INET_LOCAL_OUT, we change the destination to map into the
520 * range. It might not be possible to get a unique tuple, but we try. 520 * range. It might not be possible to get a unique tuple, but we try.
521 * At worst (or if we race), we will end up with a final duplicate in 521 * At worst (or if we race), we will end up with a final duplicate in
522 * __ip_conntrack_confirm and drop the packet. */ 522 * __nf_conntrack_confirm and drop the packet. */
523static void 523static void
524get_unique_tuple(struct nf_conntrack_tuple *tuple, 524get_unique_tuple(struct nf_conntrack_tuple *tuple,
525 const struct nf_conntrack_tuple *orig_tuple, 525 const struct nf_conntrack_tuple *orig_tuple,
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index d48484a9d52d..aace6768a64e 100644
--- a/net/netfilter/nf_nat_ftp.c
+++ b/net/netfilter/nf_nat_ftp.c
@@ -91,7 +91,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
91 int ret; 91 int ret;
92 92
93 exp->tuple.dst.u.tcp.port = htons(port); 93 exp->tuple.dst.u.tcp.port = htons(port);
94 ret = nf_ct_expect_related(exp); 94 ret = nf_ct_expect_related(exp, 0);
95 if (ret == 0) 95 if (ret == 0)
96 break; 96 break;
97 else if (ret != -EBUSY) { 97 else if (ret != -EBUSY) {
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index dfb7ef8845bd..c691ab8d234c 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff *skb,
53 int ret; 53 int ret;
54 54
55 exp->tuple.dst.u.tcp.port = htons(port); 55 exp->tuple.dst.u.tcp.port = htons(port);
56 ret = nf_ct_expect_related(exp); 56 ret = nf_ct_expect_related(exp, 0);
57 if (ret == 0) 57 if (ret == 0)
58 break; 58 break;
59 else if (ret != -EBUSY) { 59 else if (ret != -EBUSY) {
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index e338d91980d8..f0a735e86851 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -414,7 +414,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
414 int ret; 414 int ret;
415 415
416 exp->tuple.dst.u.udp.port = htons(port); 416 exp->tuple.dst.u.udp.port = htons(port);
417 ret = nf_ct_expect_related(exp); 417 ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
418 if (ret == 0) 418 if (ret == 0)
419 break; 419 break;
420 else if (ret != -EBUSY) { 420 else if (ret != -EBUSY) {
@@ -607,7 +607,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
607 int ret; 607 int ret;
608 608
609 rtp_exp->tuple.dst.u.udp.port = htons(port); 609 rtp_exp->tuple.dst.u.udp.port = htons(port);
610 ret = nf_ct_expect_related(rtp_exp); 610 ret = nf_ct_expect_related(rtp_exp,
611 NF_CT_EXP_F_SKIP_MASTER);
611 if (ret == -EBUSY) 612 if (ret == -EBUSY)
612 continue; 613 continue;
613 else if (ret < 0) { 614 else if (ret < 0) {
@@ -615,7 +616,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
615 break; 616 break;
616 } 617 }
617 rtcp_exp->tuple.dst.u.udp.port = htons(port + 1); 618 rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
618 ret = nf_ct_expect_related(rtcp_exp); 619 ret = nf_ct_expect_related(rtcp_exp,
620 NF_CT_EXP_F_SKIP_MASTER);
619 if (ret == 0) 621 if (ret == 0)
620 break; 622 break;
621 else if (ret == -EBUSY) { 623 else if (ret == -EBUSY) {
diff --git a/net/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c
index 833a11f68031..1a591132d6eb 100644
--- a/net/netfilter/nf_nat_tftp.c
+++ b/net/netfilter/nf_nat_tftp.c
@@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff *skb,
30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; 30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
31 exp->dir = IP_CT_DIR_REPLY; 31 exp->dir = IP_CT_DIR_REPLY;
32 exp->expectfn = nf_nat_follow_master; 32 exp->expectfn = nf_nat_follow_master;
33 if (nf_ct_expect_related(exp) != 0) { 33 if (nf_ct_expect_related(exp, 0) != 0) {
34 nf_ct_helper_log(skb, exp->master, "cannot add expectation"); 34 nf_ct_helper_log(skb, exp->master, "cannot add expectation");
35 return NF_DROP; 35 return NF_DROP;
36 } 36 }
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b101f187eda8..c769462a839e 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -470,7 +470,7 @@ synproxy_send_client_synack(struct net *net,
470 struct iphdr *iph, *niph; 470 struct iphdr *iph, *niph;
471 struct tcphdr *nth; 471 struct tcphdr *nth;
472 unsigned int tcp_hdr_size; 472 unsigned int tcp_hdr_size;
473 u16 mss = opts->mss; 473 u16 mss = opts->mss_encode;
474 474
475 iph = ip_hdr(skb); 475 iph = ip_hdr(skb);
476 476
@@ -687,7 +687,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
687 state = &ct->proto.tcp; 687 state = &ct->proto.tcp;
688 switch (state->state) { 688 switch (state->state) {
689 case TCP_CONNTRACK_CLOSE: 689 case TCP_CONNTRACK_CLOSE:
690 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 690 if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
691 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - 691 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
692 ntohl(th->seq) + 1); 692 ntohl(th->seq) + 1);
693 break; 693 break;
@@ -884,7 +884,7 @@ synproxy_send_client_synack_ipv6(struct net *net,
884 struct ipv6hdr *iph, *niph; 884 struct ipv6hdr *iph, *niph;
885 struct tcphdr *nth; 885 struct tcphdr *nth;
886 unsigned int tcp_hdr_size; 886 unsigned int tcp_hdr_size;
887 u16 mss = opts->mss; 887 u16 mss = opts->mss_encode;
888 888
889 iph = ipv6_hdr(skb); 889 iph = ipv6_hdr(skb);
890 890
@@ -1111,7 +1111,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
1111 state = &ct->proto.tcp; 1111 state = &ct->proto.tcp;
1112 switch (state->state) { 1112 switch (state->state) {
1113 case TCP_CONNTRACK_CLOSE: 1113 case TCP_CONNTRACK_CLOSE:
1114 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 1114 if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
1115 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - 1115 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
1116 ntohl(th->seq) + 1); 1116 ntohl(th->seq) + 1);
1117 break; 1117 break;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ed17a7c29b86..605a7cfe7ca7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1662,7 +1662,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
1662 1662
1663 chain->flags |= NFT_BASE_CHAIN | flags; 1663 chain->flags |= NFT_BASE_CHAIN | flags;
1664 basechain->policy = NF_ACCEPT; 1664 basechain->policy = NF_ACCEPT;
1665 INIT_LIST_HEAD(&basechain->cb_list); 1665 flow_block_init(&basechain->flow_block);
1666 } else { 1666 } else {
1667 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 1667 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
1668 if (chain == NULL) 1668 if (chain == NULL)
@@ -1900,6 +1900,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1900 1900
1901 if (nla[NFTA_CHAIN_FLAGS]) 1901 if (nla[NFTA_CHAIN_FLAGS])
1902 flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS])); 1902 flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
1903 else if (chain)
1904 flags = chain->flags;
1903 1905
1904 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); 1906 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
1905 1907
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 2c3302845f67..64f5fd5f240e 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -116,7 +116,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
116 struct flow_block_cb *block_cb; 116 struct flow_block_cb *block_cb;
117 int err; 117 int err;
118 118
119 list_for_each_entry(block_cb, &basechain->cb_list, list) { 119 list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
120 err = block_cb->cb(type, type_data, block_cb->cb_priv); 120 err = block_cb->cb(type, type_data, block_cb->cb_priv);
121 if (err < 0) 121 if (err < 0)
122 return err; 122 return err;
@@ -154,7 +154,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
154static int nft_flow_offload_bind(struct flow_block_offload *bo, 154static int nft_flow_offload_bind(struct flow_block_offload *bo,
155 struct nft_base_chain *basechain) 155 struct nft_base_chain *basechain)
156{ 156{
157 list_splice(&bo->cb_list, &basechain->cb_list); 157 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
158 return 0; 158 return 0;
159} 159}
160 160
@@ -198,6 +198,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
198 return -EOPNOTSUPP; 198 return -EOPNOTSUPP;
199 199
200 bo.command = cmd; 200 bo.command = cmd;
201 bo.block = &basechain->flow_block;
201 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 202 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
202 bo.extack = &extack; 203 bo.extack = &extack;
203 INIT_LIST_HEAD(&bo.cb_list); 204 INIT_LIST_HEAD(&bo.cb_list);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 92077d459109..4abbb452cf6c 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
578 ss = nfnetlink_get_subsys(type << 8); 578 ss = nfnetlink_get_subsys(type << 8);
579 rcu_read_unlock(); 579 rcu_read_unlock();
580 if (!ss) 580 if (!ss)
581 request_module("nfnetlink-subsys-%d", type); 581 request_module_nowait("nfnetlink-subsys-%d", type);
582 return 0; 582 return 0;
583} 583}
584#endif 584#endif
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 3fd540b2c6ba..b5d5d071d765 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -193,7 +193,7 @@ static inline void nft_chain_filter_inet_init(void) {}
193static inline void nft_chain_filter_inet_fini(void) {} 193static inline void nft_chain_filter_inet_fini(void) {}
194#endif /* CONFIG_NF_TABLES_IPV6 */ 194#endif /* CONFIG_NF_TABLES_IPV6 */
195 195
196#ifdef CONFIG_NF_TABLES_BRIDGE 196#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
197static unsigned int 197static unsigned int
198nft_do_chain_bridge(void *priv, 198nft_do_chain_bridge(void *priv,
199 struct sk_buff *skb, 199 struct sk_buff *skb,
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index 2f89bde3c61c..ff9ac8ae0031 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
142#ifdef CONFIG_NF_TABLES_IPV6 142#ifdef CONFIG_NF_TABLES_IPV6
143MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat"); 143MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
144#endif 144#endif
145#ifdef CONFIG_NF_TABLES_INET
146MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
147#endif
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 827ab6196df9..46ca8bcca1bd 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1252,7 +1252,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
1252 priv->l4proto, NULL, &priv->dport); 1252 priv->l4proto, NULL, &priv->dport);
1253 exp->timeout.expires = jiffies + priv->timeout * HZ; 1253 exp->timeout.expires = jiffies + priv->timeout * HZ;
1254 1254
1255 if (nf_ct_expect_related(exp) != 0) 1255 if (nf_ct_expect_related(exp, 0) != 0)
1256 regs->verdict.code = NF_DROP; 1256 regs->verdict.code = NF_DROP;
1257} 1257}
1258 1258
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index fe93e731dc7f..b836d550b919 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
129 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); 129 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
130 130
131 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); 131 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
132 if (priv->modulus <= 1) 132 if (priv->modulus < 1)
133 return -ERANGE; 133 return -ERANGE;
134 134
135 if (priv->offset + priv->modulus - 1 < priv->offset) 135 if (priv->offset + priv->modulus - 1 < priv->offset)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 76866f77e343..f69afb9ff3cb 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -60,24 +60,16 @@ void nft_meta_get_eval(const struct nft_expr *expr,
60 *dest = skb->mark; 60 *dest = skb->mark;
61 break; 61 break;
62 case NFT_META_IIF: 62 case NFT_META_IIF:
63 if (in == NULL) 63 *dest = in ? in->ifindex : 0;
64 goto err;
65 *dest = in->ifindex;
66 break; 64 break;
67 case NFT_META_OIF: 65 case NFT_META_OIF:
68 if (out == NULL) 66 *dest = out ? out->ifindex : 0;
69 goto err;
70 *dest = out->ifindex;
71 break; 67 break;
72 case NFT_META_IIFNAME: 68 case NFT_META_IIFNAME:
73 if (in == NULL) 69 strncpy((char *)dest, in ? in->name : "", IFNAMSIZ);
74 goto err;
75 strncpy((char *)dest, in->name, IFNAMSIZ);
76 break; 70 break;
77 case NFT_META_OIFNAME: 71 case NFT_META_OIFNAME:
78 if (out == NULL) 72 strncpy((char *)dest, out ? out->name : "", IFNAMSIZ);
79 goto err;
80 strncpy((char *)dest, out->name, IFNAMSIZ);
81 break; 73 break;
82 case NFT_META_IIFTYPE: 74 case NFT_META_IIFTYPE:
83 if (in == NULL) 75 if (in == NULL)
@@ -546,7 +538,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
546 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) 538 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
547 return ERR_PTR(-EINVAL); 539 return ERR_PTR(-EINVAL);
548 540
549#ifdef CONFIG_NF_TABLES_BRIDGE 541#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META)
550 if (ctx->family == NFPROTO_BRIDGE) 542 if (ctx->family == NFPROTO_BRIDGE)
551 return ERR_PTR(-EAGAIN); 543 return ERR_PTR(-EAGAIN);
552#endif 544#endif
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 8487eeff5c0e..43eeb1f609f1 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
291 291
292MODULE_LICENSE("GPL"); 292MODULE_LICENSE("GPL");
293MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); 293MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
294MODULE_ALIAS_NFT_EXPR("nat"); 294MODULE_ALIAS_NFT_EXPR("redir");
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 80060ade8a5b..928e661d1517 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -31,6 +31,8 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
31 opts->options |= NF_SYNPROXY_OPT_ECN; 31 opts->options |= NF_SYNPROXY_OPT_ECN;
32 32
33 opts->options &= priv->info.options; 33 opts->options &= priv->info.options;
34 opts->mss_encode = opts->mss;
35 opts->mss = info->mss;
34 if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP) 36 if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
35 synproxy_init_timestamp_cookie(info, opts); 37 synproxy_init_timestamp_cookie(info, opts);
36 else 38 else
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 96740d389377..c4f54ad2b98a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -967,6 +967,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
967 967
968 window = skb->data[20]; 968 window = skb->data[20];
969 969
970 sock_hold(make);
970 skb->sk = make; 971 skb->sk = make;
971 skb->destructor = sock_efree; 972 skb->destructor = sock_efree;
972 make->sk_state = TCP_ESTABLISHED; 973 make->sk_state = TCP_ESTABLISHED;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 892287d06c17..d01410e52097 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1047,7 +1047,7 @@ error:
1047} 1047}
1048 1048
1049/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ 1049/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1050static struct sw_flow_actions *get_flow_actions(struct net *net, 1050static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
1051 const struct nlattr *a, 1051 const struct nlattr *a,
1052 const struct sw_flow_key *key, 1052 const struct sw_flow_key *key,
1053 const struct sw_flow_mask *mask, 1053 const struct sw_flow_mask *mask,
@@ -1081,12 +1081,13 @@ static struct sw_flow_actions *get_flow_actions(struct net *net,
1081 * we should not to return match object with dangling reference 1081 * we should not to return match object with dangling reference
1082 * to mask. 1082 * to mask.
1083 * */ 1083 * */
1084static int ovs_nla_init_match_and_action(struct net *net, 1084static noinline_for_stack int
1085 struct sw_flow_match *match, 1085ovs_nla_init_match_and_action(struct net *net,
1086 struct sw_flow_key *key, 1086 struct sw_flow_match *match,
1087 struct nlattr **a, 1087 struct sw_flow_key *key,
1088 struct sw_flow_actions **acts, 1088 struct nlattr **a,
1089 bool log) 1089 struct sw_flow_actions **acts,
1090 bool log)
1090{ 1091{
1091 struct sw_flow_mask mask; 1092 struct sw_flow_mask mask;
1092 int error = 0; 1093 int error = 0;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..bc89e16e0505 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
59void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, 59void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
60 const struct sk_buff *skb) 60 const struct sk_buff *skb)
61{ 61{
62 struct flow_stats *stats; 62 struct sw_flow_stats *stats;
63 unsigned int cpu = smp_processor_id(); 63 unsigned int cpu = smp_processor_id();
64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); 64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
65 65
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
87 if (likely(flow->stats_last_writer != -1) && 87 if (likely(flow->stats_last_writer != -1) &&
88 likely(!rcu_access_pointer(flow->stats[cpu]))) { 88 likely(!rcu_access_pointer(flow->stats[cpu]))) {
89 /* Try to allocate CPU-specific stats. */ 89 /* Try to allocate CPU-specific stats. */
90 struct flow_stats *new_stats; 90 struct sw_flow_stats *new_stats;
91 91
92 new_stats = 92 new_stats =
93 kmem_cache_alloc_node(flow_stats_cache, 93 kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
134 134
135 /* We open code this to make sure cpu 0 is always considered */ 135 /* We open code this to make sure cpu 0 is always considered */
136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
137 struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); 137 struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
138 138
139 if (stats) { 139 if (stats) {
140 /* Local CPU may write on non-local stats, so we must 140 /* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
158 158
159 /* We open code this to make sure cpu 0 is always considered */ 159 /* We open code this to make sure cpu 0 is always considered */
160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { 160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
161 struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); 161 struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
162 162
163 if (stats) { 163 if (stats) {
164 spin_lock_bh(&stats->lock); 164 spin_lock_bh(&stats->lock);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 3e2cc2202d66..a5506e2d4b7a 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -194,7 +194,7 @@ struct sw_flow_actions {
194 struct nlattr actions[]; 194 struct nlattr actions[];
195}; 195};
196 196
197struct flow_stats { 197struct sw_flow_stats {
198 u64 packet_count; /* Number of packets matched. */ 198 u64 packet_count; /* Number of packets matched. */
199 u64 byte_count; /* Number of bytes matched. */ 199 u64 byte_count; /* Number of bytes matched. */
200 unsigned long used; /* Last used time (in jiffies). */ 200 unsigned long used; /* Last used time (in jiffies). */
@@ -216,7 +216,7 @@ struct sw_flow {
216 struct cpumask cpu_used_mask; 216 struct cpumask cpu_used_mask;
217 struct sw_flow_mask *mask; 217 struct sw_flow_mask *mask;
218 struct sw_flow_actions __rcu *sf_acts; 218 struct sw_flow_actions __rcu *sf_acts;
219 struct flow_stats __rcu *stats[]; /* One for each CPU. First one 219 struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
220 * is allocated at flow creation time, 220 * is allocated at flow creation time,
221 * the rest are allocated on demand 221 * the rest are allocated on demand
222 * while holding the 'stats[0].lock'. 222 * while holding the 'stats[0].lock'.
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 988fd8a94e43..cf3582c5ed70 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
66struct sw_flow *ovs_flow_alloc(void) 66struct sw_flow *ovs_flow_alloc(void)
67{ 67{
68 struct sw_flow *flow; 68 struct sw_flow *flow;
69 struct flow_stats *stats; 69 struct sw_flow_stats *stats;
70 70
71 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); 71 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
72 if (!flow) 72 if (!flow)
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
110 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) 110 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
111 if (flow->stats[cpu]) 111 if (flow->stats[cpu])
112 kmem_cache_free(flow_stats_cache, 112 kmem_cache_free(flow_stats_cache,
113 (struct flow_stats __force *)flow->stats[cpu]); 113 (struct sw_flow_stats __force *)flow->stats[cpu]);
114 kmem_cache_free(flow_cache, flow); 114 kmem_cache_free(flow_cache, flow);
115} 115}
116 116
@@ -712,13 +712,13 @@ int ovs_flow_init(void)
712 712
713 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 713 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
714 + (nr_cpu_ids 714 + (nr_cpu_ids
715 * sizeof(struct flow_stats *)), 715 * sizeof(struct sw_flow_stats *)),
716 0, 0, NULL); 716 0, 0, NULL);
717 if (flow_cache == NULL) 717 if (flow_cache == NULL)
718 return -ENOMEM; 718 return -ENOMEM;
719 719
720 flow_stats_cache 720 flow_stats_cache
721 = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats), 721 = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
722 0, SLAB_HWCACHE_ALIGN, NULL); 722 0, SLAB_HWCACHE_ALIGN, NULL);
723 if (flow_stats_cache == NULL) { 723 if (flow_stats_cache == NULL) {
724 kmem_cache_destroy(flow_cache); 724 kmem_cache_destroy(flow_cache);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index ff74c4bbb9fc..9986d6065c4d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -105,7 +105,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
105 break; 105 break;
106 106
107 case RDMA_CM_EVENT_ESTABLISHED: 107 case RDMA_CM_EVENT_ESTABLISHED:
108 trans->cm_connect_complete(conn, event); 108 if (conn)
109 trans->cm_connect_complete(conn, event);
109 break; 110 break;
110 111
111 case RDMA_CM_EVENT_REJECTED: 112 case RDMA_CM_EVENT_REJECTED:
@@ -137,6 +138,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
137 break; 138 break;
138 139
139 case RDMA_CM_EVENT_DISCONNECTED: 140 case RDMA_CM_EVENT_DISCONNECTED:
141 if (!conn)
142 break;
140 rdsdebug("DISCONNECT event - dropping connection " 143 rdsdebug("DISCONNECT event - dropping connection "
141 "%pI6c->%pI6c\n", &conn->c_laddr, 144 "%pI6c->%pI6c\n", &conn->c_laddr,
142 &conn->c_faddr); 145 &conn->c_faddr);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4ee4fd..822f45386e31 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1061,6 +1061,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
1061struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); 1061struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
1062struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); 1062struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
1063void rxrpc_put_peer(struct rxrpc_peer *); 1063void rxrpc_put_peer(struct rxrpc_peer *);
1064void rxrpc_put_peer_locked(struct rxrpc_peer *);
1064 1065
1065/* 1066/*
1066 * proc.c 1067 * proc.c
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 9f2f45c09e58..7666ec72d37e 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
378 spin_lock_bh(&rxnet->peer_hash_lock); 378 spin_lock_bh(&rxnet->peer_hash_lock);
379 list_add_tail(&peer->keepalive_link, 379 list_add_tail(&peer->keepalive_link,
380 &rxnet->peer_keepalive[slot & mask]); 380 &rxnet->peer_keepalive[slot & mask]);
381 rxrpc_put_peer(peer); 381 rxrpc_put_peer_locked(peer);
382 } 382 }
383 383
384 spin_unlock_bh(&rxnet->peer_hash_lock); 384 spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9d3ce81cf8ae..9c3ac96f71cb 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
437} 437}
438 438
439/* 439/*
440 * Drop a ref on a peer record where the caller already holds the
441 * peer_hash_lock.
442 */
443void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
444{
445 const void *here = __builtin_return_address(0);
446 int n;
447
448 n = atomic_dec_return(&peer->usage);
449 trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
450 if (n == 0) {
451 hash_del_rcu(&peer->hash_link);
452 list_del_init(&peer->keepalive_link);
453 kfree_rcu(peer, rcu);
454 }
455}
456
457/*
440 * Make sure all peer records have been discarded. 458 * Make sure all peer records have been discarded.
441 */ 459 */
442void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) 460void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5d3f33ce6d41..bae14438f869 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
226 rxrpc_set_call_completion(call, 226 rxrpc_set_call_completion(call,
227 RXRPC_CALL_LOCAL_ERROR, 227 RXRPC_CALL_LOCAL_ERROR,
228 0, ret); 228 0, ret);
229 rxrpc_notify_socket(call);
229 goto out; 230 goto out;
230 } 231 }
231 _debug("need instant resend %d", ret); 232 _debug("need instant resend %d", ret);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 8126b26f125e..fd1f7e799e23 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -285,6 +285,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
285 struct tcf_bpf *prog; 285 struct tcf_bpf *prog;
286 bool is_bpf, is_ebpf; 286 bool is_bpf, is_ebpf;
287 int ret, res = 0; 287 int ret, res = 0;
288 u32 index;
288 289
289 if (!nla) 290 if (!nla)
290 return -EINVAL; 291 return -EINVAL;
@@ -298,13 +299,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
298 return -EINVAL; 299 return -EINVAL;
299 300
300 parm = nla_data(tb[TCA_ACT_BPF_PARMS]); 301 parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
301 302 index = parm->index;
302 ret = tcf_idr_check_alloc(tn, &parm->index, act, bind); 303 ret = tcf_idr_check_alloc(tn, &index, act, bind);
303 if (!ret) { 304 if (!ret) {
304 ret = tcf_idr_create(tn, parm->index, est, act, 305 ret = tcf_idr_create(tn, index, est, act,
305 &act_bpf_ops, bind, true); 306 &act_bpf_ops, bind, true);
306 if (ret < 0) { 307 if (ret < 0) {
307 tcf_idr_cleanup(tn, parm->index); 308 tcf_idr_cleanup(tn, index);
308 return ret; 309 return ret;
309 } 310 }
310 311
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ce36b0f7e1dc..32ac04d77a45 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -103,6 +103,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
103 struct tcf_connmark_info *ci; 103 struct tcf_connmark_info *ci;
104 struct tc_connmark *parm; 104 struct tc_connmark *parm;
105 int ret = 0, err; 105 int ret = 0, err;
106 u32 index;
106 107
107 if (!nla) 108 if (!nla)
108 return -EINVAL; 109 return -EINVAL;
@@ -116,13 +117,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
116 return -EINVAL; 117 return -EINVAL;
117 118
118 parm = nla_data(tb[TCA_CONNMARK_PARMS]); 119 parm = nla_data(tb[TCA_CONNMARK_PARMS]);
119 120 index = parm->index;
120 ret = tcf_idr_check_alloc(tn, &parm->index, a, bind); 121 ret = tcf_idr_check_alloc(tn, &index, a, bind);
121 if (!ret) { 122 if (!ret) {
122 ret = tcf_idr_create(tn, parm->index, est, a, 123 ret = tcf_idr_create(tn, index, est, a,
123 &act_connmark_ops, bind, false); 124 &act_connmark_ops, bind, false);
124 if (ret) { 125 if (ret) {
125 tcf_idr_cleanup(tn, parm->index); 126 tcf_idr_cleanup(tn, index);
126 return ret; 127 return ret;
127 } 128 }
128 129
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 621fb22ce2a9..9b9288267a54 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -52,6 +52,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
52 struct tc_csum *parm; 52 struct tc_csum *parm;
53 struct tcf_csum *p; 53 struct tcf_csum *p;
54 int ret = 0, err; 54 int ret = 0, err;
55 u32 index;
55 56
56 if (nla == NULL) 57 if (nla == NULL)
57 return -EINVAL; 58 return -EINVAL;
@@ -64,13 +65,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
64 if (tb[TCA_CSUM_PARMS] == NULL) 65 if (tb[TCA_CSUM_PARMS] == NULL)
65 return -EINVAL; 66 return -EINVAL;
66 parm = nla_data(tb[TCA_CSUM_PARMS]); 67 parm = nla_data(tb[TCA_CSUM_PARMS]);
67 68 index = parm->index;
68 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 69 err = tcf_idr_check_alloc(tn, &index, a, bind);
69 if (!err) { 70 if (!err) {
70 ret = tcf_idr_create(tn, parm->index, est, a, 71 ret = tcf_idr_create(tn, index, est, a,
71 &act_csum_ops, bind, true); 72 &act_csum_ops, bind, true);
72 if (ret) { 73 if (ret) {
73 tcf_idr_cleanup(tn, parm->index); 74 tcf_idr_cleanup(tn, index);
74 return ret; 75 return ret;
75 } 76 }
76 ret = ACT_P_CREATED; 77 ret = ACT_P_CREATED;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b501ce0cf116..33a1a7406e87 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -666,6 +666,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
666 struct tc_ct *parm; 666 struct tc_ct *parm;
667 struct tcf_ct *c; 667 struct tcf_ct *c;
668 int err, res = 0; 668 int err, res = 0;
669 u32 index;
669 670
670 if (!nla) { 671 if (!nla) {
671 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed"); 672 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
@@ -681,16 +682,16 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
681 return -EINVAL; 682 return -EINVAL;
682 } 683 }
683 parm = nla_data(tb[TCA_CT_PARMS]); 684 parm = nla_data(tb[TCA_CT_PARMS]);
684 685 index = parm->index;
685 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 686 err = tcf_idr_check_alloc(tn, &index, a, bind);
686 if (err < 0) 687 if (err < 0)
687 return err; 688 return err;
688 689
689 if (!err) { 690 if (!err) {
690 err = tcf_idr_create(tn, parm->index, est, a, 691 err = tcf_idr_create(tn, index, est, a,
691 &act_ct_ops, bind, true); 692 &act_ct_ops, bind, true);
692 if (err) { 693 if (err) {
693 tcf_idr_cleanup(tn, parm->index); 694 tcf_idr_cleanup(tn, index);
694 return err; 695 return err;
695 } 696 }
696 res = ACT_P_CREATED; 697 res = ACT_P_CREATED;
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 10eb2bb99861..06ef74b74911 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -157,10 +157,10 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
157 struct netlink_ext_ack *extack) 157 struct netlink_ext_ack *extack)
158{ 158{
159 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 159 struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
160 u32 dscpmask = 0, dscpstatemask, index;
160 struct nlattr *tb[TCA_CTINFO_MAX + 1]; 161 struct nlattr *tb[TCA_CTINFO_MAX + 1];
161 struct tcf_ctinfo_params *cp_new; 162 struct tcf_ctinfo_params *cp_new;
162 struct tcf_chain *goto_ch = NULL; 163 struct tcf_chain *goto_ch = NULL;
163 u32 dscpmask = 0, dscpstatemask;
164 struct tc_ctinfo *actparm; 164 struct tc_ctinfo *actparm;
165 struct tcf_ctinfo *ci; 165 struct tcf_ctinfo *ci;
166 u8 dscpmaskshift; 166 u8 dscpmaskshift;
@@ -206,12 +206,13 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
206 } 206 }
207 207
208 /* done the validation:now to the actual action allocation */ 208 /* done the validation:now to the actual action allocation */
209 err = tcf_idr_check_alloc(tn, &actparm->index, a, bind); 209 index = actparm->index;
210 err = tcf_idr_check_alloc(tn, &index, a, bind);
210 if (!err) { 211 if (!err) {
211 ret = tcf_idr_create(tn, actparm->index, est, a, 212 ret = tcf_idr_create(tn, index, est, a,
212 &act_ctinfo_ops, bind, false); 213 &act_ctinfo_ops, bind, false);
213 if (ret) { 214 if (ret) {
214 tcf_idr_cleanup(tn, actparm->index); 215 tcf_idr_cleanup(tn, index);
215 return ret; 216 return ret;
216 } 217 }
217 ret = ACT_P_CREATED; 218 ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b2380c5284e6..8f0140c6ca58 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -61,6 +61,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
61 struct tc_gact *parm; 61 struct tc_gact *parm;
62 struct tcf_gact *gact; 62 struct tcf_gact *gact;
63 int ret = 0; 63 int ret = 0;
64 u32 index;
64 int err; 65 int err;
65#ifdef CONFIG_GACT_PROB 66#ifdef CONFIG_GACT_PROB
66 struct tc_gact_p *p_parm = NULL; 67 struct tc_gact_p *p_parm = NULL;
@@ -77,6 +78,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
77 if (tb[TCA_GACT_PARMS] == NULL) 78 if (tb[TCA_GACT_PARMS] == NULL)
78 return -EINVAL; 79 return -EINVAL;
79 parm = nla_data(tb[TCA_GACT_PARMS]); 80 parm = nla_data(tb[TCA_GACT_PARMS]);
81 index = parm->index;
80 82
81#ifndef CONFIG_GACT_PROB 83#ifndef CONFIG_GACT_PROB
82 if (tb[TCA_GACT_PROB] != NULL) 84 if (tb[TCA_GACT_PROB] != NULL)
@@ -94,12 +96,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
94 } 96 }
95#endif 97#endif
96 98
97 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 99 err = tcf_idr_check_alloc(tn, &index, a, bind);
98 if (!err) { 100 if (!err) {
99 ret = tcf_idr_create(tn, parm->index, est, a, 101 ret = tcf_idr_create(tn, index, est, a,
100 &act_gact_ops, bind, true); 102 &act_gact_ops, bind, true);
101 if (ret) { 103 if (ret) {
102 tcf_idr_cleanup(tn, parm->index); 104 tcf_idr_cleanup(tn, index);
103 return ret; 105 return ret;
104 } 106 }
105 ret = ACT_P_CREATED; 107 ret = ACT_P_CREATED;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 41d5398dd2f2..92ee853d43e6 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -479,8 +479,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
479 u8 *saddr = NULL; 479 u8 *saddr = NULL;
480 bool exists = false; 480 bool exists = false;
481 int ret = 0; 481 int ret = 0;
482 u32 index;
482 int err; 483 int err;
483 484
485 if (!nla) {
486 NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
487 return -EINVAL;
488 }
489
484 err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy, 490 err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
485 NULL); 491 NULL);
486 if (err < 0) 492 if (err < 0)
@@ -502,7 +508,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
502 if (!p) 508 if (!p)
503 return -ENOMEM; 509 return -ENOMEM;
504 510
505 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 511 index = parm->index;
512 err = tcf_idr_check_alloc(tn, &index, a, bind);
506 if (err < 0) { 513 if (err < 0) {
507 kfree(p); 514 kfree(p);
508 return err; 515 return err;
@@ -514,10 +521,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
514 } 521 }
515 522
516 if (!exists) { 523 if (!exists) {
517 ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops, 524 ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
518 bind, true); 525 bind, true);
519 if (ret) { 526 if (ret) {
520 tcf_idr_cleanup(tn, parm->index); 527 tcf_idr_cleanup(tn, index);
521 kfree(p); 528 kfree(p);
522 return ret; 529 return ret;
523 } 530 }
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 055faa298c8e..be3f88dfc37e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
104 struct net_device *dev; 104 struct net_device *dev;
105 bool exists = false; 105 bool exists = false;
106 int ret, err; 106 int ret, err;
107 u32 index;
107 108
108 if (!nla) { 109 if (!nla) {
109 NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed"); 110 NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -118,8 +119,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
118 return -EINVAL; 119 return -EINVAL;
119 } 120 }
120 parm = nla_data(tb[TCA_MIRRED_PARMS]); 121 parm = nla_data(tb[TCA_MIRRED_PARMS]);
121 122 index = parm->index;
122 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 123 err = tcf_idr_check_alloc(tn, &index, a, bind);
123 if (err < 0) 124 if (err < 0)
124 return err; 125 return err;
125 exists = err; 126 exists = err;
@@ -136,21 +137,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
136 if (exists) 137 if (exists)
137 tcf_idr_release(*a, bind); 138 tcf_idr_release(*a, bind);
138 else 139 else
139 tcf_idr_cleanup(tn, parm->index); 140 tcf_idr_cleanup(tn, index);
140 NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option"); 141 NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
141 return -EINVAL; 142 return -EINVAL;
142 } 143 }
143 144
144 if (!exists) { 145 if (!exists) {
145 if (!parm->ifindex) { 146 if (!parm->ifindex) {
146 tcf_idr_cleanup(tn, parm->index); 147 tcf_idr_cleanup(tn, index);
147 NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); 148 NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
148 return -EINVAL; 149 return -EINVAL;
149 } 150 }
150 ret = tcf_idr_create(tn, parm->index, est, a, 151 ret = tcf_idr_create(tn, index, est, a,
151 &act_mirred_ops, bind, true); 152 &act_mirred_ops, bind, true);
152 if (ret) { 153 if (ret) {
153 tcf_idr_cleanup(tn, parm->index); 154 tcf_idr_cleanup(tn, index);
154 return ret; 155 return ret;
155 } 156 }
156 ret = ACT_P_CREATED; 157 ret = ACT_P_CREATED;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index ca2597ce4ac9..0f299e3b618c 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -138,6 +138,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
138 struct tcf_mpls *m; 138 struct tcf_mpls *m;
139 int ret = 0, err; 139 int ret = 0, err;
140 u8 mpls_ttl = 0; 140 u8 mpls_ttl = 0;
141 u32 index;
141 142
142 if (!nla) { 143 if (!nla) {
143 NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes"); 144 NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes");
@@ -153,6 +154,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
153 return -EINVAL; 154 return -EINVAL;
154 } 155 }
155 parm = nla_data(tb[TCA_MPLS_PARMS]); 156 parm = nla_data(tb[TCA_MPLS_PARMS]);
157 index = parm->index;
156 158
157 /* Verify parameters against action type. */ 159 /* Verify parameters against action type. */
158 switch (parm->m_action) { 160 switch (parm->m_action) {
@@ -209,7 +211,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
209 return -EINVAL; 211 return -EINVAL;
210 } 212 }
211 213
212 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 214 err = tcf_idr_check_alloc(tn, &index, a, bind);
213 if (err < 0) 215 if (err < 0)
214 return err; 216 return err;
215 exists = err; 217 exists = err;
@@ -217,10 +219,10 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
217 return 0; 219 return 0;
218 220
219 if (!exists) { 221 if (!exists) {
220 ret = tcf_idr_create(tn, parm->index, est, a, 222 ret = tcf_idr_create(tn, index, est, a,
221 &act_mpls_ops, bind, true); 223 &act_mpls_ops, bind, true);
222 if (ret) { 224 if (ret) {
223 tcf_idr_cleanup(tn, parm->index); 225 tcf_idr_cleanup(tn, index);
224 return ret; 226 return ret;
225 } 227 }
226 228
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 45923ebb7a4f..7b858c11b1b5 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -44,6 +44,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
44 struct tc_nat *parm; 44 struct tc_nat *parm;
45 int ret = 0, err; 45 int ret = 0, err;
46 struct tcf_nat *p; 46 struct tcf_nat *p;
47 u32 index;
47 48
48 if (nla == NULL) 49 if (nla == NULL)
49 return -EINVAL; 50 return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
56 if (tb[TCA_NAT_PARMS] == NULL) 57 if (tb[TCA_NAT_PARMS] == NULL)
57 return -EINVAL; 58 return -EINVAL;
58 parm = nla_data(tb[TCA_NAT_PARMS]); 59 parm = nla_data(tb[TCA_NAT_PARMS]);
59 60 index = parm->index;
60 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 61 err = tcf_idr_check_alloc(tn, &index, a, bind);
61 if (!err) { 62 if (!err) {
62 ret = tcf_idr_create(tn, parm->index, est, a, 63 ret = tcf_idr_create(tn, index, est, a,
63 &act_nat_ops, bind, false); 64 &act_nat_ops, bind, false);
64 if (ret) { 65 if (ret) {
65 tcf_idr_cleanup(tn, parm->index); 66 tcf_idr_cleanup(tn, index);
66 return ret; 67 return ret;
67 } 68 }
68 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45e9d6bfddb3..17360c6faeaa 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
149 struct tcf_pedit *p; 149 struct tcf_pedit *p;
150 int ret = 0, err; 150 int ret = 0, err;
151 int ksize; 151 int ksize;
152 u32 index;
152 153
153 if (!nla) { 154 if (!nla) {
154 NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed"); 155 NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -179,18 +180,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
179 if (IS_ERR(keys_ex)) 180 if (IS_ERR(keys_ex))
180 return PTR_ERR(keys_ex); 181 return PTR_ERR(keys_ex);
181 182
182 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 183 index = parm->index;
184 err = tcf_idr_check_alloc(tn, &index, a, bind);
183 if (!err) { 185 if (!err) {
184 if (!parm->nkeys) { 186 if (!parm->nkeys) {
185 tcf_idr_cleanup(tn, parm->index); 187 tcf_idr_cleanup(tn, index);
186 NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); 188 NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
187 ret = -EINVAL; 189 ret = -EINVAL;
188 goto out_free; 190 goto out_free;
189 } 191 }
190 ret = tcf_idr_create(tn, parm->index, est, a, 192 ret = tcf_idr_create(tn, index, est, a,
191 &act_pedit_ops, bind, false); 193 &act_pedit_ops, bind, false);
192 if (ret) { 194 if (ret) {
193 tcf_idr_cleanup(tn, parm->index); 195 tcf_idr_cleanup(tn, index);
194 goto out_free; 196 goto out_free;
195 } 197 }
196 ret = ACT_P_CREATED; 198 ret = ACT_P_CREATED;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a065f62fa79c..49cec3e64a4d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -57,6 +57,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
57 struct tc_action_net *tn = net_generic(net, police_net_id); 57 struct tc_action_net *tn = net_generic(net, police_net_id);
58 struct tcf_police_params *new; 58 struct tcf_police_params *new;
59 bool exists = false; 59 bool exists = false;
60 u32 index;
60 61
61 if (nla == NULL) 62 if (nla == NULL)
62 return -EINVAL; 63 return -EINVAL;
@@ -73,7 +74,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
73 return -EINVAL; 74 return -EINVAL;
74 75
75 parm = nla_data(tb[TCA_POLICE_TBF]); 76 parm = nla_data(tb[TCA_POLICE_TBF]);
76 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 77 index = parm->index;
78 err = tcf_idr_check_alloc(tn, &index, a, bind);
77 if (err < 0) 79 if (err < 0)
78 return err; 80 return err;
79 exists = err; 81 exists = err;
@@ -81,10 +83,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
81 return 0; 83 return 0;
82 84
83 if (!exists) { 85 if (!exists) {
84 ret = tcf_idr_create(tn, parm->index, NULL, a, 86 ret = tcf_idr_create(tn, index, NULL, a,
85 &act_police_ops, bind, true); 87 &act_police_ops, bind, true);
86 if (ret) { 88 if (ret) {
87 tcf_idr_cleanup(tn, parm->index); 89 tcf_idr_cleanup(tn, index);
88 return ret; 90 return ret;
89 } 91 }
90 ret = ACT_P_CREATED; 92 ret = ACT_P_CREATED;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 274d7a0c0e25..595308d60133 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -41,8 +41,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
41 struct tc_action_net *tn = net_generic(net, sample_net_id); 41 struct tc_action_net *tn = net_generic(net, sample_net_id);
42 struct nlattr *tb[TCA_SAMPLE_MAX + 1]; 42 struct nlattr *tb[TCA_SAMPLE_MAX + 1];
43 struct psample_group *psample_group; 43 struct psample_group *psample_group;
44 u32 psample_group_num, rate, index;
44 struct tcf_chain *goto_ch = NULL; 45 struct tcf_chain *goto_ch = NULL;
45 u32 psample_group_num, rate;
46 struct tc_sample *parm; 46 struct tc_sample *parm;
47 struct tcf_sample *s; 47 struct tcf_sample *s;
48 bool exists = false; 48 bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
59 return -EINVAL; 59 return -EINVAL;
60 60
61 parm = nla_data(tb[TCA_SAMPLE_PARMS]); 61 parm = nla_data(tb[TCA_SAMPLE_PARMS]);
62 62 index = parm->index;
63 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 63 err = tcf_idr_check_alloc(tn, &index, a, bind);
64 if (err < 0) 64 if (err < 0)
65 return err; 65 return err;
66 exists = err; 66 exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
68 return 0; 68 return 0;
69 69
70 if (!exists) { 70 if (!exists) {
71 ret = tcf_idr_create(tn, parm->index, est, a, 71 ret = tcf_idr_create(tn, index, est, a,
72 &act_sample_ops, bind, true); 72 &act_sample_ops, bind, true);
73 if (ret) { 73 if (ret) {
74 tcf_idr_cleanup(tn, parm->index); 74 tcf_idr_cleanup(tn, index);
75 return ret; 75 return ret;
76 } 76 }
77 ret = ACT_P_CREATED; 77 ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index f28ddbabff76..33aefa25b545 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -95,6 +95,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
95 struct tcf_defact *d; 95 struct tcf_defact *d;
96 bool exists = false; 96 bool exists = false;
97 int ret = 0, err; 97 int ret = 0, err;
98 u32 index;
98 99
99 if (nla == NULL) 100 if (nla == NULL)
100 return -EINVAL; 101 return -EINVAL;
@@ -108,7 +109,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
108 return -EINVAL; 109 return -EINVAL;
109 110
110 parm = nla_data(tb[TCA_DEF_PARMS]); 111 parm = nla_data(tb[TCA_DEF_PARMS]);
111 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 112 index = parm->index;
113 err = tcf_idr_check_alloc(tn, &index, a, bind);
112 if (err < 0) 114 if (err < 0)
113 return err; 115 return err;
114 exists = err; 116 exists = err;
@@ -119,15 +121,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
119 if (exists) 121 if (exists)
120 tcf_idr_release(*a, bind); 122 tcf_idr_release(*a, bind);
121 else 123 else
122 tcf_idr_cleanup(tn, parm->index); 124 tcf_idr_cleanup(tn, index);
123 return -EINVAL; 125 return -EINVAL;
124 } 126 }
125 127
126 if (!exists) { 128 if (!exists) {
127 ret = tcf_idr_create(tn, parm->index, est, a, 129 ret = tcf_idr_create(tn, index, est, a,
128 &act_simp_ops, bind, false); 130 &act_simp_ops, bind, false);
129 if (ret) { 131 if (ret) {
130 tcf_idr_cleanup(tn, parm->index); 132 tcf_idr_cleanup(tn, index);
131 return ret; 133 return ret;
132 } 134 }
133 135
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 215a06705cef..b100870f02a6 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,6 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
99 u16 *queue_mapping = NULL, *ptype = NULL; 99 u16 *queue_mapping = NULL, *ptype = NULL;
100 bool exists = false; 100 bool exists = false;
101 int ret = 0, err; 101 int ret = 0, err;
102 u32 index;
102 103
103 if (nla == NULL) 104 if (nla == NULL)
104 return -EINVAL; 105 return -EINVAL;
@@ -146,8 +147,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
146 } 147 }
147 148
148 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); 149 parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
149 150 index = parm->index;
150 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 151 err = tcf_idr_check_alloc(tn, &index, a, bind);
151 if (err < 0) 152 if (err < 0)
152 return err; 153 return err;
153 exists = err; 154 exists = err;
@@ -158,15 +159,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
158 if (exists) 159 if (exists)
159 tcf_idr_release(*a, bind); 160 tcf_idr_release(*a, bind);
160 else 161 else
161 tcf_idr_cleanup(tn, parm->index); 162 tcf_idr_cleanup(tn, index);
162 return -EINVAL; 163 return -EINVAL;
163 } 164 }
164 165
165 if (!exists) { 166 if (!exists) {
166 ret = tcf_idr_create(tn, parm->index, est, a, 167 ret = tcf_idr_create(tn, index, est, a,
167 &act_skbedit_ops, bind, true); 168 &act_skbedit_ops, bind, true);
168 if (ret) { 169 if (ret) {
169 tcf_idr_cleanup(tn, parm->index); 170 tcf_idr_cleanup(tn, index);
170 return ret; 171 return ret;
171 } 172 }
172 173
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 4f07706eff07..7da3518e18ef 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -87,12 +87,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
87 struct tcf_skbmod_params *p, *p_old; 87 struct tcf_skbmod_params *p, *p_old;
88 struct tcf_chain *goto_ch = NULL; 88 struct tcf_chain *goto_ch = NULL;
89 struct tc_skbmod *parm; 89 struct tc_skbmod *parm;
90 u32 lflags = 0, index;
90 struct tcf_skbmod *d; 91 struct tcf_skbmod *d;
91 bool exists = false; 92 bool exists = false;
92 u8 *daddr = NULL; 93 u8 *daddr = NULL;
93 u8 *saddr = NULL; 94 u8 *saddr = NULL;
94 u16 eth_type = 0; 95 u16 eth_type = 0;
95 u32 lflags = 0;
96 int ret = 0, err; 96 int ret = 0, err;
97 97
98 if (!nla) 98 if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
122 } 122 }
123 123
124 parm = nla_data(tb[TCA_SKBMOD_PARMS]); 124 parm = nla_data(tb[TCA_SKBMOD_PARMS]);
125 index = parm->index;
125 if (parm->flags & SKBMOD_F_SWAPMAC) 126 if (parm->flags & SKBMOD_F_SWAPMAC)
126 lflags = SKBMOD_F_SWAPMAC; 127 lflags = SKBMOD_F_SWAPMAC;
127 128
128 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 129 err = tcf_idr_check_alloc(tn, &index, a, bind);
129 if (err < 0) 130 if (err < 0)
130 return err; 131 return err;
131 exists = err; 132 exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
136 if (exists) 137 if (exists)
137 tcf_idr_release(*a, bind); 138 tcf_idr_release(*a, bind);
138 else 139 else
139 tcf_idr_cleanup(tn, parm->index); 140 tcf_idr_cleanup(tn, index);
140 return -EINVAL; 141 return -EINVAL;
141 } 142 }
142 143
143 if (!exists) { 144 if (!exists) {
144 ret = tcf_idr_create(tn, parm->index, est, a, 145 ret = tcf_idr_create(tn, index, est, a,
145 &act_skbmod_ops, bind, true); 146 &act_skbmod_ops, bind, true);
146 if (ret) { 147 if (ret) {
147 tcf_idr_cleanup(tn, parm->index); 148 tcf_idr_cleanup(tn, index);
148 return ret; 149 return ret;
149 } 150 }
150 151
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 10dffda1d5cc..6d0debdc9b97 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -225,6 +225,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
225 __be16 flags = 0; 225 __be16 flags = 0;
226 u8 tos, ttl; 226 u8 tos, ttl;
227 int ret = 0; 227 int ret = 0;
228 u32 index;
228 int err; 229 int err;
229 230
230 if (!nla) { 231 if (!nla) {
@@ -245,7 +246,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
245 } 246 }
246 247
247 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); 248 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
248 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 249 index = parm->index;
250 err = tcf_idr_check_alloc(tn, &index, a, bind);
249 if (err < 0) 251 if (err < 0)
250 return err; 252 return err;
251 exists = err; 253 exists = err;
@@ -345,7 +347,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
345 } 347 }
346 348
347 if (!exists) { 349 if (!exists) {
348 ret = tcf_idr_create(tn, parm->index, est, a, 350 ret = tcf_idr_create(tn, index, est, a,
349 &act_tunnel_key_ops, bind, true); 351 &act_tunnel_key_ops, bind, true);
350 if (ret) { 352 if (ret) {
351 NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); 353 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -403,7 +405,7 @@ err_out:
403 if (exists) 405 if (exists)
404 tcf_idr_release(*a, bind); 406 tcf_idr_release(*a, bind);
405 else 407 else
406 tcf_idr_cleanup(tn, parm->index); 408 tcf_idr_cleanup(tn, index);
407 return ret; 409 return ret;
408} 410}
409 411
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 9269d350fb8a..a3c9eea1ee8a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,6 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
116 u8 push_prio = 0; 116 u8 push_prio = 0;
117 bool exists = false; 117 bool exists = false;
118 int ret = 0, err; 118 int ret = 0, err;
119 u32 index;
119 120
120 if (!nla) 121 if (!nla)
121 return -EINVAL; 122 return -EINVAL;
@@ -128,7 +129,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
128 if (!tb[TCA_VLAN_PARMS]) 129 if (!tb[TCA_VLAN_PARMS])
129 return -EINVAL; 130 return -EINVAL;
130 parm = nla_data(tb[TCA_VLAN_PARMS]); 131 parm = nla_data(tb[TCA_VLAN_PARMS]);
131 err = tcf_idr_check_alloc(tn, &parm->index, a, bind); 132 index = parm->index;
133 err = tcf_idr_check_alloc(tn, &index, a, bind);
132 if (err < 0) 134 if (err < 0)
133 return err; 135 return err;
134 exists = err; 136 exists = err;
@@ -144,7 +146,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
144 if (exists) 146 if (exists)
145 tcf_idr_release(*a, bind); 147 tcf_idr_release(*a, bind);
146 else 148 else
147 tcf_idr_cleanup(tn, parm->index); 149 tcf_idr_cleanup(tn, index);
148 return -EINVAL; 150 return -EINVAL;
149 } 151 }
150 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); 152 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -152,7 +154,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
152 if (exists) 154 if (exists)
153 tcf_idr_release(*a, bind); 155 tcf_idr_release(*a, bind);
154 else 156 else
155 tcf_idr_cleanup(tn, parm->index); 157 tcf_idr_cleanup(tn, index);
156 return -ERANGE; 158 return -ERANGE;
157 } 159 }
158 160
@@ -166,7 +168,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
166 if (exists) 168 if (exists)
167 tcf_idr_release(*a, bind); 169 tcf_idr_release(*a, bind);
168 else 170 else
169 tcf_idr_cleanup(tn, parm->index); 171 tcf_idr_cleanup(tn, index);
170 return -EPROTONOSUPPORT; 172 return -EPROTONOSUPPORT;
171 } 173 }
172 } else { 174 } else {
@@ -180,16 +182,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
180 if (exists) 182 if (exists)
181 tcf_idr_release(*a, bind); 183 tcf_idr_release(*a, bind);
182 else 184 else
183 tcf_idr_cleanup(tn, parm->index); 185 tcf_idr_cleanup(tn, index);
184 return -EINVAL; 186 return -EINVAL;
185 } 187 }
186 action = parm->v_action; 188 action = parm->v_action;
187 189
188 if (!exists) { 190 if (!exists) {
189 ret = tcf_idr_create(tn, parm->index, est, a, 191 ret = tcf_idr_create(tn, index, est, a,
190 &act_vlan_ops, bind, true); 192 &act_vlan_ops, bind, true);
191 if (ret) { 193 if (ret) {
192 tcf_idr_cleanup(tn, parm->index); 194 tcf_idr_cleanup(tn, index);
193 return ret; 195 return ret;
194 } 196 }
195 197
@@ -306,6 +308,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
306 return tcf_idr_search(tn, a, index); 308 return tcf_idr_search(tn, a, index);
307} 309}
308 310
311static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
312{
313 return nla_total_size(sizeof(struct tc_vlan))
314 + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
315 + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
316 + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
317}
318
309static struct tc_action_ops act_vlan_ops = { 319static struct tc_action_ops act_vlan_ops = {
310 .kind = "vlan", 320 .kind = "vlan",
311 .id = TCA_ID_VLAN, 321 .id = TCA_ID_VLAN,
@@ -315,6 +325,7 @@ static struct tc_action_ops act_vlan_ops = {
315 .init = tcf_vlan_init, 325 .init = tcf_vlan_init,
316 .cleanup = tcf_vlan_cleanup, 326 .cleanup = tcf_vlan_cleanup,
317 .walk = tcf_vlan_walker, 327 .walk = tcf_vlan_walker,
328 .get_fill_size = tcf_vlan_get_fill_size,
318 .lookup = tcf_vlan_search, 329 .lookup = tcf_vlan_search,
319 .size = sizeof(struct tcf_vlan), 330 .size = sizeof(struct tcf_vlan),
320}; 331};
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d144233423c5..efd3cfb80a2a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
691 if (!indr_dev->block) 691 if (!indr_dev->block)
692 return; 692 return;
693 693
694 bo.block = &indr_dev->block->flow_block;
695
694 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, 696 indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
695 &bo); 697 &bo);
696 tcf_block_setup(indr_dev->block, &bo); 698 tcf_block_setup(indr_dev->block, &bo);
@@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
775 .command = command, 777 .command = command,
776 .binder_type = ei->binder_type, 778 .binder_type = ei->binder_type,
777 .net = dev_net(dev), 779 .net = dev_net(dev),
780 .block = &block->flow_block,
778 .block_shared = tcf_block_shared(block), 781 .block_shared = tcf_block_shared(block),
779 .extack = extack, 782 .extack = extack,
780 }; 783 };
@@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
810 bo.net = dev_net(dev); 813 bo.net = dev_net(dev);
811 bo.command = command; 814 bo.command = command;
812 bo.binder_type = ei->binder_type; 815 bo.binder_type = ei->binder_type;
816 bo.block = &block->flow_block;
813 bo.block_shared = tcf_block_shared(block); 817 bo.block_shared = tcf_block_shared(block);
814 bo.extack = extack; 818 bo.extack = extack;
815 INIT_LIST_HEAD(&bo.cb_list); 819 INIT_LIST_HEAD(&bo.cb_list);
@@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
987 return ERR_PTR(-ENOMEM); 991 return ERR_PTR(-ENOMEM);
988 } 992 }
989 mutex_init(&block->lock); 993 mutex_init(&block->lock);
994 flow_block_init(&block->flow_block);
990 INIT_LIST_HEAD(&block->chain_list); 995 INIT_LIST_HEAD(&block->chain_list);
991 INIT_LIST_HEAD(&block->cb_list);
992 INIT_LIST_HEAD(&block->owner_list); 996 INIT_LIST_HEAD(&block->owner_list);
993 INIT_LIST_HEAD(&block->chain0.filter_chain_list); 997 INIT_LIST_HEAD(&block->chain0.filter_chain_list);
994 998
@@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block)
1514EXPORT_SYMBOL(tcf_block_put); 1518EXPORT_SYMBOL(tcf_block_put);
1515 1519
1516static int 1520static int
1517tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, 1521tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1518 void *cb_priv, bool add, bool offload_in_use, 1522 void *cb_priv, bool add, bool offload_in_use,
1519 struct netlink_ext_ack *extack) 1523 struct netlink_ext_ack *extack)
1520{ 1524{
@@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block,
1570 1574
1571 i++; 1575 i++;
1572 } 1576 }
1573 list_splice(&bo->cb_list, &block->cb_list); 1577 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1574 1578
1575 return 0; 1579 return 0;
1576 1580
@@ -2152,7 +2156,9 @@ replay:
2152 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2156 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2153 RTM_NEWTFILTER, false, rtnl_held); 2157 RTM_NEWTFILTER, false, rtnl_held);
2154 tfilter_put(tp, fh); 2158 tfilter_put(tp, fh);
2155 q->flags &= ~TCQ_F_CAN_BYPASS; 2159 /* q pointer is NULL for shared blocks */
2160 if (q)
2161 q->flags &= ~TCQ_F_CAN_BYPASS;
2156 } 2162 }
2157 2163
2158errout: 2164errout:
@@ -3156,7 +3162,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3156 if (block->nooffloaddevcnt && err_stop) 3162 if (block->nooffloaddevcnt && err_stop)
3157 return -EOPNOTSUPP; 3163 return -EOPNOTSUPP;
3158 3164
3159 list_for_each_entry(block_cb, &block->cb_list, list) { 3165 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3160 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3166 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3161 if (err) { 3167 if (err) {
3162 if (err_stop) 3168 if (err_stop)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 691f71830134..3f7a9c02b70c 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -651,7 +651,7 @@ skip:
651 } 651 }
652} 652}
653 653
654static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 654static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
655 void *cb_priv, struct netlink_ext_ack *extack) 655 void *cb_priv, struct netlink_ext_ack *extack)
656{ 656{
657 struct cls_bpf_head *head = rtnl_dereference(tp->root); 657 struct cls_bpf_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 38d6e85693fc..054123742e32 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1800 return NULL; 1800 return NULL;
1801} 1801}
1802 1802
1803static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1803static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1804 void *cb_priv, struct netlink_ext_ack *extack) 1804 void *cb_priv, struct netlink_ext_ack *extack)
1805{ 1805{
1806 struct tcf_block *block = tp->chain->block; 1806 struct tcf_block *block = tp->chain->block;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index a30d2f8feb32..455ea2793f9b 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -282,7 +282,7 @@ skip:
282 arg->count++; 282 arg->count++;
283} 283}
284 284
285static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 285static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
286 void *cb_priv, struct netlink_ext_ack *extack) 286 void *cb_priv, struct netlink_ext_ack *extack)
287{ 287{
288 struct cls_mall_head *head = rtnl_dereference(tp->root); 288 struct cls_mall_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index be9e46c77e8b..8614088edd1b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1152} 1152}
1153 1153
1154static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 1154static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1155 bool add, tc_setup_cb_t *cb, void *cb_priv, 1155 bool add, flow_setup_cb_t *cb, void *cb_priv,
1156 struct netlink_ext_ack *extack) 1156 struct netlink_ext_ack *extack)
1157{ 1157{
1158 struct tc_cls_u32_offload cls_u32 = {}; 1158 struct tc_cls_u32_offload cls_u32 = {};
@@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1172} 1172}
1173 1173
1174static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, 1174static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1175 bool add, tc_setup_cb_t *cb, void *cb_priv, 1175 bool add, flow_setup_cb_t *cb, void *cb_priv,
1176 struct netlink_ext_ack *extack) 1176 struct netlink_ext_ack *extack)
1177{ 1177{
1178 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 1178 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1213 return 0; 1213 return 0;
1214} 1214}
1215 1215
1216static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, 1216static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1217 void *cb_priv, struct netlink_ext_ack *extack) 1217 void *cb_priv, struct netlink_ext_ack *extack)
1218{ 1218{
1219 struct tc_u_common *tp_c = tp->data; 1219 struct tc_u_common *tp_c = tp->data;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 25ef172c23df..30169b3adbbb 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
71 struct Qdisc *sch = ctx; 71 struct Qdisc *sch = ctx;
72 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); 72 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
73 73
74 if (skb) 74 if (skb) {
75 sch->qstats.backlog -= qdisc_pkt_len(skb); 75 sch->qstats.backlog -= qdisc_pkt_len(skb);
76 76 prefetch(&skb->end); /* we'll need skb_shinfo() */
77 prefetch(&skb->end); /* we'll need skb_shinfo() */ 77 }
78 return skb; 78 return skb;
79} 79}
80 80
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index aa80cda36581..9d1f83b10c0a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -985,7 +985,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
985 return -EINVAL; 985 return -EINVAL;
986 986
987 kaddrs = memdup_user(addrs, addrs_size); 987 kaddrs = memdup_user(addrs, addrs_size);
988 if (unlikely(IS_ERR(kaddrs))) 988 if (IS_ERR(kaddrs))
989 return PTR_ERR(kaddrs); 989 return PTR_ERR(kaddrs);
990 990
991 /* Walk through the addrs buffer and count the number of addresses. */ 991 /* Walk through the addrs buffer and count the number of addresses. */
@@ -1315,7 +1315,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1315 return -EINVAL; 1315 return -EINVAL;
1316 1316
1317 kaddrs = memdup_user(addrs, addrs_size); 1317 kaddrs = memdup_user(addrs, addrs_size);
1318 if (unlikely(IS_ERR(kaddrs))) 1318 if (IS_ERR(kaddrs))
1319 return PTR_ERR(kaddrs); 1319 return PTR_ERR(kaddrs);
1320 1320
1321 /* Allow security module to validate connectx addresses. */ 1321 /* Allow security module to validate connectx addresses. */
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 302e355f2ebc..5b932583e407 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -263,7 +263,7 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
263 263
264 /* Check if socket is already active */ 264 /* Check if socket is already active */
265 rc = -EINVAL; 265 rc = -EINVAL;
266 if (sk->sk_state != SMC_INIT) 266 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
267 goto out_rel; 267 goto out_rel;
268 268
269 smc->clcsock->sk->sk_reuse = sk->sk_reuse; 269 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
@@ -1390,7 +1390,8 @@ static int smc_listen(struct socket *sock, int backlog)
1390 lock_sock(sk); 1390 lock_sock(sk);
1391 1391
1392 rc = -EINVAL; 1392 rc = -EINVAL;
1393 if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN)) 1393 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
1394 smc->connect_nonblock)
1394 goto out; 1395 goto out;
1395 1396
1396 rc = 0; 1397 rc = 0;
@@ -1518,7 +1519,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1518 goto out; 1519 goto out;
1519 1520
1520 if (msg->msg_flags & MSG_FASTOPEN) { 1521 if (msg->msg_flags & MSG_FASTOPEN) {
1521 if (sk->sk_state == SMC_INIT) { 1522 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
1522 smc_switch_to_fallback(smc); 1523 smc_switch_to_fallback(smc);
1523 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; 1524 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1524 } else { 1525 } else {
@@ -1732,14 +1733,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
1732 } 1733 }
1733 break; 1734 break;
1734 case TCP_NODELAY: 1735 case TCP_NODELAY:
1735 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) { 1736 if (sk->sk_state != SMC_INIT &&
1737 sk->sk_state != SMC_LISTEN &&
1738 sk->sk_state != SMC_CLOSED) {
1736 if (val && !smc->use_fallback) 1739 if (val && !smc->use_fallback)
1737 mod_delayed_work(system_wq, &smc->conn.tx_work, 1740 mod_delayed_work(system_wq, &smc->conn.tx_work,
1738 0); 1741 0);
1739 } 1742 }
1740 break; 1743 break;
1741 case TCP_CORK: 1744 case TCP_CORK:
1742 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) { 1745 if (sk->sk_state != SMC_INIT &&
1746 sk->sk_state != SMC_LISTEN &&
1747 sk->sk_state != SMC_CLOSED) {
1743 if (!val && !smc->use_fallback) 1748 if (!val && !smc->use_fallback)
1744 mod_delayed_work(system_wq, &smc->conn.tx_work, 1749 mod_delayed_work(system_wq, &smc->conn.tx_work,
1745 0); 1750 0);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d86030ef1232..e135d4e11231 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
55 int rep_type; 55 int rep_type;
56 int rep_size; 56 int rep_size;
57 int req_type; 57 int req_type;
58 int req_size;
58 struct net *net; 59 struct net *net;
59 struct sk_buff *rep; 60 struct sk_buff *rep;
60 struct tlv_desc *req; 61 struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
257 int err; 258 int err;
258 struct sk_buff *arg; 259 struct sk_buff *arg;
259 260
260 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) 261 if (msg->req_type && (!msg->req_size ||
262 !TLV_CHECK_TYPE(msg->req, msg->req_type)))
261 return -EINVAL; 263 return -EINVAL;
262 264
263 msg->rep = tipc_tlv_alloc(msg->rep_size); 265 msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
354{ 356{
355 int err; 357 int err;
356 358
357 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) 359 if (msg->req_type && (!msg->req_size ||
360 !TLV_CHECK_TYPE(msg->req, msg->req_type)))
358 return -EINVAL; 361 return -EINVAL;
359 362
360 err = __tipc_nl_compat_doit(cmd, msg); 363 err = __tipc_nl_compat_doit(cmd, msg);
@@ -1278,8 +1281,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1278 goto send; 1281 goto send;
1279 } 1282 }
1280 1283
1281 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); 1284 msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1282 if (!len || !TLV_OK(msg.req, len)) { 1285 if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
1283 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); 1286 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1284 err = -EOPNOTSUPP; 1287 err = -EOPNOTSUPP;
1285 goto send; 1288 goto send;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index dd8537f988c4..83ae41d7e554 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -485,9 +485,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
485 tsk_set_unreturnable(tsk, true); 485 tsk_set_unreturnable(tsk, true);
486 if (sock->type == SOCK_DGRAM) 486 if (sock->type == SOCK_DGRAM)
487 tsk_set_unreliable(tsk, true); 487 tsk_set_unreliable(tsk, true);
488 __skb_queue_head_init(&tsk->mc_method.deferredq);
489 } 488 }
490 489 __skb_queue_head_init(&tsk->mc_method.deferredq);
491 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " "); 490 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
492 return 0; 491 return 0;
493} 492}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index f345662890a6..ca8ac96d22a9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -476,7 +476,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
476 } 476 }
477} 477}
478 478
479/* tipc_toprsv_listener_data_ready - interrupt callback with connection request 479/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
480 * The queued job is launched into tipc_topsrv_accept() 480 * The queued job is launched into tipc_topsrv_accept()
481 */ 481 */
482static void tipc_topsrv_listener_data_ready(struct sock *sk) 482static void tipc_topsrv_listener_data_ready(struct sock *sk)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 4674e57e66b0..9cbbae606ced 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -261,24 +261,9 @@ void tls_ctx_free(struct tls_context *ctx)
261 kfree(ctx); 261 kfree(ctx);
262} 262}
263 263
264static void tls_sk_proto_close(struct sock *sk, long timeout) 264static void tls_sk_proto_cleanup(struct sock *sk,
265 struct tls_context *ctx, long timeo)
265{ 266{
266 struct tls_context *ctx = tls_get_ctx(sk);
267 long timeo = sock_sndtimeo(sk, 0);
268 void (*sk_proto_close)(struct sock *sk, long timeout);
269 bool free_ctx = false;
270
271 lock_sock(sk);
272 sk_proto_close = ctx->sk_proto_close;
273
274 if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
275 goto skip_tx_cleanup;
276
277 if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
278 free_ctx = true;
279 goto skip_tx_cleanup;
280 }
281
282 if (unlikely(sk->sk_write_pending) && 267 if (unlikely(sk->sk_write_pending) &&
283 !wait_on_pending_writer(sk, &timeo)) 268 !wait_on_pending_writer(sk, &timeo))
284 tls_handle_open_record(sk, 0); 269 tls_handle_open_record(sk, 0);
@@ -287,7 +272,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
287 if (ctx->tx_conf == TLS_SW) { 272 if (ctx->tx_conf == TLS_SW) {
288 kfree(ctx->tx.rec_seq); 273 kfree(ctx->tx.rec_seq);
289 kfree(ctx->tx.iv); 274 kfree(ctx->tx.iv);
290 tls_sw_free_resources_tx(sk); 275 tls_sw_release_resources_tx(sk);
291#ifdef CONFIG_TLS_DEVICE 276#ifdef CONFIG_TLS_DEVICE
292 } else if (ctx->tx_conf == TLS_HW) { 277 } else if (ctx->tx_conf == TLS_HW) {
293 tls_device_free_resources_tx(sk); 278 tls_device_free_resources_tx(sk);
@@ -295,26 +280,44 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
295 } 280 }
296 281
297 if (ctx->rx_conf == TLS_SW) 282 if (ctx->rx_conf == TLS_SW)
298 tls_sw_free_resources_rx(sk); 283 tls_sw_release_resources_rx(sk);
299 284
300#ifdef CONFIG_TLS_DEVICE 285#ifdef CONFIG_TLS_DEVICE
301 if (ctx->rx_conf == TLS_HW) 286 if (ctx->rx_conf == TLS_HW)
302 tls_device_offload_cleanup_rx(sk); 287 tls_device_offload_cleanup_rx(sk);
303
304 if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
305#else
306 {
307#endif 288#endif
308 tls_ctx_free(ctx); 289}
309 ctx = NULL;
310 }
311 290
312skip_tx_cleanup: 291static void tls_sk_proto_close(struct sock *sk, long timeout)
292{
293 struct inet_connection_sock *icsk = inet_csk(sk);
294 struct tls_context *ctx = tls_get_ctx(sk);
295 long timeo = sock_sndtimeo(sk, 0);
296 bool free_ctx;
297
298 if (ctx->tx_conf == TLS_SW)
299 tls_sw_cancel_work_tx(ctx);
300
301 lock_sock(sk);
302 free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
303
304 if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
305 tls_sk_proto_cleanup(sk, ctx, timeo);
306
307 write_lock_bh(&sk->sk_callback_lock);
308 if (free_ctx)
309 icsk->icsk_ulp_data = NULL;
310 sk->sk_prot = ctx->sk_proto;
311 write_unlock_bh(&sk->sk_callback_lock);
313 release_sock(sk); 312 release_sock(sk);
314 sk_proto_close(sk, timeout); 313 if (ctx->tx_conf == TLS_SW)
315 /* free ctx for TLS_HW_RECORD, used by tcp_set_state 314 tls_sw_free_ctx_tx(ctx);
316 * for sk->sk_prot->unhash [tls_hw_unhash] 315 if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
317 */ 316 tls_sw_strparser_done(ctx);
317 if (ctx->rx_conf == TLS_SW)
318 tls_sw_free_ctx_rx(ctx);
319 ctx->sk_proto_close(sk, timeout);
320
318 if (free_ctx) 321 if (free_ctx)
319 tls_ctx_free(ctx); 322 tls_ctx_free(ctx);
320} 323}
@@ -526,6 +529,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
526 { 529 {
527#endif 530#endif
528 rc = tls_set_sw_offload(sk, ctx, 1); 531 rc = tls_set_sw_offload(sk, ctx, 1);
532 if (rc)
533 goto err_crypto_info;
529 conf = TLS_SW; 534 conf = TLS_SW;
530 } 535 }
531 } else { 536 } else {
@@ -537,13 +542,13 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
537 { 542 {
538#endif 543#endif
539 rc = tls_set_sw_offload(sk, ctx, 0); 544 rc = tls_set_sw_offload(sk, ctx, 0);
545 if (rc)
546 goto err_crypto_info;
540 conf = TLS_SW; 547 conf = TLS_SW;
541 } 548 }
549 tls_sw_strparser_arm(sk, ctx);
542 } 550 }
543 551
544 if (rc)
545 goto err_crypto_info;
546
547 if (tx) 552 if (tx)
548 ctx->tx_conf = conf; 553 ctx->tx_conf = conf;
549 else 554 else
@@ -607,6 +612,7 @@ static struct tls_context *create_ctx(struct sock *sk)
607 ctx->setsockopt = sk->sk_prot->setsockopt; 612 ctx->setsockopt = sk->sk_prot->setsockopt;
608 ctx->getsockopt = sk->sk_prot->getsockopt; 613 ctx->getsockopt = sk->sk_prot->getsockopt;
609 ctx->sk_proto_close = sk->sk_prot->close; 614 ctx->sk_proto_close = sk->sk_prot->close;
615 ctx->unhash = sk->sk_prot->unhash;
610 return ctx; 616 return ctx;
611} 617}
612 618
@@ -764,7 +770,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
764 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; 770 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
765 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash; 771 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
766 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash; 772 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
767 prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
768} 773}
769 774
770static int tls_init(struct sock *sk) 775static int tls_init(struct sock *sk)
@@ -773,7 +778,7 @@ static int tls_init(struct sock *sk)
773 int rc = 0; 778 int rc = 0;
774 779
775 if (tls_hw_prot(sk)) 780 if (tls_hw_prot(sk))
776 goto out; 781 return 0;
777 782
778 /* The TLS ulp is currently supported only for TCP sockets 783 /* The TLS ulp is currently supported only for TCP sockets
779 * in ESTABLISHED state. 784 * in ESTABLISHED state.
@@ -784,21 +789,38 @@ static int tls_init(struct sock *sk)
784 if (sk->sk_state != TCP_ESTABLISHED) 789 if (sk->sk_state != TCP_ESTABLISHED)
785 return -ENOTSUPP; 790 return -ENOTSUPP;
786 791
792 tls_build_proto(sk);
793
787 /* allocate tls context */ 794 /* allocate tls context */
795 write_lock_bh(&sk->sk_callback_lock);
788 ctx = create_ctx(sk); 796 ctx = create_ctx(sk);
789 if (!ctx) { 797 if (!ctx) {
790 rc = -ENOMEM; 798 rc = -ENOMEM;
791 goto out; 799 goto out;
792 } 800 }
793 801
794 tls_build_proto(sk);
795 ctx->tx_conf = TLS_BASE; 802 ctx->tx_conf = TLS_BASE;
796 ctx->rx_conf = TLS_BASE; 803 ctx->rx_conf = TLS_BASE;
804 ctx->sk_proto = sk->sk_prot;
797 update_sk_prot(sk, ctx); 805 update_sk_prot(sk, ctx);
798out: 806out:
807 write_unlock_bh(&sk->sk_callback_lock);
799 return rc; 808 return rc;
800} 809}
801 810
811static void tls_update(struct sock *sk, struct proto *p)
812{
813 struct tls_context *ctx;
814
815 ctx = tls_get_ctx(sk);
816 if (likely(ctx)) {
817 ctx->sk_proto_close = p->close;
818 ctx->sk_proto = p;
819 } else {
820 sk->sk_prot = p;
821 }
822}
823
802void tls_register_device(struct tls_device *device) 824void tls_register_device(struct tls_device *device)
803{ 825{
804 spin_lock_bh(&device_spinlock); 826 spin_lock_bh(&device_spinlock);
@@ -819,6 +841,7 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
819 .name = "tls", 841 .name = "tls",
820 .owner = THIS_MODULE, 842 .owner = THIS_MODULE,
821 .init = tls_init, 843 .init = tls_init,
844 .update = tls_update,
822}; 845};
823 846
824static int __init tls_register(void) 847static int __init tls_register(void)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 53b4ad94e74a..91d21b048a9b 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2054,7 +2054,16 @@ static void tls_data_ready(struct sock *sk)
2054 } 2054 }
2055} 2055}
2056 2056
2057void tls_sw_free_resources_tx(struct sock *sk) 2057void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2058{
2059 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2060
2061 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2062 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2063 cancel_delayed_work_sync(&ctx->tx_work.work);
2064}
2065
2066void tls_sw_release_resources_tx(struct sock *sk)
2058{ 2067{
2059 struct tls_context *tls_ctx = tls_get_ctx(sk); 2068 struct tls_context *tls_ctx = tls_get_ctx(sk);
2060 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2069 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
2065 if (atomic_read(&ctx->encrypt_pending)) 2074 if (atomic_read(&ctx->encrypt_pending))
2066 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 2075 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2067 2076
2068 release_sock(sk);
2069 cancel_delayed_work_sync(&ctx->tx_work.work);
2070 lock_sock(sk);
2071
2072 /* Tx whatever records we can transmit and abandon the rest */
2073 tls_tx_records(sk, -1); 2077 tls_tx_records(sk, -1);
2074 2078
2075 /* Free up un-sent records in tx_list. First, free 2079 /* Free up un-sent records in tx_list. First, free
@@ -2092,6 +2096,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
2092 2096
2093 crypto_free_aead(ctx->aead_send); 2097 crypto_free_aead(ctx->aead_send);
2094 tls_free_open_rec(sk); 2098 tls_free_open_rec(sk);
2099}
2100
2101void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2102{
2103 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2095 2104
2096 kfree(ctx); 2105 kfree(ctx);
2097} 2106}
@@ -2110,25 +2119,40 @@ void tls_sw_release_resources_rx(struct sock *sk)
2110 skb_queue_purge(&ctx->rx_list); 2119 skb_queue_purge(&ctx->rx_list);
2111 crypto_free_aead(ctx->aead_recv); 2120 crypto_free_aead(ctx->aead_recv);
2112 strp_stop(&ctx->strp); 2121 strp_stop(&ctx->strp);
2113 write_lock_bh(&sk->sk_callback_lock); 2122 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2114 sk->sk_data_ready = ctx->saved_data_ready; 2123 * we still want to strp_stop(), but sk->sk_data_ready was
2115 write_unlock_bh(&sk->sk_callback_lock); 2124 * never swapped.
2116 release_sock(sk); 2125 */
2117 strp_done(&ctx->strp); 2126 if (ctx->saved_data_ready) {
2118 lock_sock(sk); 2127 write_lock_bh(&sk->sk_callback_lock);
2128 sk->sk_data_ready = ctx->saved_data_ready;
2129 write_unlock_bh(&sk->sk_callback_lock);
2130 }
2119 } 2131 }
2120} 2132}
2121 2133
2122void tls_sw_free_resources_rx(struct sock *sk) 2134void tls_sw_strparser_done(struct tls_context *tls_ctx)
2123{ 2135{
2124 struct tls_context *tls_ctx = tls_get_ctx(sk);
2125 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2136 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2126 2137
2127 tls_sw_release_resources_rx(sk); 2138 strp_done(&ctx->strp);
2139}
2140
2141void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2142{
2143 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2128 2144
2129 kfree(ctx); 2145 kfree(ctx);
2130} 2146}
2131 2147
2148void tls_sw_free_resources_rx(struct sock *sk)
2149{
2150 struct tls_context *tls_ctx = tls_get_ctx(sk);
2151
2152 tls_sw_release_resources_rx(sk);
2153 tls_sw_free_ctx_rx(tls_ctx);
2154}
2155
2132/* The work handler to transmit the encrypted records in tx_list */ 2156/* The work handler to transmit the encrypted records in tx_list */
2133static void tx_work_handler(struct work_struct *work) 2157static void tx_work_handler(struct work_struct *work)
2134{ 2158{
@@ -2137,11 +2161,17 @@ static void tx_work_handler(struct work_struct *work)
2137 struct tx_work, work); 2161 struct tx_work, work);
2138 struct sock *sk = tx_work->sk; 2162 struct sock *sk = tx_work->sk;
2139 struct tls_context *tls_ctx = tls_get_ctx(sk); 2163 struct tls_context *tls_ctx = tls_get_ctx(sk);
2140 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2164 struct tls_sw_context_tx *ctx;
2141 2165
2142 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2166 if (unlikely(!tls_ctx))
2143 return; 2167 return;
2144 2168
2169 ctx = tls_sw_ctx_tx(tls_ctx);
2170 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2171 return;
2172
2173 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2174 return;
2145 lock_sock(sk); 2175 lock_sock(sk);
2146 tls_tx_records(sk, -1); 2176 tls_tx_records(sk, -1);
2147 release_sock(sk); 2177 release_sock(sk);
@@ -2160,6 +2190,18 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2160 } 2190 }
2161} 2191}
2162 2192
2193void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2194{
2195 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2196
2197 write_lock_bh(&sk->sk_callback_lock);
2198 rx_ctx->saved_data_ready = sk->sk_data_ready;
2199 sk->sk_data_ready = tls_data_ready;
2200 write_unlock_bh(&sk->sk_callback_lock);
2201
2202 strp_check_rcv(&rx_ctx->strp);
2203}
2204
2163int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 2205int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2164{ 2206{
2165 struct tls_context *tls_ctx = tls_get_ctx(sk); 2207 struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2357,13 +2399,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2357 cb.parse_msg = tls_read_size; 2399 cb.parse_msg = tls_read_size;
2358 2400
2359 strp_init(&sw_ctx_rx->strp, sk, &cb); 2401 strp_init(&sw_ctx_rx->strp, sk, &cb);
2360
2361 write_lock_bh(&sk->sk_callback_lock);
2362 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
2363 sk->sk_data_ready = tls_data_ready;
2364 write_unlock_bh(&sk->sk_callback_lock);
2365
2366 strp_check_rcv(&sw_ctx_rx->strp);
2367 } 2402 }
2368 2403
2369 goto out; 2404 goto out;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index f2084e3f7aa4..9d864ebeb7b3 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -312,6 +312,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
312 lock_sock(sk); 312 lock_sock(sk);
313 hvs_do_close_lock_held(vsock_sk(sk), true); 313 hvs_do_close_lock_held(vsock_sk(sk), true);
314 release_sock(sk); 314 release_sock(sk);
315
316 /* Release the refcnt for the channel that's opened in
317 * hvs_open_connection().
318 */
319 sock_put(sk);
315} 320}
316 321
317static void hvs_open_connection(struct vmbus_channel *chan) 322static void hvs_open_connection(struct vmbus_channel *chan)
@@ -407,6 +412,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
407 } 412 }
408 413
409 set_per_channel_state(chan, conn_from_host ? new : sk); 414 set_per_channel_state(chan, conn_from_host ? new : sk);
415
416 /* This reference will be dropped by hvs_close_connection(). */
417 sock_hold(conn_from_host ? new : sk);
410 vmbus_set_chn_rescind_callback(chan, hvs_close_connection); 418 vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
411 419
412 /* Set the pending send size to max packet size to always get 420 /* Set the pending send size to max packet size to always get
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 45d9afcff6d5..32b3c719fdfc 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1410,10 +1410,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
1410 } 1410 }
1411 break; 1411 break;
1412 case NETDEV_PRE_UP: 1412 case NETDEV_PRE_UP:
1413 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) && 1413 if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
1414 !(wdev->iftype == NL80211_IFTYPE_AP_VLAN && 1414 wdev->use_4addr, 0))
1415 rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
1416 wdev->use_4addr))
1417 return notifier_from_errno(-EOPNOTSUPP); 1415 return notifier_from_errno(-EOPNOTSUPP);
1418 1416
1419 if (rfkill_blocked(rdev->rfkill)) 1417 if (rfkill_blocked(rdev->rfkill))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fc83dd179c1a..fd05ae1437a9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3484,9 +3484,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3484 return err; 3484 return err;
3485 } 3485 }
3486 3486
3487 if (!(rdev->wiphy.interface_modes & (1 << type)) && 3487 if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
3488 !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
3489 rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
3490 return -EOPNOTSUPP; 3488 return -EOPNOTSUPP;
3491 3489
3492 err = nl80211_parse_mon_options(rdev, type, info, &params); 3490 err = nl80211_parse_mon_options(rdev, type, info, &params);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1c39d6a2e850..d0e35b7b9e35 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1697,7 +1697,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
1697 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 1697 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1698 num_interfaces += params->iftype_num[iftype]; 1698 num_interfaces += params->iftype_num[iftype];
1699 if (params->iftype_num[iftype] > 0 && 1699 if (params->iftype_num[iftype] > 0 &&
1700 !(wiphy->software_iftypes & BIT(iftype))) 1700 !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
1701 used_iftypes |= BIT(iftype); 1701 used_iftypes |= BIT(iftype);
1702 } 1702 }
1703 1703
@@ -1719,7 +1719,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
1719 return -ENOMEM; 1719 return -ENOMEM;
1720 1720
1721 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 1721 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
1722 if (wiphy->software_iftypes & BIT(iftype)) 1722 if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
1723 continue; 1723 continue;
1724 for (j = 0; j < c->n_limits; j++) { 1724 for (j = 0; j < c->n_limits; j++) {
1725 all_iftypes |= limits[j].types; 1725 all_iftypes |= limits[j].types;
@@ -2072,3 +2072,26 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
2072 return max_vht_nss; 2072 return max_vht_nss;
2073} 2073}
2074EXPORT_SYMBOL(ieee80211_get_vht_max_nss); 2074EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
2075
2076bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
2077 bool is_4addr, u8 check_swif)
2078
2079{
2080 bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
2081
2082 switch (check_swif) {
2083 case 0:
2084 if (is_vlan && is_4addr)
2085 return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
2086 return wiphy->interface_modes & BIT(iftype);
2087 case 1:
2088 if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
2089 return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
2090 return wiphy->software_iftypes & BIT(iftype);
2091 default:
2092 break;
2093 }
2094
2095 return false;
2096}
2097EXPORT_SYMBOL(cfg80211_iftype_allowed);