Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_device.c                3
-rw-r--r--  net/bridge/br_input.c                 3
-rw-r--r--  net/ceph/messenger.c                  6
-rw-r--r--  net/ceph/osd_client.c                 5
-rw-r--r--  net/ceph/osdmap.c                    31
-rw-r--r--  net/core/dev_ioctl.c                  3
-rw-r--r--  net/core/fib_rules.c                  3
-rw-r--r--  net/core/filter.c                     2
-rw-r--r--  net/core/netpoll.c                    2
-rw-r--r--  net/core/rtnetlink.c                  4
-rw-r--r--  net/dccp/input.c                      2
-rw-r--r--  net/ipv4/fib_frontend.c               9
-rw-r--r--  net/ipv4/ip_output.c                  8
-rw-r--r--  net/ipv4/netfilter/nf_tables_arp.c    3
-rw-r--r--  net/ipv4/syncookies.c                 1
-rw-r--r--  net/ipv4/tcp_bbr.c                   49
-rw-r--r--  net/ipv4/udp.c                       13
-rw-r--r--  net/ipv6/output_core.c                8
-rw-r--r--  net/ipv6/syncookies.c                 1
-rw-r--r--  net/netfilter/core.c                147
-rw-r--r--  net/netfilter/nf_conntrack_expect.c   2
-rw-r--r--  net/netfilter/nf_nat_core.c          17
-rw-r--r--  net/netfilter/nfnetlink.c             6
-rw-r--r--  net/openvswitch/conntrack.c          51
-rw-r--r--  net/packet/af_packet.c                6
-rw-r--r--  net/rds/send.c                        6
-rw-r--r--  net/sched/act_api.c                   4
-rw-r--r--  net/sctp/sm_make_chunk.c              4
28 files changed, 165 insertions, 234 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f0f3447e8aa4..861ae2a165f4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -34,11 +34,11 @@ static struct lock_class_key bridge_netdev_addr_lock_key;
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
-	const unsigned char *dest = skb->data;
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
 	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
 	const struct nf_br_ops *nf_ops;
+	const unsigned char *dest;
 	u16 vid = 0;
 
 	rcu_read_lock();
@@ -61,6 +61,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
 		goto out;
 
+	dest = eth_hdr(skb)->h_dest;
 	if (is_broadcast_ether_addr(dest)) {
 		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
 	} else if (is_multicast_ether_addr(dest)) {
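Note on the two bridge hunks (this file and br_input.c below): br_allowed_ingress() can reallocate the skb data area during VLAN processing, so an Ethernet-destination pointer cached before that call may be left dangling; both changes re-read the pointer afterwards. A minimal userspace sketch of the same hazard, with realloc() standing in for the skb reallocation (illustration only, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *tmp;
	const char *dest;

	if (!buf)
		return 1;
	strcpy(buf, "dst-mac");
	dest = buf;			/* cached early, like the old code */
	tmp = realloc(buf, 1 << 20);	/* the buffer may move */
	if (!tmp) {
		free(buf);
		return 1;
	}
	buf = tmp;
	dest = buf;			/* the fix: re-read after the call */
	printf("%s\n", dest);
	free(buf);
	return 0;
}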
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 013f2290bfa5..7637f58c1226 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -131,11 +131,11 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-	const unsigned char *dest = eth_hdr(skb)->h_dest;
 	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
 	struct net_bridge_fdb_entry *dst = NULL;
 	struct net_bridge_mdb_entry *mdst;
 	bool local_rcv, mcast_hit = false;
+	const unsigned char *dest;
 	struct net_bridge *br;
 	u16 vid = 0;
 
@@ -153,6 +153,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
 	br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
 
 	local_rcv = !!(br->dev->flags & IFF_PROMISC);
+	dest = eth_hdr(skb)->h_dest;
 	if (is_multicast_ether_addr(dest)) {
 		/* by definition the broadcast is also a multicast address */
 		if (is_broadcast_ether_addr(dest)) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0c31035bbfee..b7cc615d42ef 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3203,8 +3203,10 @@ static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
 		return NULL;
 
 	data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
-	if (data)
-		data->type = type;
+	if (!data)
+		return NULL;
+
+	data->type = type;
 	INIT_LIST_HEAD(&data->links);
 
 	return data;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 86a9737d8e3f..901bb8221366 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -5310,7 +5310,10 @@ static int invalidate_authorizer(struct ceph_connection *con)
 
 static void osd_reencode_message(struct ceph_msg *msg)
 {
-	encode_request_finish(msg);
+	int type = le16_to_cpu(msg->hdr.type);
+
+	if (type == CEPH_MSG_OSD_OP)
+		encode_request_finish(msg);
 }
 
 static int osd_sign_message(struct ceph_msg *msg)
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 864789c5974e..64ae9f89773a 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -338,7 +338,7 @@ static void crush_finalize(struct crush_map *c)
 static struct crush_map *crush_decode(void *pbyval, void *end)
 {
 	struct crush_map *c;
-	int err = -EINVAL;
+	int err;
 	int i, j;
 	void **p = &pbyval;
 	void *start = pbyval;
@@ -407,7 +407,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 			size = sizeof(struct crush_bucket_straw2);
 			break;
 		default:
-			err = -EINVAL;
 			goto bad;
 		}
 		BUG_ON(size == 0);
@@ -439,31 +438,31 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 			err = crush_decode_uniform_bucket(p, end,
 				  (struct crush_bucket_uniform *)b);
 			if (err < 0)
-				goto bad;
+				goto fail;
 			break;
 		case CRUSH_BUCKET_LIST:
 			err = crush_decode_list_bucket(p, end,
 			       (struct crush_bucket_list *)b);
 			if (err < 0)
-				goto bad;
+				goto fail;
 			break;
 		case CRUSH_BUCKET_TREE:
 			err = crush_decode_tree_bucket(p, end,
 				(struct crush_bucket_tree *)b);
 			if (err < 0)
-				goto bad;
+				goto fail;
 			break;
 		case CRUSH_BUCKET_STRAW:
 			err = crush_decode_straw_bucket(p, end,
 				 (struct crush_bucket_straw *)b);
 			if (err < 0)
-				goto bad;
+				goto fail;
 			break;
 		case CRUSH_BUCKET_STRAW2:
 			err = crush_decode_straw2_bucket(p, end,
 				  (struct crush_bucket_straw2 *)b);
 			if (err < 0)
-				goto bad;
+				goto fail;
 			break;
 		}
 	}
@@ -474,7 +473,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		u32 yes;
 		struct crush_rule *r;
 
-		err = -EINVAL;
 		ceph_decode_32_safe(p, end, yes, bad);
 		if (!yes) {
 			dout("crush_decode NO rule %d off %x %p to %p\n",
@@ -489,7 +487,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		/* len */
 		ceph_decode_32_safe(p, end, yes, bad);
 #if BITS_PER_LONG == 32
-		err = -EINVAL;
 		if (yes > (ULONG_MAX - sizeof(*r))
 		    / sizeof(struct crush_rule_step))
 			goto bad;
@@ -557,7 +554,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 	if (*p != end) {
 		err = decode_choose_args(p, end, c);
 		if (err)
-			goto bad;
+			goto fail;
 	}
 
 done:
@@ -567,10 +564,14 @@ done:
 
 badmem:
 	err = -ENOMEM;
-bad:
+fail:
 	dout("crush_decode fail %d\n", err);
 	crush_destroy(c);
 	return ERR_PTR(err);
+
+bad:
+	err = -EINVAL;
+	goto fail;
 }
 
 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
@@ -1399,7 +1400,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
 		return ERR_PTR(-EINVAL);
 
 	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
-	pg = kzalloc(sizeof(*pg) + 2 * len * sizeof(u32), GFP_NOIO);
+	pg = alloc_pg_mapping(2 * len * sizeof(u32));
 	if (!pg)
 		return ERR_PTR(-ENOMEM);
 
@@ -1544,7 +1545,7 @@ static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
 	if (struct_v >= 3) {
 		/* erasure_code_profiles */
 		ceph_decode_skip_map_of_map(p, end, string, string, string,
-					    bad);
+					    e_inval);
 	}
 
 	if (struct_v >= 4) {
@@ -1825,9 +1826,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	if (struct_v >= 3) {
 		/* new_erasure_code_profiles */
 		ceph_decode_skip_map_of_map(p, end, string, string, string,
-					    bad);
+					    e_inval);
 		/* old_erasure_code_profiles */
-		ceph_decode_skip_set(p, end, string, bad);
+		ceph_decode_skip_set(p, end, string, e_inval);
 	}
 
 	if (struct_v >= 4) {
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 82fd4c9c4a1b..06b147d7d9e2 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
+	ifr.ifr_name[IFNAMSIZ-1] = 0;
 
 	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
 	if (error)
@@ -424,6 +425,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		if (copy_from_user(&iwr, arg, sizeof(iwr)))
 			return -EFAULT;
 
+		iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+
 		return wext_handle_ioctl(net, &iwr, cmd, arg);
 	}
 
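Both dev_ioctl.c hunks force NUL termination on interface names copied from userspace, since copy_from_user() gives no guarantee the name is terminated. A small userspace sketch of the pattern (illustration only; get_name() is a made-up helper, IFNAMSIZ matches the kernel's value of 16):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* hypothetical stand-in for the kernel's copy-then-terminate sequence */
static void get_name(char dst[IFNAMSIZ], const char *untrusted)
{
	memcpy(dst, untrusted, IFNAMSIZ);
	dst[IFNAMSIZ - 1] = '\0';	/* the fix: force termination */
}

int main(void)
{
	char evil[IFNAMSIZ];
	char name[IFNAMSIZ];

	memset(evil, 'A', sizeof(evil));	/* no terminator at all */
	get_name(name, evil);
	printf("%zu\n", strlen(name));		/* bounded: prints 15 */
	return 0;
}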
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index a0093e1b0235..fdcb1bcd2afa 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -400,6 +400,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 		err = -ENOMEM;
 		goto errout;
 	}
+	refcount_set(&rule->refcnt, 1);
 	rule->fr_net = net;
 
 	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
@@ -517,8 +518,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 		last = r;
 	}
 
-	refcount_set(&rule->refcnt, 1);
-
 	if (last)
 		list_add_rcu(&rule->list, &last->list);
 	else
diff --git a/net/core/filter.c b/net/core/filter.c
index c7f737058d89..f44fc22fd45a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2248,7 +2248,7 @@ static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
 		       bpf_skb_net_grow(skb, len_diff_abs);
 
 	bpf_compute_data_end(skb);
-	return 0;
+	return ret;
 }
 
 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d3408a693166..8357f164c660 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 		struct sk_buff *skb = clist;
 		clist = clist->next;
 		if (!skb_irq_freeable(skb)) {
-			refcount_inc(&skb->users);
+			refcount_set(&skb->users, 1);
 			dev_kfree_skb_any(skb); /* put this one back */
 		} else {
 			__kfree_skb(skb);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1ba90980be1..9201e3621351 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2031,7 +2031,8 @@ static int do_setlink(const struct sk_buff *skb,
 		struct sockaddr *sa;
 		int len;
 
-		len = sizeof(sa_family_t) + dev->addr_len;
+		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+						  sizeof(*sa));
 		sa = kmalloc(len, GFP_KERNEL);
 		if (!sa) {
 			err = -ENOMEM;
@@ -4241,6 +4242,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 
 	switch (event) {
 	case NETDEV_REBOOT:
+	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	case NETDEV_BONDING_FAILOVER:
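The first rtnetlink.c hunk sizes the sockaddr buffer as max(dev->addr_len, sizeof(struct sockaddr)) because consumers of the address may read a full struct sockaddr even when the link-layer address is shorter. A userspace sketch of that sizing rule (illustration only; alloc_hwaddr() is a made-up helper):

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

static struct sockaddr *alloc_hwaddr(const unsigned char *addr, size_t addr_len)
{
	/* never allocate less than a full struct sockaddr */
	size_t body = addr_len > sizeof(struct sockaddr) ?
		      addr_len : sizeof(struct sockaddr);
	struct sockaddr *sa = malloc(sizeof(sa->sa_family) + body);

	if (sa) {
		sa->sa_family = AF_UNSPEC;
		memcpy(sa->sa_data, addr, addr_len);
	}
	return sa;
}

int main(void)
{
	static const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };
	struct sockaddr *sa = alloc_hwaddr(mac, sizeof(mac));

	free(sa);
	return 0;
}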
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4a05d7876850..fa6be9750bb4 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -126,7 +126,7 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 
 static u16 dccp_reset_code_convert(const u8 code)
 {
-	const u16 error_code[] = {
+	static const u16 error_code[] = {
 	[DCCP_RESET_CODE_CLOSED]	= 0,	/* normal termination */
 	[DCCP_RESET_CODE_UNSPECIFIED]	= 0,	/* nothing known */
 	[DCCP_RESET_CODE_ABORTED]	= ECONNRESET,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e678fa892dd..044d2a159a3c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1334,13 +1334,14 @@ static struct pernet_operations fib_net_ops = {
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+	fib_trie_init();
 
 	register_pernet_subsys(&fib_net_ops);
+
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-	fib_trie_init();
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7eb252dcecee..50c74cd890bc 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -599,6 +599,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	hlen = iph->ihl * 4;
 	mtu = mtu - hlen;	/* Size of data space */
 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
+	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 
 	/* When frag_list is given, use it. First, check its validity:
 	 * some transformers could create wrong frag_list or break existing
@@ -614,14 +615,15 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		if (first_len - hlen > mtu ||
 		    ((first_len - hlen) & 7) ||
 		    ip_is_fragment(iph) ||
-		    skb_cloned(skb))
+		    skb_cloned(skb) ||
+		    skb_headroom(skb) < ll_rs)
 			goto slow_path;
 
 		skb_walk_frags(skb, frag) {
 			/* Correct geometry. */
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
-			    skb_headroom(frag) < hlen)
+			    skb_headroom(frag) < hlen + ll_rs)
 				goto slow_path_clean;
 
 			/* Partially cloned skb? */
@@ -711,8 +713,6 @@ slow_path:
 	left = skb->len - hlen;		/* Space per frame */
 	ptr = hlen;		/* Where to start from */
 
-	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
-
 	/*
 	 *	Fragment the datagram.
 	 */
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 805c8ddfe860..4bbc273b45e8 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -72,8 +72,7 @@ static const struct nf_chain_type filter_arp = {
 	.family		= NFPROTO_ARP,
 	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_ARP_IN) |
-			  (1 << NF_ARP_OUT) |
-			  (1 << NF_ARP_FORWARD),
+			  (1 << NF_ARP_OUT),
 };
 
 static int __init nf_tables_arp_init(void)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0905cf04c2a4..03ad8778c395 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -335,6 +335,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
+	treq->txhash = net_tx_rndhash();
 	req->mss = mss;
 	ireq->ir_num = ntohs(th->dest);
 	ireq->ir_rmt_port = th->source;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index dbcc9352a48f..69ee877574d0 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -112,7 +112,8 @@ struct bbr {
 		cwnd_gain:10,	/* current gain for setting cwnd */
 		full_bw_cnt:3,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
-		unused_b:6;
+		has_seen_rtt:1, /* have we seen an RTT sample yet? */
+		unused_b:5;
 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 };
@@ -211,6 +212,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 	return rate >> BW_SCALE;
 }
 
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+	u64 rate = bw;
+
+	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+	u64 bw;
+	u32 rtt_us;
+
+	if (tp->srtt_us) {		/* any RTT sample yet? */
+		rtt_us = max(tp->srtt_us >> 3, 1U);
+		bbr->has_seen_rtt = 1;
+	} else {			/* no RTT sample yet */
+		rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
+	}
+	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	do_div(bw, rtt_us);
+	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -220,12 +250,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
  */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 rate = bw;
+	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 
-	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+		bbr_init_pacing_rate_from_rtt(sk);
+	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 		sk->sk_pacing_rate = rate;
 }
 
@@ -798,7 +829,6 @@ static void bbr_init(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 bw;
 
 	bbr->prior_cwnd = 0;
 	bbr->tso_segs_goal = 0;	/* default segs per skb until first ACK */
@@ -814,11 +844,8 @@ static void bbr_init(struct sock *sk)
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
-	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
-	sk->sk_pacing_rate = 0;	/* force an update of sk_pacing_rate */
-	bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+	bbr->has_seen_rtt = 0;
+	bbr_init_pacing_rate_from_rtt(sk);
 
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
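The BBR refactor above moves the initial pacing-rate computation into bbr_init_pacing_rate_from_rtt() so it can be redone once a first real RTT sample arrives (tracked by has_seen_rtt). The formula is pacing_rate = high_gain * cwnd * MSS / RTT, with high_gain about 2.885. A worked example with made-up but typical numbers (illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cwnd = 10;		/* initial window, packets */
	uint64_t mss = 1448;		/* bytes per packet */
	uint64_t rtt_us = 10000;	/* 10 ms smoothed RTT */

	/* bytes per second before gain */
	uint64_t rate = cwnd * mss * 1000000 / rtt_us;

	/* bbr_high_gain is about 2885/1000 in the kernel */
	rate = rate * 2885 / 1000;

	printf("initial pacing rate: %llu bytes/sec\n",
	       (unsigned long long)rate);	/* ~4.18 MB/s */
	return 0;
}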
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 25294d43e147..b057653ceca9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1388,6 +1388,11 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 		unlock_sock_fast(sk, slow);
 	}
 
+	/* we cleared the head states previously only if the skb lacks any IP
+	 * options, see __udp_queue_rcv_skb().
+	 */
+	if (unlikely(IPCB(skb)->opt.optlen > 0))
+		skb_release_head_state(skb);
 	consume_stateless_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_consume_udp);
@@ -1779,8 +1784,12 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}
 
-	/* clear all pending head states while they are hot in the cache */
-	skb_release_head_state(skb);
+	/* At recvmsg() time we need skb->dst to process IP options-related
+	 * cmsg, elsewhere can we clear all pending head states while they are
+	 * hot in the cache
+	 */
+	if (likely(IPCB(skb)->opt.optlen == 0))
+		skb_release_head_state(skb);
 
 	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index e9065b8d3af8..abb2c307fbe8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
-	u16 offset = sizeof(struct ipv6hdr);
+	unsigned int offset = sizeof(struct ipv6hdr);
 	unsigned int packet_len = skb_tail_pointer(skb) -
 		skb_network_header(skb);
 	int found_rhdr = 0;
@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
 	while (offset <= packet_len) {
 		struct ipv6_opt_hdr *exthdr;
+		unsigned int len;
 
 		switch (**nexthdr) {
 
@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
-		offset += ipv6_optlen(exthdr);
+		len = ipv6_optlen(exthdr);
+		if (len + offset >= IPV6_MAXPLEN)
+			return -EINVAL;
+		offset += len;
 		*nexthdr = &exthdr->nexthdr;
 	}
 
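The ip6_find_1stfragopt() fix widens offset and bounds the running sum by IPV6_MAXPLEN: ipv6_optlen() yields up to 8 * 256 = 2048 bytes per extension header, so a crafted chain could previously wrap a 16-bit offset. A self-contained sketch of the bounded walk (illustration only; advance() is a made-up helper):

#include <stdio.h>
#include <stdint.h>

#define IPV6_MAXPLEN 65535

static int advance(unsigned int *offset, uint8_t hdrlen_field)
{
	/* same arithmetic as the kernel's ipv6_optlen() macro */
	unsigned int len = ((unsigned int)hdrlen_field + 1) << 3;

	if (len + *offset >= IPV6_MAXPLEN)	/* the added bound */
		return -1;
	*offset += len;
	return 0;
}

int main(void)
{
	unsigned int offset = 40;	/* sizeof(struct ipv6hdr) */
	int i;

	for (i = 0; i < 64; i++)	/* crafted chain of maximal headers */
		if (advance(&offset, 255) < 0) {
			printf("rejected at offset %u\n", offset);
			return 0;
		}
	return 0;
}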
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7b75b0620730..4e7817abc0b9 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -216,6 +216,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
+	treq->txhash = net_tx_rndhash();
 
 	/*
 	 * We need to lookup the dst_entry to get the correct window size.
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 552d606e57ca..974cf2a3795a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -227,114 +227,6 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 }
 EXPORT_SYMBOL(nf_unregister_net_hooks);
 
-static LIST_HEAD(nf_hook_list);
-
-static int _nf_register_hook(struct nf_hook_ops *reg)
-{
-	struct net *net, *last;
-	int ret;
-
-	for_each_net(net) {
-		ret = nf_register_net_hook(net, reg);
-		if (ret && ret != -ENOENT)
-			goto rollback;
-	}
-	list_add_tail(&reg->list, &nf_hook_list);
-
-	return 0;
-rollback:
-	last = net;
-	for_each_net(net) {
-		if (net == last)
-			break;
-		nf_unregister_net_hook(net, reg);
-	}
-	return ret;
-}
-
-int nf_register_hook(struct nf_hook_ops *reg)
-{
-	int ret;
-
-	rtnl_lock();
-	ret = _nf_register_hook(reg);
-	rtnl_unlock();
-
-	return ret;
-}
-EXPORT_SYMBOL(nf_register_hook);
-
-static void _nf_unregister_hook(struct nf_hook_ops *reg)
-{
-	struct net *net;
-
-	list_del(&reg->list);
-	for_each_net(net)
-		nf_unregister_net_hook(net, reg);
-}
-
-void nf_unregister_hook(struct nf_hook_ops *reg)
-{
-	rtnl_lock();
-	_nf_unregister_hook(reg);
-	rtnl_unlock();
-}
-EXPORT_SYMBOL(nf_unregister_hook);
-
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-	unsigned int i;
-	int err = 0;
-
-	for (i = 0; i < n; i++) {
-		err = nf_register_hook(&reg[i]);
-		if (err)
-			goto err;
-	}
-	return err;
-
-err:
-	if (i > 0)
-		nf_unregister_hooks(reg, i);
-	return err;
-}
-EXPORT_SYMBOL(nf_register_hooks);
-
-/* Caller MUST take rtnl_lock() */
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-	unsigned int i;
-	int err = 0;
-
-	for (i = 0; i < n; i++) {
-		err = _nf_register_hook(&reg[i]);
-		if (err)
-			goto err;
-	}
-	return err;
-
-err:
-	if (i > 0)
-		_nf_unregister_hooks(reg, i);
-	return err;
-}
-EXPORT_SYMBOL(_nf_register_hooks);
-
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-	while (n-- > 0)
-		nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(nf_unregister_hooks);
-
-/* Caller MUST take rtnl_lock */
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
-{
-	while (n-- > 0)
-		_nf_unregister_hook(&reg[n]);
-}
-EXPORT_SYMBOL(_nf_unregister_hooks);
-
 /* Returns 1 if okfn() needs to be executed by the caller,
  * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
@@ -450,40 +342,9 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
-static int nf_register_hook_list(struct net *net)
-{
-	struct nf_hook_ops *elem;
-	int ret;
-
-	rtnl_lock();
-	list_for_each_entry(elem, &nf_hook_list, list) {
-		ret = nf_register_net_hook(net, elem);
-		if (ret && ret != -ENOENT)
-			goto out_undo;
-	}
-	rtnl_unlock();
-	return 0;
-
-out_undo:
-	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
-		nf_unregister_net_hook(net, elem);
-	rtnl_unlock();
-	return ret;
-}
-
-static void nf_unregister_hook_list(struct net *net)
-{
-	struct nf_hook_ops *elem;
-
-	rtnl_lock();
-	list_for_each_entry(elem, &nf_hook_list, list)
-		nf_unregister_net_hook(net, elem);
-	rtnl_unlock();
-}
-
 static int __net_init netfilter_net_init(struct net *net)
 {
-	int i, h, ret;
+	int i, h;
 
 	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
 		for (h = 0; h < NF_MAX_HOOKS; h++)
@@ -500,16 +361,12 @@ static int __net_init netfilter_net_init(struct net *net)
 		return -ENOMEM;
 	}
 #endif
-	ret = nf_register_hook_list(net);
-	if (ret)
-		remove_proc_entry("netfilter", net->proc_net);
 
-	return ret;
+	return 0;
 }
 
 static void __net_exit netfilter_net_exit(struct net *net)
{
-	nf_unregister_hook_list(net);
 	remove_proc_entry("netfilter", net->proc_net);
 }
 
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e03d16ed550d..899c2c36da13 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 	h = nf_ct_expect_dst_hash(net, &expect->tuple);
 	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
 		if (expect_matches(i, expect)) {
-			if (nf_ct_remove_expect(expect))
+			if (nf_ct_remove_expect(i))
 				break;
 		} else if (expect_clash(i, expect)) {
 			ret = -EBUSY;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 832c5a08d9a5..eb541786ccb7 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
 		.tuple = tuple,
 		.zone = zone
 	};
-	struct rhlist_head *hl;
+	struct rhlist_head *hl, *h;
 
 	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
 			     nf_nat_bysource_params);
-	if (!hl)
-		return 0;
 
-	ct = container_of(hl, typeof(*ct), nat_bysource);
+	rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+		nf_ct_invert_tuplepr(result,
+				     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+		result->dst = tuple->dst;
 
-	nf_ct_invert_tuplepr(result,
-			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-	result->dst = tuple->dst;
+		if (in_range(l3proto, l4proto, result, range))
+			return 1;
+	}
 
-	return in_range(l3proto, l4proto, result, range);
+	return 0;
 }
 
 /* For [FUTURE] fragmentation handling, we want the least-used
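The nf_nat_core.c change matters because rhltable_lookup() returns a chain of all entries hashing to the same source tuple; the old code inspected only the chain head and could miss a usable mapping further down. A generic sketch of the head-only vs. scan-all difference (illustration only; the struct and in_range() here are made up, not the kernel's):

#include <stdio.h>

struct entry {
	int value;
	struct entry *next;
};

static int in_range(int v)
{
	return v >= 10 && v < 20;
}

static const struct entry *find_in_range(const struct entry *head)
{
	const struct entry *e;

	for (e = head; e; e = e->next)	/* the fix: scan every duplicate */
		if (in_range(e->value))
			return e;
	return NULL;	/* old behaviour: only 'head' was ever checked */
}

int main(void)
{
	struct entry c = { 15, NULL };
	struct entry b = { 30, &c };
	struct entry a = { 5, &b };
	const struct entry *hit = find_in_range(&a);

	printf("found: %d\n", hit ? hit->value : -1);	/* prints 15 */
	return 0;
}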
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 92b05e188fd1..733d3e4a30d8 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -472,8 +472,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (msglen > skb->len)
 		msglen = skb->len;
 
-	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
-	    skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
 		return;
 
 	err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
@@ -500,7 +499,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+	if (skb->len < NLMSG_HDRLEN ||
+	    nlh->nlmsg_len < NLMSG_HDRLEN ||
 	    skb->len < nlh->nlmsg_len)
 		return;
 
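Both nfnetlink.c hunks tighten the same invariant: nlmsg_len comes from userspace, so the true skb length must be validated before (and independently of) anything derived from the header. A userspace sketch of the check order (illustration only; batch_msg_ok() and the size constants are stand-ins):

#include <stdio.h>
#include <string.h>

#define NLMSG_HDR_LEN	16	/* aligned sizeof(struct nlmsghdr) */
#define NFGENMSG_LEN	4	/* sizeof(struct nfgenmsg) */

static int batch_msg_ok(const unsigned char *buf, size_t buf_len)
{
	unsigned int nlmsg_len;

	/* check the real buffer length first ... */
	if (buf_len < NLMSG_HDR_LEN + NFGENMSG_LEN)
		return 0;
	/* ... only then trust the length field inside it */
	memcpy(&nlmsg_len, buf, sizeof(nlmsg_len));
	return nlmsg_len >= NLMSG_HDR_LEN && nlmsg_len <= buf_len;
}

int main(void)
{
	unsigned char small[8] = { 0 };

	printf("%d\n", batch_msg_ok(small, sizeof(small)));	/* 0: too short */
	return 0;
}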
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 08679ebb3068..e3c4c6c3fef7 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -629,6 +629,34 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 	return ct;
 }
 
+static
+struct nf_conn *ovs_ct_executed(struct net *net,
+				const struct sw_flow_key *key,
+				const struct ovs_conntrack_info *info,
+				struct sk_buff *skb,
+				bool *ct_executed)
+{
+	struct nf_conn *ct = NULL;
+
+	/* If no ct, check if we have evidence that an existing conntrack entry
+	 * might be found for this skb. This happens when we lose a skb->_nfct
+	 * due to an upcall, or if the direction is being forced. If the
+	 * connection was not confirmed, it is not cached and needs to be run
+	 * through conntrack again.
+	 */
+	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
+		       !(key->ct_state & OVS_CS_F_INVALID) &&
+		       (key->ct_zone == info->zone.id);
+
+	if (*ct_executed || (!key->ct_state && info->force)) {
+		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
+					  !!(key->ct_state &
+					  OVS_CS_F_NAT_MASK));
+	}
+
+	return ct;
+}
+
 /* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
 static bool skb_nfct_cached(struct net *net,
 			    const struct sw_flow_key *key,
@@ -637,24 +665,17 @@ static bool skb_nfct_cached(struct net *net,
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
+	bool ct_executed = true;
 
 	ct = nf_ct_get(skb, &ctinfo);
-	/* If no ct, check if we have evidence that an existing conntrack entry
-	 * might be found for this skb. This happens when we lose a skb->_nfct
-	 * due to an upcall. If the connection was not confirmed, it is not
-	 * cached and needs to be run through conntrack again.
-	 */
-	if (!ct && key->ct_state & OVS_CS_F_TRACKED &&
-	    !(key->ct_state & OVS_CS_F_INVALID) &&
-	    key->ct_zone == info->zone.id) {
-		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
-					  !!(key->ct_state
-					     & OVS_CS_F_NAT_MASK));
-		if (ct)
-			nf_ct_get(skb, &ctinfo);
-	}
 	if (!ct)
+		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);
+
+	if (ct)
+		nf_ct_get(skb, &ctinfo);
+	else
 		return false;
+
 	if (!net_eq(net, read_pnet(&ct->ct_net)))
 		return false;
 	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
@@ -679,7 +700,7 @@ static bool skb_nfct_cached(struct net *net,
 		return false;
 	}
 
-	return true;
+	return ct_executed;
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e3beb28203eb..008bb34ee324 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -214,6 +214,7 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 		struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
 
 struct packet_skb_cb {
 	union {
@@ -260,6 +261,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	if (skb != orig_skb)
 		goto drop;
 
+	packet_pick_tx_queue(dev, skb);
 	txq = skb_get_tx_queue(dev, skb);
 
 	local_bh_disable();
@@ -2747,8 +2749,6 @@ tpacket_error:
 			goto tpacket_error;
 		}
 
-		packet_pick_tx_queue(dev, skb);
-
 		skb->destructor = tpacket_destruct_skb;
 		__packet_set_status(po, ph, TP_STATUS_SENDING);
 		packet_inc_pending(&po->tx_ring);
@@ -2931,8 +2931,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	skb->priority = sk->sk_priority;
 	skb->mark = sockc.mark;
 
-	packet_pick_tx_queue(dev, skb);
-
 	if (po->has_vnet_hdr) {
 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
 		if (err)
diff --git a/net/rds/send.c b/net/rds/send.c
index e81aa176f4e2..41b9f0f5bb9c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -170,8 +170,8 @@ restart:
 	 * The acquire_in_xmit() check above ensures that only one
 	 * caller can increment c_send_gen at any time.
 	 */
-	cp->cp_send_gen++;
-	send_gen = cp->cp_send_gen;
+	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
+	WRITE_ONCE(cp->cp_send_gen, send_gen);
 
 	/*
 	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
@@ -431,7 +431,7 @@ over_batch:
 	smp_mb();
 	if ((test_bit(0, &conn->c_map_queued) ||
 	     !list_empty(&cp->cp_send_queue)) &&
-	    send_gen == cp->cp_send_gen) {
+	    send_gen == READ_ONCE(cp->cp_send_gen)) {
 		rds_stats_inc(s_send_lock_queue_raced);
 		if (batch_count < send_batch_count)
 			goto restart;
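The rds/send.c change replaces plain reads and writes of cp_send_gen outside the lock with READ_ONCE()/WRITE_ONCE(), which force exactly one untorn access and stop the compiler from re-reading the variable between the two uses. A minimal userspace equivalent of those macros (illustration only; the kernel's versions live in include/linux/compiler.h):

#include <stdio.h>

#define READ_ONCE(x)	 (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned int cp_send_gen;	/* stand-in for the shared counter */

int main(void)
{
	unsigned int send_gen;

	send_gen = READ_ONCE(cp_send_gen) + 1;	/* exactly one load */
	WRITE_ONCE(cp_send_gen, send_gen);	/* exactly one store */
	printf("%u\n", READ_ONCE(cp_send_gen));
	return 0;
}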
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index aed6cf2e9fd8..f2e9ed34a963 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -835,7 +835,7 @@ out_nlmsg_trim:
 }
 
 static int
-act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
+tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
 	       struct list_head *actions, int event)
 {
 	struct sk_buff *skb;
@@ -1018,7 +1018,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 	}
 
 	if (event == RTM_GETACTION)
-		ret = act_get_notify(net, portid, n, &actions, event);
+		ret = tcf_get_notify(net, portid, n, &actions, event);
 	else { /* delete */
 		ret = tcf_del_notify(net, n, &actions, portid);
 		if (ret)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4e16b02ed832..6110447fe51d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	sctp_adaptation_ind_param_t aiparam;
 	sctp_supported_ext_param_t ext_param;
 	int num_ext = 0;
-	__u8 extensions[3];
+	__u8 extensions[4];
 	struct sctp_paramhdr *auth_chunks = NULL,
 			*auth_hmacs = NULL;
 
@@ -396,7 +396,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	sctp_adaptation_ind_param_t aiparam;
 	sctp_supported_ext_param_t ext_param;
 	int num_ext = 0;
-	__u8 extensions[3];
+	__u8 extensions[4];
 	struct sctp_paramhdr *auth_chunks = NULL,
 			*auth_hmacs = NULL,
 			*auth_random = NULL;