author    Mark Brown <broonie@kernel.org> 2015-08-21 18:26:37 -0400
committer Mark Brown <broonie@kernel.org> 2015-08-21 18:26:37 -0400
commit    d033de5ceee8333e4fee3d59a956244d3736102a (patch)
tree      5bdebcac18ed67890571632570c324e0c41a7223 /net
parent    1a8e7fab70c8d7cad2e606e7b21d46e42e51c2fd (diff)
parent    f7644cbfcdf03528f0f450f3940c4985b2291f49 (diff)
Merge tag 'v4.2-rc6' into asoc-topology
Linux 4.2-rc6
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_virtio.c | 1
-rw-r--r--  net/ax25/ax25_subr.c | 1
-rw-r--r--  net/bluetooth/smp.c | 4
-rw-r--r--  net/bridge/br_forward.c | 28
-rw-r--r--  net/bridge/br_mdb.c | 18
-rw-r--r--  net/bridge/br_multicast.c | 87
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 16
-rw-r--r--  net/bridge/br_netfilter_ipv6.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 12
-rw-r--r--  net/bridge/br_stp.c | 5
-rw-r--r--  net/bridge/br_stp_if.c | 13
-rw-r--r--  net/bridge/br_stp_timer.c | 4
-rw-r--r--  net/caif/caif_socket.c | 19
-rw-r--r--  net/can/af_can.c | 12
-rw-r--r--  net/can/bcm.c | 2
-rw-r--r--  net/can/raw.c | 7
-rw-r--r--  net/ceph/ceph_common.c | 16
-rw-r--r--  net/ceph/messenger.c | 24
-rw-r--r--  net/core/datagram.c | 56
-rw-r--r--  net/core/dev.c | 45
-rw-r--r--  net/core/dst.c | 4
-rw-r--r--  net/core/gen_estimator.c | 13
-rw-r--r--  net/core/netclassid_cgroup.c | 3
-rw-r--r--  net/core/pktgen.c | 9
-rw-r--r--  net/core/rtnetlink.c | 198
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/dsa/dsa.c | 6
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c | 6
-rw-r--r--  net/ipv4/arp.c | 16
-rw-r--r--  net/ipv4/datagram.c | 16
-rw-r--r--  net/ipv4/devinet.c | 14
-rw-r--r--  net/ipv4/fib_lookup.h | 1
-rw-r--r--  net/ipv4/fib_semantics.c | 41
-rw-r--r--  net/ipv4/fib_trie.c | 7
-rw-r--r--  net/ipv4/inet_diag.c | 4
-rw-r--r--  net/ipv4/inet_fragment.c | 40
-rw-r--r--  net/ipv4/inet_hashtables.c | 11
-rw-r--r--  net/ipv4/ip_fragment.c | 18
-rw-r--r--  net/ipv4/ip_tunnel.c | 8
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 25
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 11
-rw-r--r--  net/ipv4/tcp_input.c | 3
-rw-r--r--  net/ipv6/datagram.c | 20
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/ip6_offload.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 6
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 6
-rw-r--r--  net/ipv6/reassembly.c | 8
-rw-r--r--  net/ipv6/route.c | 5
-rw-r--r--  net/llc/af_llc.c | 4
-rw-r--r--  net/mac80211/debugfs_netdev.c | 1
-rw-r--r--  net/mac80211/iface.c | 25
-rw-r--r--  net/mac80211/mesh_plink.c | 5
-rw-r--r--  net/mac80211/pm.c | 16
-rw-r--r--  net/mac80211/tdls.c | 6
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 16
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 78
-rw-r--r--  net/netfilter/ipvs/ip_vs_sched.c | 12
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 41
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 67
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 5
-rw-r--r--  net/netfilter/nf_queue.c | 2
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 7
-rw-r--r--  net/netfilter/nfnetlink.c | 38
-rw-r--r--  net/netfilter/xt_CT.c | 8
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 1
-rw-r--r--  net/netlink/af_netlink.c | 81
-rw-r--r--  net/openvswitch/flow_table.c | 2
-rw-r--r--  net/packet/af_packet.c | 11
-rw-r--r--  net/rds/ib_rdma.c | 4
-rw-r--r--  net/rds/transport.c | 2
-rw-r--r--  net/sched/act_api.c | 11
-rw-r--r--  net/sched/act_bpf.c | 50
-rw-r--r--  net/sched/act_pedit.c | 5
-rw-r--r--  net/sched/cls_bpf.c | 2
-rw-r--r--  net/sched/cls_flow.c | 5
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sched/sch_choke.c | 13
-rw-r--r--  net/sched/sch_fq_codel.c | 13
-rw-r--r--  net/sched/sch_plug.c | 1
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/xprtsock.c | 25
-rw-r--r--  net/switchdev/switchdev.c | 12
-rw-r--r--  net/tipc/socket.c | 1
-rw-r--r--  net/wireless/chan.c | 45
-rw-r--r--  net/wireless/nl80211.c | 14
-rw-r--r--  net/wireless/reg.c | 8
-rw-r--r--  net/wireless/trace.h | 11
96 files changed, 988 insertions(+), 581 deletions(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 
 	mutex_unlock(&virtio_9p_lock);
 
+	vdev->config->reset(vdev);
 	vdev->config->del_vqs(vdev);
 
 	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
 	ax25_clear_queues(ax25);
 
+	ax25_stop_heartbeat(ax25);
 	ax25_stop_t1timer(ax25);
 	ax25_stop_t2timer(ax25);
 	ax25_stop_t3timer(ax25);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 		return 1;
 
 	chan = conn->smp;
+	if (!chan) {
+		BT_ERR("SMP security requested but not available");
+		return 1;
+	}
 
 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
 		return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e97572b5d2cc..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,14 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
 {
-	if (!is_skb_forwardable(skb->dev, skb)) {
-		kfree_skb(skb);
-	} else {
+	if (!is_skb_forwardable(skb->dev, skb))
+		goto drop;
+
 	skb_push(skb, ETH_HLEN);
 	br_drop_fake_rtable(skb);
-	dev_queue_xmit(skb);
+	skb_sender_cpu_clear(skb);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    (skb->protocol == htons(ETH_P_8021Q) ||
+	     skb->protocol == htons(ETH_P_8021AD))) {
+		int depth;
+
+		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
+			goto drop;
+
+		skb_set_network_header(skb, depth);
 	}
 
+	dev_queue_xmit(skb);
+
+	return 0;
+
+drop:
+	kfree_skb(skb);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e29ad70b3000..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	struct net_bridge_port_group *p;
 	struct net_bridge_port_group __rcu **pp;
 	struct net_bridge_mdb_htable *mdb;
+	unsigned long now = jiffies;
 	int err;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -347,8 +348,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	if (unlikely(!p))
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
+	if (state == MDB_TEMPORARY)
+		mod_timer(&p->timer, now + br->multicast_membership_interval);
 
-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 	return 0;
 }
 
@@ -371,6 +373,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
+	memset(&ip, 0, sizeof(ip));
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -417,20 +420,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
+	memset(&ip, 0, sizeof(ip));
 	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP)) {
-		if (timer_pending(&br->ip4_other_query.timer))
-			return -EBUSY;
-
+	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	} else {
-		if (timer_pending(&br->ip6_other_query.timer))
-			return -EBUSY;
-
+	else
 		ip.u.ip6 = entry->addr.u.ip6;
 #endif
-	}
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -448,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 		if (p->port->state == BR_STATE_DISABLED)
 			goto unlock;
 
+		entry->state = p->state;
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..0b39dcc65b94 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
 					 struct bridge_mcast_own_query *query);
 static void br_multicast_add_router(struct net_bridge *br,
 				    struct net_bridge_port *port);
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+					 struct net_bridge_port *port,
+					 __be32 group,
+					 __u16 vid);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+					 struct net_bridge_port *port,
+					 const struct in6_addr *group,
+					 __u16 vid);
+#endif
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 			continue;
 		}
 
-		err = br_ip4_multicast_add_group(br, port, group, vid);
-		if (err)
-			break;
+		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+		     type == IGMPV3_MODE_IS_INCLUDE) &&
+		    ntohs(grec->grec_nsrcs) == 0) {
+			br_ip4_multicast_leave_group(br, port, group, vid);
+		} else {
+			err = br_ip4_multicast_add_group(br, port, group, vid);
+			if (err)
+				break;
+		}
 	}
 
 	return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 			continue;
 		}
 
-		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
-						 vid);
-		if (err)
-			break;
+		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+		    ntohs(*nsrcs) == 0) {
+			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+						     vid);
+		} else {
+			err = br_ip6_multicast_add_group(br, port,
+							 &grec->grec_mca, vid);
+			if (!err)
+				break;
+		}
 	}
 
 	return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
 
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
-	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&other_query->timer))
+	    (port && port->state == BR_STATE_DISABLED))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
 	if (!mp)
 		goto out;
 
+	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
+		struct net_bridge_port_group __rcu **pp;
+
+		for (pp = &mp->ports;
+		     (p = mlock_dereference(*pp, br)) != NULL;
+		     pp = &p->next) {
+			if (p->port != port)
+				continue;
+
+			rcu_assign_pointer(*pp, p->next);
+			hlist_del_init(&p->mglist);
+			del_timer(&p->timer);
+			call_rcu_bh(&p->rcu, br_multicast_free_pg);
+			br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+
+			if (!mp->ports && !mp->mglist &&
+			    netif_running(br->dev))
+				mod_timer(&mp->timer, jiffies);
+		}
+		goto out;
+	}
+
+	if (timer_pending(&other_query->timer))
+		goto out;
+
 	if (br->multicast_querier) {
 		__br_multicast_send_query(br, port, &mp->addr);
 
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
 		}
 	}
 
-	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
-		struct net_bridge_port_group __rcu **pp;
-
-		for (pp = &mp->ports;
-		     (p = mlock_dereference(*pp, br)) != NULL;
-		     pp = &p->next) {
-			if (p->port != port)
-				continue;
-
-			rcu_assign_pointer(*pp, p->next);
-			hlist_del_init(&p->mglist);
-			del_timer(&p->timer);
-			call_rcu_bh(&p->rcu, br_multicast_free_pg);
-			br_mdb_notify(br->dev, port, group, RTM_DELMDB);
-
-			if (!mp->ports && !mp->mglist &&
-			    netif_running(br->dev))
-				mod_timer(&mp->timer, jiffies);
-		}
-		goto out;
-	}
-
 	now = jiffies;
 	time = now + br->multicast_last_member_count *
 		     br->multicast_last_member_interval;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d89f4fac0bc5..c8b9bcfe997e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct brnf_frag_data {
 	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
 	u8 encap_size;
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
 			     int (*output)(struct sock *, struct sk_buff *))
 {
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
 
 	return ip_do_fragment(sk, skb, output);
 }
+#endif
 
 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 		struct brnf_frag_data *data;
 
 		if (br_validate_ipv4(skb))
-			return NF_DROP;
+			goto drop;
 
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 		struct brnf_frag_data *data;
 
 		if (br_validate_ipv6(skb))
-			return NF_DROP;
+			goto drop;
 
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 
 		if (v6ops)
 			return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
-		else
-			return -EMSGSIZE;
+
+		kfree_skb(skb);
+		return -EMSGSIZE;
 	}
 #endif
 	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(sk, skb);
+ drop:
+	kfree_skb(skb);
+	return 0;
 }
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 6d12d2675c80..13b7d1e3d185 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
 	struct net_device *dev = skb->dev;
-	struct inet6_dev *idev = in6_dev_get(skb->dev);
+	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 	u32 pkt_len;
 	u8 ip6h_len = sizeof(struct ipv6hdr);
 
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6b67ed3831de..3da5525eb8a2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -457,6 +457,8 @@ static int br_afspec(struct net_bridge *br,
 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
 			return -EINVAL;
 		vinfo = nla_data(attr);
+		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+			return -EINVAL;
 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
 			if (vinfo_start)
 				return -EINVAL;
@@ -691,9 +693,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
 				      struct nlattr *tb[],
 				      struct nlattr *data[])
 {
+	struct net_bridge *br = netdev_priv(brdev);
+	int ret;
+
 	if (!data)
 		return 0;
-	return br_setport(br_port_get_rtnl(dev), data);
+
+	spin_lock_bh(&br->lock);
+	ret = br_setport(br_port_get_rtnl(dev), data);
+	spin_unlock_bh(&br->lock);
+
+	return ret;
 }
 
 static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
 		br_send_config_bpdu(p, &bpdu);
 		p->topology_change_ack = 0;
 		p->config_pending = 0;
-		mod_timer(&p->hold_timer,
-			  round_jiffies(jiffies + BR_HOLD_TIME));
+		if (p->br->stp_enabled == BR_KERNEL_STP)
+			mod_timer(&p->hold_timer,
+				  round_jiffies(jiffies + BR_HOLD_TIME));
 	}
 }
 
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
 	struct net_bridge_port *p;
 
 	spin_lock_bh(&br->lock);
-	mod_timer(&br->hello_timer, jiffies + br->hello_time);
+	if (br->stp_enabled == BR_KERNEL_STP)
+		mod_timer(&br->hello_timer, jiffies + br->hello_time);
 	mod_timer(&br->gc_timer, jiffies + HZ/10);
 
 	br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
 	int r;
 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
 	char *envp[] = { NULL };
+	struct net_bridge_port *p;
 
 	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
 
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
 	if (r == 0) {
 		br->stp_enabled = BR_USER_STP;
 		br_debug(br, "userspace STP started\n");
+		/* Stop hello and hold timers */
+		del_timer(&br->hello_timer);
+		list_for_each_entry(p, &br->port_list, list)
+			del_timer(&p->hold_timer);
 	} else {
 		br->stp_enabled = BR_KERNEL_STP;
 		br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
 	int r;
 	char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
 	char *envp[] = { NULL };
+	struct net_bridge_port *p;
 
 	if (br->stp_enabled == BR_USER_STP) {
 		r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
 		br_info(br, "userspace STP stopped, return code %d\n", r);
 
 		/* To start timers on any ports left in blocking */
+		mod_timer(&br->hello_timer, jiffies + br->hello_time);
+		list_for_each_entry(p, &br->port_list, list)
+			mod_timer(&p->hold_timer,
+				  round_jiffies(jiffies + BR_HOLD_TIME));
 		spin_lock_bh(&br->lock);
 		br_port_state_selection(br);
 		spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
-		mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time));
+		if (br->stp_enabled != BR_USER_STP)
+			mod_timer(&br->hello_timer,
+				  round_jiffies(jiffies + br->hello_time));
 	}
 	spin_unlock(&br->lock);
 }
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	bool queued = false;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	err = sk_filter(sk, skb);
 	if (err)
-		return err;
+		goto out;
+
 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
 	spin_lock_irqsave(&list->lock, flags);
-	if (!sock_flag(sk, SOCK_DEAD))
+	queued = !sock_flag(sk, SOCK_DEAD);
+	if (queued)
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
-
-	if (!sock_flag(sk, SOCK_DEAD))
+out:
+	if (queued)
 		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
-	return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7933e62a7318..166d436196c1 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* timer for statistics update */
 struct s_stats    can_stats;       /* packet statistics */
 struct s_pstats   can_pstats;      /* receive list statistics */
 
+static atomic_t skbcounter = ATOMIC_INIT(0);
+
 /*
  * af_can socket functions
  */
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
 		return err;
 	}
 
-	if (newskb) {
-		if (!(newskb->tstamp.tv64))
-			__net_timestamp(newskb);
-
+	if (newskb)
 		netif_rx_ni(newskb);
-	}
 
 	/* update statistics */
 	can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
 	can_stats.rx_frames++;
 	can_stats.rx_frames_delta++;
 
+	/* create non-zero unique skb identifier together with *skb */
+	while (!(can_skb_prv(skb)->skbcnt))
+		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
+
 	rcu_read_lock();
 
 	/* deliver the packet to sockets listening on all devices */
diff --git a/net/can/bcm.c b/net/can/bcm.c
index b523453585be..a1ba6875c2a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
 
 	can_skb_reserve(skb);
 	can_skb_prv(skb)->ifindex = dev->ifindex;
+	can_skb_prv(skb)->skbcnt = 0;
 
 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
 
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
 	}
 
 	can_skb_prv(skb)->ifindex = dev->ifindex;
+	can_skb_prv(skb)->skbcnt = 0;
 	skb->dev = dev;
 	can_skb_set_owner(skb, sk);
 	err = can_send(skb, 1); /* send with loopback */
diff --git a/net/can/raw.c b/net/can/raw.c
index 31b9748cbb4e..2e67b1423cd3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
  */
 
 struct uniqframe {
-	ktime_t tstamp;
+	int skbcnt;
 	const struct sk_buff *skb;
 	unsigned int join_rx_count;
 };
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 
 	/* eliminate multiple filter matches for the same skb */
 	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
-	    ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
 		if (ro->join_filters) {
 			this_cpu_inc(ro->uniq->join_rx_count);
 			/* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 		}
 	} else {
 		this_cpu_ptr(ro->uniq)->skb = oskb;
-		this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
 		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
 		/* drop first frame to check all enabled filters? */
 		if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
 	can_skb_reserve(skb);
 	can_skb_prv(skb)->ifindex = dev->ifindex;
+	can_skb_prv(skb)->skbcnt = 0;
 
 	err = memcpy_from_msg(skb_put(skb, size), msg, size);
 	if (err < 0)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cb7db320dd27..f30329f72641 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -9,6 +9,7 @@
 #include <keys/ceph-type.h>
 #include <linux/module.h>
 #include <linux/mount.h>
+#include <linux/nsproxy.h>
 #include <linux/parser.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
@@ -16,8 +17,6 @@
 #include <linux/statfs.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
 
 
 #include <linux/ceph/ceph_features.h>
@@ -131,6 +130,13 @@ int ceph_compare_options(struct ceph_options *new_opt,
 	int i;
 	int ret;
 
+	/*
+	 * Don't bother comparing options if network namespaces don't
+	 * match.
+	 */
+	if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
+		return -1;
+
 	ret = memcmp(opt1, opt2, ofs);
 	if (ret)
 		return ret;
@@ -335,9 +341,6 @@ ceph_parse_options(char *options, const char *dev_name,
 	int err = -ENOMEM;
 	substring_t argstr[MAX_OPT_ARGS];
 
-	if (current->nsproxy->net_ns != &init_net)
-		return ERR_PTR(-EINVAL);
-
 	opt = kzalloc(sizeof(*opt), GFP_KERNEL);
 	if (!opt)
 		return ERR_PTR(-ENOMEM);
@@ -608,6 +611,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
 fail_monc:
 	ceph_monc_stop(&client->monc);
 fail:
+	ceph_messenger_fini(&client->msgr);
 	kfree(client);
 	return ERR_PTR(err);
 }
@@ -621,8 +625,8 @@ void ceph_destroy_client(struct ceph_client *client)
 
 	/* unmount */
 	ceph_osdc_stop(&client->osdc);
-
 	ceph_monc_stop(&client->monc);
+	ceph_messenger_fini(&client->msgr);
 
 	ceph_debugfs_client_cleanup(client);
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1679f47280e2..e3be1d22a247 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -6,6 +6,7 @@
 #include <linux/inet.h>
 #include <linux/kthread.h>
 #include <linux/net.h>
+#include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -479,7 +480,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 	int ret;
 
 	BUG_ON(con->sock);
-	ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
@@ -1731,17 +1732,17 @@ static int verify_hello(struct ceph_connection *con)
 
 static bool addr_is_blank(struct sockaddr_storage *ss)
 {
+	struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
+	struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
+
 	switch (ss->ss_family) {
 	case AF_INET:
-		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
+		return addr->s_addr == htonl(INADDR_ANY);
 	case AF_INET6:
-		return
-		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
-		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
-		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
-		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
+		return ipv6_addr_any(addr6);
+	default:
+		return true;
 	}
-	return false;
 }
 
 static int addr_port(struct sockaddr_storage *ss)
@@ -2944,11 +2945,18 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
 	msgr->tcp_nodelay = tcp_nodelay;
 
 	atomic_set(&msgr->stopping, 0);
+	write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
 
 	dout("%s %p\n", __func__, msgr);
 }
 EXPORT_SYMBOL(ceph_messenger_init);
 
+void ceph_messenger_fini(struct ceph_messenger *msgr)
+{
+	put_net(read_pnet(&msgr->net));
+}
+EXPORT_SYMBOL(ceph_messenger_fini);
+
 static void clear_standby(struct ceph_connection *con)
 {
 	/* come back from STANDBY? */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..4967262b2707 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
 	goto out;
 }
 
+static int skb_set_peeked(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	if (skb->peeked)
+		return 0;
+
+	/* We have to unshare an skb before modifying it. */
+	if (!skb_shared(skb))
+		goto done;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+
+	skb->prev->next = nskb;
+	skb->next->prev = nskb;
+	nskb->prev = skb->prev;
+	nskb->next = skb->next;
+
+	consume_skb(skb);
+	skb = nskb;
+
+done:
+	skb->peeked = 1;
+
+	return 0;
+}
+
 /**
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
@@ -165,7 +194,9 @@ out_noerr:
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 				    int *peeked, int *off, int *err)
 {
+	struct sk_buff_head *queue = &sk->sk_receive_queue;
 	struct sk_buff *skb, *last;
+	unsigned long cpu_flags;
 	long timeo;
 	/*
 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 	 * Look at current nfs client by the way...
 	 * However, this function was correct in any case. 8)
 	 */
-	unsigned long cpu_flags;
-	struct sk_buff_head *queue = &sk->sk_receive_queue;
 	int _off = *off;
 
 	last = (struct sk_buff *)queue;
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 				_off -= skb->len;
 				continue;
 			}
-			skb->peeked = 1;
+
+			error = skb_set_peeked(skb);
+			if (error)
+				goto unlock_err;
+
 			atomic_inc(&skb->users);
 		} else
 			__skb_unlink(skb, queue);
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 
 	return NULL;
 
+unlock_err:
+	spin_unlock_irqrestore(&queue->lock, cpu_flags);
 no_packet:
 	*err = error;
 	return NULL;
@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 	    !skb->csum_complete_sw)
 		netdev_rx_csum_fault(skb->dev);
 	}
-	skb->csum_valid = !sum;
+	if (!skb_shared(skb))
+		skb->csum_valid = !sum;
 	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
 		netdev_rx_csum_fault(skb->dev);
 	}
 
-	/* Save full packet checksum */
-	skb->csum = csum;
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum_complete_sw = 1;
-	skb->csum_valid = !sum;
+	if (!skb_shared(skb)) {
+		/* Save full packet checksum */
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum_complete_sw = 1;
+		skb->csum_valid = !sum;
+	}
 
 	return sum;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 6778a9999d52..a8e4dd430285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
 		return dev->netdev_ops->ndo_get_iflink(dev);
 
-	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
-	if (dev->rtnl_link_ops)
-		return 0;
-
 	return dev->ifindex;
 }
 EXPORT_SYMBOL(dev_get_iflink);
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
 		goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3786,7 +3783,7 @@ another_round:
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
-			goto unlock;
+			goto out;
 
 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-			goto unlock;
+			goto out;
 	}
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
+out:
 	return ret;
 }
 
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		struct sk_buff *skb;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	struct netdev_queue *tx;
 	size_t sz = count * sizeof(*tx);
 
-	BUG_ON(count < 1 || count > 0xffff);
+	if (count < 1 || count > 0xffff)
+		return -EINVAL;
 
 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 	if (!tx) {
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
 		int newrefcnt;
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
-		WARN_ON(newrefcnt < 0);
+		if (unlikely(newrefcnt < 0))
+			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+					     __func__, dst, newrefcnt);
 		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9dfb88a933e7..92d886f4adcb 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,7 +66,7 @@
 
    NOTES.
 
-   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * avbps and avpps are scaled by 2^5.
    * both values are reported as 32 bit unsigned values. bps can
      overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
@@ -85,10 +85,10 @@ struct gen_estimator
 	struct gnet_stats_rate_est64	*rate_est;
 	spinlock_t		*stats_lock;
 	int			ewma_log;
+	u32			last_packets;
+	unsigned long		avpps;
 	u64			last_bytes;
 	u64			avbps;
-	u32			last_packets;
-	u32			avpps;
 	struct rcu_head		e_rcu;
 	struct rb_node		node;
 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
@@ -118,8 +118,8 @@ static void est_timer(unsigned long arg)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		struct gnet_stats_basic_packed b = {0};
+		unsigned long rate;
 		u64 brate;
-		u32 rate;
 
 		spin_lock(e->stats_lock);
 		read_lock(&est_lock);
@@ -133,10 +133,11 @@ static void est_timer(unsigned long arg)
 		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
-		rate = (b.packets - e->last_packets)<<(12 - idx);
+		rate = b.packets - e->last_packets;
+		rate <<= (7 - idx);
 		e->last_packets = b.packets;
 		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
-		e->rate_est->pps = (e->avpps+0x1FF)>>10;
+		e->rate_est->pps = (e->avpps + 0xF) >> 5;
 skip:
 		read_unlock(&est_lock);
 		spin_unlock(e->stats_lock);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
 
 struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 {
-	return css_cls_state(task_css(p, net_cls_cgrp_id));
+	return css_cls_state(task_css_check(p, net_cls_cgrp_id,
+					    rcu_read_lock_bh_held()));
 }
 EXPORT_SYMBOL_GPL(task_cls_state);
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 05badbb58865..1ebdf1c0d118 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3571,13 +3571,6 @@ static int pktgen_thread_worker(void *arg)
 	pr_debug("%s removing thread\n", t->tsk->comm);
 	pktgen_rem_thread(t);
 
-	/* Wait for kthread_stop */
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
-	}
-	__set_current_state(TASK_RUNNING);
-
 	return 0;
 }
 
@@ -3769,6 +3762,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 	}
 
 	t->net = pn;
+	get_task_struct(p);
 	wake_up_process(p);
 	wait_for_completion(&t->start_done);
 
@@ -3891,6 +3885,7 @@ static void __net_exit pg_net_exit(struct net *net)
 		t = list_entry(q, struct pktgen_thread, th_list);
 		list_del(&t->th_list);
 		kthread_stop(t->tsk);
+		put_task_struct(t->tsk);
 		kfree(t);
 	}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 01ced4a889e0..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
 	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
 };
 
-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
-	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
-};
-
 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
@@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 	return 0;
 }
 
-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 {
-	int rem, err = -EINVAL;
-	struct nlattr *vf;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	int err = -EINVAL;
 
-	nla_for_each_nested(vf, attr, rem) {
-		switch (nla_type(vf)) {
-		case IFLA_VF_MAC: {
-			struct ifla_vf_mac *ivm;
-			ivm = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_mac)
-				err = ops->ndo_set_vf_mac(dev, ivm->vf,
-							  ivm->mac);
-			break;
-		}
-		case IFLA_VF_VLAN: {
-			struct ifla_vf_vlan *ivv;
-			ivv = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_vlan)
-				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
-							   ivv->vlan,
-							   ivv->qos);
-			break;
-		}
-		case IFLA_VF_TX_RATE: {
-			struct ifla_vf_tx_rate *ivt;
-			struct ifla_vf_info ivf;
-			ivt = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_get_vf_config)
-				err = ops->ndo_get_vf_config(dev, ivt->vf,
-							     &ivf);
-			if (err)
-				break;
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_rate)
-				err = ops->ndo_set_vf_rate(dev, ivt->vf,
-							   ivf.min_tx_rate,
-							   ivt->rate);
-			break;
-		}
-		case IFLA_VF_RATE: {
-			struct ifla_vf_rate *ivt;
-			ivt = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_rate)
-				err = ops->ndo_set_vf_rate(dev, ivt->vf,
-							   ivt->min_tx_rate,
-							   ivt->max_tx_rate);
-			break;
-		}
-		case IFLA_VF_SPOOFCHK: {
-			struct ifla_vf_spoofchk *ivs;
-			ivs = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_spoofchk)
-				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
-							       ivs->setting);
-			break;
-		}
-		case IFLA_VF_LINK_STATE: {
-			struct ifla_vf_link_state *ivl;
-			ivl = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_link_state)
-				err = ops->ndo_set_vf_link_state(dev, ivl->vf,
-								 ivl->link_state);
-			break;
-		}
-		case IFLA_VF_RSS_QUERY_EN: {
-			struct ifla_vf_rss_query_en *ivrssq_en;
+	if (tb[IFLA_VF_MAC]) {
+		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
 
-			ivrssq_en = nla_data(vf);
-			err = -EOPNOTSUPP;
-			if (ops->ndo_set_vf_rss_query_en)
-				err = ops->ndo_set_vf_rss_query_en(dev,
-							    ivrssq_en->vf,
-							    ivrssq_en->setting);
-			break;
-		}
-		default:
-			err = -EINVAL;
-			break;
-		}
-		if (err)
-			break;
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_mac)
+			err = ops->ndo_set_vf_mac(dev, ivm->vf,
+						  ivm->mac);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[IFLA_VF_VLAN]) {
+		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_vlan)
+			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
+						   ivv->qos);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[IFLA_VF_TX_RATE]) {
+		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+		struct ifla_vf_info ivf;
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_get_vf_config)
+			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
+		if (err < 0)
+			return err;
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_rate)
+			err = ops->ndo_set_vf_rate(dev, ivt->vf,
+						   ivf.min_tx_rate,
+						   ivt->rate);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[IFLA_VF_RATE]) {
+		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_rate)
+			err = ops->ndo_set_vf_rate(dev, ivt->vf,
+						   ivt->min_tx_rate,
+						   ivt->max_tx_rate);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[IFLA_VF_SPOOFCHK]) {
+		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_spoofchk)
+			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+						       ivs->setting);
+		if (err < 0)
+			return err;
 	}
+
+	if (tb[IFLA_VF_LINK_STATE]) {
+		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_link_state)
+			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+							 ivl->link_state);
+		if (err < 0)
+			return err;
+	}
+
+	if (tb[IFLA_VF_RSS_QUERY_EN]) {
+		struct ifla_vf_rss_query_en *ivrssq_en;
+
+		err = -EOPNOTSUPP;
+		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+		if (ops->ndo_set_vf_rss_query_en)
+			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
+							   ivrssq_en->setting);
+		if (err < 0)
+			return err;
+	}
+
 	return err;
 }
 
@@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb,
 	}
 
 	if (tb[IFLA_VFINFO_LIST]) {
+		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
 		struct nlattr *attr;
 		int rem;
+
 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
-			if (nla_type(attr) != IFLA_VF_INFO) {
+			if (nla_type(attr) != IFLA_VF_INFO ||
+			    nla_len(attr) < NLA_HDRLEN) {
 				err = -EINVAL;
 				goto errout;
 			}
-			err = do_setvfinfo(dev, attr);
+			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
+					       ifla_vf_policy);
+			if (err < 0)
+				goto errout;
+			err = do_setvfinfo(dev, vfinfo);
 			if (err < 0)
 				goto errout;
 			status |= DO_SETLINK_NOTIFY;
@@ -1799,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
 			goto errout;
 
 		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
-			if (nla_type(attr) != IFLA_VF_PORT)
-				continue;
-			err = nla_parse_nested(port, IFLA_PORT_MAX,
-					       attr, ifla_port_policy);
+			if (nla_type(attr) != IFLA_VF_PORT ||
+			    nla_len(attr) < NLA_HDRLEN) {
+				err = -EINVAL;
+				goto errout;
+			}
+			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
+					       ifla_port_policy);
 			if (err < 0)
 				goto errout;
 			if (!port[IFLA_PORT_VF]) {
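
The rtnetlink hunks above replace blind iteration over raw IFLA_VF_INFO payloads with nla_parse_nested() against ifla_vf_policy, so every per-VF attribute is length-checked before any ndo_set_vf_* callback dereferences it. A minimal userspace sketch of the same validate-then-dispatch pattern; the attribute types, policy table and struct layout are illustrative stand-ins, not the kernel's netlink API:

#include <stdio.h>
#include <string.h>

enum { VF_UNSPEC, VF_MAC, VF_VLAN, VF_MAX };

struct attr { int type; int len; const void *data; };

static const int policy_len[VF_MAX + 1] = {
	[VF_MAC]  = 6,	/* expect a 6-byte MAC */
	[VF_VLAN] = 4,	/* expect a 4-byte VLAN id */
};

/* Fill tb[] only with attributes whose length matches the policy. */
static int parse_nested(struct attr *tb[], const struct attr *attrs, int n)
{
	memset(tb, 0, sizeof(*tb) * (VF_MAX + 1));
	for (int i = 0; i < n; i++) {
		int t = attrs[i].type;

		if (t <= VF_UNSPEC || t > VF_MAX)
			return -1;		/* unknown type: reject */
		if (attrs[i].len != policy_len[t])
			return -1;		/* malformed: reject before use */
		tb[t] = (struct attr *)&attrs[i];
	}
	return 0;
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };
	struct attr msg[] = { { VF_MAC, sizeof(mac), mac } };
	struct attr *tb[VF_MAX + 1];

	/* handlers only ever see entries that passed the policy */
	if (parse_nested(tb, msg, 1) == 0 && tb[VF_MAC])
		printf("MAC attribute validated, first byte %02x\n",
		       ((const unsigned char *)tb[VF_MAC]->data)[0]);
	return 0;
}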
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	sock_copy(newsk, sk);
 
 	/* SANITY */
-	get_net(sock_net(newsk));
+	if (likely(newsk->sk_net_refcnt))
+		get_net(sock_net(newsk));
 	sk_node_init(&newsk->sk_node);
 	sock_lock_init(newsk);
 	bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk: sock to wait on
  * @timeo: for how long
+ * @skb: last skb seen on sk_receive_queue
  *
  * Now socket state including sk->sk_err is changed only under lock,
  * hence we may omit checks after joining wait queue.
  * We check receive queue before schedule() only as optimization;
  * it is very likely that release_sock() added new data.
  */
-int sk_wait_data(struct sock *sk, long *timeo)
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
 {
 	int rc;
 	DEFINE_WAIT(wait);
 
 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	finish_wait(sk_sleep(sk), &wait);
 	return rc;
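
sk_wait_data() now takes the last skb the caller has already seen and sleeps until the tail of sk_receive_queue differs from it, so a reader that still has unread data queued is woken only by genuinely new data. A rough pthread analogue of that wait condition; the queue and locking are toy stand-ins for sk_wait_event(), not socket code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int queue[16];
static int tail = -1;		/* index of last element, -1 if empty */

static void enqueue(int v)
{
	pthread_mutex_lock(&lock);
	queue[++tail] = v;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Wait until something newer than 'last_seen' is appended; a plain
 * "queue non-empty" test would return immediately here.
 */
static int wait_data(int last_seen)
{
	pthread_mutex_lock(&lock);
	while (tail == last_seen)
		pthread_cond_wait(&cond, &lock);
	int v = queue[tail];
	pthread_mutex_unlock(&lock);
	return v;
}

static void *producer(void *arg)
{
	(void)arg;
	enqueue(42);
	return NULL;
}

int main(void)
{
	pthread_t t;

	enqueue(7);		/* old, already-seen data */
	pthread_create(&t, NULL, producer, NULL);
	printf("new element: %d\n", wait_data(0));	/* index 0 was seen */
	pthread_join(t, NULL);
	return 0;
}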
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
 			break;
 		}
 
-		sk_wait_data(sk, &timeo);
+		sk_wait_data(sk, &timeo, NULL);
 		continue;
 	found_ok_skb:
 		if (len > skb->len)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 392e29a0227d..b445d492c115 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -630,7 +630,7 @@ static int dsa_of_probe(struct device *dev)
 			continue;
 
 		cd->sw_addr = be32_to_cpup(sw_addr);
-		if (cd->sw_addr > PHY_MAX_ADDR)
+		if (cd->sw_addr >= PHY_MAX_ADDR)
 			continue;
 
 		if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
@@ -642,6 +642,8 @@ static int dsa_of_probe(struct device *dev)
 				continue;
 
 			port_index = be32_to_cpup(port_reg);
+			if (port_index >= DSA_MAX_PORTS)
+				break;
 
 			port_name = of_get_property(port, "label", NULL);
 			if (!port_name)
@@ -666,8 +668,6 @@ static int dsa_of_probe(struct device *dev)
 				goto out_free_chip;
 			}
 
-			if (port_index == DSA_MAX_PORTS)
-				break;
 		}
 	}
 
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
 	} else {
 		fq->q.meat += skb->len;
 	}
-	add_frag_mem_limit(&fq->q, skb->truesize);
+	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
 		clone->data_len = clone->len;
 		head->data_len -= clone->len;
 		head->len -= clone->len;
-		add_frag_mem_limit(&fq->q, clone->truesize);
+		add_frag_mem_limit(fq->q.net, clone->truesize);
 	}
 
 	WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
 		}
 		fp = next;
 	}
-	sub_frag_mem_limit(&fq->q, sum_truesize);
+	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
 
 	neigh = neigh_lookup(&arp_tbl, &ip, dev);
 	if (neigh) {
-		read_lock_bh(&neigh->lock);
-		memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
-		r->arp_flags = arp_state_to_flags(neigh);
-		read_unlock_bh(&neigh->lock);
-		r->arp_ha.sa_family = dev->type;
-		strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+		if (!(neigh->nud_state & NUD_NOARP)) {
+			read_lock_bh(&neigh->lock);
+			memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+			r->arp_flags = arp_state_to_flags(neigh);
+			read_unlock_bh(&neigh->lock);
+			r->arp_ha.sa_family = dev->type;
+			strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+			err = 0;
+		}
 		neigh_release(neigh);
-		err = 0;
 	}
 	return err;
 }
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	sk_dst_reset(sk);
 
-	lock_sock(sk);
-
 	oif = sk->sk_bound_dev_if;
 	saddr = inet->inet_saddr;
 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	sk_dst_set(sk, &rt->dst);
 	err = 0;
 out:
-	release_sock(sk);
 	return err;
 }
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+	int res;
+
+	lock_sock(sk);
+	res = __ip4_datagram_connect(sk, uaddr, addr_len);
+	release_sock(sk);
+	return res;
+}
 EXPORT_SYMBOL(ip4_datagram_connect);
 
 /* Because UDP xmit path can manipulate sk_dst_cache without holding
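
The split above is the common lock-wrapper idiom: __ip4_datagram_connect() assumes the socket is already locked, and the exported ip4_datagram_connect() merely brackets it with lock_sock()/release_sock(), which lets the IPv6 connect path further down call the lockless variant while already holding the lock. A compressed sketch of the idiom, with a pthread mutex standing in for the socket lock and purely illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static int connected;

/* Core logic; caller must hold sock_lock. */
static int __datagram_connect(int addr)
{
	connected = addr;	/* state change done under the lock */
	return 0;
}

/* Public wrapper: lock, delegate, unlock. A caller that already
 * holds the lock (here, a v4-mapped path inside a v6 connect) uses
 * the double-underscore variant directly and avoids a deadlock.
 */
static int datagram_connect(int addr)
{
	pthread_mutex_lock(&sock_lock);
	int res = __datagram_connect(addr);
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	printf("connect: %d, state: %d\n", datagram_connect(5), connected);
	return 0;
}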
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7498716e8f54..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
 		queue_delayed_work(system_power_efficient_wq,
 				   &check_lifetime_work, 0);
 		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
-		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 	}
 	return 0;
 }
@@ -1740,6 +1739,8 @@ static int inet_netconf_msgsize_devconf(int type)
 		size += nla_total_size(4);
 	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
 		size += nla_total_size(4);
+	if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+		size += nla_total_size(4);
 
 	return size;
 }
@@ -1780,6 +1781,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
 			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
 		goto nla_put_failure;
+	if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
 	return 0;
@@ -1819,6 +1824,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
 	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
+	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
 };
 
 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -2048,6 +2054,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
 			inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
 						    ifindex, cnf);
 		}
+		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
+		    new_value != old_value) {
+			ifindex = devinet_conf_ifindex(net, cnf);
+			inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+						    ifindex, cnf);
+		}
 	}
 
 	return ret;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
 	u8 fa_state;
 	u8 fa_slen;
 	u32 tb_id;
+	s16 fa_default;
 	struct rcu_head rcu;
 };
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 }
 
 /* Must be invoked inside of an RCU protected region. */
-void fib_select_default(struct fib_result *res)
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
 {
 	struct fib_info *fi = NULL, *last_resort = NULL;
 	struct hlist_head *fa_head = res->fa_head;
 	struct fib_table *tb = res->table;
+	u8 slen = 32 - res->prefixlen;
 	int order = -1, last_idx = -1;
-	struct fib_alias *fa;
+	struct fib_alias *fa, *fa1 = NULL;
+	u32 last_prio = res->fi->fib_priority;
+	u8 last_tos = 0;
 
 	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
 		struct fib_info *next_fi = fa->fa_info;
 
+		if (fa->fa_slen != slen)
+			continue;
+		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+			continue;
+		if (fa->tb_id != tb->tb_id)
+			continue;
+		if (next_fi->fib_priority > last_prio &&
+		    fa->fa_tos == last_tos) {
+			if (last_tos)
+				continue;
+			break;
+		}
+		if (next_fi->fib_flags & RTNH_F_DEAD)
+			continue;
+		last_tos = fa->fa_tos;
+		last_prio = next_fi->fib_priority;
+
 		if (next_fi->fib_scope != res->scope ||
 		    fa->fa_type != RTN_UNICAST)
 			continue;
-
-		if (next_fi->fib_priority > res->fi->fib_priority)
-			break;
 		if (!next_fi->fib_nh[0].nh_gw ||
 		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
 			continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
 		if (!fi) {
 			if (next_fi != res->fi)
 				break;
+			fa1 = fa;
 		} else if (!fib_detect_death(fi, order, &last_resort,
-					     &last_idx, tb->tb_default)) {
+					     &last_idx, fa1->fa_default)) {
 			fib_result_assign(res, fi);
-			tb->tb_default = order;
+			fa1->fa_default = order;
 			goto out;
 		}
 		fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
 	}
 
 	if (order <= 0 || !fi) {
-		tb->tb_default = -1;
+		if (fa1)
+			fa1->fa_default = -1;
 		goto out;
 	}
 
 	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-			      tb->tb_default)) {
+			      fa1->fa_default)) {
 		fib_result_assign(res, fi);
-		tb->tb_default = order;
+		fa1->fa_default = order;
 		goto out;
 	}
 
 	if (last_idx >= 0)
 		fib_result_assign(res, last_resort);
-	tb->tb_default = last_idx;
+	fa1->fa_default = last_idx;
 out:
 	return;
 }
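
fib_select_default() now keys its default-route state to the matching fib_alias (fa_default) instead of the table, and filters candidates by prefix length, TOS and table id before looking at priorities. A simplified sketch of just that candidate filter; it keeps the filtering order but omits the alternate-gateway bookkeeping (fa1, fib_detect_death()), and the struct is illustrative, not the kernel's fib_alias:

#include <stdio.h>

struct alias { int slen, tos, tb_id, prio, dead; const char *name; };

static const struct alias *pick(const struct alias *a, int n,
				int slen, int tos, int tb_id, int cur_prio)
{
	for (int i = 0; i < n; i++) {
		if (a[i].slen != slen || a[i].tb_id != tb_id)
			continue;
		if (a[i].tos && a[i].tos != tos)
			continue;	/* 0 acts as a wildcard TOS */
		if (a[i].prio > cur_prio)
			break;		/* list is priority-ordered */
		if (a[i].dead)
			continue;
		return &a[i];
	}
	return NULL;
}

int main(void)
{
	const struct alias tbl[] = {
		{ 32, 0, 1, 10, 1, "gw-a" },	/* dead, skipped */
		{ 32, 0, 1, 10, 0, "gw-b" },
		{ 32, 0, 1, 20, 0, "gw-c" },	/* higher prio, not reached */
	};
	const struct alias *hit = pick(tbl, 3, 32, 0, 1, 10);

	printf("selected: %s\n", hit ? hit->name : "none");
	return 0;
}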
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..37c4bb89a708 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 		new_fa->fa_state = state & ~FA_S_ACCESSED;
 		new_fa->fa_slen = fa->fa_slen;
 		new_fa->tb_id = tb->tb_id;
+		new_fa->fa_default = -1;
 
 		err = switchdev_fib_ipv4_add(key, plen, fi,
 					     new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 	new_fa->fa_state = 0;
 	new_fa->fa_slen = slen;
 	new_fa->tb_id = tb->tb_id;
+	new_fa->fa_default = -1;
 
 	/* (Optionally) offload fib entry to switch hardware. */
 	err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
 		if (hlist_empty(&n->leaf)) {
 			put_child_root(pn, n->key, NULL);
 			node_free(n);
-		} else {
-			leaf_pull_suffix(pn, n);
 		}
 	}
 }
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
 		if (hlist_empty(&n->leaf)) {
 			put_child_root(pn, n->key, NULL);
 			node_free(n);
-		} else {
-			leaf_pull_suffix(pn, n);
 		}
 	}
 
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
 		return NULL;
 
 	tb->tb_id = id;
-	tb->tb_default = -1;
 	tb->tb_num_default = 0;
 	tb->tb_data = (alias ? alias->__data : tb->__data);
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9bc26677058e..c3b1f3a0f4cf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -152,8 +152,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 			       inet6_sk(sk)->tclass) < 0)
 			goto errout;
 
-		if (ipv6_only_sock(sk) &&
-		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1))
+		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
+		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
 			goto errout;
 	}
 #endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
 	unsigned int evicted = 0;
 	HLIST_HEAD(expired);
 
-evict_again:
 	spin_lock(&hb->chain_lock);
 
 	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
 		if (!inet_fragq_should_evict(fq))
 			continue;
 
-		if (!del_timer(&fq->timer)) {
-			/* q expiring right now thus increment its refcount so
-			 * it won't be freed under us and wait until the timer
-			 * has finished executing then destroy it
-			 */
-			atomic_inc(&fq->refcnt);
-			spin_unlock(&hb->chain_lock);
-			del_timer_sync(&fq->timer);
-			inet_frag_put(fq, f);
-			goto evict_again;
-		}
+		if (!del_timer(&fq->timer))
+			continue;
 
-		fq->flags |= INET_FRAG_EVICTED;
-		hlist_del(&fq->list);
-		hlist_add_head(&fq->list, &expired);
+		hlist_add_head(&fq->list_evictor, &expired);
 		++evicted;
 	}
 
 	spin_unlock(&hb->chain_lock);
 
-	hlist_for_each_entry_safe(fq, n, &expired, list)
+	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
 		f->frag_expire((unsigned long) fq);
 
 	return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 	int i;
 
 	nf->low_thresh = 0;
-	local_bh_disable();
 
 evict_again:
+	local_bh_disable();
 	seq = read_seqbegin(&f->rnd_seqlock);
 
 	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
 		inet_evict_bucket(f, &f->hash[i]);
 
-	if (read_seqretry(&f->rnd_seqlock, seq))
-		goto evict_again;
-
 	local_bh_enable();
+	cond_resched();
+
+	if (read_seqretry(&f->rnd_seqlock, seq) ||
+	    percpu_counter_sum(&nf->mem))
+		goto evict_again;
 
 	percpu_counter_destroy(&nf->mem);
 }
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 	struct inet_frag_bucket *hb;
 
 	hb = get_frag_bucket_locked(fq, f);
-	if (!(fq->flags & INET_FRAG_EVICTED))
-		hlist_del(&fq->list);
+	hlist_del(&fq->list);
+	fq->flags |= INET_FRAG_COMPLETE;
 	spin_unlock(&hb->chain_lock);
 }
 
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
 		fq_unlink(fq, f);
 		atomic_dec(&fq->refcnt);
-		fq->flags |= INET_FRAG_COMPLETE;
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
 		fp = xp;
 	}
 	sum = sum_truesize + f->qsize;
-	sub_frag_mem_limit(q, sum);
 
 	if (f->destructor)
 		f->destructor(q);
 	kmem_cache_free(f->frags_cachep, q);
+
+	sub_frag_mem_limit(nf, sum);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 
 	q->net = nf;
 	f->constructor(q, arg);
-	add_frag_mem_limit(q, f->qsize);
+	add_frag_mem_limit(nf, f->qsize);
 
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
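
inet_evict_bucket() above no longer drops the chain lock to del_timer_sync() a busy timer; entries whose timer can be stopped are collected on a private list via the new list_evictor member and expired only after the lock is released. A self-contained sketch of that drain-under-lock pattern, with plain singly linked lists and a flag standing in for del_timer():

#include <stdio.h>
#include <stdlib.h>

struct frag {
	struct frag *next;
	int timer_armed;
	int id;
};

/* Returns nonzero if we stopped the timer (and so own the entry). */
static int try_del_timer(struct frag *f)
{
	if (!f->timer_armed)
		return 0;		/* timer already firing: skip it */
	f->timer_armed = 0;
	return 1;
}

static unsigned evict(struct frag **bucket)
{
	struct frag *expired = NULL, **pp = bucket;
	unsigned evicted = 0;

	/* lock(bucket) would be held for this loop only */
	while (*pp) {
		struct frag *f = *pp;

		if (!try_del_timer(f)) {
			pp = &f->next;
			continue;
		}
		*pp = f->next;		/* unlink from the bucket */
		f->next = expired;	/* collect on the private list */
		expired = f;
		evicted++;
	}
	/* unlock(bucket); now expire without holding the lock */
	while (expired) {
		struct frag *f = expired;

		expired = f->next;
		printf("expiring frag %d\n", f->id);
		free(f);
	}
	return evicted;
}

int main(void)
{
	struct frag *bucket = NULL;

	for (int i = 0; i < 3; i++) {
		struct frag *f = malloc(sizeof(*f));
		f->id = i;
		f->timer_armed = (i != 1);	/* frag 1 is mid-expiry */
		f->next = bucket;
		bucket = f;
	}
	printf("evicted %u\n", evict(&bucket));
	while (bucket) {		/* frag 1 survived the scan */
		struct frag *f = bucket;
		bucket = f->next;
		free(f);
	}
	return 0;
}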
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
+	unsigned int locksz = sizeof(spinlock_t);
 	unsigned int i, nblocks = 1;
 
-	if (sizeof(spinlock_t) != 0) {
+	if (locksz != 0) {
 		/* allocate 2 cache lines or at least one spinlock per cpu */
-		nblocks = max_t(unsigned int,
-				2 * L1_CACHE_BYTES / sizeof(spinlock_t),
-				1);
+		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
 		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
 
 		/* no more locks than number of hash buckets */
 		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
 
-		hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
 						      GFP_KERNEL | __GFP_NOWARN);
 		if (!hashinfo->ehash_locks)
-			hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+			hashinfo->ehash_locks = vmalloc(nblocks * locksz);
 
 		if (!hashinfo->ehash_locks)
 			return -ENOMEM;
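
The rewritten inet_ehash_locks_alloc() makes the sizing rule explicit: roughly two cache lines' worth of spinlocks per possible CPU, rounded up to a power of two and capped at one lock per hash bucket. The same arithmetic with assumed example constants rather than values probed from real hardware:

#include <stdio.h>

#define L1_CACHE_BYTES	64
#define LOCK_SIZE	4	/* assumed sizeof(spinlock_t) */

static unsigned roundup_pow_of_two(unsigned n)
{
	unsigned p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned cpus = 8, ehash_mask = 65535;
	unsigned nblocks = 2U * L1_CACHE_BYTES / LOCK_SIZE;	/* 32 */

	if (nblocks < 1)
		nblocks = 1;
	nblocks = roundup_pow_of_two(nblocks * cpus);		/* 256 */
	if (nblocks > ehash_mask + 1)
		nblocks = ehash_mask + 1;	/* no more locks than buckets */
	printf("allocating %u locks of %u bytes\n", nblocks, LOCK_SIZE);
	return 0;
}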
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
 	ipq_kill(qp);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
-	if (!(qp->q.flags & INET_FRAG_EVICTED)) {
+	if (!inet_frag_evicting(&qp->q)) {
 		struct sk_buff *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
 		kfree_skb(fp);
 		fp = xp;
 	} while (fp);
-	sub_frag_mem_limit(&qp->q, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	qp->q.flags = 0;
 	qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	ihl = ip_hdrlen(skb);
 
 	/* Determine the position of this fragment. */
-	end = offset + skb->len - ihl;
+	end = offset + skb->len - skb_network_offset(skb) - ihl;
 	err = -EINVAL;
 
 	/* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		goto err;
 
 	err = -ENOMEM;
-	if (!pskb_pull(skb, ihl))
+	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
 		goto err;
 
 	err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
 				qp->q.fragments = next;
 
 			qp->q.meat -= free_it->len;
-			sub_frag_mem_limit(&qp->q, free_it->truesize);
+			sub_frag_mem_limit(qp->q.net, free_it->truesize);
 			kfree_skb(free_it);
 		}
 	}
@@ -479,7 +479,7 @@ found:
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
 	qp->ecn |= ecn;
-	add_frag_mem_limit(&qp->q, skb->truesize);
+	add_frag_mem_limit(qp->q.net, skb->truesize);
 	if (offset == 0)
 		qp->q.flags |= INET_FRAG_FIRST_IN;
 
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(&qp->q, clone->truesize);
+		add_frag_mem_limit(qp->q.net, clone->truesize);
 	}
 
 	skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		}
 		fp = next;
 	}
-	sub_frag_mem_limit(&qp->q, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		iph->frag_off = 0;
 	}
 
+	ip_send_check(iph);
+
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
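
ip_frag_reasm() now ends with ip_send_check() because the code just above it rewrites iph->frag_off and iph->tot_len, invalidating the stored header checksum. A standalone version of the RFC 1071 ones'-complement sum it recomputes, over an example 20-byte header with the checksum field pre-zeroed the way ip_send_check() does:

#include <stdint.h>
#include <stdio.h>

static uint16_t ip_checksum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Example 20-byte IPv4 header; the checksum field (bytes 10-11)
	 * is cleared before summing.
	 */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x54, 0x9a, 0x1e, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};

	/* prints 0x1e72 for this header */
	printf("header checksum: 0x%04x\n", ip_checksum(hdr, sizeof(hdr)));
	return 0;
}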
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 4c2c3ba4ba65..626d9e56a6bd 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
 EXPORT_SYMBOL(ip_tunnel_encap);
 
 static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
-			    struct rtable *rt, __be16 df)
+			    struct rtable *rt, __be16 df,
+			    const struct iphdr *inner_iph)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		if (!skb_is_gso(skb) &&
-		    (df & htons(IP_DF)) && mtu < pkt_size) {
+		    (inner_iph->frag_off & htons(IP_DF)) &&
+		    mtu < pkt_size) {
 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 			return -E2BIG;
@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto tx_error;
 	}
 
-	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
+	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
 		ip_rt_put(rt);
 		goto tx_error;
 	}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 95c9b6eece25..92305a1a021a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	unsigned int verdict = NF_DROP;
 	const struct arphdr *arp;
-	struct arpt_entry *e, *back;
+	struct arpt_entry *e, **jumpstack;
 	const char *indev, *outdev;
 	const void *table_base;
+	unsigned int cpu, stackidx = 0;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	local_bh_disable();
 	addend = xt_write_recseq_begin();
 	private = table->private;
+	cpu = smp_processor_id();
 	/*
 	 * Ensure we load private-> members after we've fetched the base
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
 	table_base = private->entries;
+	jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
-	back = get_entry(table_base, private->underflow[hook]);
 
 	acpar.in = state->in;
 	acpar.out = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
-				e = back;
-				back = get_entry(table_base, back->comefrom);
+				if (stackidx == 0) {
+					e = get_entry(table_base,
+						      private->underflow[hook]);
+				} else {
+					e = jumpstack[--stackidx];
+					e = arpt_next_entry(e);
+				}
 				continue;
 			}
 			if (table_base + v
 			    != arpt_next_entry(e)) {
-				/* Save old back ptr in next entry */
-				struct arpt_entry *next = arpt_next_entry(e);
-				next->comefrom = (void *)back - table_base;
 
-				/* set back pointer to next entry */
-				back = next;
+				if (stackidx >= private->stacksize) {
+					verdict = NF_DROP;
+					break;
+				}
+				jumpstack[stackidx++] = e;
 			}
 
 			e = get_entry(table_base, v);
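
arpt_do_table() above drops the comefrom back pointers written into the ruleset itself in favor of a per-CPU jump stack: a jump to a non-adjacent rule pushes the return position, and a RETURN verdict pops it or falls back to the hook's underflow rule. A toy traversal showing just that control flow; the rule array and verbs are invented for the sketch, not xtables structures:

#include <stdio.h>

#define STACKSIZE 8

enum { JUMP, RET, ACCEPT };

struct rule { int verb; int target; };	/* target used by JUMP only */

static int run(const struct rule *r, int start, int underflow)
{
	int stack[STACKSIZE], sp = 0, pc = start;

	for (;;) {
		switch (r[pc].verb) {
		case JUMP:
			if (sp >= STACKSIZE)
				return -1;	/* stack overflow: drop */
			stack[sp++] = pc + 1;	/* save return position */
			pc = r[pc].target;
			break;
		case RET:
			pc = sp ? stack[--sp] : underflow;
			break;
		case ACCEPT:
			return pc;
		}
	}
}

int main(void)
{
	const struct rule rules[] = {
		{ JUMP, 3 },	/* 0: call subchain at 3 */
		{ ACCEPT, 0 },	/* 1: reached after the subchain returns */
		{ ACCEPT, 0 },	/* 2: underflow rule */
		{ RET, 0 },	/* 3: subchain immediately returns */
	};

	printf("accepted at rule %d\n", run(rules, 0, 2));	/* rule 1 */
	return 0;
}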
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 		if (!res.prefixlen &&
 		    res.table->tb_num_default > 1 &&
 		    res.type == RTN_UNICAST && !fl4->flowi4_oif)
-			fib_select_default(&res);
+			fib_select_default(fl4, &res);
 
 	if (!fl4->saddr)
 		fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 				ret = -EAGAIN;
 				break;
 			}
-			sk_wait_data(sk, &timeo);
+			sk_wait_data(sk, &timeo, NULL);
 			if (signal_pending(current)) {
 				ret = sock_intr_errno(timeo);
 				break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *last;
 	u32 urg_hole = 0;
 
 	if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 		/* Next get a buffer. */
 
+		last = skb_peek_tail(&sk->sk_receive_queue);
 		skb_queue_walk(&sk->sk_receive_queue, skb) {
+			last = skb;
 			/* Now that we have two receive queues this
 			 * shouldn't happen.
 			 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
 			lock_sock(sk);
-		} else
-			sk_wait_data(sk, &timeo);
+		} else {
+			sk_wait_data(sk, &timeo, last);
+		}
 
 		if (user_recv) {
 			int chunk;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	bool new_recovery = false;
+	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
 	bool is_reneg; /* is receiver reneging on SACKs? */
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
 	    !after(tp->high_seq, tp->snd_una) ||
 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
-		new_recovery = true;
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
 			return -EAFNOSUPPORT;
-		err = ip4_datagram_connect(sk, uaddr, addr_len);
+		err = __ip4_datagram_connect(sk, uaddr, addr_len);
 		goto ipv4_connected;
 	}
 
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		sin.sin_addr.s_addr = daddr->s6_addr32[3];
 		sin.sin_port = usin->sin6_port;
 
-		err = ip4_datagram_connect(sk,
-					   (struct sockaddr *) &sin,
-					   sizeof(sin));
+		err = __ip4_datagram_connect(sk,
+					     (struct sockaddr *) &sin,
+					     sizeof(sin));
 
 ipv4_connected:
 		if (err)
@@ -204,6 +204,16 @@ out:
 	fl6_sock_release(flowlabel);
 	return err;
 }
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+	int res;
+
+	lock_sock(sk);
+	res = __ip6_datagram_connect(sk, uaddr, addr_len);
+	release_sock(sk);
+	return res;
+}
 EXPORT_SYMBOL_GPL(ip6_datagram_connect);
 
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f2e464eba5ef..57990c929cd8 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
 			if (offset < 0)
 				goto out;
 
-			if (!ipv6_is_mld(skb, nexthdr, offset))
-				goto out;
+			if (ipv6_is_mld(skb, nexthdr, offset))
+				deliver = true;
 
-			deliver = true;
+			goto out;
 		}
 		/* unknown RA - process it normally */
 	}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
 	.callbacks = {
 		.gso_segment	= ipv6_gso_segment,
-		.gro_receive	= ipv6_gro_receive,
-		.gro_complete	= ipv6_gro_complete,
 	},
 };
 
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
 static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_change_info *change_info;
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 		ndisc_send_unsol_na(dev);
 		in6_dev_put(idev);
 		break;
+	case NETDEV_CHANGE:
+		change_info = ptr;
+		if (change_info->flags_changed & IFF_NOARP)
+			neigh_changeaddr(&nd_tbl, dev);
+		break;
 	case NETDEV_DOWN:
 		neigh_ifdown(&nd_tbl, dev);
 		fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
 	fq->ecn |= ecn;
 	if (payload_len > fq->q.max_size)
 		fq->q.max_size = payload_len;
-	add_frag_mem_limit(&fq->q, skb->truesize);
+	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 		clone->ip_summed = head->ip_summed;
 
 		NFCT_FRAG6_CB(clone)->orig = NULL;
-		add_frag_mem_limit(&fq->q, clone->truesize);
+		add_frag_mem_limit(fq->q.net, clone->truesize);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 		head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
 	}
-	sub_frag_mem_limit(&fq->q, head->truesize);
+	sub_frag_mem_limit(fq->q.net, head->truesize);
 
 	head->ignore_df = 1;
 	head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
 
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
-	if (fq->q.flags & INET_FRAG_EVICTED)
+	if (inet_frag_evicting(&fq->q))
 		goto out_rcu_unlock;
 
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
-	add_frag_mem_limit(&fq->q, skb->truesize);
+	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(&fq->q, clone->truesize);
+		add_frag_mem_limit(fq->q.net, clone->truesize);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		}
 		fp = next;
 	}
-	sub_frag_mem_limit(&fq->q, sum_truesize);
+	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1a1122a6bbf5..6090969937f8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -369,10 +369,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 	struct inet6_dev *idev;
 
 	dst_destroy_metrics_generic(dst);
-
-	if (rt->rt6i_pcpu)
-		free_percpu(rt->rt6i_pcpu);
-
+	free_percpu(rt->rt6i_pcpu);
 	rt6_uncached_list_del(rt);
 
 	idev = rt->rt6i_idev;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
 		if (signal_pending(current))
 			break;
 		rc = 0;
-		if (sk_wait_data(sk, &timeo))
+		if (sk_wait_data(sk, &timeo, NULL))
 			break;
 	}
 	return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 			release_sock(sk);
 			lock_sock(sk);
 		} else
-			sk_wait_data(sk, &timeo);
+			sk_wait_data(sk, &timeo, NULL);
 
 		if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
 			net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
 
 	debugfs_remove_recursive(sdata->vif.debugfs_dir);
 	sdata->vif.debugfs_dir = NULL;
+	sdata->debugfs.subdir_stations = NULL;
 }
 
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
 	ieee80211_teardown_sdata(sdata);
 }
 
-/*
- * Remove all interfaces, may only be called at hardware unregistration
- * time because it doesn't do RCU-safe list removals.
- */
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 
 	ASSERT_RTNL();
 
-	/*
-	 * Close all AP_VLAN interfaces first, as otherwise they
-	 * might be closed while the AP interface they belong to
-	 * is closed, causing unregister_netdevice_many() to crash.
+	/* Before destroying the interfaces, make sure they're all stopped so
+	 * that the hardware is stopped. Otherwise, the driver might still be
+	 * iterating the interfaces during the shutdown, e.g. from a worker
+	 * or from RX processing or similar, and if it does so (using atomic
+	 * iteration) while we're manipulating the list, the iteration will
+	 * crash.
+	 *
+	 * After this, the hardware should be stopped and the driver should
+	 * have stopped all of its activities, so that we can do RCU-unaware
+	 * manipulations of the interface list below.
 	 */
-	list_for_each_entry(sdata, &local->interfaces, list)
-		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-			dev_close(sdata->dev);
+	cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
+	WARN(local->open_count, "%s: open count remains %d\n",
+	     wiphy_name(local->hw.wiphy), local->open_count);
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 	if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
 		/* AID */
 		pos = skb_put(skb, 2);
-		put_unaligned_le16(plid, pos + 2);
+		put_unaligned_le16(plid, pos);
 	}
 	if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 	    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
 	    WLAN_SP_MESH_PEERING_CONFIRM) {
 		baseaddr += 4;
 		baselen += 4;
+
+		if (baselen > len)
+			return;
 	}
 	ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
 	mesh_process_plink_frame(sdata, mgmt, &elems);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		if (sdata->vif.type != NL80211_IFTYPE_STATION)
 			continue;
 		ieee80211_mgd_quiesce(sdata);
+		/* If suspended during TX in progress, and wowlan
+		 * is enabled (connection will be active) there
+		 * can be a race where the driver is put out
+		 * of power-save due to TX and during suspend
+		 * dynamic_ps_timer is cancelled and TX packet
+		 * is flushed, leaving the driver in ACTIVE even
+		 * after resuming until dynamic_ps_timer puts
+		 * driver back in DOZE.
+		 */
+		if (sdata->u.mgd.associated &&
+		    sdata->u.mgd.powersave &&
+		    !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
+			local->hw.conf.flags |= IEEE80211_CONF_PS;
+			ieee80211_hw_config(local,
+					    IEEE80211_CONF_CHANGE_PS);
+		}
 	}
 
 	err = drv_suspend(local, wowlan);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_channel *ch;
 	struct cfg80211_chan_def chandef;
 	int i, subband_start;
+	struct wiphy *wiphy = sdata->local->hw.wiphy;
 
 	for (i = start; i <= end; i += spacing) {
 		if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
 			/* we will be active on the channel */
 			cfg80211_chandef_create(&chandef, ch,
 						NL80211_CHAN_NO_HT);
-			if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
-						    &chandef,
-						    sdata->wdev.iftype)) {
+			if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
+							  sdata->wdev.iftype)) {
 				ch_cnt++;
 				/*
 				 * check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
 		queued = true;
 		info->control.vif = &tx->sdata->vif;
 		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
+		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
+			       IEEE80211_TX_CTL_NO_PS_BUFFER |
+			       IEEE80211_TX_STATUS_EOSP;
 		__skb_queue_tail(&tid_tx->pending, skb);
 		if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
 			purge_skb = __skb_dequeue(&tid_tx->pending);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	 * return *ignored=0 i.e. ICMP and NF_DROP
 	 */
 	sched = rcu_dereference(svc->scheduler);
-	dest = sched->schedule(svc, skb, iph);
+	if (sched) {
+		/* read svc->sched_data after svc->scheduler */
+		smp_rmb();
+		dest = sched->schedule(svc, skb, iph);
+	} else {
+		dest = NULL;
+	}
 	if (!dest) {
 		IP_VS_DBG(1, "p-schedule: no dest found.\n");
 		kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 	}
 
 	sched = rcu_dereference(svc->scheduler);
-	dest = sched->schedule(svc, skb, iph);
+	if (sched) {
+		/* read svc->sched_data after svc->scheduler */
+		smp_rmb();
+		dest = sched->schedule(svc, skb, iph);
+	} else {
+		dest = NULL;
+	}
 	if (dest == NULL) {
 		IP_VS_DBG(1, "Schedule: no dest found.\n");
 		return NULL;
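
Note: the smp_rmb() in the two ip_vs_core.c hunks above pairs with release ordering on the scheduler's write side. A minimal sketch of the assumed publish sequence (illustrative only; the helper name is hypothetical, not the exact IPVS binder):

	/* Writer side: initialise scheduler-private state before publishing
	 * the pointer; rcu_assign_pointer() orders the stores, so a reader
	 * that sees sched != NULL and then issues smp_rmb() also sees a
	 * valid svc->sched_data.
	 */
	static int example_bind_scheduler(struct ip_vs_service *svc,
					  struct ip_vs_scheduler *sched)
	{
		svc->sched_data = NULL;	/* or scheduler-specific state */
		rcu_assign_pointer(svc->scheduler, sched);
		return 0;
	}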
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 	__ip_vs_dst_cache_reset(dest);
 	spin_unlock_bh(&dest->dst_lock);
 
-	sched = rcu_dereference_protected(svc->scheduler, 1);
 	if (add) {
 		ip_vs_start_estimator(svc->net, &dest->stats);
 		list_add_rcu(&dest->n_list, &svc->destinations);
 		svc->num_dests++;
-		if (sched->add_dest)
+		sched = rcu_dereference_protected(svc->scheduler, 1);
+		if (sched && sched->add_dest)
 			sched->add_dest(svc, dest);
 	} else {
-		if (sched->upd_dest)
+		sched = rcu_dereference_protected(svc->scheduler, 1);
+		if (sched && sched->upd_dest)
 			sched->upd_dest(svc, dest);
 	}
 }
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
 		struct ip_vs_scheduler *sched;
 
 		sched = rcu_dereference_protected(svc->scheduler, 1);
-		if (sched->del_dest)
+		if (sched && sched->del_dest)
 			sched->del_dest(svc, dest);
 	}
 }
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	ip_vs_use_count_inc();
 
 	/* Lookup the scheduler by 'u->sched_name' */
-	sched = ip_vs_scheduler_get(u->sched_name);
-	if (sched == NULL) {
-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-		ret = -ENOENT;
-		goto out_err;
+	if (strcmp(u->sched_name, "none")) {
+		sched = ip_vs_scheduler_get(u->sched_name);
+		if (!sched) {
+			pr_info("Scheduler module ip_vs_%s not found\n",
+				u->sched_name);
+			ret = -ENOENT;
+			goto out_err;
+		}
 	}
 
 	if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	spin_lock_init(&svc->stats.lock);
 
 	/* Bind the scheduler */
-	ret = ip_vs_bind_scheduler(svc, sched);
-	if (ret)
-		goto out_err;
-	sched = NULL;
+	if (sched) {
+		ret = ip_vs_bind_scheduler(svc, sched);
+		if (ret)
+			goto out_err;
+		sched = NULL;
+	}
 
 	/* Bind the ct retriever */
 	RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 static int
 ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 {
-	struct ip_vs_scheduler *sched, *old_sched;
+	struct ip_vs_scheduler *sched = NULL, *old_sched;
 	struct ip_vs_pe *pe = NULL, *old_pe = NULL;
 	int ret = 0;
 
 	/*
 	 * Lookup the scheduler, by 'u->sched_name'
 	 */
-	sched = ip_vs_scheduler_get(u->sched_name);
-	if (sched == NULL) {
-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-		return -ENOENT;
+	if (strcmp(u->sched_name, "none")) {
+		sched = ip_vs_scheduler_get(u->sched_name);
+		if (!sched) {
+			pr_info("Scheduler module ip_vs_%s not found\n",
+				u->sched_name);
+			return -ENOENT;
+		}
 	}
 	old_sched = sched;
 
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 
 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
 	if (sched != old_sched) {
+		if (old_sched) {
+			ip_vs_unbind_scheduler(svc, old_sched);
+			RCU_INIT_POINTER(svc->scheduler, NULL);
+			/* Wait all svc->sched_data users */
+			synchronize_rcu();
+		}
 		/* Bind the new scheduler */
-		ret = ip_vs_bind_scheduler(svc, sched);
-		if (ret) {
-			old_sched = sched;
-			goto out;
+		if (sched) {
+			ret = ip_vs_bind_scheduler(svc, sched);
+			if (ret) {
+				ip_vs_scheduler_put(sched);
+				goto out;
+			}
 		}
-		/* Unbind the old scheduler on success */
-		ip_vs_unbind_scheduler(svc, old_sched);
 	}
 
 	/*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 	const struct ip_vs_iter *iter = seq->private;
 	const struct ip_vs_dest *dest;
 	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+	char *sched_name = sched ? sched->name : "none";
 
 	if (iter->table == ip_vs_svc_table) {
 #ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 				   ip_vs_proto_name(svc->protocol),
 				   &svc->addr.in6,
 				   ntohs(svc->port),
-				   sched->name);
+				   sched_name);
 		else
 #endif
 			seq_printf(seq, "%s %08X:%04X %s %s ",
 				   ip_vs_proto_name(svc->protocol),
 				   ntohl(svc->addr.ip),
 				   ntohs(svc->port),
-				   sched->name,
+				   sched_name,
 				   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 	} else {
 		seq_printf(seq, "FWM %08X %s %s",
-			   svc->fwmark, sched->name,
+			   svc->fwmark, sched_name,
 			   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 	}
 
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
 	struct ip_vs_scheduler *sched;
 	struct ip_vs_kstats kstats;
+	char *sched_name;
 
 	sched = rcu_dereference_protected(src->scheduler, 1);
+	sched_name = sched ? sched->name : "none";
 	dst->protocol = src->protocol;
 	dst->addr = src->addr.ip;
 	dst->port = src->port;
 	dst->fwmark = src->fwmark;
-	strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
+	strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
 	dst->flags = src->flags;
 	dst->timeout = src->timeout / HZ;
 	dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	struct ip_vs_flags flags = { .flags = svc->flags,
 				     .mask = ~0 };
 	struct ip_vs_kstats kstats;
+	char *sched_name;
 
 	nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
 	if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	}
 
 	sched = rcu_dereference_protected(svc->scheduler, 1);
+	sched_name = sched ? sched->name : "none";
 	pe = rcu_dereference_protected(svc->pe, 1);
-	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
+	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
 	    (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
 	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
 	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
 
 	if (sched->done_service)
 		sched->done_service(svc);
-	/* svc->scheduler can not be set to NULL */
+	/* svc->scheduler can be set to NULL only by caller */
 }
 
 
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 
 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
 {
-	struct ip_vs_scheduler *sched;
+	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+	char *sched_name = sched ? sched->name : "none";
 
-	sched = rcu_dereference(svc->scheduler);
 	if (svc->fwmark) {
 		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
-			     sched->name, svc->fwmark, svc->fwmark, msg);
+			     sched_name, svc->fwmark, svc->fwmark, msg);
 #ifdef CONFIG_IP_VS_IPV6
 	} else if (svc->af == AF_INET6) {
 		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
-			     sched->name, ip_vs_proto_name(svc->protocol),
+			     sched_name, ip_vs_proto_name(svc->protocol),
 			     &svc->addr.in6, ntohs(svc->port), msg);
 #endif
 	} else {
 		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
-			     sched->name, ip_vs_proto_name(svc->protocol),
+			     sched_name, ip_vs_proto_name(svc->protocol),
 			     &svc->addr.ip, ntohs(svc->port), msg);
 	}
 }
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp->control, pkts);
+		ip_vs_sync_conn(net, cp, pkts);
 	}
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
 			   FLOWI_FLAG_KNOWN_NH : 0;
 
@@ -505,6 +504,13 @@ err_put:
 	return -1;
 
 err_unreach:
+	/* The ip6_link_failure function requires the dev field to be set
+	 * in order to get the net (further for the sake of fwmark
+	 * reflection).
+	 */
+	if (!skb->dev)
+		skb->dev = skb_dst(skb)->dev;
+
 	dst_link_failure(skb);
 	return -1;
 }
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
 	if (ret == NF_ACCEPT) {
 		nf_reset(skb);
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 	}
 	return ret;
 }
 
+/* In the event of a remote destination, it's possible that we would have
+ * matches against an old socket (particularly a TIME-WAIT socket). This
+ * causes havoc down the line (ip_local_out et. al. expect regular sockets
+ * and invalid memory accesses will happen) so simply drop the association
+ * in this case.
+*/
+static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
+{
+	/* If dev is set, the packet came from the LOCAL_IN callback and
+	 * not from a local TCP socket.
+	 */
+	if (skb->dev)
+		skb_orphan(skb);
+}
+
 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
 					 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
 		ip_vs_notrack(skb);
 	else
 		ip_vs_update_conntrack(skb, cp, 1);
+
+	/* Remove the early_demux association unless it's bound for the
+	 * exact same port and address on this host after translation.
+	 */
+	if (!local || cp->vport != cp->dport ||
+	    !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
+		ip_vs_drop_early_demux_sk(skb);
+
 	if (!local) {
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
 			NULL, skb_dst(skb)->dev, dst_output_sk);
 	} else
 		ret = NF_ACCEPT;
+
 	return ret;
 }
 
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
 	if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
 		ip_vs_notrack(skb);
 	if (!local) {
+		ip_vs_drop_early_demux_sk(skb);
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
 			NULL, skb_dst(skb)->dev, dst_output_sk);
 	} else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 	struct ipv6hdr *old_ipv6h = NULL;
 #endif
 
+	ip_vs_drop_early_demux_sk(skb);
+
 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
 		new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..651039ad1681 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 	spin_unlock(&pcpu->lock);
 }
 
+/* Released via destroy_conntrack() */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+{
+	struct nf_conn *tmpl;
+
+	tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+	if (tmpl == NULL)
+		return NULL;
+
+	tmpl->status = IPS_TEMPLATE;
+	write_pnet(&tmpl->ct_net, net);
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	if (zone) {
+		struct nf_conntrack_zone *nf_ct_zone;
+
+		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+		if (!nf_ct_zone)
+			goto out_free;
+		nf_ct_zone->id = zone;
+	}
+#endif
+	atomic_set(&tmpl->ct_general.use, 0);
+
+	return tmpl;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+	kfree(tmpl);
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+	nf_ct_ext_destroy(tmpl);
+	nf_ct_ext_free(tmpl);
+	kfree(tmpl);
+}
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
+	if (unlikely(nf_ct_is_template(ct))) {
+		nf_ct_tmpl_free(ct);
+		return;
+	}
 	rcu_read_lock();
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
 	if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
-/* deletion from this larval template list happens via nf_ct_put() */
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
-{
-	struct ct_pcpu *pcpu;
-
-	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
-	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
-	nf_conntrack_get(&tmpl->ct_general);
-
-	/* add this conntrack to the (per cpu) tmpl list */
-	local_bh_disable();
-	tmpl->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
-
-	spin_lock(&pcpu->lock);
-	/* Overload tuple linked list to put us in template list. */
-	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-				 &pcpu->tmpl);
-	spin_unlock_bh(&pcpu->lock);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
-
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -1751,7 +1773,6 @@ int nf_conntrack_init_net(struct net *net)
 		spin_lock_init(&pcpu->lock);
 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
 		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
-		INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
 	}
 
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
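
Note: with the per-cpu template list removed above, template conntracks follow a plain refcount lifecycle. A minimal usage sketch under that assumption; it mirrors the synproxy and xt_CT hunks later in this diff, with net and zone taken from the caller's context:

	struct nf_conn *tmpl;

	tmpl = nf_ct_tmpl_alloc(net, zone, GFP_KERNEL);	/* NULL on failure */
	if (!tmpl)
		return -ENOMEM;
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);	/* pin the template */
	/* the final nf_ct_put() lands in destroy_conntrack(), which now
	 * routes templates to nf_ct_tmpl_free() */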
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
 	}
 
-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
 	}
 
 	err = nf_ct_expect_related_report(exp, portid, report);
-	if (err < 0)
-		goto err_exp;
-
-	return 0;
-err_exp:
 	nf_ct_expect_put(exp);
 err_ct:
 	nf_ct_put(ct);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index cd60d397fe05..8a8b2abc35ff 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
 	if (verdict == NF_ACCEPT) {
 	next_hook:
-		verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
+		verdict = nf_iterate(entry->state.hook_list,
 				     skb, &entry->state, &elem);
 	}
 
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..71f1e9fdfa18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,12 +349,10 @@ static void __net_exit synproxy_proc_exit(struct net *net)
 static int __net_init synproxy_net_init(struct net *net)
 {
 	struct synproxy_net *snet = synproxy_pernet(net);
-	struct nf_conntrack_tuple t;
 	struct nf_conn *ct;
 	int err = -ENOMEM;
 
-	memset(&t, 0, sizeof(t));
-	ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
 	if (IS_ERR(ct)) {
 		err = PTR_ERR(ct);
 		goto err1;
@@ -365,7 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
 	if (!nfct_synproxy_ext_add(ct))
 		goto err2;
 
-	nf_conntrack_tmpl_insert(net, ct);
+	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+	nf_conntrack_get(&ct->ct_general);
 	snet->tmpl = ct;
 
 	snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 8b117c90ecd7..0c0e8ecf02ab 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
 	}
 }
 
+enum {
+	NFNL_BATCH_FAILURE = (1 << 0),
+	NFNL_BATCH_DONE = (1 << 1),
+	NFNL_BATCH_REPLAY = (1 << 2),
+};
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 				u_int16_t subsys_id)
 {
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
-	bool success = true, done = false;
 	static LIST_HEAD(err_list);
+	u32 status;
 	int err;
 
 	if (subsys_id >= NFNL_SUBSYS_COUNT)
 		return netlink_ack(skb, nlh, -EINVAL);
 replay:
+	status = 0;
+
 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
 	if (!skb)
 		return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
 		if (type == NFNL_MSG_BATCH_BEGIN) {
 			/* Malformed: Batch begin twice */
 			nfnl_err_reset(&err_list);
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 			goto done;
 		} else if (type == NFNL_MSG_BATCH_END) {
-			done = true;
+			status |= NFNL_BATCH_DONE;
 			goto done;
 		} else if (type < NLMSG_MIN_TYPE) {
 			err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
 			 * original skb.
 			 */
 			if (err == -EAGAIN) {
-				nfnl_err_reset(&err_list);
-				ss->abort(oskb);
-				nfnl_unlock(subsys_id);
-				kfree_skb(skb);
-				goto replay;
+				status |= NFNL_BATCH_REPLAY;
+				goto next;
 			}
 		}
 ack:
@@ -402,7 +407,7 @@ ack:
 			 */
 			nfnl_err_reset(&err_list);
 			netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 			goto done;
 		}
 		/* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
 		 * triggers.
 		 */
 		if (err)
-			success = false;
+			status |= NFNL_BATCH_FAILURE;
 	}
-
+next:
 	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 	if (msglen > skb->len)
 		msglen = skb->len;
 	skb_pull(skb, msglen);
 	}
 done:
-	if (success && done)
+	if (status & NFNL_BATCH_REPLAY) {
+		ss->abort(oskb);
+		nfnl_err_reset(&err_list);
+		nfnl_unlock(subsys_id);
+		kfree_skb(skb);
+		goto replay;
+	} else if (status == NFNL_BATCH_DONE) {
 		ss->commit(oskb);
-	else
+	} else {
 		ss->abort(oskb);
+	}
 
 	nfnl_err_deliver(&err_list, oskb);
 	nfnl_unlock(subsys_id);
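
Note: the status bitmask above makes the end-of-batch decision explicit where two booleans could not express it: a replay request takes precedence over everything else, and a commit happens only when the status is exactly NFNL_BATCH_DONE, so any recorded failure bit forces an abort. Schematically, the same logic as the done: label:

	if (status & NFNL_BATCH_REPLAY)		/* an -EAGAIN was seen */
		goto replay;			/* redo the whole batch */
	else if (status == NFNL_BATCH_DONE)	/* BATCH_END, no failures */
		ss->commit(oskb);
	else
		ss->abort(oskb);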
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..c6630030c912 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 			  struct xt_ct_target_info_v1 *info)
 {
-	struct nf_conntrack_tuple t;
 	struct nf_conn *ct;
 	int ret = -EOPNOTSUPP;
 
@@ -202,8 +201,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 	if (ret < 0)
 		goto err1;
 
-	memset(&t, 0, sizeof(t));
-	ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
 	ret = PTR_ERR(ct);
 	if (IS_ERR(ct))
 		goto err2;
@@ -227,8 +225,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 		if (ret < 0)
 			goto err3;
 	}
-
-	nf_conntrack_tmpl_insert(par->net, ct);
+	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+	nf_conntrack_get(&ct->ct_general);
 out:
 	info->ct = ct;
 	return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 		goto out;
 	}
 
+	sysfs_attr_init(&info->timer->attr.attr);
 	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
 	if (!info->timer->attr.attr.name) {
 		ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dea925388a5b..d8e2e3918ce2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
 out:
 	spin_unlock(&netlink_tap_lock);
 
-	if (found && nt->module)
+	if (found)
 		module_put(nt->module);
 
 	return found ? 0 : -ENODEV;
@@ -357,25 +357,52 @@ err1:
 	return NULL;
 }
 
+
+static void
+__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+		   unsigned int order)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+	struct sk_buff_head *queue;
+	struct netlink_ring *ring;
+
+	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+
+	spin_lock_bh(&queue->lock);
+
+	ring->frame_max = req->nm_frame_nr - 1;
+	ring->head = 0;
+	ring->frame_size = req->nm_frame_size;
+	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
+
+	swap(ring->pg_vec_len, req->nm_block_nr);
+	swap(ring->pg_vec_order, order);
+	swap(ring->pg_vec, pg_vec);
+
+	__skb_queue_purge(queue);
+	spin_unlock_bh(&queue->lock);
+
+	WARN_ON(atomic_read(&nlk->mapped));
+
+	if (pg_vec)
+		free_pg_vec(pg_vec, order, req->nm_block_nr);
+}
+
 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
-			    bool closing, bool tx_ring)
+			    bool tx_ring)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct netlink_ring *ring;
-	struct sk_buff_head *queue;
 	void **pg_vec = NULL;
 	unsigned int order = 0;
-	int err;
 
 	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
 
-	if (!closing) {
-		if (atomic_read(&nlk->mapped))
-			return -EBUSY;
-		if (atomic_read(&ring->pending))
-			return -EBUSY;
-	}
+	if (atomic_read(&nlk->mapped))
+		return -EBUSY;
+	if (atomic_read(&ring->pending))
+		return -EBUSY;
 
 	if (req->nm_block_nr) {
 		if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
 			return -EINVAL;
 	}
 
-	err = -EBUSY;
 	mutex_lock(&nlk->pg_vec_lock);
-	if (closing || atomic_read(&nlk->mapped) == 0) {
-		err = 0;
-		spin_lock_bh(&queue->lock);
-
-		ring->frame_max = req->nm_frame_nr - 1;
-		ring->head = 0;
-		ring->frame_size = req->nm_frame_size;
-		ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
-
-		swap(ring->pg_vec_len, req->nm_block_nr);
-		swap(ring->pg_vec_order, order);
-		swap(ring->pg_vec, pg_vec);
-
-		__skb_queue_purge(queue);
-		spin_unlock_bh(&queue->lock);
-
-		WARN_ON(atomic_read(&nlk->mapped));
+	if (atomic_read(&nlk->mapped) == 0) {
+		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+		mutex_unlock(&nlk->pg_vec_lock);
+		return 0;
 	}
+
 	mutex_unlock(&nlk->pg_vec_lock);
 
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->nm_block_nr);
-	return err;
+
+	return -EBUSY;
 }
 
 static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
 
 		memset(&req, 0, sizeof(req));
 		if (nlk->rx_ring.pg_vec)
-			netlink_set_ring(sk, &req, true, false);
+			__netlink_set_ring(sk, &req, false, NULL, 0);
 		memset(&req, 0, sizeof(req));
 		if (nlk->tx_ring.pg_vec)
-			netlink_set_ring(sk, &req, true, true);
+			__netlink_set_ring(sk, &req, true, NULL, 0);
 	}
 #endif /* CONFIG_NETLINK_MMAP */
 
@@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			return -EINVAL;
 		if (copy_from_user(&req, optval, sizeof(req)))
 			return -EFAULT;
-		err = netlink_set_ring(sk, &req, false,
+		err = netlink_set_ring(sk, &req,
 				       optname == NETLINK_TX_RING);
 		break;
 	}
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
-				       + (num_possible_nodes()
+				       + (nr_node_ids
 					  * sizeof(struct flow_stats *)),
 				       0, 0, NULL);
 	if (flow_cache == NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		}
 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
 					  addr, hlen);
-		if (tp_len > dev->mtu + dev->hard_header_len) {
+		if (likely(tp_len >= 0) &&
+		    tp_len > dev->mtu + dev->hard_header_len) {
 			struct ethhdr *ehdr;
 			/* Earlier code assumed this would be a VLAN pkt,
 			 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 {
 	struct packet_sock *po = pkt_sk(sk);
-	const struct net_device *dev_curr;
+	struct net_device *dev_curr;
 	__be16 proto_curr;
 	bool need_rehook;
 
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 
 		po->num = proto;
 		po->prot_hook.type = proto;
-
-		if (po->prot_hook.dev)
-			dev_put(po->prot_hook.dev);
-
 		po->prot_hook.dev = dev;
 
 		po->ifindex = dev ? dev->ifindex : 0;
 		packet_cached_dev_assign(po, dev);
 	}
+	if (dev_curr)
+		dev_put(dev_curr);
 
 	if (proto == 0 || !need_rehook)
 		goto out_unlock;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba4..657ba9f5d308 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 	}
 
 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
-	if (IS_ERR(ibmr))
+	if (IS_ERR(ibmr)) {
+		rds_ib_dev_put(rds_ibdev);
 		return ibmr;
+	}
 
 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
 	if (ret == 0)
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 8b4a6cd2c3a7..83498e1c75b8 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister);
 
 void rds_trans_put(struct rds_transport *trans)
 {
-	if (trans && trans->t_owner)
+	if (trans)
 		module_put(trans->t_owner);
 }
 
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
-int tcf_hash_release(struct tc_action *a, int bind)
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
 {
 	struct tcf_common *p = a->priv;
 	int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
 	if (p) {
 		if (bind)
 			p->tcfc_bindcnt--;
-		else if (p->tcfc_bindcnt > 0)
+		else if (strict && p->tcfc_bindcnt > 0)
 			return -EPERM;
 
 		p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
 			ret = 1;
 		}
 	}
+
 	return ret;
 }
-EXPORT_SYMBOL(tcf_hash_release);
+EXPORT_SYMBOL(__tcf_hash_release);
 
 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
 			   struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
 		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
 			a->priv = p;
-			ret = tcf_hash_release(a, 0);
+			ret = __tcf_hash_release(a, false, true);
 			if (ret == ACT_P_DELETED) {
 				module_put(a->ops->owner);
 				n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
 	int ret = 0;
 
 	list_for_each_entry_safe(a, tmp, actions, list) {
-		ret = tcf_hash_release(a, bind);
+		ret = __tcf_hash_release(a, bind, true);
 		if (ret == ACT_P_DELETED)
 			module_put(a->ops->owner);
 		else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
 struct tcf_bpf_cfg {
 	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
-	char *bpf_name;
+	const char *bpf_name;
 	u32 bpf_fd;
 	u16 bpf_num_ops;
+	bool is_ebpf;
 };
 
 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 	cfg->bpf_ops = bpf_ops;
 	cfg->bpf_num_ops = bpf_num_ops;
 	cfg->filter = fp;
+	cfg->is_ebpf = false;
 
 	return 0;
 }
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 	cfg->bpf_fd = bpf_fd;
 	cfg->bpf_name = name;
 	cfg->filter = fp;
+	cfg->is_ebpf = true;
 
 	return 0;
 }
 
+static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+{
+	if (cfg->is_ebpf)
+		bpf_prog_put(cfg->filter);
+	else
+		bpf_prog_destroy(cfg->filter);
+
+	kfree(cfg->bpf_ops);
+	kfree(cfg->bpf_name);
+}
+
+static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
+				  struct tcf_bpf_cfg *cfg)
+{
+	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
+	cfg->filter = prog->filter;
+
+	cfg->bpf_ops = prog->bpf_ops;
+	cfg->bpf_name = prog->bpf_name;
+}
+
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action *act,
 			int replace, int bind)
 {
 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+	struct tcf_bpf_cfg cfg, old;
 	struct tc_act_bpf *parm;
 	struct tcf_bpf *prog;
-	struct tcf_bpf_cfg cfg;
 	bool is_bpf, is_ebpf;
 	int ret;
 
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	prog = to_bpf(act);
 	spin_lock_bh(&prog->tcf_lock);
 
+	if (ret != ACT_P_CREATED)
+		tcf_bpf_prog_fill_cfg(prog, &old);
+
 	prog->bpf_ops = cfg.bpf_ops;
 	prog->bpf_name = cfg.bpf_name;
 
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(act);
+	else
+		tcf_bpf_cfg_cleanup(&old);
 
 	return ret;
 
 destroy_fp:
-	if (is_ebpf)
-		bpf_prog_put(cfg.filter);
-	else
-		bpf_prog_destroy(cfg.filter);
-
-	kfree(cfg.bpf_ops);
-	kfree(cfg.bpf_name);
-
+	tcf_bpf_cfg_cleanup(&cfg);
 	return ret;
 }
 
 static void tcf_bpf_cleanup(struct tc_action *act, int bind)
 {
-	const struct tcf_bpf *prog = act->priv;
+	struct tcf_bpf_cfg tmp;
 
-	if (tcf_bpf_is_ebpf(prog))
-		bpf_prog_put(prog->filter);
-	else
-		bpf_prog_destroy(prog->filter);
+	tcf_bpf_prog_fill_cfg(act->priv, &tmp);
+	tcf_bpf_cfg_cleanup(&tmp);
 }
 
 static struct tc_action_ops act_bpf_ops __read_mostly = {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 		}
 		ret = ACT_P_CREATED;
 	} else {
-		p = to_pedit(a);
-		tcf_hash_release(a, bind);
 		if (bind)
 			return 0;
+		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
-
+		p = to_pedit(a);
 		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
 			keys = kmalloc(ksize, GFP_KERNEL);
 			if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		goto errout;
 
 	if (oldprog) {
-		list_replace_rcu(&prog->link, &oldprog->link);
+		list_replace_rcu(&oldprog->link, &prog->link);
 		tcf_unbind_filter(tp, &oldprog->res);
 		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
 	} else {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	if (!fnew)
 		goto err2;
 
+	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+
 	fold = (struct flow_filter *)*arg;
 	if (fold) {
 		err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		fnew->mask = ~0U;
 		fnew->tp = tp;
 		get_random_bytes(&fnew->hashrnd, 4);
-		tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	}
 
 	fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	if (*arg == 0)
 		list_add_tail_rcu(&fnew->list, &head->filters);
 	else
-		list_replace_rcu(&fnew->list, &fold->list);
+		list_replace_rcu(&fold->list, &fnew->list);
 
 	*arg = (unsigned long)fnew;
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	*arg = (unsigned long) fnew;
 
 	if (fold) {
-		list_replace_rcu(&fnew->list, &fold->list);
+		list_replace_rcu(&fold->list, &fnew->list);
 		tcf_unbind_filter(tp, &fold->res);
 		call_rcu(&fold->rcu, fl_destroy_filter);
 	} else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
+	while (q->head != q->tail) {
+		struct sk_buff *skb = q->tab[q->head];
+
+		q->head = (q->head + 1) & q->tab_mask;
+		if (!skb)
+			continue;
+		qdisc_qstats_backlog_dec(sch, skb);
+		--sch->q.qlen;
+		qdisc_drop(skb, sch);
+	}
+
+	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+	q->head = q->tail = 0;
 	red_restart(&q->vars);
 }
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..21ca33c9f036 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
 	skb = dequeue_head(flow);
 	len = qdisc_pkt_len(skb);
 	q->backlogs[idx] -= len;
-	kfree_skb(skb);
 	sch->q.qlen--;
 	qdisc_qstats_drop(sch);
 	qdisc_qstats_backlog_dec(sch, skb);
+	kfree_skb(skb);
 	flow->dropped++;
 	return idx;
 }
 
+static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+{
+	unsigned int prev_backlog;
+
+	prev_backlog = sch->qstats.backlog;
+	fq_codel_drop(sch);
+	return prev_backlog - sch->qstats.backlog;
+}
+
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
 	.enqueue = fq_codel_enqueue,
 	.dequeue = fq_codel_dequeue,
 	.peek = qdisc_peek_dequeued,
-	.drop = fq_codel_drop,
+	.drop = fq_codel_qdisc_drop,
 	.init = fq_codel_init,
 	.reset = fq_codel_reset,
 	.destroy = fq_codel_destroy,
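
Note: fq_codel_drop() returns the index of the flow it dropped from, which the enqueue path reuses internally, while the Qdisc_ops.drop hook is expected to report how much backlog was freed; the new wrapper derives that from the backlog delta. A small worked example, assuming the dropped packet's qdisc_pkt_len() is 300 bytes:

	unsigned int prev = sch->qstats.backlog;	/* e.g. 4500 */
	fq_codel_drop(sch);				/* backlog becomes 4200 */
	/* the wrapper returns prev - sch->qstats.backlog == 300 */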
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
 	.peek = qdisc_peek_head,
 	.init = plug_init,
 	.change = plug_change,
+	.reset = qdisc_reset_queue,
 	.owner = THIS_MODULE,
 };
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
 		len = qdisc_pkt_len(skb);
 		slot->backlog -= len;
 		sfq_dec(q, x);
-		kfree_skb(skb);
 		sch->q.qlen--;
 		qdisc_qstats_drop(sch);
 		qdisc_qstats_backlog_dec(sch, skb);
+		kfree_skb(skb);
 		return len;
 	}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
 		return -EFAULT;
 
-	if (sctp_sk(sk)->subscribe.sctp_data_io_event)
-		pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
-				    "Requested SCTP_SNDRCVINFO event.\n"
-				    "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
-				    current->comm, task_pid_nr(current));
-
 	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
 	 * if there is no data to be sent or retransmit, the stack will
 	 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
 		if (!req)
 			goto not_found;
-		/* Note: this 'free' request adds it to xprt->bc_pa_list */
-		xprt_free_bc_request(req);
+		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+		xprt->bc_alloc_count++;
 	}
 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
 			       rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 
 	spin_lock(&xprt->bc_pa_lock);
 	list_del(&req->rq_bc_pa_list);
-	xprt->bc_alloc_count--;
+	xprt_dec_alloc_count(xprt, 1);
 	spin_unlock(&xprt->bc_pa_lock);
 
 	req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
 
 	switch (task->tk_status) {
 	case -EAGAIN:
+	case -ENOBUFS:
 		break;
 	default:
 		dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
 	case -ECONNABORTED:
 	case -EADDRINUSE:
 	case -ENOTCONN:
-	case -ENOBUFS:
 	case -EPIPE:
 		rpc_task_force_reencode(task);
 	}
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
 	case -ECONNABORTED:
 		rpc_force_rebind(clnt);
 	case -EADDRINUSE:
-	case -ENOBUFS:
 		rpc_delay(task, 3*HZ);
 	case -EPIPE:
 	case -ENOTCONN:
 		task->tk_action = call_bind;
 		break;
+	case -ENOBUFS:
+		rpc_delay(task, HZ>>2);
 	case -EAGAIN:
 		task->tk_action = call_transmit;
 		break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
                                       true, &sent);
         dprintk("RPC: %s(%u) = %d\n",
                         __func__, xdr->len - req->rq_bytes_sent, status);
+
+        if (status == -EAGAIN && sock_writeable(transport->inet))
+                status = -ENOBUFS;
+
         if (likely(sent > 0) || status == 0) {
                 req->rq_bytes_sent += sent;
                 req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
 
         switch (status) {
         case -ENOBUFS:
+                break;
         case -EAGAIN:
                 status = xs_nospace(task);
                 break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
         if (status == -EPERM)
                 goto process_status;
 
+        if (status == -EAGAIN && sock_writeable(transport->inet))
+                status = -ENOBUFS;
+
         if (sent > 0 || status == 0) {
                 req->rq_xmit_bytes_sent += sent;
                 if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
                 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
                         xdr->len - req->rq_bytes_sent, status);
 
-                if (unlikely(sent == 0 && status < 0))
-                        break;
-
                 /* If we've sent the entire packet, immediately
                  * reset the count of bytes sent. */
                 req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
                         return 0;
                 }
 
-                if (sent != 0)
-                        continue;
-                status = -EAGAIN;
-                break;
+                if (status < 0)
+                        break;
+                if (sent == 0) {
+                        status = -EAGAIN;
+                        break;
+                }
         }
+        if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
+                status = -ENOBUFS;
 
         switch (status) {
         case -ENOTSOCK:
                 status = -ENOTCONN;
                 /* Should we call xs_close() here? */
                 break;
-        case -ENOBUFS:
         case -EAGAIN:
                 status = xs_nospace(task);
                 break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
         case -ECONNREFUSED:
         case -ENOTCONN:
         case -EADDRINUSE:
+        case -ENOBUFS:
         case -EPIPE:
                 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
         }
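All three send paths (AF_LOCAL, UDP, TCP) gain the same remap: a send that fails with -EAGAIN while the socket is still writeable cannot be waiting for send-buffer space, so the status is reinterpreted as -ENOBUFS, a transient kernel allocation failure, rather than being parked in xs_nospace() waiting for a write-space callback that will never arrive. A sketch of the distinction, not the kernel code verbatim:

    /* Classify an -EAGAIN from a socket send. sock_writeable()
     * (sk_stream_is_writeable() for TCP) reports whether the send
     * buffer still has room; if it does, the EAGAIN came from a
     * failed allocation, not from flow control. */
    static int classify_eagain(struct sock *sk, int status)
    {
        if (status == -EAGAIN && sock_writeable(sk))
            return -ENOBUFS;  /* transient OOM: delay briefly, retransmit */
        return status;        /* genuine flow control: wait for write space */
    }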
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 84f77a054025..9f2add3cba26 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -171,8 +171,10 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
          * released.
          */
 
-        attr->trans = SWITCHDEV_TRANS_ABORT;
-        __switchdev_port_attr_set(dev, attr);
+        if (err != -EOPNOTSUPP) {
+                attr->trans = SWITCHDEV_TRANS_ABORT;
+                __switchdev_port_attr_set(dev, attr);
+        }
 
         return err;
 }
@@ -249,8 +251,10 @@ int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
          * released.
          */
 
-        obj->trans = SWITCHDEV_TRANS_ABORT;
-        __switchdev_port_obj_add(dev, obj);
+        if (err != -EOPNOTSUPP) {
+                obj->trans = SWITCHDEV_TRANS_ABORT;
+                __switchdev_port_obj_add(dev, obj);
+        }
 
         return err;
 }
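Both switchdev fixes apply the same rule: attribute sets and object adds run as a prepare-then-commit transaction, and the abort pass only makes sense when a driver actually entered the prepare phase. If prepare itself returned -EOPNOTSUPP, nothing was prepared, so driving the ABORT phase into the driver would be spurious. A sketch of the restored shape (SWITCHDEV_TRANS_* values as in v4.2; the prepare/commit plumbing is elided):

    attr->trans = SWITCHDEV_TRANS_PREPARE;
    err = __switchdev_port_attr_set(dev, attr);
    if (err) {
        if (err != -EOPNOTSUPP) {       /* driver began a transaction: unwind */
            attr->trans = SWITCHDEV_TRANS_ABORT;
            __switchdev_port_attr_set(dev, attr);
        }
        return err;                     /* -EOPNOTSUPP: nothing to abort */
    }
    attr->trans = SWITCHDEV_TRANS_COMMIT;
    return __switchdev_port_attr_set(dev, attr);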
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 46b6ed534ef2..3a7567f690f3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
         res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
         if (res)
                 goto exit;
+        security_sk_clone(sock->sk, new_sock->sk);
 
         new_sk = new_sock->sk;
         new_tsock = tipc_sk(new_sk);
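security_sk_clone() copies the listening socket's LSM security state onto the struct sock that tipc_sk_create() just materialised for the accepted connection; without it the child socket is left with default, unlabeled security state, which misbehaves under SELinux and similar LSMs. The pattern is generic to any protocol that creates a fresh sock on accept; a hedged sketch (the wrapper name below is hypothetical):

    /* Hypothetical wrapper showing where the clone belongs in an
     * accept path: right after the child sock exists, before use. */
    static int accept_create_child(struct socket *sock, struct socket *new_sock)
    {
        int res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);

        if (res)
            return res;
        security_sk_clone(sock->sk, new_sock->sk); /* inherit LSM state */
        return 0;
    }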
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
                 return false;
 }
 
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                             struct cfg80211_chan_def *chandef,
-                             enum nl80211_iftype iftype)
+static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
+                                     struct cfg80211_chan_def *chandef,
+                                     enum nl80211_iftype iftype,
+                                     bool check_no_ir)
 {
         bool res;
         u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
                                IEEE80211_CHAN_RADAR;
 
-        trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
+        trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
 
-        /*
-         * Under certain conditions suggested by some regulatory bodies a
-         * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
-         * only if such relaxations are not enabled and the conditions are not
-         * met.
-         */
-        if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
+        if (check_no_ir)
                 prohibited_flags |= IEEE80211_CHAN_NO_IR;
 
         if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
         trace_cfg80211_return_bool(res);
         return res;
 }
+
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+                             struct cfg80211_chan_def *chandef,
+                             enum nl80211_iftype iftype)
+{
+        return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
+}
 EXPORT_SYMBOL(cfg80211_reg_can_beacon);
 
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+                                   struct cfg80211_chan_def *chandef,
+                                   enum nl80211_iftype iftype)
+{
+        bool check_no_ir;
+
+        ASSERT_RTNL();
+
+        /*
+         * Under certain conditions suggested by some regulatory bodies a
+         * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+         * only if such relaxations are not enabled and the conditions are not
+         * met.
+         */
+        check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
+                                                   chandef->chan);
+
+        return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+}
+EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
                                  struct cfg80211_chan_def *chandef)
 {
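The refactor makes the NO_IR relaxation an explicit caller choice: cfg80211_reg_can_beacon() now always treats IEEE80211_CHAN_NO_IR as prohibited, while the new cfg80211_reg_can_beacon_relax() evaluates cfg80211_ir_permissive_chan() first, and asserts the RTNL because that check inspects the wiphy's other interfaces. Callers that hold the RTNL (the nl80211 and reg.c call sites updated below) switch to the relaxed variant; everyone else keeps the strict check. A caller-side sketch (the helper is hypothetical, illustrating the choice only):

    /* Hypothetical: pick strict vs. relaxed NO_IR checking. Only the
     * relaxed variant may honour regulatory NO_IR exceptions, and it
     * must be called under rtnl_lock(). */
    static bool can_beacon_here(struct wiphy *wiphy,
                                struct cfg80211_chan_def *chandef,
                                enum nl80211_iftype iftype, bool have_rtnl)
    {
        if (have_rtnl)
            return cfg80211_reg_can_beacon_relax(wiphy, chandef, iftype);
        return cfg80211_reg_can_beacon(wiphy, chandef, iftype);
    }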
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
         switch (iftype) {
         case NL80211_IFTYPE_AP:
         case NL80211_IFTYPE_P2P_GO:
-                if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
+                if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+                                                   iftype)) {
                         result = -EINVAL;
                         break;
                 }
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
         } else if (!nl80211_get_ap_channel(rdev, &params))
                 return -EINVAL;
 
-        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-                                     wdev->iftype))
+        if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+                                           wdev->iftype))
                 return -EINVAL;
 
         if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
         if (err)
                 return err;
 
-        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-                                     wdev->iftype))
+        if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+                                           wdev->iftype))
                 return -EINVAL;
 
         err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
                 return -EINVAL;
 
         /* we will be active on the TDLS link */
-        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype))
+        if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+                                           wdev->iftype))
                 return -EINVAL;
 
         /* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
         reg_regdb_query(alpha2);
 
         if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
-                pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n");
+                pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
                 return -EINVAL;
         }
 
         if (!is_world_regdom((char *) alpha2))
-                pr_info("Calling CRDA for country: %c%c\n",
+                pr_debug("Calling CRDA for country: %c%c\n",
                         alpha2[0], alpha2[1]);
         else
-                pr_info("Calling CRDA to update world regulatory domain\n");
+                pr_debug("Calling CRDA to update world regulatory domain\n");
 
         return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
 }
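With the pr_info() calls demoted to pr_debug(), these CRDA messages no longer land in the default kernel log; on a kernel with CONFIG_DYNAMIC_DEBUG they can be re-enabled at runtime (for example with `echo 'file reg.c +p' > /sys/kernel/debug/dynamic_debug/control`), and otherwise they only print when the file is compiled with DEBUG defined.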
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
         case NL80211_IFTYPE_AP:
         case NL80211_IFTYPE_P2P_GO:
         case NL80211_IFTYPE_ADHOC:
-                return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+                return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
         case NL80211_IFTYPE_STATION:
         case NL80211_IFTYPE_P2P_CLIENT:
                 return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
         TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
-                 enum nl80211_iftype iftype),
-        TP_ARGS(wiphy, chandef, iftype),
+                 enum nl80211_iftype iftype, bool check_no_ir),
+        TP_ARGS(wiphy, chandef, iftype, check_no_ir),
         TP_STRUCT__entry(
                 WIPHY_ENTRY
                 CHAN_DEF_ENTRY
                 __field(enum nl80211_iftype, iftype)
+                __field(bool, check_no_ir)
         ),
         TP_fast_assign(
                 WIPHY_ASSIGN;
                 CHAN_DEF_ASSIGN(chandef);
                 __entry->iftype = iftype;
+                __entry->check_no_ir = check_no_ir;
         ),
-        TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
-                  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
+        TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+                  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
+                  BOOL_TO_STR(__entry->check_no_ir))
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,