author	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
commit	0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree	41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /net
parent	aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent	3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan.c	2
-rw-r--r--	net/9p/trans_virtio.c	4
-rw-r--r--	net/bridge/br_fdb.c	52
-rw-r--r--	net/ceph/mon_client.c	2
-rw-r--r--	net/ceph/osd_client.c	2
-rw-r--r--	net/ceph/string_table.c	8
-rw-r--r--	net/core/dev.c	10
-rw-r--r--	net/core/filter.c	109
-rw-r--r--	net/ipv4/fib_trie.c	8
-rw-r--r--	net/ipv4/ip_gre.c	1
-rw-r--r--	net/ipv4/ip_vti.c	31
-rw-r--r--	net/ipv6/addrconf.c	4
-rw-r--r--	net/ipv6/calipso.c	4
-rw-r--r--	net/ipv6/ip6_gre.c	2
-rw-r--r--	net/ipv6/ping.c	33
-rw-r--r--	net/irda/iriap.c	8
-rw-r--r--	net/mac80211/cfg.c	2
-rw-r--r--	net/mac80211/driver-ops.h	2
-rw-r--r--	net/mac80211/mesh.c	10
-rw-r--r--	net/mac80211/rx.c	2
-rw-r--r--	net/mac80211/status.c	14
-rw-r--r--	net/mac80211/tx.c	6
-rw-r--r--	net/netfilter/nf_conntrack_expect.c	2
-rw-r--r--	net/netfilter/nf_conntrack_h323_main.c	3
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	10
-rw-r--r--	net/netfilter/nf_conntrack_sip.c	4
-rw-r--r--	net/netfilter/nfnetlink_queue.c	6
-rw-r--r--	net/netfilter/nft_exthdr.c	11
-rw-r--r--	net/netfilter/nft_rbtree.c	10
-rw-r--r--	net/openvswitch/conntrack.c	8
-rw-r--r--	net/openvswitch/vport-geneve.c	9
-rw-r--r--	net/openvswitch/vport-gre.c	11
-rw-r--r--	net/openvswitch/vport-internal_dev.c	2
-rw-r--r--	net/openvswitch/vport-vxlan.c	9
-rw-r--r--	net/rxrpc/ar-internal.h	1
-rw-r--r--	net/rxrpc/call_accept.c	1
-rw-r--r--	net/rxrpc/call_event.c	7
-rw-r--r--	net/rxrpc/call_object.c	11
-rw-r--r--	net/rxrpc/input.c	39
-rw-r--r--	net/rxrpc/recvmsg.c	25
-rw-r--r--	net/rxrpc/skbuff.c	41
-rw-r--r--	net/sched/act_api.c	34
-rw-r--r--	net/sched/act_police.c	62
-rw-r--r--	net/sched/cls_api.c	51
-rw-r--r--	net/sctp/proc.c	1
-rw-r--r--	net/sctp/sctp_diag.c	18
-rw-r--r--	net/sctp/ulpevent.c	4
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c	8
-rw-r--r--	net/sunrpc/clnt.c	24
-rw-r--r--	net/sunrpc/xprt.c	26
-rw-r--r--	net/sunrpc/xprtsock.c	60
-rw-r--r--	net/tipc/monitor.c	3
-rw-r--r--	net/tipc/socket.c	3
-rw-r--r--	net/vmw_vsock/virtio_transport.c	10
-rw-r--r--	net/wireless/chan.c	1
-rw-r--r--	net/wireless/nl80211.c	34
56 files changed, 488 insertions(+), 377 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 82a116ba590e..8de138d3306b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev)
 	if (err < 0)
 		goto out_uninit_mvrp;
 
-	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
+	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_uninit_mvrp;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 4acb1d5417aa..f24b25c25106 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -507,8 +507,8 @@ err_out:
 		/* wakeup anybody waiting for slots to pin pages */
 		wake_up(&vp_wq);
 	}
-	kfree(in_pages);
-	kfree(out_pages);
+	kvfree(in_pages);
+	kvfree(out_pages);
 	return err;
 }
 
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c18080ad4085..cd620fab41b0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 
 	/* If old entry was unassociated with any port, then delete it. */
 	f = __br_fdb_get(br, br->dev->dev_addr, 0);
-	if (f && f->is_local && !f->dst)
+	if (f && f->is_local && !f->dst && !f->added_by_user)
 		fdb_delete_local(br, NULL, f);
 
 	fdb_insert(br, NULL, newaddr, 0);
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 		if (!br_vlan_should_use(v))
 			continue;
 		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
-		if (f && f->is_local && !f->dst)
+		if (f && f->is_local && !f->dst && !f->added_by_user)
 			fdb_delete_local(br, NULL, f);
 		fdb_insert(br, NULL, newaddr, v->vid);
 	}
@@ -764,20 +764,25 @@ out:
 }
 
 /* Update (create or replace) forwarding database entry */
-static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
-			 __u16 state, __u16 flags, __u16 vid)
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+			 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
 {
-	struct net_bridge *br = source->br;
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 	bool modified = false;
 
 	/* If the port cannot learn allow only local and static entries */
-	if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
 	    !(source->state == BR_STATE_LEARNING ||
 	      source->state == BR_STATE_FORWARDING))
 		return -EPERM;
 
+	if (!source && !(state & NUD_PERMANENT)) {
+		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+			br->dev->name);
+		return -EINVAL;
+	}
+
 	fdb = fdb_find(head, addr, vid);
 	if (fdb == NULL) {
 		if (!(flags & NLM_F_CREATE))
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 	return 0;
 }
 
-static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
-			const unsigned char *addr, u16 nlh_flags, u16 vid)
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+			struct net_bridge_port *p, const unsigned char *addr,
+			u16 nlh_flags, u16 vid)
 {
 	int err = 0;
 
 	if (ndm->ndm_flags & NTF_USE) {
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+				br->dev->name);
+			return -EINVAL;
+		}
 		local_bh_disable();
 		rcu_read_lock();
-		br_fdb_update(p->br, p, addr, vid, true);
+		br_fdb_update(br, p, addr, vid, true);
 		rcu_read_unlock();
 		local_bh_enable();
 	} else {
-		spin_lock_bh(&p->br->hash_lock);
-		err = fdb_add_entry(p, addr, ndm->ndm_state,
+		spin_lock_bh(&br->hash_lock);
+		err = fdb_add_entry(br, p, addr, ndm->ndm_state,
 				    nlh_flags, vid);
-		spin_unlock_bh(&p->br->hash_lock);
+		spin_unlock_bh(&br->hash_lock);
 	}
 
 	return err;
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 				dev->name);
 			return -EINVAL;
 		}
+		br = p->br;
 		vg = nbp_vlan_group(p);
 	}
 
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		}
 
 		/* VID was specified, so use it. */
-		if (dev->priv_flags & IFF_EBRIDGE)
-			err = br_fdb_insert(br, NULL, addr, vid);
-		else
-			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
 	} else {
-		if (dev->priv_flags & IFF_EBRIDGE)
-			err = br_fdb_insert(br, NULL, addr, 0);
-		else
-			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
 		if (err || !vg || !vg->num_vlans)
 			goto out;
 
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		list_for_each_entry(v, &vg->vlan_list, vlist) {
 			if (!br_vlan_should_use(v))
 				continue;
-			if (dev->priv_flags & IFF_EBRIDGE)
-				err = br_fdb_insert(br, NULL, addr, v->vid);
-			else
-				err = __br_fdb_add(ndm, p, addr, nlh_flags,
-						   v->vid);
+			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
 			if (err)
 				goto out;
 		}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index c83326c5ba58..ef34a02719d7 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req)
 	put_generic_request(req);
 }
 
-void cancel_generic_request(struct ceph_mon_generic_request *req)
+static void cancel_generic_request(struct ceph_mon_generic_request *req)
 {
 	struct ceph_mon_client *monc = req->monc;
 	struct ceph_mon_generic_request *lookup_req;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b5ec09612ff7..a97e7b506612 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
 	pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
 				       GFP_NOIO);
-	if (!pages) {
+	if (IS_ERR(pages)) {
 		ceph_msg_put(m);
 		return NULL;
 	}
diff --git a/net/ceph/string_table.c b/net/ceph/string_table.c
index ca53c8319209..22fb96efcf34 100644
--- a/net/ceph/string_table.c
+++ b/net/ceph/string_table.c
@@ -84,12 +84,6 @@ retry:
 }
 EXPORT_SYMBOL(ceph_find_or_create_string);
 
-static void ceph_free_string(struct rcu_head *head)
-{
-	struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
-	kfree(cs);
-}
-
 void ceph_release_string(struct kref *ref)
 {
 	struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref)
 	}
 	spin_unlock(&string_tree_lock);
 
-	call_rcu(&cs->rcu, ceph_free_string);
+	kfree_rcu(cs, rcu);
 }
 EXPORT_SYMBOL(ceph_release_string);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ce07dc25573..dd6ce598de89 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6045,8 +6045,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev,
-		       bool (*type_check)(const struct net_device *dev))
+int dev_get_nest_level(struct net_device *dev)
 {
 	struct net_device *lower = NULL;
 	struct list_head *iter;
@@ -6056,15 +6055,12 @@ int dev_get_nest_level(struct net_device *dev,
 	ASSERT_RTNL();
 
 	netdev_for_each_lower_dev(dev, lower, iter) {
-		nest = dev_get_nest_level(lower, type_check);
+		nest = dev_get_nest_level(lower);
 		if (max_nest < nest)
 			max_nest = nest;
 	}
 
-	if (type_check(dev))
-		max_nest++;
-
-	return max_nest;
+	return max_nest + 1;
 }
 EXPORT_SYMBOL(dev_get_nest_level);
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 5708999f8a79..cb06aceb512a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
 {
 	int err;
 
-	if (!skb_cloned(skb))
-		return 0;
-	if (skb_clone_writable(skb, write_len))
-		return 0;
-	err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-	if (!err)
-		bpf_compute_data_end(skb);
+	err = skb_ensure_writable(skb, write_len);
+	bpf_compute_data_end(skb);
+
 	return err;
 }
 
+static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
+static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
-	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
 		return -EINVAL;
-
-	/* bpf verifier guarantees that:
-	 * 'from' pointer points to bpf program stack
-	 * 'len' bytes of it were initialized
-	 * 'len' > 0
-	 * 'skb' is a valid pointer to 'struct sk_buff'
-	 *
-	 * so check for invalid 'offset' and too large 'len'
-	 */
-	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
+	if (unlikely(offset > 0xffff))
 		return -EFAULT;
 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, len, sp->buff);
-	if (unlikely(!ptr))
-		return -EFAULT;
-
+	ptr = skb->data + offset;
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpull_rcsum(skb, ptr, len);
+		__skb_postpull_rcsum(skb, ptr, len, offset);
 
 	memcpy(ptr, from, len);
 
-	if (ptr == sp->buff)
-		/* skb_store_bits cannot return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, len);
-
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpush_rcsum(skb, ptr, len);
+		__skb_postpush_rcsum(skb, ptr, len, offset);
 	if (flags & BPF_F_INVALIDATE_HASH)
 		skb_clear_hash(skb);
 
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *to = (void *)(unsigned long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff))
 		goto err_clear;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
-		return -EFAULT;
-
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
+	ptr = (__sum16 *)(skb->data + offset);
 	switch (flags & BPF_F_HDR_FIELD_MASK) {
 	case 0:
 		if (unlikely(from != 0))
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	}
 
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
 			       BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
-		return -EFAULT;
+	ptr = (__sum16 *)(skb->data + offset);
 	if (is_mmzero && !*ptr)
 		return 0;
 
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 
 	if (is_mmzero && !*ptr)
 		*ptr = CSUM_MANGLED_0;
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 
 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_at_tc_ingress(skb))
-		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
-
 	return dev_forward_skb(dev, skb);
 }
 
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!skb))
 		return -ENOMEM;
 
+	bpf_push_mac_rcsum(skb);
+
 	return flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
+	bpf_push_mac_rcsum(skb);
+
 	return ri->flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 		     vlan_proto != htons(ETH_P_8021AD)))
 		vlan_proto = htons(ETH_P_8021Q);
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	int ret;
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_pop(skb);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 }
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
-static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *)(long)r1;
 	struct bpf_map *map = (struct bpf_map *)(long)r2;
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
 }
 
-static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
-	.func		= bpf_skb_in_cgroup,
+static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
+	.func		= bpf_skb_under_cgroup,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
 #ifdef CONFIG_SOCK_CGROUP_DATA
-	case BPF_FUNC_skb_in_cgroup:
-		return &bpf_skb_in_cgroup_proto;
+	case BPF_FUNC_skb_under_cgroup:
+		return &bpf_skb_under_cgroup_proto;
 #endif
 	default:
 		return sk_filter_func_proto(func_id);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d07fc076bea0..febca0f1008c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2452,9 +2452,7 @@ struct fib_route_iter {
 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 					    loff_t pos)
 {
-	struct fib_table *tb = iter->main_tb;
 	struct key_vector *l, **tp = &iter->tnode;
-	struct trie *t;
 	t_key key;
 
 	/* use cache location of next-to-find key */
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 		pos -= iter->pos;
 		key = iter->key;
 	} else {
-		t = (struct trie *)tb->tb_data;
-		iter->tnode = t->kv;
 		iter->pos = 0;
 		key = 0;
 	}
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 		return NULL;
 
 	iter->main_tb = tb;
+	t = (struct trie *)tb->tb_data;
+	iter->tnode = t->kv;
 
 	if (*pos != 0)
 		return fib_route_get_idx(iter, *pos);
 
-	t = (struct trie *)tb->tb_data;
-	iter->tnode = t->kv;
 	iter->pos = 0;
 	iter->key = 0;
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5b1481be0282..113cc43df789 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
 			 htonl(tunnel->o_seqno));
 
-	skb_set_inner_protocol(skb, proto);
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index a917903d5e97..cc701fa70b12 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
 	.get_link_net	= ip_tunnel_get_link_net,
 };
 
+static bool is_vti_tunnel(const struct net_device *dev)
+{
+	return dev->netdev_ops == &vti_netdev_ops;
+}
+
+static int vti_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+
+	if (!is_vti_tunnel(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_DOWN:
+		if (!net_eq(tunnel->net, dev_net(dev)))
+			xfrm_garbage_collect(tunnel->net);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vti_notifier_block __read_mostly = {
+	.notifier_call = vti_device_event,
+};
+
 static int __init vti_init(void)
 {
 	const char *msg;
@@ -564,6 +591,8 @@ static int __init vti_init(void)
 
 	pr_info("IPv4 over IPsec tunneling driver\n");
 
+	register_netdevice_notifier(&vti_notifier_block);
+
 	msg = "tunnel device";
 	err = register_pernet_device(&vti_net_ops);
 	if (err < 0)
@@ -596,6 +625,7 @@ xfrm_proto_ah_failed:
 xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti_net_ops);
 pernet_dev_failed:
+	unregister_netdevice_notifier(&vti_notifier_block);
 	pr_err("vti init: failed to register %s\n", msg);
 	return err;
 }
@@ -607,6 +637,7 @@ static void __exit vti_fini(void)
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti_net_ops);
+	unregister_netdevice_notifier(&vti_notifier_block);
 }
 
 module_init(vti_init);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab3e796596b1..df8425fcbc2c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3543,7 +3543,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	/* combine the user config with event to determine if permanent
 	 * addresses are to be removed from address hash table
 	 */
-	keep_addr = !(how || _keep_addr <= 0);
+	keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
 
 	/* Step 2: clear hash table */
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
@@ -3599,7 +3599,7 @@ restart:
 	/* re-combine the user config with event to determine if permanent
 	 * addresses are to be removed from the interface list
 	 */
-	keep_addr = (!how && _keep_addr > 0);
+	keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index c53b92c617c5..37ac9de713c6 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop,
 	memcpy(new, hop, start);
 	ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
 				 secattr);
-	if (ret_val < 0)
+	if (ret_val < 0) {
+		kfree(new);
 		return ERR_PTR(ret_val);
+	}
 
 	buf_len = start + ret_val;
 	/* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 776d145113e1..704274cbd495 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
 			 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
-	skb_set_inner_protocol(skb, protocol);
-
 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
 			    NEXTHDR_GRE);
 }
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index fed40d1ec29b..0900352c924c 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	struct icmp6hdr user_icmph;
 	int addr_type;
 	struct in6_addr *daddr;
-	int iif = 0;
+	int oif = 0;
 	struct flowi6 fl6;
 	int err;
 	struct dst_entry *dst;
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		if (u->sin6_family != AF_INET6) {
 			return -EAFNOSUPPORT;
 		}
-		if (sk->sk_bound_dev_if &&
-		    sk->sk_bound_dev_if != u->sin6_scope_id) {
-			return -EINVAL;
-		}
 		daddr = &(u->sin6_addr);
-		iif = u->sin6_scope_id;
+		if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+			oif = u->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
 		daddr = &sk->sk_v6_daddr;
 	}
 
-	if (!iif)
-		iif = sk->sk_bound_dev_if;
+	if (!oif)
+		oif = sk->sk_bound_dev_if;
+
+	if (!oif)
+		oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!oif && ipv6_addr_is_multicast(daddr))
+		oif = np->mcast_oif;
+	else if (!oif)
+		oif = np->ucast_oif;
 
 	addr_type = ipv6_addr_type(daddr);
-	if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
-		return -EINVAL;
-	if (addr_type & IPV6_ADDR_MAPPED)
+	if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+	    (addr_type & IPV6_ADDR_MAPPED) ||
+	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
 		return -EINVAL;
 
 	/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	fl6.flowi6_proto = IPPROTO_ICMPV6;
 	fl6.saddr = np->saddr;
 	fl6.daddr = *daddr;
+	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_icmp_type = user_icmph.icmp6_type;
 	fl6.fl6_icmp_code = user_icmph.icmp6_code;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-		fl6.flowi6_oif = np->mcast_oif;
-	else if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->ucast_oif;
-
 	ipc6.tclass = np->tclass;
 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 4a7ae32afa09..1138eaf5c682 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
 
 	self->magic = IAS_MAGIC;
 	self->mode = mode;
-	if (mode == IAS_CLIENT)
-		iriap_register_lsap(self, slsap_sel, mode);
+	if (mode == IAS_CLIENT) {
+		if (iriap_register_lsap(self, slsap_sel, mode)) {
+			kfree(self);
+			return NULL;
+		}
+	}
 
 	self->confirm = callback;
 	self->priv = priv;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e99ab8d97a..543b1d4fc33d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
 	/* free all potentially still buffered bcast frames */
 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
-	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+	ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
 
 	mutex_lock(&local->mtx);
 	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 184473c257eb..ba5fc1f01e53 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
 
 	trace_drv_get_expected_throughput(sta);
 	if (local->ops->get_expected_throughput)
-		ret = local->ops->get_expected_throughput(sta);
+		ret = local->ops->get_expected_throughput(&local->hw, sta);
 	trace_drv_return_u32(local, ret);
 
 	return ret;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c66411df9863..42120d965263 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
 	netif_carrier_off(sdata->dev);
 
+	/* flush STAs and mpaths on this iface */
+	sta_info_flush(sdata);
+	mesh_path_flush_by_iface(sdata);
+
 	/* stop the beacon */
 	ifmsh->mesh_id_len = 0;
 	sdata->vif.bss_conf.enable_beacon = false;
 	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+
+	/* remove beacon */
 	bcn = rcu_dereference_protected(ifmsh->beacon,
 					lockdep_is_held(&sdata->wdev.mtx));
 	RCU_INIT_POINTER(ifmsh->beacon, NULL);
 	kfree_rcu(bcn, rcu_head);
 
-	/* flush STAs and mpaths on this iface */
-	sta_info_flush(sdata);
-	mesh_path_flush_by_iface(sdata);
-
 	/* free all potentially still buffered group-addressed frames */
 	local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
 	skb_queue_purge(&ifmsh->ps.bc_buf);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2e8a9024625a..9dce3b157908 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
 	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
 		struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
 
-		if (!txqi->tin.backlog_packets)
+		if (txqi->tin.backlog_packets)
 			set_bit(tid, &sta->txq_buffered_tids);
 		else
 			clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index c6d5c724e032..a2a68269675d 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 			clear_sta_flag(sta, WLAN_STA_SP);
 
 		acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+
+		/* mesh Peer Service Period support */
+		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+		    ieee80211_is_data_qos(fc))
+			ieee80211_mpsp_trigger_process(
+				ieee80211_get_qos_ctl(hdr), sta, true, acked);
+
 		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			/*
 			 * The STA is in power save mode, so assume
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 			return;
 		}
 
-		/* mesh Peer Service Period support */
-		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
-		    ieee80211_is_data_qos(fc))
-			ieee80211_mpsp_trigger_process(
-				ieee80211_get_qos_ctl(hdr),
-				sta, true, acked);
-
 		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
 		    (ieee80211_is_data(hdr->frame_control)) &&
 		    (rates_idx != -1))
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 91461c415525..502396694f47 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
 		skb = skb_dequeue(&ps->bc_buf);
 		if (skb) {
 			purged++;
-			dev_kfree_skb(skb);
+			ieee80211_free_txskb(&local->hw, skb);
 		}
 		total += skb_queue_len(&ps->bc_buf);
 	}
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
 		ps_dbg(tx->sdata,
 		       "BC TX buffer full - dropping the oldest frame\n");
-		dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
 	} else
 		tx->local->total_ps_buffered++;
 
@@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 		sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
 		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
 			break;
-		dev_kfree_skb_any(skb);
+		ieee80211_free_txskb(hw, skb);
 	}
 
 	info = IEEE80211_SKB_CB(skb);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 9e3693128313..f8dbacf66795 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
 	helper = rcu_dereference(nfct_help(expect->master)->helper);
 	if (helper) {
 		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
-		if (helper->expect_policy[expect->class].name)
+		if (helper->expect_policy[expect->class].name[0])
 			seq_printf(s, "/%s",
 				   helper->expect_policy[expect->class].name);
 	}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bb77a97961bf..5c0db5c64734 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 				 "timeout to %u seconds for",
 				 info->timeout);
 			nf_ct_dump_tuple(&exp->tuple);
-			mod_timer(&exp->timeout, jiffies + info->timeout * HZ);
+			mod_timer_pending(&exp->timeout,
+					  jiffies + info->timeout * HZ);
 		}
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 	}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 050bb3420a6b..fdfc71f416b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
 		if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
 			return -EINVAL;
+		if (otuple.dst.protonum != rtuple.dst.protonum)
+			return -EINVAL;
 
 		ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
 						&rtuple, u3);
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
 		return PTR_ERR(exp);
 
 	err = nf_ct_expect_related_report(exp, portid, report);
-	if (err < 0) {
-		nf_ct_expect_put(exp);
-		return err;
-	}
-
-	return 0;
+	nf_ct_expect_put(exp);
+	return err;
 }
 
 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 8d9db9d4702b..7d77217de6a3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
 		return NF_DROP;
 	}
 	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-	if (!cseq) {
+	if (!cseq && *(*dptr + matchoff) != '0') {
 		nf_ct_helper_log(skb, ct, "cannot get cseq");
 		return NF_DROP;
 	}
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
 		return NF_DROP;
 	}
 	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-	if (!cseq) {
+	if (!cseq && *(*dptr + matchoff) != '0') {
 		nf_ct_helper_log(skb, ct, "cannot get cseq");
 		return NF_DROP;
 	}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5d36a0926b4a..f49f45081acb 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 	int err;
 
-	queue = instance_lookup(q, queue_num);
-	if (!queue)
-		queue = verdict_instance_lookup(q, queue_num,
-						NETLINK_CB(skb).portid);
+	queue = verdict_instance_lookup(q, queue_num,
+					NETLINK_CB(skb).portid);
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index ba7aed13e174..82c264e40278 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
 			   const struct nlattr * const tb[])
 {
 	struct nft_exthdr *priv = nft_expr_priv(expr);
+	u32 offset, len;
 
 	if (tb[NFTA_EXTHDR_DREG] == NULL ||
 	    tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
 	    tb[NFTA_EXTHDR_LEN] == NULL)
 		return -EINVAL;
 
+	offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+	len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+
+	if (offset > U8_MAX || len > U8_MAX)
+		return -ERANGE;
+
 	priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
-	priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
-	priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+	priv->offset = offset;
+	priv->len = len;
 	priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
 
 	return nft_validate_register_store(ctx, priv->dreg, NULL,
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 6473936d05c6..ffe9ae062d23 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 		} else if (d > 0)
 			parent = parent->rb_right;
 		else {
-found:
 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
 				parent = parent->rb_left;
 				continue;
@@ -84,9 +83,12 @@ found:
 		}
 	}
 
-	if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
-		rbe = interval;
-		goto found;
+	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+	    nft_set_elem_active(&interval->ext, genmask) &&
+	    !nft_rbtree_interval_end(interval)) {
+		spin_unlock_bh(&nft_rbtree_lock);
+		*ext = &interval->ext;
+		return true;
 	}
 out:
 	spin_unlock_bh(&nft_rbtree_lock);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c644c78ed485..e054a748ff25 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 	struct nf_conntrack_l4proto *l4proto;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
-	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
 	unsigned int dataoff;
 	u8 protonum;
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
-	ctinfo = ovs_ct_get_info(h);
-	if (ctinfo == IP_CT_NEW) {
-		/* This should not happen. */
-		WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
-	}
 	skb->nfct = &ct->ct_general;
-	skb->nfctinfo = ctinfo;
+	skb->nfctinfo = ovs_ct_get_info(h);
 	return ct;
 }
 
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 1a1fcec88695..5aaf3babfc3f 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		goto error;
+	}
+
 	rtnl_unlock();
 	return vport;
 error:
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 7f8897f33a67..0e72d95b0e8f 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct net_device *dev;
 	struct vport *vport;
+	int err;
 
 	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
 	if (IS_ERR(vport))
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
-	rtnl_unlock();
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		return ERR_PTR(err);
+	}
 
+	rtnl_unlock();
 	return vport;
 }
 
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 434e04c3a189..95c36147a6e1 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
 static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
 {
-	dev->needed_headroom = new_hr;
+	dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
 }
 
 static const struct net_device_ops internal_dev_netdev_ops = {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 5eb7694348b5..7eb955e453e6 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		goto error;
+	}
+
 	rtnl_unlock();
 	return vport;
 error:
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1bb9e7ac9e14..ff83fb1ddd47 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -425,6 +425,7 @@ struct rxrpc_call {
 	spinlock_t		lock;
 	rwlock_t		state_lock;	/* lock for state transition */
 	atomic_t		usage;
+	atomic_t		skb_count;	/* Outstanding packets on this call */
 	atomic_t		sequence;	/* Tx data packet sequence counter */
 	u32			local_abort;	/* local abort code */
 	u32			remote_abort;	/* remote abort code */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 0b2832141bd0..9bae21e66d65 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 	call->state = RXRPC_CALL_SERVER_ACCEPTING;
 	list_add_tail(&call->accept_link, &rx->acceptq);
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 	nsp = rxrpc_skb(notification);
 	nsp->call = call;
 
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index fc32aa5764a2..e60cf65c2232 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
460 ASSERTCMP(sp->call, ==, NULL); 460 ASSERTCMP(sp->call, ==, NULL);
461 sp->call = call; 461 sp->call = call;
462 rxrpc_get_call(call); 462 rxrpc_get_call(call);
463 atomic_inc(&call->skb_count);
463 464
464 /* insert into the buffer in sequence order */ 465 /* insert into the buffer in sequence order */
465 spin_lock_bh(&call->lock); 466 spin_lock_bh(&call->lock);
@@ -734,6 +735,7 @@ all_acked:
 	skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
 	sp->call = call;
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 	spin_lock_bh(&call->lock);
 	if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
 		BUG();
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
793 sp->error = error; 795 sp->error = error;
794 sp->call = call; 796 sp->call = call;
795 rxrpc_get_call(call); 797 rxrpc_get_call(call);
798 atomic_inc(&call->skb_count);
796 799
797 spin_lock_bh(&call->lock); 800 spin_lock_bh(&call->lock);
798 ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); 801 ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work)
834 return; 837 return;
835 } 838 }
836 839
840 if (!call->conn)
841 goto skip_msg_init;
842
837 /* there's a good chance we're going to have to send a message, so set 843 /* there's a good chance we're going to have to send a message, so set
838 * one up in advance */ 844 * one up in advance */
839 msg.msg_name = &call->conn->params.peer->srx.transport; 845 msg.msg_name = &call->conn->params.peer->srx.transport;
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work)
856 memset(iov, 0, sizeof(iov)); 862 memset(iov, 0, sizeof(iov));
857 iov[0].iov_base = &whdr; 863 iov[0].iov_base = &whdr;
858 iov[0].iov_len = sizeof(whdr); 864 iov[0].iov_len = sizeof(whdr);
865skip_msg_init:
859 866
860 /* deal with events of a final nature */ 867 /* deal with events of a final nature */
861 if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { 868 if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 91287c9d01bb..ae057e0740f3 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -275,6 +275,7 @@ error:
275 list_del_init(&call->link); 275 list_del_init(&call->link);
276 write_unlock_bh(&rxrpc_call_lock); 276 write_unlock_bh(&rxrpc_call_lock);
277 277
278 set_bit(RXRPC_CALL_RELEASED, &call->flags);
278 call->state = RXRPC_CALL_DEAD; 279 call->state = RXRPC_CALL_DEAD;
279 rxrpc_put_call(call); 280 rxrpc_put_call(call);
280 _leave(" = %d", ret); 281 _leave(" = %d", ret);
@@ -287,6 +288,7 @@ error:
287 */ 288 */
288found_user_ID_now_present: 289found_user_ID_now_present:
289 write_unlock(&rx->call_lock); 290 write_unlock(&rx->call_lock);
291 set_bit(RXRPC_CALL_RELEASED, &call->flags);
290 call->state = RXRPC_CALL_DEAD; 292 call->state = RXRPC_CALL_DEAD;
291 rxrpc_put_call(call); 293 rxrpc_put_call(call);
292 _leave(" = -EEXIST [%p]", call); 294 _leave(" = -EEXIST [%p]", call);
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call)
491 spin_lock_bh(&call->lock); 493 spin_lock_bh(&call->lock);
492 while ((skb = skb_dequeue(&call->rx_queue)) || 494 while ((skb = skb_dequeue(&call->rx_queue)) ||
493 (skb = skb_dequeue(&call->rx_oos_queue))) { 495 (skb = skb_dequeue(&call->rx_oos_queue))) {
494 sp = rxrpc_skb(skb);
495 if (sp->call) {
496 ASSERTCMP(sp->call, ==, call);
497 rxrpc_put_call(call);
498 sp->call = NULL;
499 }
500 skb->destructor = NULL;
501 spin_unlock_bh(&call->lock); 496 spin_unlock_bh(&call->lock);
502 497
498 sp = rxrpc_skb(skb);
503 _debug("- zap %s %%%u #%u", 499 _debug("- zap %s %%%u #%u",
504 rxrpc_pkts[sp->hdr.type], 500 rxrpc_pkts[sp->hdr.type],
505 sp->hdr.serial, sp->hdr.seq); 501 sp->hdr.serial, sp->hdr.seq);
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
605 601
606 if (atomic_dec_and_test(&call->usage)) { 602 if (atomic_dec_and_test(&call->usage)) {
607 _debug("call %d dead", call->debug_id); 603 _debug("call %d dead", call->debug_id);
604 WARN_ON(atomic_read(&call->skb_count) != 0);
608 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); 605 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
609 rxrpc_queue_work(&call->destroyer); 606 rxrpc_queue_work(&call->destroyer);
610 } 607 }
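The rxrpc hunks above (ar-internal.h through call_object.c) introduce call->skb_count: every site that points an skb at a call now takes both a call reference and a tick of the counter, the packet destructor (see net/rxrpc/skbuff.c below) gives both back, and the final __rxrpc_put_call() warns if any packet outlived its call. A standalone sketch of that pairing, using C11 atomics and illustrative types:

#include <assert.h>
#include <stdatomic.h>

struct call { atomic_int usage, skb_count; };
struct buf  { struct call *call; };

static void buf_attach(struct buf *b, struct call *c)
{
        atomic_fetch_add(&c->usage, 1);      /* rxrpc_get_call() analogue */
        atomic_fetch_add(&c->skb_count, 1);  /* atomic_inc(&call->skb_count) */
        b->call = c;
}

static void buf_destructor(struct buf *b)    /* rxrpc_packet_destructor() */
{
        struct call *c = b->call;
        int prev = atomic_fetch_sub(&c->skb_count, 1);

        (void)prev;
        assert(prev > 0);                    /* analogue of BUG() on underflow */
        atomic_fetch_sub(&c->usage, 1);      /* rxrpc_put_call() analogue */
        b->call = NULL;
}

int main(void)
{
        struct call c = { 1, 0 };
        struct buf b;

        buf_attach(&b, &c);
        buf_destructor(&b);
        /* WARN_ON(skb_count != 0) analogue at final put: */
        assert(atomic_load(&c.skb_count) == 0);
        return 0;
}
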
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 991a20d25093..70bb77818dea 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
55 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { 55 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
56 _debug("already terminated"); 56 _debug("already terminated");
57 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); 57 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
58 skb->destructor = NULL;
59 sp->call = NULL;
60 rxrpc_put_call(call);
61 rxrpc_free_skb(skb); 58 rxrpc_free_skb(skb);
62 return 0; 59 return 0;
63 } 60 }
@@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
111 ret = 0; 108 ret = 0;
112 109
113out: 110out:
114 /* release the socket buffer */ 111 rxrpc_free_skb(skb);
115 if (skb) {
116 skb->destructor = NULL;
117 sp->call = NULL;
118 rxrpc_put_call(call);
119 rxrpc_free_skb(skb);
120 }
121 112
122 _leave(" = %d", ret); 113 _leave(" = %d", ret);
123 return ret; 114 return ret;
@@ -133,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
133 struct rxrpc_skb_priv *sp; 124 struct rxrpc_skb_priv *sp;
134 bool terminal; 125 bool terminal;
135 int ret, ackbit, ack; 126 int ret, ackbit, ack;
127 u32 serial;
128 u8 flags;
136 129
137 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); 130 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
138 131
139 sp = rxrpc_skb(skb); 132 sp = rxrpc_skb(skb);
140 ASSERTCMP(sp->call, ==, NULL); 133 ASSERTCMP(sp->call, ==, NULL);
134 flags = sp->hdr.flags;
135 serial = sp->hdr.serial;
141 136
142 spin_lock(&call->lock); 137 spin_lock(&call->lock);
143 138
@@ -200,8 +195,9 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
200 195
201 sp->call = call; 196 sp->call = call;
202 rxrpc_get_call(call); 197 rxrpc_get_call(call);
203 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && 198 atomic_inc(&call->skb_count);
204 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); 199 terminal = ((flags & RXRPC_LAST_PACKET) &&
200 !(flags & RXRPC_CLIENT_INITIATED));
205 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); 201 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
206 if (ret < 0) { 202 if (ret < 0) {
207 if (ret == -ENOMEM || ret == -ENOBUFS) { 203 if (ret == -ENOMEM || ret == -ENOBUFS) {
@@ -213,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
213 } 209 }
214 210
215 skb = NULL; 211 skb = NULL;
212 sp = NULL;
216 213
217 _debug("post #%u", seq); 214 _debug("post #%u", seq);
218 ASSERTCMP(call->rx_data_post, ==, seq); 215 ASSERTCMP(call->rx_data_post, ==, seq);
219 call->rx_data_post++; 216 call->rx_data_post++;
220 217
221 if (sp->hdr.flags & RXRPC_LAST_PACKET) 218 if (flags & RXRPC_LAST_PACKET)
222 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); 219 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
223 220
224 /* if we've reached an out of sequence packet then we need to drain 221 /* if we've reached an out of sequence packet then we need to drain
@@ -234,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
234 231
235 spin_unlock(&call->lock); 232 spin_unlock(&call->lock);
236 atomic_inc(&call->ackr_not_idle); 233 atomic_inc(&call->ackr_not_idle);
237 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); 234 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
238 _leave(" = 0 [posted]"); 235 _leave(" = 0 [posted]");
239 return 0; 236 return 0;
240 237
@@ -247,7 +244,7 @@ out:
247 244
248discard_and_ack: 245discard_and_ack:
249 _debug("discard and ACK packet %p", skb); 246 _debug("discard and ACK packet %p", skb);
250 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 247 __rxrpc_propose_ACK(call, ack, serial, true);
251discard: 248discard:
252 spin_unlock(&call->lock); 249 spin_unlock(&call->lock);
253 rxrpc_free_skb(skb); 250 rxrpc_free_skb(skb);
@@ -255,7 +252,7 @@ discard:
255 return 0; 252 return 0;
256 253
257enqueue_and_ack: 254enqueue_and_ack:
258 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 255 __rxrpc_propose_ACK(call, ack, serial, true);
259enqueue_packet: 256enqueue_packet:
260 _net("defer skb %p", skb); 257 _net("defer skb %p", skb);
261 spin_unlock(&call->lock); 258 spin_unlock(&call->lock);
@@ -575,13 +572,13 @@ done:
575 * post connection-level events to the connection 572 * post connection-level events to the connection
576 * - this includes challenges, responses and some aborts 573 * - this includes challenges, responses and some aborts
577 */ 574 */
578static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, 575static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
579 struct sk_buff *skb) 576 struct sk_buff *skb)
580{ 577{
581 _enter("%p,%p", conn, skb); 578 _enter("%p,%p", conn, skb);
582 579
583 skb_queue_tail(&conn->rx_queue, skb); 580 skb_queue_tail(&conn->rx_queue, skb);
584 return rxrpc_queue_conn(conn); 581 rxrpc_queue_conn(conn);
585} 582}
586 583
587/* 584/*
@@ -702,7 +699,6 @@ void rxrpc_data_ready(struct sock *sk)
702 699
703 rcu_read_lock(); 700 rcu_read_lock();
704 701
705retry_find_conn:
706 conn = rxrpc_find_connection_rcu(local, skb); 702 conn = rxrpc_find_connection_rcu(local, skb);
707 if (!conn) 703 if (!conn)
708 goto cant_route_call; 704 goto cant_route_call;
@@ -710,8 +706,7 @@ retry_find_conn:
710 if (sp->hdr.callNumber == 0) { 706 if (sp->hdr.callNumber == 0) {
711 /* Connection-level packet */ 707 /* Connection-level packet */
712 _debug("CONN %p {%d}", conn, conn->debug_id); 708 _debug("CONN %p {%d}", conn, conn->debug_id);
713 if (!rxrpc_post_packet_to_conn(conn, skb)) 709 rxrpc_post_packet_to_conn(conn, skb);
714 goto retry_find_conn;
715 } else { 710 } else {
716 /* Call-bound packets are routed by connection channel. */ 711 /* Call-bound packets are routed by connection channel. */
717 unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; 712 unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -749,6 +744,8 @@ cant_route_call:
749 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { 744 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
750 _debug("reject type %d",sp->hdr.type); 745 _debug("reject type %d",sp->hdr.type);
751 rxrpc_reject_packet(local, skb); 746 rxrpc_reject_packet(local, skb);
747 } else {
748 rxrpc_free_skb(skb);
752 } 749 }
753 _leave(" [no call]"); 750 _leave(" [no call]");
754 return; 751 return;
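A recurring pattern in the input.c hunks above: sp->hdr.flags and sp->hdr.serial are copied into locals before rxrpc_queue_rcv_skb() is called, because queueing hands the skb (and its rxrpc_skb_priv) off, after which touching sp is a use-after-free; the old code still read sp->hdr after setting skb = NULL. A minimal sketch of the save-before-handoff rule, with illustrative names:

#include <stdint.h>
#include <stdlib.h>

struct pkt { uint8_t flags; uint32_t serial; };

/* queue_pkt() consumes the packet: ownership transfers on call. */
static int queue_pkt(struct pkt *p) { free(p); return 0; }

static int handle(struct pkt *p)
{
        uint8_t flags = p->flags;       /* copy out while still valid */
        uint32_t serial = p->serial;

        if (queue_pkt(p) < 0)           /* p must not be touched again */
                return -1;
        p = NULL;                       /* mirrors "skb = NULL; sp = NULL;" */
        return (flags & 0x01) ? (int)serial : 0;
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->flags = 0x01;
        p->serial = 42;
        return handle(p) == 42 ? 0 : 1;
}
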
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a3fa2ed85d63..9ed66d533002 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
203 } 203 }
204 204
205 /* we transferred the whole data packet */ 205 /* we transferred the whole data packet */
206 if (!(flags & MSG_PEEK))
207 rxrpc_kernel_data_consumed(call, skb);
208
206 if (sp->hdr.flags & RXRPC_LAST_PACKET) { 209 if (sp->hdr.flags & RXRPC_LAST_PACKET) {
207 _debug("last"); 210 _debug("last");
208 if (rxrpc_conn_is_client(call->conn)) { 211 if (rxrpc_conn_is_client(call->conn)) {
@@ -360,28 +363,6 @@ wait_error:
360} 363}
361 364
362/** 365/**
363 * rxrpc_kernel_data_delivered - Record delivery of data message
364 * @skb: Message holding data
365 *
366 * Record the delivery of a data message. This permits RxRPC to keep its
367 * tracking correct. The socket buffer will be deleted.
368 */
369void rxrpc_kernel_data_delivered(struct sk_buff *skb)
370{
371 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
372 struct rxrpc_call *call = sp->call;
373
374 ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
375 ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
376 call->rx_data_recv = sp->hdr.seq;
377
378 ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
379 rxrpc_free_skb(skb);
380}
381
382EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
383
384/**
385 * rxrpc_kernel_is_data_last - Determine if data message is last one 366 * rxrpc_kernel_is_data_last - Determine if data message is last one
386 * @skb: Message holding data 367 * @skb: Message holding data
387 * 368 *
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index eee0cfd9ac8c..06c51d4b622d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
98 spin_unlock_bh(&call->lock); 98 spin_unlock_bh(&call->lock);
99} 99}
100 100
101/**
102 * rxrpc_kernel_data_consumed - Record consumption of data message
103 * @call: The call to which the message pertains.
104 * @skb: Message holding data
105 *
106 * Record the consumption of a data message and generate an ACK if appropriate.
107 * The call state is shifted if this was the final packet. The caller must be
108 * in process context with no spinlocks held.
109 *
110 * TODO: Actually generate the ACK here rather than punting this to the
111 * workqueue.
112 */
113void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
114{
115 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
116
117 _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
118
119 ASSERTCMP(sp->call, ==, call);
120 ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
121
122 /* TODO: Fix the sequence number tracking */
123 ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
124 ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
125 ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
126
127 call->rx_data_recv = sp->hdr.seq;
128 rxrpc_hard_ACK_data(call, sp);
129}
130EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
131
101/* 132/*
102 * destroy a packet that has an RxRPC control buffer 133 * Destroy a packet that has an RxRPC control buffer
103 * - advance the hard-ACK state of the parent call (done here in case something
104 * in the kernel bypasses recvmsg() and steals the packet directly off of the
105 * socket receive queue)
106 */ 134 */
107void rxrpc_packet_destructor(struct sk_buff *skb) 135void rxrpc_packet_destructor(struct sk_buff *skb)
108{ 136{
@@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb)
112 _enter("%p{%p}", skb, call); 140 _enter("%p{%p}", skb, call);
113 141
114 if (call) { 142 if (call) {
115 /* send the final ACK on a client call */ 143 if (atomic_dec_return(&call->skb_count) < 0)
116 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) 144 BUG();
117 rxrpc_hard_ACK_data(call, sp);
118 rxrpc_put_call(call); 145 rxrpc_put_call(call);
119 sp->call = NULL; 146 sp->call = NULL;
120 } 147 }
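rxrpc_kernel_data_consumed() above replaces the rxrpc_kernel_data_delivered() export removed from recvmsg.c: consumption is now recorded (and the hard-ACK state advanced) explicitly while the caller still holds the skb, rather than from the skb destructor, and the caller frees the skb itself. A hedged sketch of how an in-kernel consumer might drain a call; only rxrpc_kernel_data_consumed() comes from the patch, the dequeue and free helpers are hypothetical stand-ins:

struct sk_buff *skb;

/* Illustrative loop: next_data_skb() and release_skb() are hypothetical
 * placeholders for whatever the real consumer uses to dequeue and free. */
while ((skb = next_data_skb(call)) != NULL) {
        process_payload(skb);                   /* hypothetical */
        rxrpc_kernel_data_consumed(call, skb);  /* record consumption, ACK */
        release_skb(skb);                       /* destructor drops call refs */
}
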
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e4a5f2607ffa..d09d0687594b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -64,7 +64,6 @@ int __tcf_hash_release(struct tc_action *p, bool bind, bool strict)
64 if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { 64 if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
65 if (p->ops->cleanup) 65 if (p->ops->cleanup)
66 p->ops->cleanup(p, bind); 66 p->ops->cleanup(p, bind);
67 list_del(&p->list);
68 tcf_hash_destroy(p->hinfo, p); 67 tcf_hash_destroy(p->hinfo, p);
69 ret = ACT_P_DELETED; 68 ret = ACT_P_DELETED;
70 } 69 }
@@ -421,18 +420,19 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
421 return res; 420 return res;
422} 421}
423 422
424int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, 423int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
425 struct tcf_result *res) 424 int nr_actions, struct tcf_result *res)
426{ 425{
427 const struct tc_action *a; 426 int ret = -1, i;
428 int ret = -1;
429 427
430 if (skb->tc_verd & TC_NCLS) { 428 if (skb->tc_verd & TC_NCLS) {
431 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 429 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
432 ret = TC_ACT_OK; 430 ret = TC_ACT_OK;
433 goto exec_done; 431 goto exec_done;
434 } 432 }
435 list_for_each_entry(a, actions, list) { 433 for (i = 0; i < nr_actions; i++) {
434 const struct tc_action *a = actions[i];
435
436repeat: 436repeat:
437 ret = a->ops->act(skb, a, res); 437 ret = a->ops->act(skb, a, res);
438 if (ret == TC_ACT_REPEAT) 438 if (ret == TC_ACT_REPEAT)
@@ -754,16 +754,6 @@ err_out:
754 return ERR_PTR(err); 754 return ERR_PTR(err);
755} 755}
756 756
757static void cleanup_a(struct list_head *actions)
758{
759 struct tc_action *a, *tmp;
760
761 list_for_each_entry_safe(a, tmp, actions, list) {
762 list_del(&a->list);
763 kfree(a);
764 }
765}
766
767static int tca_action_flush(struct net *net, struct nlattr *nla, 757static int tca_action_flush(struct net *net, struct nlattr *nla,
768 struct nlmsghdr *n, u32 portid) 758 struct nlmsghdr *n, u32 portid)
769{ 759{
@@ -905,7 +895,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
905 return ret; 895 return ret;
906 } 896 }
907err: 897err:
908 cleanup_a(&actions); 898 tcf_action_destroy(&actions, 0);
909 return ret; 899 return ret;
910} 900}
911 901
@@ -942,15 +932,9 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
942 932
943 ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); 933 ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
944 if (ret) 934 if (ret)
945 goto done; 935 return ret;
946 936
947 /* dump then free all the actions after update; inserted policy 937 return tcf_add_notify(net, n, &actions, portid);
948 * stays intact
949 */
950 ret = tcf_add_notify(net, n, &actions, portid);
951 cleanup_a(&actions);
952done:
953 return ret;
954} 938}
955 939
956static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) 940static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
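tcf_action_exec() above switches from walking a list_head to indexing a caller-supplied array, which removes the per-action pointer chase from the packet fast path and lets __tcf_hash_release() drop its list_del(). A self-contained sketch of the array-walk shape, including the TC_ACT_REPEAT-style retry, with illustrative verdict values:

#include <stdio.h>

enum { ACT_OK = 0, ACT_REPEAT = 1, ACT_DROP = 2 };

struct action { int (*act)(const struct action *); };

static int exec_actions(struct action **actions, int nr_actions)
{
        int ret = -1, i;

        for (i = 0; i < nr_actions; i++) {
                const struct action *a = actions[i];
repeat:
                ret = a->act(a);
                if (ret == ACT_REPEAT)
                        goto repeat;    /* action asked to be re-run */
                if (ret != ACT_OK)
                        break;          /* first non-OK verdict wins here */
        }
        return ret;
}

static int ok(const struct action *a)   { (void)a; return ACT_OK; }
static int drop(const struct action *a) { (void)a; return ACT_DROP; }

int main(void)
{
        struct action pass = { ok }, deny = { drop };
        struct action *vec[] = { &pass, &deny };

        printf("verdict %d\n", exec_actions(vec, 2));
        return 0;
}
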
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b3c7e975fc9e..8a3be1d99775 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -63,49 +63,8 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
63 const struct tc_action_ops *ops) 63 const struct tc_action_ops *ops)
64{ 64{
65 struct tc_action_net *tn = net_generic(net, police_net_id); 65 struct tc_action_net *tn = net_generic(net, police_net_id);
66 struct tcf_hashinfo *hinfo = tn->hinfo;
67 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
68 struct nlattr *nest;
69
70 spin_lock_bh(&hinfo->lock);
71
72 s_i = cb->args[0];
73
74 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
75 struct hlist_head *head;
76 struct tc_action *p;
77
78 head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
79
80 hlist_for_each_entry_rcu(p, head, tcfa_head) {
81 index++;
82 if (index < s_i)
83 continue;
84 nest = nla_nest_start(skb, index);
85 if (nest == NULL)
86 goto nla_put_failure;
87 if (type == RTM_DELACTION)
88 err = tcf_action_dump_1(skb, p, 0, 1);
89 else
90 err = tcf_action_dump_1(skb, p, 0, 0);
91 if (err < 0) {
92 index--;
93 nla_nest_cancel(skb, nest);
94 goto done;
95 }
96 nla_nest_end(skb, nest);
97 n_i++;
98 }
99 }
100done:
101 spin_unlock_bh(&hinfo->lock);
102 if (n_i)
103 cb->args[0] += n_i;
104 return n_i;
105 66
106nla_put_failure: 67 return tcf_generic_walker(tn, skb, cb, type, ops);
107 nla_nest_cancel(skb, nest);
108 goto done;
109} 68}
110 69
111static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { 70static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@ -125,6 +84,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
125 struct tcf_police *police; 84 struct tcf_police *police;
126 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 85 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
127 struct tc_action_net *tn = net_generic(net, police_net_id); 86 struct tc_action_net *tn = net_generic(net, police_net_id);
87 bool exists = false;
128 int size; 88 int size;
129 89
130 if (nla == NULL) 90 if (nla == NULL)
@@ -139,24 +99,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
139 size = nla_len(tb[TCA_POLICE_TBF]); 99 size = nla_len(tb[TCA_POLICE_TBF]);
140 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) 100 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
141 return -EINVAL; 101 return -EINVAL;
102
142 parm = nla_data(tb[TCA_POLICE_TBF]); 103 parm = nla_data(tb[TCA_POLICE_TBF]);
104 exists = tcf_hash_check(tn, parm->index, a, bind);
105 if (exists && bind)
106 return 0;
143 107
144 if (parm->index) { 108 if (!exists) {
145 if (tcf_hash_check(tn, parm->index, a, bind)) {
146 if (ovr)
147 goto override;
148 /* not replacing */
149 return -EEXIST;
150 }
151 } else {
152 ret = tcf_hash_create(tn, parm->index, NULL, a, 109 ret = tcf_hash_create(tn, parm->index, NULL, a,
153 &act_police_ops, bind, false); 110 &act_police_ops, bind, false);
154 if (ret) 111 if (ret)
155 return ret; 112 return ret;
156 ret = ACT_P_CREATED; 113 ret = ACT_P_CREATED;
114 } else {
115 tcf_hash_release(*a, bind);
116 if (!ovr)
117 return -EEXIST;
157 } 118 }
158 119
159override:
160 police = to_police(*a); 120 police = to_police(*a);
161 if (parm->rate.rate) { 121 if (parm->rate.rate) {
162 err = -ENOMEM; 122 err = -ENOMEM;
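tcf_act_police_init() above is reshaped around the "exists" idiom shared by the other actions: one tcf_hash_check() up front, an early return for a plain bind, creation only when the index is free, and release-or-EEXIST when replacing; the hand-rolled walker is likewise collapsed into the generic tcf_generic_walker(). A compact sketch of that decision order (return codes illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { P_BOUND = 0, P_CREATED = 1, P_EEXIST = -17 };

/* One lookup result drives all four outcomes, in this order. */
static int init_action(bool exists, bool bind, bool ovr)
{
        if (exists && bind)
                return P_BOUND;         /* just take a binding ref */
        if (!exists)
                return P_CREATED;       /* fresh instance at this index */
        if (!ovr)
                return P_EEXIST;        /* replace was not requested */
        return P_BOUND;                 /* drop old ref, overwrite params */
}

int main(void)
{
        printf("%d %d %d %d\n",
               init_action(true, true, false),   /* bind existing */
               init_action(false, false, false), /* create */
               init_action(true, false, false),  /* -EEXIST */
               init_action(true, false, true));  /* override */
        return 0;
}
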
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 843a716a4303..a7c5645373af 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -541,8 +541,12 @@ out:
541void tcf_exts_destroy(struct tcf_exts *exts) 541void tcf_exts_destroy(struct tcf_exts *exts)
542{ 542{
543#ifdef CONFIG_NET_CLS_ACT 543#ifdef CONFIG_NET_CLS_ACT
544 tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND); 544 LIST_HEAD(actions);
545 INIT_LIST_HEAD(&exts->actions); 545
546 tcf_exts_to_list(exts, &actions);
547 tcf_action_destroy(&actions, TCA_ACT_UNBIND);
548 kfree(exts->actions);
549 exts->nr_actions = 0;
546#endif 550#endif
547} 551}
548EXPORT_SYMBOL(tcf_exts_destroy); 552EXPORT_SYMBOL(tcf_exts_destroy);
@@ -554,7 +558,6 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
554 { 558 {
555 struct tc_action *act; 559 struct tc_action *act;
556 560
557 INIT_LIST_HEAD(&exts->actions);
558 if (exts->police && tb[exts->police]) { 561 if (exts->police && tb[exts->police]) {
559 act = tcf_action_init_1(net, tb[exts->police], rate_tlv, 562 act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
560 "police", ovr, 563 "police", ovr,
@@ -563,14 +566,20 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
563 return PTR_ERR(act); 566 return PTR_ERR(act);
564 567
565 act->type = exts->type = TCA_OLD_COMPAT; 568 act->type = exts->type = TCA_OLD_COMPAT;
566 list_add(&act->list, &exts->actions); 569 exts->actions[0] = act;
570 exts->nr_actions = 1;
567 } else if (exts->action && tb[exts->action]) { 571 } else if (exts->action && tb[exts->action]) {
568 int err; 572 LIST_HEAD(actions);
573 int err, i = 0;
574
569 err = tcf_action_init(net, tb[exts->action], rate_tlv, 575 err = tcf_action_init(net, tb[exts->action], rate_tlv,
570 NULL, ovr, 576 NULL, ovr,
571 TCA_ACT_BIND, &exts->actions); 577 TCA_ACT_BIND, &actions);
572 if (err) 578 if (err)
573 return err; 579 return err;
580 list_for_each_entry(act, &actions, list)
581 exts->actions[i++] = act;
582 exts->nr_actions = i;
574 } 583 }
575 } 584 }
576#else 585#else
@@ -587,37 +596,49 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
587 struct tcf_exts *src) 596 struct tcf_exts *src)
588{ 597{
589#ifdef CONFIG_NET_CLS_ACT 598#ifdef CONFIG_NET_CLS_ACT
590 LIST_HEAD(tmp); 599 struct tcf_exts old = *dst;
600
591 tcf_tree_lock(tp); 601 tcf_tree_lock(tp);
592 list_splice_init(&dst->actions, &tmp); 602 dst->nr_actions = src->nr_actions;
593 list_splice(&src->actions, &dst->actions); 603 dst->actions = src->actions;
594 dst->type = src->type; 604 dst->type = src->type;
595 tcf_tree_unlock(tp); 605 tcf_tree_unlock(tp);
596 tcf_action_destroy(&tmp, TCA_ACT_UNBIND); 606
607 tcf_exts_destroy(&old);
597#endif 608#endif
598} 609}
599EXPORT_SYMBOL(tcf_exts_change); 610EXPORT_SYMBOL(tcf_exts_change);
600 611
601#define tcf_exts_first_act(ext) \ 612#ifdef CONFIG_NET_CLS_ACT
602 list_first_entry_or_null(&(exts)->actions, \ 613static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
603 struct tc_action, list) 614{
615 if (exts->nr_actions == 0)
616 return NULL;
617 else
618 return exts->actions[0];
619}
620#endif
604 621
605int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 622int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
606{ 623{
607#ifdef CONFIG_NET_CLS_ACT 624#ifdef CONFIG_NET_CLS_ACT
608 struct nlattr *nest; 625 struct nlattr *nest;
609 626
610 if (exts->action && !list_empty(&exts->actions)) { 627 if (exts->action && exts->nr_actions) {
611 /* 628 /*
612 * again for backward compatible mode - we want 629 * again for backward compatible mode - we want
613 * to work with both old and new modes of entering 630 * to work with both old and new modes of entering
614 * tc data even if iproute2 was newer - jhs 631 * tc data even if iproute2 was newer - jhs
615 */ 632 */
616 if (exts->type != TCA_OLD_COMPAT) { 633 if (exts->type != TCA_OLD_COMPAT) {
634 LIST_HEAD(actions);
635
617 nest = nla_nest_start(skb, exts->action); 636 nest = nla_nest_start(skb, exts->action);
618 if (nest == NULL) 637 if (nest == NULL)
619 goto nla_put_failure; 638 goto nla_put_failure;
620 if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0) 639
640 tcf_exts_to_list(exts, &actions);
641 if (tcf_action_dump(skb, &actions, 0, 0) < 0)
621 goto nla_put_failure; 642 goto nla_put_failure;
622 nla_nest_end(skb, nest); 643 nla_nest_end(skb, nest);
623 } else if (exts->police) { 644 } else if (exts->police) {
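The cls_api.c changes move tcf_exts from an embedded action list to an actions array plus nr_actions, bridging back to the list-based helpers (tcf_action_init(), tcf_action_dump(), tcf_action_destroy()) through a temporary list built by tcf_exts_to_list(); tcf_exts_change() in particular snapshots the old state, publishes the new array and count under the tree lock, and destroys the snapshot after unlocking. A standalone sketch of that publish-then-destroy swap, with illustrative types:

#include <pthread.h>
#include <stdlib.h>

struct act;
struct exts { struct act **actions; int nr_actions; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void exts_destroy(struct exts *e)
{
        free(e->actions);               /* stand-in for tcf_action_destroy() */
        e->nr_actions = 0;
}

static void exts_change(struct exts *dst, struct exts *src)
{
        struct exts old = *dst;         /* snapshot current state */

        pthread_mutex_lock(&tree_lock);
        dst->actions = src->actions;    /* publish new array + count */
        dst->nr_actions = src->nr_actions;
        pthread_mutex_unlock(&tree_lock);

        exts_destroy(&old);             /* tear down outside the lock */
}

int main(void)
{
        struct exts dst = { malloc(sizeof(void *)), 1 };
        struct exts src = { malloc(sizeof(void *)), 1 };

        exts_change(&dst, &src);
        exts_destroy(&dst);
        return 0;
}
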
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 4cb5aedfe3ee..ef8ba77a5bea 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
293 return ERR_PTR(err); 293 return ERR_PTR(err);
294 } 294 }
295 295
296 iter->start_fail = 0;
296 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); 297 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
297} 298}
298 299
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index f69edcf219e5..bb691538adc8 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
13{ 13{
14 union sctp_addr laddr, paddr; 14 union sctp_addr laddr, paddr;
15 struct dst_entry *dst; 15 struct dst_entry *dst;
16 struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
16 17
17 laddr = list_entry(asoc->base.bind_addr.address_list.next, 18 laddr = list_entry(asoc->base.bind_addr.address_list.next,
18 struct sctp_sockaddr_entry, list)->a; 19 struct sctp_sockaddr_entry, list)->a;
@@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
40 } 41 }
41 42
42 r->idiag_state = asoc->state; 43 r->idiag_state = asoc->state;
43 r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; 44 if (timer_pending(t3_rtx)) {
44 r->idiag_retrans = asoc->rtx_data_chunks; 45 r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
45 r->idiag_expires = jiffies_to_msecs( 46 r->idiag_retrans = asoc->rtx_data_chunks;
46 asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies); 47 r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
48 } else {
49 r->idiag_timer = 0;
50 r->idiag_retrans = 0;
51 r->idiag_expires = 0;
52 }
47} 53}
48 54
49static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, 55static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
@@ -350,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
350 if (cb->args[4] < cb->args[1]) 356 if (cb->args[4] < cb->args[1])
351 goto next; 357 goto next;
352 358
353 if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) 359 if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
354 goto next; 360 goto next;
355 361
356 if (r->sdiag_family != AF_UNSPEC && 362 if (r->sdiag_family != AF_UNSPEC &&
@@ -465,7 +471,7 @@ skip:
465 * 3 : to mark if we have dumped the ep info of the current asoc 471 * 3 : to mark if we have dumped the ep info of the current asoc
466 * 4 : to work as a temporary variable to traversal list 472 * 4 : to work as a temporary variable to traversal list
467 */ 473 */
468 if (!(idiag_states & ~TCPF_LISTEN)) 474 if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
469 goto done; 475 goto done;
470 sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); 476 sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
471done: 477done:
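inet_diag_msg_sctpasoc_fill() above stops reporting a T3-RTX timer unconditionally: it now checks timer_pending() and derives the expiry from the timer's own expires field, zeroing all three fields when no retransmission timer is armed (the old code could compute a bogus expiry from a stale timeout slot). A minimal sketch of the guard, with illustrative types and timer codes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct timer { bool pending; uint64_t expires; };
struct diag  { int timer_type, retrans; uint64_t expires_ms; };

enum { T_NONE = 0, T_RTX = 1 };         /* illustrative codes */

static void fill_timer(struct diag *r, const struct timer *t3,
                       int rtx_chunks, uint64_t now)
{
        if (t3->pending) {
                r->timer_type = T_RTX;               /* timer really armed */
                r->retrans = rtx_chunks;
                r->expires_ms = t3->expires - now;   /* from the timer itself */
        } else {
                r->timer_type = T_NONE;              /* nothing armed: */
                r->retrans = 0;                      /* report all-zero */
                r->expires_ms = 0;
        }
}

int main(void)
{
        struct timer t3 = { true, 1500 };
        struct diag r;

        fill_timer(&r, &t3, 2, 1000);
        printf("timer=%d retrans=%d expires=%llums\n",
               r.timer_type, r.retrans, (unsigned long long)r.expires_ms);
        return 0;
}
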
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 1bc4f71aaba8..d85b803da11d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
702 */ 702 */
703 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); 703 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
704 704
705 sctp_ulpevent_receive_data(event, asoc);
706
707 /* And hold the chunk as we need it for getting the IP headers 705 /* And hold the chunk as we need it for getting the IP headers
708 * later in recvmsg 706 * later in recvmsg
709 */ 707 */
710 sctp_chunk_hold(chunk); 708 sctp_chunk_hold(chunk);
711 event->chunk = chunk; 709 event->chunk = chunk;
712 710
711 sctp_ulpevent_receive_data(event, asoc);
712
713 event->stream = ntohs(chunk->subh.data_hdr->stream); 713 event->stream = ntohs(chunk->subh.data_hdr->stream);
714 event->ssn = ntohs(chunk->subh.data_hdr->ssn); 714 event->ssn = ntohs(chunk->subh.data_hdr->ssn);
715 event->ppid = chunk->subh.data_hdr->ppid; 715 event->ppid = chunk->subh.data_hdr->ppid;
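The ulpevent.c hunk is a pure reordering: the chunk reference is taken and stored on the event before sctp_ulpevent_receive_data() runs, so the chunk is already pinned if that call drops references along the way. The general take-then-use rule, as a short runnable sketch with illustrative names:

#include <assert.h>
#include <stdatomic.h>

struct chunk { atomic_int refcnt; };

static void chunk_hold(struct chunk *c) { atomic_fetch_add(&c->refcnt, 1); }
static void chunk_put(struct chunk *c)  { atomic_fetch_sub(&c->refcnt, 1); }

/* May legitimately drop one reference internally. */
static void receive_data(struct chunk *c) { chunk_put(c); }

int main(void)
{
        struct chunk c = { 1 };

        chunk_hold(&c);         /* hold first ... */
        receive_data(&c);       /* ... then call the code that may put */
        assert(atomic_load(&c.refcnt) == 1); /* still safely pinned */
        chunk_put(&c);
        return 0;
}
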
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 23c8e7c39656..976c7812bbd5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
340} 340}
341 341
342static struct gss_upcall_msg * 342static struct gss_upcall_msg *
343__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) 343__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
344{ 344{
345 struct gss_upcall_msg *pos; 345 struct gss_upcall_msg *pos;
346 list_for_each_entry(pos, &pipe->in_downcall, list) { 346 list_for_each_entry(pos, &pipe->in_downcall, list) {
347 if (!uid_eq(pos->uid, uid)) 347 if (!uid_eq(pos->uid, uid))
348 continue; 348 continue;
349 if (auth && pos->auth->service != auth->service)
350 continue;
349 atomic_inc(&pos->count); 351 atomic_inc(&pos->count);
350 dprintk("RPC: %s found msg %p\n", __func__, pos); 352 dprintk("RPC: %s found msg %p\n", __func__, pos);
351 return pos; 353 return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
365 struct gss_upcall_msg *old; 367 struct gss_upcall_msg *old;
366 368
367 spin_lock(&pipe->lock); 369 spin_lock(&pipe->lock);
368 old = __gss_find_upcall(pipe, gss_msg->uid); 370 old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
369 if (old == NULL) { 371 if (old == NULL) {
370 atomic_inc(&gss_msg->count); 372 atomic_inc(&gss_msg->count);
371 list_add(&gss_msg->list, &pipe->in_downcall); 373 list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
714 err = -ENOENT; 716 err = -ENOENT;
715 /* Find a matching upcall */ 717 /* Find a matching upcall */
716 spin_lock(&pipe->lock); 718 spin_lock(&pipe->lock);
717 gss_msg = __gss_find_upcall(pipe, uid); 719 gss_msg = __gss_find_upcall(pipe, uid, NULL);
718 if (gss_msg == NULL) { 720 if (gss_msg == NULL) {
719 spin_unlock(&pipe->lock); 721 spin_unlock(&pipe->lock);
720 goto err_put_ctx; 722 goto err_put_ctx;
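__gss_find_upcall() above gains an auth parameter so a downcall only matches an upcall of the same GSS service; passing NULL, as gss_pipe_downcall() does, keeps the old match-by-uid-only behaviour. A small sketch of that optional-secondary-key lookup shape, with illustrative types:

#include <stddef.h>
#include <stdio.h>

struct msg { int uid; int service; };

/* If service is NULL it is ignored, exactly like the NULL auth above. */
static const struct msg *find_msg(const struct msg *tbl, size_t n,
                                  int uid, const int *service)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (tbl[i].uid != uid)
                        continue;
                if (service && tbl[i].service != *service)
                        continue;
                return &tbl[i];
        }
        return NULL;
}

int main(void)
{
        const struct msg tbl[] = { { 1000, 1 }, { 1000, 2 } };
        int wanted = 2;
        const struct msg *m = find_msg(tbl, 2, 1000, &wanted);

        printf("service %d\n", m ? m->service : -1);
        return 0;
}
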
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cb49898a5a58..7f79fb7dc6a0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2638{ 2638{
2639 struct rpc_xprt_switch *xps; 2639 struct rpc_xprt_switch *xps;
2640 struct rpc_xprt *xprt; 2640 struct rpc_xprt *xprt;
2641 unsigned long reconnect_timeout;
2641 unsigned char resvport; 2642 unsigned char resvport;
2642 int ret = 0; 2643 int ret = 0;
2643 2644
@@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2649 return -EAGAIN; 2650 return -EAGAIN;
2650 } 2651 }
2651 resvport = xprt->resvport; 2652 resvport = xprt->resvport;
2653 reconnect_timeout = xprt->max_reconnect_timeout;
2652 rcu_read_unlock(); 2654 rcu_read_unlock();
2653 2655
2654 xprt = xprt_create_transport(xprtargs); 2656 xprt = xprt_create_transport(xprtargs);
@@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2657 goto out_put_switch; 2659 goto out_put_switch;
2658 } 2660 }
2659 xprt->resvport = resvport; 2661 xprt->resvport = resvport;
2662 xprt->max_reconnect_timeout = reconnect_timeout;
2660 2663
2661 rpc_xprt_switch_set_roundrobin(xps); 2664 rpc_xprt_switch_set_roundrobin(xps);
2662 if (setup) { 2665 if (setup) {
@@ -2673,6 +2676,27 @@ out_put_switch:
2673} 2676}
2674EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 2677EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
2675 2678
2679static int
2680rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
2681 struct rpc_xprt *xprt,
2682 void *data)
2683{
2684 unsigned long timeout = *((unsigned long *)data);
2685
2686 if (timeout < xprt->max_reconnect_timeout)
2687 xprt->max_reconnect_timeout = timeout;
2688 return 0;
2689}
2690
2691void
2692rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
2693{
2694 rpc_clnt_iterate_for_each_xprt(clnt,
2695 rpc_xprt_cap_max_reconnect_timeout,
2696 &timeo);
2697}
2698EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
2699
2676#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 2700#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
2677static void rpc_show_header(void) 2701static void rpc_show_header(void)
2678{ 2702{
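rpc_cap_max_reconnect_timeout() above walks every transport of a client via rpc_clnt_iterate_for_each_xprt() and lowers max_reconnect_timeout where needed; rpc_clnt_add_xprt() also copies the cap onto newly added transports so late arrivals obey it. A hedged usage sketch; rpc_cap_max_reconnect_timeout() is the export added above, while the client setup around it is illustrative:

/* Sketch only: a kernel caller (e.g. a mount path) capping how long any
 * of the client's transports may back off between reconnect attempts. */
struct rpc_clnt *clnt = make_rpc_client();      /* hypothetical setup */

rpc_cap_max_reconnect_timeout(clnt, 15 * HZ);   /* cap backoff at 15s */
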
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8313960cac52..ea244b29138b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -680,6 +680,20 @@ out:
680 spin_unlock_bh(&xprt->transport_lock); 680 spin_unlock_bh(&xprt->transport_lock);
681} 681}
682 682
683static bool
684xprt_has_timer(const struct rpc_xprt *xprt)
685{
686 return xprt->idle_timeout != 0;
687}
688
689static void
690xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
691 __must_hold(&xprt->transport_lock)
692{
693 if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
694 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
695}
696
683static void 697static void
684xprt_init_autodisconnect(unsigned long data) 698xprt_init_autodisconnect(unsigned long data)
685{ 699{
@@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data)
688 spin_lock(&xprt->transport_lock); 702 spin_lock(&xprt->transport_lock);
689 if (!list_empty(&xprt->recv)) 703 if (!list_empty(&xprt->recv))
690 goto out_abort; 704 goto out_abort;
705 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
706 xprt->last_used = jiffies;
691 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 707 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
692 goto out_abort; 708 goto out_abort;
693 spin_unlock(&xprt->transport_lock); 709 spin_unlock(&xprt->transport_lock);
@@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
725 goto out; 741 goto out;
726 xprt->snd_task =NULL; 742 xprt->snd_task =NULL;
727 xprt->ops->release_xprt(xprt, NULL); 743 xprt->ops->release_xprt(xprt, NULL);
744 xprt_schedule_autodisconnect(xprt);
728out: 745out:
729 spin_unlock_bh(&xprt->transport_lock); 746 spin_unlock_bh(&xprt->transport_lock);
730 wake_up_bit(&xprt->state, XPRT_LOCKED); 747 wake_up_bit(&xprt->state, XPRT_LOCKED);
@@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task)
888 spin_unlock_bh(&xprt->transport_lock); 905 spin_unlock_bh(&xprt->transport_lock);
889} 906}
890 907
891static inline int xprt_has_timer(struct rpc_xprt *xprt)
892{
893 return xprt->idle_timeout != 0;
894}
895
896/** 908/**
897 * xprt_prepare_transmit - reserve the transport before sending a request 909 * xprt_prepare_transmit - reserve the transport before sending a request
898 * @task: RPC task about to send a request 910 * @task: RPC task about to send a request
@@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task)
1280 if (!list_empty(&req->rq_list)) 1292 if (!list_empty(&req->rq_list))
1281 list_del(&req->rq_list); 1293 list_del(&req->rq_list);
1282 xprt->last_used = jiffies; 1294 xprt->last_used = jiffies;
1283 if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) 1295 xprt_schedule_autodisconnect(xprt);
1284 mod_timer(&xprt->timer,
1285 xprt->last_used + xprt->idle_timeout);
1286 spin_unlock_bh(&xprt->transport_lock); 1296 spin_unlock_bh(&xprt->transport_lock);
1287 if (req->rq_buffer) 1297 if (req->rq_buffer)
1288 xprt->ops->buf_free(req->rq_buffer); 1298 xprt->ops->buf_free(req->rq_buffer);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 111767ab124a..8ede3bc52481 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = {
177 * increase over time if the server is down or not responding. 177 * increase over time if the server is down or not responding.
178 */ 178 */
179#define XS_TCP_INIT_REEST_TO (3U * HZ) 179#define XS_TCP_INIT_REEST_TO (3U * HZ)
180#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
181 180
182/* 181/*
183 * TCP idle timeout; client drops the transport socket if it is idle 182 * TCP idle timeout; client drops the transport socket if it is idle
@@ -2173,6 +2172,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2173 write_unlock_bh(&sk->sk_callback_lock); 2172 write_unlock_bh(&sk->sk_callback_lock);
2174 } 2173 }
2175 xs_udp_do_set_buffer_size(xprt); 2174 xs_udp_do_set_buffer_size(xprt);
2175
2176 xprt->stat.connect_start = jiffies;
2176} 2177}
2177 2178
2178static void xs_udp_setup_socket(struct work_struct *work) 2179static void xs_udp_setup_socket(struct work_struct *work)
@@ -2236,6 +2237,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2236 unsigned int keepcnt = xprt->timeout->to_retries + 1; 2237 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2237 unsigned int opt_on = 1; 2238 unsigned int opt_on = 1;
2238 unsigned int timeo; 2239 unsigned int timeo;
2240 unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2239 2241
2240 /* TCP Keepalive options */ 2242 /* TCP Keepalive options */
2241 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, 2243 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2247,6 +2249,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2247 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, 2249 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2248 (char *)&keepcnt, sizeof(keepcnt)); 2250 (char *)&keepcnt, sizeof(keepcnt));
2249 2251
2252 /* Avoid temporary address, they are bad for long-lived
2253 * connections such as NFS mounts.
2254 * RFC4941, section 3.6 suggests that:
2255 * Individual applications, which have specific
2256 * knowledge about the normal duration of connections,
2257 * MAY override this as appropriate.
2258 */
2259 kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2260 (char *)&addr_pref, sizeof(addr_pref));
2261
2250 /* TCP user timeout (see RFC5482) */ 2262 /* TCP user timeout (see RFC5482) */
2251 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2263 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2252 (xprt->timeout->to_retries + 1); 2264 (xprt->timeout->to_retries + 1);
@@ -2295,6 +2307,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2295 /* SYN_SENT! */ 2307 /* SYN_SENT! */
2296 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2308 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2297 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2309 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2310 break;
2311 case -EADDRNOTAVAIL:
2312 /* Source port number is unavailable. Try a new one! */
2313 transport->srcport = 0;
2298 } 2314 }
2299out: 2315out:
2300 return ret; 2316 return ret;
@@ -2369,6 +2385,25 @@ out:
2369 xprt_wake_pending_tasks(xprt, status); 2385 xprt_wake_pending_tasks(xprt, status);
2370} 2386}
2371 2387
2388static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
2389{
2390 unsigned long start, now = jiffies;
2391
2392 start = xprt->stat.connect_start + xprt->reestablish_timeout;
2393 if (time_after(start, now))
2394 return start - now;
2395 return 0;
2396}
2397
2398static void xs_reconnect_backoff(struct rpc_xprt *xprt)
2399{
2400 xprt->reestablish_timeout <<= 1;
2401 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
2402 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
2403 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2404 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2405}
2406
2372/** 2407/**
2373 * xs_connect - connect a socket to a remote endpoint 2408 * xs_connect - connect a socket to a remote endpoint
2374 * @xprt: pointer to transport structure 2409 * @xprt: pointer to transport structure
@@ -2386,6 +2421,7 @@ out:
2386static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2421static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2387{ 2422{
2388 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2423 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2424 unsigned long delay = 0;
2389 2425
2390 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2426 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2391 2427
@@ -2397,19 +2433,15 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2397 /* Start by resetting any existing state */ 2433 /* Start by resetting any existing state */
2398 xs_reset_transport(transport); 2434 xs_reset_transport(transport);
2399 2435
2400 queue_delayed_work(xprtiod_workqueue, 2436 delay = xs_reconnect_delay(xprt);
2401 &transport->connect_worker, 2437 xs_reconnect_backoff(xprt);
2402 xprt->reestablish_timeout); 2438
2403 xprt->reestablish_timeout <<= 1; 2439 } else
2404 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2405 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2406 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2407 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2408 } else {
2409 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2440 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2410 queue_delayed_work(xprtiod_workqueue, 2441
2411 &transport->connect_worker, 0); 2442 queue_delayed_work(xprtiod_workqueue,
2412 } 2443 &transport->connect_worker,
2444 delay);
2413} 2445}
2414 2446
2415/** 2447/**
@@ -2961,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2961 xprt->ops = &xs_tcp_ops; 2993 xprt->ops = &xs_tcp_ops;
2962 xprt->timeout = &xs_tcp_default_timeout; 2994 xprt->timeout = &xs_tcp_default_timeout;
2963 2995
2996 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2997
2964 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); 2998 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
2965 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 2999 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2966 3000
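The xprtsock.c changes replace the fixed XS_TCP_MAX_REEST_TO ceiling with the per-transport max_reconnect_timeout (seeded from the transport's to_maxval and adjustable through the new RPC-level cap), and split the reconnect logic in two: xs_reconnect_delay() measures how much of the current backoff window remains since connect_start, and xs_reconnect_backoff() doubles the window while clamping it between XS_TCP_INIT_REEST_TO and the cap. A standalone sketch of that arithmetic, with illustrative constants:

#include <stdio.h>

#define INIT_TO 3000UL          /* initial backoff, ms (illustrative) */

struct xprt { unsigned long reestablish, max_reconnect, connect_start; };

static unsigned long reconnect_delay(const struct xprt *x, unsigned long now)
{
        unsigned long start = x->connect_start + x->reestablish;

        return start > now ? start - now : 0;   /* remaining window */
}

static void reconnect_backoff(struct xprt *x)
{
        x->reestablish <<= 1;                   /* exponential growth */
        if (x->reestablish > x->max_reconnect)
                x->reestablish = x->max_reconnect;
        if (x->reestablish < INIT_TO)
                x->reestablish = INIT_TO;
}

int main(void)
{
        struct xprt x = { INIT_TO, 30000UL, 0 };
        unsigned long now = 1000;
        int i;

        for (i = 0; i < 6; i++) {
                printf("delay %lums, next window %lums\n",
                       reconnect_delay(&x, now), x.reestablish);
                reconnect_backoff(&x);
        }
        return 0;
}
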
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index b62caa1c770c..ed97a5876ebe 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
728 u32 bearer_id, u32 *prev_node) 728 u32 bearer_id, u32 *prev_node)
729{ 729{
730 struct tipc_monitor *mon = tipc_monitor(net, bearer_id); 730 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
731 struct tipc_peer *peer = mon->self; 731 struct tipc_peer *peer;
732 732
733 if (!mon) 733 if (!mon)
734 return -EINVAL; 734 return -EINVAL;
735 735
736 read_lock_bh(&mon->lock); 736 read_lock_bh(&mon->lock);
737 peer = mon->self;
737 do { 738 do {
738 if (*prev_node) { 739 if (*prev_node) {
739 if (peer->addr == *prev_node) 740 if (peer->addr == *prev_node)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c49b8df438cb..f9f5f3c3dab5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2180,7 +2180,8 @@ restart:
2180 TIPC_CONN_MSG, SHORT_H_SIZE, 2180 TIPC_CONN_MSG, SHORT_H_SIZE,
2181 0, dnode, onode, dport, oport, 2181 0, dnode, onode, dport, oport,
2182 TIPC_CONN_SHUTDOWN); 2182 TIPC_CONN_SHUTDOWN);
2183 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 2183 if (skb)
2184 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2184 } 2185 }
2185 tsk->connected = 0; 2186 tsk->connected = 0;
2186 sock->state = SS_DISCONNECTING; 2187 sock->state = SS_DISCONNECTING;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 699dfabdbccd..936d7eee62d0 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
87 87
88 vq = vsock->vqs[VSOCK_VQ_TX]; 88 vq = vsock->vqs[VSOCK_VQ_TX];
89 89
90 /* Avoid unnecessary interrupts while we're processing the ring */
91 virtqueue_disable_cb(vq);
92
93 for (;;) { 90 for (;;) {
94 struct virtio_vsock_pkt *pkt; 91 struct virtio_vsock_pkt *pkt;
95 struct scatterlist hdr, buf, *sgs[2]; 92 struct scatterlist hdr, buf, *sgs[2];
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
99 spin_lock_bh(&vsock->send_pkt_list_lock); 96 spin_lock_bh(&vsock->send_pkt_list_lock);
100 if (list_empty(&vsock->send_pkt_list)) { 97 if (list_empty(&vsock->send_pkt_list)) {
101 spin_unlock_bh(&vsock->send_pkt_list_lock); 98 spin_unlock_bh(&vsock->send_pkt_list_lock);
102 virtqueue_enable_cb(vq);
103 break; 99 break;
104 } 100 }
105 101
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work)
118 } 114 }
119 115
120 ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); 116 ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
117 /* Usually this means that there is no more space available in
118 * the vq
119 */
121 if (ret < 0) { 120 if (ret < 0) {
122 spin_lock_bh(&vsock->send_pkt_list_lock); 121 spin_lock_bh(&vsock->send_pkt_list_lock);
123 list_add(&pkt->list, &vsock->send_pkt_list); 122 list_add(&pkt->list, &vsock->send_pkt_list);
124 spin_unlock_bh(&vsock->send_pkt_list_lock); 123 spin_unlock_bh(&vsock->send_pkt_list_lock);
125
126 if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
127 continue; /* retry now that we have more space */
128 break; 124 break;
129 } 125 }
130 126
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b0e11b6dc994..0f506220a3bd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
513 r = cfg80211_get_chans_dfs_available(wiphy, 513 r = cfg80211_get_chans_dfs_available(wiphy,
514 chandef->center_freq2, 514 chandef->center_freq2,
515 width); 515 width);
516 break;
516 default: 517 default:
517 WARN_ON(chandef->center_freq2); 518 WARN_ON(chandef->center_freq2);
518 break; 519 break;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 46417f9cce68..f02653a08993 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
5380{ 5380{
5381 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; 5381 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
5382 u32 mask = 0; 5382 u32 mask = 0;
5383 u16 ht_opmode;
5383 5384
5384#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ 5385#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
5385do { \ 5386do { \
@@ -5471,9 +5472,36 @@ do { \
5471 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, 5472 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
5472 mask, NL80211_MESHCONF_RSSI_THRESHOLD, 5473 mask, NL80211_MESHCONF_RSSI_THRESHOLD,
5473 nl80211_check_s32); 5474 nl80211_check_s32);
5474 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, 5475 /*
5475 mask, NL80211_MESHCONF_HT_OPMODE, 5476 * Check HT operation mode based on
5476 nl80211_check_u16); 5477 * IEEE 802.11 2012 8.4.2.59 HT Operation element.
5478 */
5479 if (tb[NL80211_MESHCONF_HT_OPMODE]) {
5480 ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
5481
5482 if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION |
5483 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
5484 IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5485 return -EINVAL;
5486
5487 if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
5488 (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5489 return -EINVAL;
5490
5491 switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
5492 case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
5493 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
5494 if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
5495 return -EINVAL;
5496 break;
5497 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
5498 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
5499 if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5500 return -EINVAL;
5501 break;
5502 }
5503 cfg->ht_opmode = ht_opmode;
5504 }
5477 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 5505 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
5478 1, 65535, mask, 5506 1, 65535, mask,
5479 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, 5507 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,