author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-06-20 11:25:44 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-06-20 11:25:44 -0400
commit		af52739b922f656eb1f39016fabaabe4baeda2e2 (patch)
tree		79a7aa810d0493cd0cf4adebac26d37f12e8b545 /net
parent		25ed6a5e97809129a1bc852b6b5c7d03baa112c4 (diff)
parent		33688abb2802ff3a230bd2441f765477b94cc89e (diff)
Merge 4.7-rc4 into staging-next
We want the fixes in here, and we can resolve a merge issue in drivers/iio/industrialio-trigger.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
-rw-r--r--	net/bridge/br_fdb.c	2
-rw-r--r--	net/compat.c	20
-rw-r--r--	net/core/gen_stats.c	2
-rw-r--r--	net/core/net-sysfs.c	1
-rw-r--r--	net/ipv4/udp.c	10
-rw-r--r--	net/ipv6/ip6_gre.c	3
-rw-r--r--	net/ipv6/ip6_output.c	11
-rw-r--r--	net/ipv6/netfilter/nf_dup_ipv6.c	1
-rw-r--r--	net/ipv6/tcp_ipv6.c	4
-rw-r--r--	net/ipv6/udp.c	12
-rw-r--r--	net/l2tp/l2tp_core.c	2
-rw-r--r--	net/mac80211/mesh.c	4
-rw-r--r--	net/mac80211/sta_info.h	2
-rw-r--r--	net/netfilter/ipvs/ip_vs_conn.c	5
-rw-r--r--	net/netfilter/ipvs/ip_vs_core.c	5
-rw-r--r--	net/netfilter/nf_conntrack_ftp.c	1
-rw-r--r--	net/netfilter/nf_conntrack_helper.c	9
-rw-r--r--	net/netfilter/nf_conntrack_irc.c	1
-rw-r--r--	net/netfilter/nf_conntrack_sane.c	1
-rw-r--r--	net/netfilter/nf_conntrack_sip.c	1
-rw-r--r--	net/netfilter/nf_conntrack_standalone.c	2
-rw-r--r--	net/netfilter/nf_conntrack_tftp.c	1
-rw-r--r--	net/netfilter/nf_queue.c	17
-rw-r--r--	net/netfilter/nf_tables_api.c	2
-rw-r--r--	net/netfilter/nfnetlink_queue.c	20
-rw-r--r--	net/netfilter/x_tables.c	4
-rw-r--r--	net/packet/af_packet.c	25
-rw-r--r--	net/rds/rds.h	2
-rw-r--r--	net/rds/recv.c	2
-rw-r--r--	net/rds/send.c	1
-rw-r--r--	net/rds/tcp.c	78
-rw-r--r--	net/rds/tcp.h	1
-rw-r--r--	net/rds/tcp_connect.c	2
-rw-r--r--	net/rds/tcp_listen.c	20
-rw-r--r--	net/rds/threads.c	10
-rw-r--r--	net/rxrpc/rxkad.c	4
-rw-r--r--	net/sched/act_police.c	33
-rw-r--r--	net/sched/cls_flower.c	6
-rw-r--r--	net/sched/cls_u32.c	72
-rw-r--r--	net/sched/sch_drr.c	4
-rw-r--r--	net/sched/sch_fq_codel.c	26
-rw-r--r--	net/sched/sch_generic.c	2
-rw-r--r--	net/sched/sch_hfsc.c	12
-rw-r--r--	net/sched/sch_ingress.c	12
-rw-r--r--	net/sched/sch_prio.c	4
-rw-r--r--	net/sched/sch_qfq.c	6
-rw-r--r--	net/sched/sch_red.c	4
-rw-r--r--	net/sched/sch_tbf.c	4
-rw-r--r--	net/sunrpc/clnt.c	31
-rw-r--r--	net/sunrpc/svc_xprt.c	2
-rw-r--r--	net/sunrpc/xprtsock.c	1
-rw-r--r--	net/tipc/netlink_compat.c	3
-rw-r--r--	net/unix/af_unix.c	6
-rw-r--r--	net/wireless/core.c	2
-rw-r--r--	net/wireless/wext-core.c	25
55 files changed, 379 insertions, 164 deletions
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index dcea4f4c62b3..c18080ad4085 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	 * change from under us.
 	 */
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
 		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete_local(br, NULL, f);
diff --git a/net/compat.c b/net/compat.c
index 5cfd26a0006f..1cd2ec046164 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
 	__scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-				char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
 	struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
 	struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
 	    __get_user(ptr, &fprog32->filter) ||
 	    __put_user(len, &kfprog->len) ||
 	    __put_user(compat_ptr(ptr), &kfprog->filter))
+		return NULL;
+
+	return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+				char __user *optval, unsigned int optlen)
+{
+	struct sock_fprog __user *kfprog;
+
+	kfprog = get_compat_bpf_fprog(optval);
+	if (!kfprog)
 		return -EFAULT;
 
 	return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, unsigned int optlen)
 {
-	if (optname == SO_ATTACH_FILTER)
+	if (optname == SO_ATTACH_FILTER ||
+	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
 	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
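The compat shim above exists because struct sock_fprog embeds a user pointer, so its layout differs between a 32-bit caller and a 64-bit kernel. A minimal standalone sketch of the mismatch (field types simplified; these are illustrative structs, not the kernel's definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* What a 32-bit caller lays out for SO_ATTACH_FILTER. */
    struct compat_fprog {
            uint16_t len;      /* number of BPF instructions */
            uint32_t filter;   /* 32-bit user pointer, at offset 4 */
    };

    /* What a 64-bit kernel expects: the pointer widens to 8 bytes
     * and moves to offset 8 because of alignment padding. */
    struct native_fprog {
            uint16_t len;
            uint64_t filter;
    };

    int main(void)
    {
            /* Sizes and field offsets differ, so the kernel must rewrite
             * the structure (as get_compat_bpf_fprog() does above) rather
             * than reinterpret the caller's bytes. */
            printf("compat: %zu bytes, native: %zu bytes\n",
                   sizeof(struct compat_fprog), sizeof(struct native_fprog));
            return 0;
    }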
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index f96ee8b9478d..be873e4e3125 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -47,6 +47,7 @@ nla_put_failure:
  * @xstats_type: TLV type for backward compatibility xstats TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use a container for all
@@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use a container for all
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 2b3f76fe65f4..7a0b616557ab 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d56c0559b477..0ff31d97d485 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto csum_error;
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f4ac2842d4d9..fdc9de276ab1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1256,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
 	if (ret)
 		return ret;
 
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
 	tunnel = netdev_priv(dev);
 
 	ip6gre_tnl_link_config(tunnel, 1);
@@ -1289,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cbf127ae7c67..635b8d340cdb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst)
 {
 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-	int err;
 
 	dst = ip6_sk_dst_check(sk, dst, fl6);
+	if (!dst)
+		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-	if (err)
-		return ERR_PTR(err);
-	if (final_dst)
-		fl6->daddr = *final_dst;
-
-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index 6989c70ae29f..4a84b5ad9ecb 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
 	fl6.daddr = *gw;
 	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		dst_release(dst);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79e33e02f11a..f36c2d076fce 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	destp = ntohs(inet->inet_dport);
 	srcp = ntohs(inet->inet_sport);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		timer_active = 1;
 		timer_expires = icsk->icsk_timeout;
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2da1896af934..f421c9f23c5b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto csum_error;
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 6edfa9980314..1e40dacaa137 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 	tunnel->encap = encap;
 	if (encap == L2TP_ENCAPTYPE_UDP) {
-		struct udp_tunnel_sock_cfg udp_cfg;
+		struct udp_tunnel_sock_cfg udp_cfg = { };
 
 		udp_cfg.sk_user_data = tunnel;
 		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 4c6404e1ad6e..21b1fdf5d01d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
 		del_timer_sync(&sta->mesh->plink_timer);
 	}
 
+	/* make sure no readers can access nexthop sta from here on */
+	mesh_path_flush_by_nexthop(sta);
+	synchronize_net();
+
 	if (changed)
 		ieee80211_mbss_info_change_notify(sdata, changed);
 }
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c8b8ccc370eb..78b0ef32dddd 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
 	u8 sa_offs, da_offs, pn_offs;
 	u8 band;
 	u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
-	       sizeof(rfc1042_header)];
+	       sizeof(rfc1042_header)] __aligned(2);
 
 	struct rcu_head rcu_head;
 };
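The __aligned(2) annotation matters because the cached header is later copied with Ethernet-address helpers (e.g. ether_addr_copy()) that require 2-byte-aligned buffers. A small illustrative sketch (hypothetical structs, not mac80211's) of how a u8 array can otherwise land on an odd offset:

    #include <stddef.h>
    #include <stdio.h>

    struct hdr_plain {
            unsigned char flag;
            unsigned char hdr[6];   /* may start at odd offset 1 */
    };

    struct hdr_aligned {
            unsigned char flag;
            unsigned char hdr[6] __attribute__((aligned(2)));  /* offset 2 */
    };

    int main(void)
    {
            printf("plain:   hdr at offset %zu\n", offsetof(struct hdr_plain, hdr));
            printf("aligned: hdr at offset %zu\n", offsetof(struct hdr_aligned, hdr));
            return 0;
    }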
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 2cb3c626cd43..096a45103f14 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
  * If available, return 1, otherwise invalidate this connection
  * template and return 0.
  */
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
 {
 	struct ip_vs_dest *dest = ct->dest;
 	struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
 	 */
 	if ((dest == NULL) ||
 	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-	    expire_quiescent_template(ipvs, dest)) {
+	    expire_quiescent_template(ipvs, dest) ||
+	    (cdest && (dest != cdest))) {
 		IP_VS_DBG_BUF(9, "check_template: dest not available for "
 			      "protocol %s s:%s:%d v:%s:%d "
 			      "-> d:%s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1207f20d24e4..2c1b498a7a27 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
 	/* Check if a template already exists */
 	ct = ip_vs_ct_in_get(&param);
-	if (!ct || !ip_vs_check_template(ct)) {
+	if (!ct || !ip_vs_check_template(ct, NULL)) {
 		struct ip_vs_scheduler *sched;
 
 		/*
@@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
 			      vport, &param) < 0)
 		return NULL;
 	ct = ip_vs_ct_in_get(&param);
-	if (!ct) {
+	/* check if template exists and points to the same dest */
+	if (!ct || !ip_vs_check_template(ct, dest)) {
 		ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
 				    IP_VS_CONN_F_TEMPLATE, dest, 0);
 		if (!ct) {
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 883c691ec8d0..19efeba02abb 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
 		if (ret) {
 			pr_err("failed to register helper for pf: %d port: %d\n",
 			       ftp[i][j].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_ftp_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index f703adb7e5f7..196cb39649e1 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
-	int ret = 0;
-	struct nf_conntrack_helper *cur;
+	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
 	unsigned int h = helper_hash(&me->tuple);
+	struct nf_conntrack_helper *cur;
+	int ret = 0;
 
 	BUG_ON(me->expect_policy == NULL);
 	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 
 	mutex_lock(&nf_ct_helper_mutex);
 	hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
-		if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
-		    cur->tuple.src.l3num == me->tuple.src.l3num &&
-		    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+		if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
 			ret = -EEXIST;
 			goto out;
 		}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 8b6da2719600..f97ac61d2536 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
 		if (ret) {
 			pr_err("failed to register helper for pf: %u port: %u\n",
 			       irc[i].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_irc_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 7523a575f6d1..3fcbaab83b3d 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
 		if (ret) {
 			pr_err("failed to register helper for pf: %d port: %d\n",
 			       sane[i][j].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_sane_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 3e06402739e0..f72ba5587588 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
 		if (ret) {
 			pr_err("failed to register helper for pf: %u port: %u\n",
 			       sip[i][j].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_sip_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f87e84ebcec3..c026c472ea80 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 	{ }
 };
 
-#define NET_NF_CONNTRACK_MAX 2089
-
 static struct ctl_table nf_ct_netfilter_table[] = {
 	{
 		.procname	= "nf_conntrack_max",
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 36f964066461..2e65b5430fba 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
 		if (ret) {
 			pr_err("failed to register helper for pf: %u port: %u\n",
 			       tftp[i][j].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_tftp_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5baa8e24e6ac..b19ad20a705c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -26,23 +26,21 @@
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
 	/* should never happen, we only have one queueing backend in kernel */
-	WARN_ON(rcu_access_pointer(queue_handler));
-	rcu_assign_pointer(queue_handler, qh);
+	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+	rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-	RCU_INIT_POINTER(queue_handler, NULL);
-	synchronize_rcu();
+	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
 	const struct nf_queue_handler *qh;
 
 	rcu_read_lock();
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (qh)
 		qh->nf_hook_drop(net, ops);
 	rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
 	struct nf_queue_entry *entry = NULL;
 	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
+	struct net *net = state->net;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (!qh) {
 		status = -ESRCH;
 		goto err;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4d292b933b5c..7b7aa871a174 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 	/* Only accept unspec with dump */
 	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
 		return -EAFNOSUPPORT;
+	if (!nla[NFTA_SET_TABLE])
+		return -EINVAL;
 
 	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
 	if (IS_ERR(set))
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index aa93877ab6e2..5d36a0926b4a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
 	if (entskb->tstamp.tv64) {
 		struct nfqnl_msg_packet_timestamp ts;
-		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
 		ts.sec = cpu_to_be64(kts.tv_sec);
 		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
 			 net->nf.proc_netfilter, &nfqnl_file_ops))
 		return -ENOMEM;
 #endif
+	nf_register_queue_handler(net, &nfqh);
 	return 0;
 }
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+	nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
 	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
 }
 
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+	synchronize_rcu();
+}
+
 static struct pernet_operations nfnl_queue_net_ops = {
 	.init		= nfnl_queue_net_init,
 	.exit		= nfnl_queue_net_exit,
-	.id		= &nfnl_queue_net_id,
-	.size		= sizeof(struct nfnl_queue_net),
+	.exit_batch	= nfnl_queue_net_exit_batch,
+	.id		= &nfnl_queue_net_id,
+	.size		= sizeof(struct nfnl_queue_net),
 };
 
 static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
 	}
 
 	register_netdevice_notifier(&nfqnl_dev_notifier);
-	nf_register_queue_handler(&nfqh);
 	return status;
 
 cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@ out:
 
 static void __exit nfnetlink_queue_fini(void)
 {
-	nf_unregister_queue_handler();
 	unregister_netdevice_notifier(&nfqnl_dev_notifier);
 	nfnetlink_subsys_unregister(&nfqnl_subsys);
 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c69c892231d7..2675d580c490 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
 		return -EINVAL;
 
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-	    target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
 		return -EINVAL;
 
 	/* compat_xt_entry match has less strict aligment requirements,
@@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
 		return -EINVAL;
 
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-	    target_offset + sizeof(struct xt_standard_target) != next_offset)
+	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
 		return -EINVAL;
 
 	return xt_check_entry_match(elems, base + target_offset,
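Both checks now round the end of the standard target up to the xtables alignment before comparing it with next_offset, since userspace lays entries out on aligned boundaries. A toy sketch of the align-then-compare idea (generic round-up macro and hypothetical offsets, not the kernel's XT_ALIGN):

    #include <stdio.h>

    /* Round x up to a power-of-two boundary a. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long target_offset = 112;  /* hypothetical entry layout */
            unsigned long target_size   = 36;   /* not a multiple of 8 */
            unsigned long next_offset   = 152;  /* where the next entry begins */

            /* Comparing the raw end rejects a validly padded entry... */
            printf("raw:     %s\n",
                   target_offset + target_size == next_offset ? "match" : "mismatch");
            /* ...while comparing the aligned end accepts it. */
            printf("aligned: %s\n",
                   ALIGN_UP(target_offset + target_size, 8) == next_offset ?
                   "match" : "mismatch");
            return 0;
    }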
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4040eb92d9c9..9bff6ef16fa7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,6 +93,7 @@
 #include <net/inet_common.h>
 #endif
 #include <linux/bpf.h>
+#include <net/compat.h>
 
 #include "internal.h"
 
@@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 }
 
 
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+				    char __user *optval, unsigned int optlen)
+{
+	struct packet_sock *po = pkt_sk(sock->sk);
+
+	if (level != SOL_PACKET)
+		return -ENOPROTOOPT;
+
+	if (optname == PACKET_FANOUT_DATA &&
+	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+		optval = (char __user *)get_compat_bpf_fprog(optval);
+		if (!optval)
+			return -EFAULT;
+		optlen = sizeof(struct sock_fprog);
+	}
+
+	return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
 static int packet_notifier(struct notifier_block *this,
 			   unsigned long msg, void *ptr)
 {
@@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = {
 	.shutdown =	sock_no_shutdown,
 	.setsockopt =	packet_setsockopt,
 	.getsockopt =	packet_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_packet_setsockopt,
+#endif
 	.sendmsg =	packet_sendmsg,
 	.recvmsg =	packet_recvmsg,
 	.mmap =		packet_mmap,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 80256b08eac0..387df5f32e49 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -74,6 +74,7 @@ enum {
 	RDS_CONN_CONNECTING,
 	RDS_CONN_DISCONNECTING,
 	RDS_CONN_UP,
+	RDS_CONN_RESETTING,
 	RDS_CONN_ERROR,
 };
 
@@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
 void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c0be1ecd11c9..8413f6c99e13 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
 		minfo.fport = inc->i_hdr.h_dport;
 	}
 
+	minfo.flags = 0;
+
 	rds_info_copy(iter, &minfo, sizeof(minfo));
 }
diff --git a/net/rds/send.c b/net/rds/send.c
index c9cdb358ea88..b1962f8e30f7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
 	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
 	spin_unlock_irqrestore(&conn->c_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_send_reset);
 
 static int acquire_in_xmit(struct rds_connection *conn)
 {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 86187dad1440..74ee126a6fe6 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 }
 
 /*
- * This is the only path that sets tc->t_sock. Send and receive trust that
- * it is set. The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches the to the new sock and
+ * returns the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks. Send and receive trust that
+ * it is set. The absence of RDS_CONN_UP bit protects those paths
+ * from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+			     struct rds_connection *conn)
+{
+	struct rds_tcp_connection *tc = conn->c_transport_data;
+	struct socket *osock = tc->t_sock;
+
+	if (!osock)
+		goto newsock;
+
+	/* Need to resolve a duelling SYN between peers.
+	 * We have an outstanding SYN to this peer, which may
+	 * potentially have transitioned to the RDS_CONN_UP state,
+	 * so we must quiesce any send threads before resetting
+	 * c_transport_data. We quiesce these threads by setting
+	 * c_state to something other than RDS_CONN_UP, and then
+	 * waiting for any existing threads in rds_send_xmit to
+	 * complete release_in_xmit(). (Subsequent threads entering
+	 * rds_send_xmit() will bail on !rds_conn_up().
+	 *
+	 * However an incoming syn-ack at this point would end up
+	 * marking the conn as RDS_CONN_UP, and would again permit
+	 * rds_send_xmi() threads through, so ideally we would
+	 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+	 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+	 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+	 * would not get set. As a result, we set c_state to
+	 * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change
+	 * cannot mark rds_conn_path_up() in the window before lock_sock()
+	 */
+	atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+	wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+	lock_sock(osock->sk);
+	/* reset receive side state for rds_tcp_data_recv() for osock */
+	if (tc->t_tinc) {
+		rds_inc_put(&tc->t_tinc->ti_inc);
+		tc->t_tinc = NULL;
+	}
+	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+	tc->t_tinc_data_rem = 0;
+	tc->t_sock = NULL;
+
+	write_lock_bh(&osock->sk->sk_callback_lock);
+
+	osock->sk->sk_user_data = NULL;
+	osock->sk->sk_data_ready = tc->t_orig_data_ready;
+	osock->sk->sk_write_space = tc->t_orig_write_space;
+	osock->sk->sk_state_change = tc->t_orig_state_change;
+	write_unlock_bh(&osock->sk->sk_callback_lock);
+	release_sock(osock->sk);
+	sock_release(osock);
+newsock:
+	rds_send_reset(conn);
+	lock_sock(sock->sk);
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	tc->t_sock = sock;
+	sock->sk->sk_user_data = conn;
+	sock->sk->sk_data_ready = rds_tcp_data_ready;
+	sock->sk->sk_write_space = rds_tcp_write_space;
+	sock->sk->sk_state_change = rds_tcp_state_change;
+
+	write_unlock_bh(&sock->sk->sk_callback_lock);
+	release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with data path
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
 {
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 41c228300525..ec0602b0dc24 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -50,6 +50,7 @@ struct rds_tcp_statistics {
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
 void rds_tcp_restore_callbacks(struct socket *sock,
 			       struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index fb82e0a0bf89..fba13d0305fb 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk)
 	case TCP_SYN_RECV:
 		break;
 	case TCP_ESTABLISHED:
-		rds_connect_complete(conn);
+		rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
 		break;
 	case TCP_CLOSE_WAIT:
 	case TCP_CLOSE:
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 4bf4befe5066..686b1d03a558 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
 	struct inet_sock *inet;
 	struct rds_tcp_connection *rs_tcp = NULL;
 	int conn_state;
-	struct sock *nsk;
 
 	if (!sock) /* module unload or netns delete in progress */
 		return -ENETUNREACH;
@@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
 		    !conn->c_outgoing) {
 			goto rst_nsk;
 		} else {
-			atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
-			wait_event(conn->c_waitq,
-				   !test_bit(RDS_IN_XMIT, &conn->c_flags));
-			rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+			rds_tcp_reset_callbacks(new_sock, conn);
 			conn->c_outgoing = 0;
+			/* rds_connect_path_complete() marks RDS_CONN_UP */
+			rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
 		}
+	} else {
+		rds_tcp_set_callbacks(new_sock, conn);
+		rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
 	}
-	rds_tcp_set_callbacks(new_sock, conn);
-	rds_connect_complete(conn); /* marks RDS_CONN_UP */
 	new_sock = NULL;
 	ret = 0;
 	goto out;
 rst_nsk:
 	/* reset the newly returned accept sock and bail */
-	nsk = new_sock->sk;
-	rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-	nsk->sk_user_data = NULL;
-	nsk->sk_prot->disconnect(nsk, 0);
-	tcp_done(nsk);
-	new_sock = NULL;
+	kernel_sock_shutdown(new_sock, SHUT_RDWR);
 	ret = 0;
 out:
 	if (rs_tcp)
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 454aa6d23327..4a323045719b 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -71,9 +71,9 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
 {
-	if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+	if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
 		printk(KERN_WARNING "%s: Cannot transition to state UP, "
 				"current state is %d\n",
 				__func__,
@@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
 	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 	queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+	rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
 /*
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b726a046a7d..bab56ed649ba 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1162,9 +1162,7 @@ static int rxkad_init(void)
 	/* pin the cipher we need so that the crypto layer doesn't invoke
 	 * keventd to go get it */
 	rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(rxkad_ci))
-		return PTR_ERR(rxkad_ci);
-	return 0;
+	return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
 /*
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b884dae692a1..c557789765dc 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -38,7 +38,7 @@ struct tcf_police {
 	bool peak_present;
 };
 #define to_police(pc)	\
-	container_of(pc, struct tcf_police, common)
+	container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 				 struct nlattr *est, struct tc_action *a,
 				 int ovr, int bind)
 {
-	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
-	struct tcf_hashinfo *hinfo = tn->hinfo;
 	int size;
 
 	if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
 	if (parm->index) {
 		if (tcf_hash_search(tn, a, parm->index)) {
-			police = to_police(a->priv);
+			police = to_police(a);
 			if (bind) {
 				police->tcf_bindcnt += 1;
 				police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 			/* not replacing */
 			return -EEXIST;
 		}
+	} else {
+		ret = tcf_hash_create(tn, parm->index, NULL, a,
+				      sizeof(*police), bind, false);
+		if (ret)
+			return ret;
+		ret = ACT_P_CREATED;
 	}
 
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (police == NULL)
-		return -ENOMEM;
-	ret = ACT_P_CREATED;
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (bind)
-		police->tcf_bindcnt = 1;
+	police = to_police(a);
 override:
 	if (parm->rate.rate) {
 		err = -ENOMEM;
@@ -237,16 +234,8 @@ override:
 		return ret;
 
 	police->tcfp_t_c = ktime_get_ns();
-	police->tcf_index = parm->index ? parm->index :
-		tcf_hash_new_index(tn);
-	police->tcf_tm.install = jiffies;
-	police->tcf_tm.lastuse = jiffies;
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	spin_lock_bh(&hinfo->lock);
-	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-	spin_unlock_bh(&hinfo->lock);
+	tcf_hash_insert(tn, a);
 
-	a->priv = police;
 	return ret;
 
 failure_unlock:
@@ -255,7 +244,7 @@ failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
 	if (ret == ACT_P_CREATED)
-		kfree(police);
+		tcf_hash_cleanup(a, est);
 	return err;
 }
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 730aacafc22d..b3b7978f4182 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return;
 
 	offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 079b43b3c5d2..ffe593efe930 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
 		offload.cls_u32->knode.handle = handle;
 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
 	struct tc_to_netdev offload;
 	int err;
 
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-		offload.cls_u32->hnode.divisor = h->divisor;
-		offload.cls_u32->hnode.handle = h->handle;
-		offload.cls_u32->hnode.prio = h->prio;
+	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+	offload.cls_u32->hnode.divisor = h->divisor;
+	offload.cls_u32->hnode.handle = h->handle;
+	offload.cls_u32->hnode.prio = h->prio;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
 		offload.cls_u32->hnode.divisor = h->divisor;
 		offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-		offload.cls_u32->knode.handle = n->handle;
-		offload.cls_u32->knode.fshift = n->fshift;
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
+	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+	offload.cls_u32->knode.handle = n->handle;
+	offload.cls_u32->knode.fshift = n->fshift;
 #ifdef CONFIG_CLS_U32_MARK
-		offload.cls_u32->knode.val = n->val;
-		offload.cls_u32->knode.mask = n->mask;
+	offload.cls_u32->knode.val = n->val;
+	offload.cls_u32->knode.mask = n->mask;
 #else
-		offload.cls_u32->knode.val = 0;
-		offload.cls_u32->knode.mask = 0;
+	offload.cls_u32->knode.val = 0;
+	offload.cls_u32->knode.mask = 0;
 #endif
-		offload.cls_u32->knode.sel = &n->sel;
-		offload.cls_u32->knode.exts = &n->exts;
-		if (n->ht_down)
-			offload.cls_u32->knode.link_handle = n->ht_down->handle;
+	offload.cls_u32->knode.sel = &n->sel;
+	offload.cls_u32->knode.exts = &n->exts;
+	if (n->ht_down)
+		offload.cls_u32->knode.link_handle = n->ht_down->handle;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 		if (!tc_flags_valid(flags))
-			return err;
+			return -EINVAL;
 	}
 
 	n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		ht->divisor = divisor;
 		ht->handle = handle;
 		ht->prio = tp->prio;
+
+		err = u32_replace_hw_hnode(tp, ht, flags);
+		if (err) {
+			kfree(ht);
+			return err;
+		}
+
 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 		rcu_assign_pointer(tp_c->hlist, ht);
 		*arg = (unsigned long)ht;
 
-		u32_replace_hw_hnode(tp, ht, flags);
 		return 0;
 	}
 
931 939
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a63e879e8975..bf8af2c43c2c 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return err;
 }
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
 			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
 		if (cl->qdisc->ops->drop) {
 			len = cl->qdisc->ops->drop(cl->qdisc);
 			if (len > 0) {
+				sch->qstats.backlog -= len;
 				sch->q.qlen--;
 				if (cl->qdisc->q.qlen == 0)
 					list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a8971562..da250b2e06ae 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
199 unsigned int idx, prev_backlog, prev_qlen; 199 unsigned int idx, prev_backlog, prev_qlen;
200 struct fq_codel_flow *flow; 200 struct fq_codel_flow *flow;
201 int uninitialized_var(ret); 201 int uninitialized_var(ret);
202 unsigned int pkt_len;
202 bool memory_limited; 203 bool memory_limited;
203 204
204 idx = fq_codel_classify(skb, sch, &ret); 205 idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
230 prev_backlog = sch->qstats.backlog; 231 prev_backlog = sch->qstats.backlog;
231 prev_qlen = sch->q.qlen; 232 prev_qlen = sch->q.qlen;
232 233
234 /* save this packet length as it might be dropped by fq_codel_drop() */
235 pkt_len = qdisc_pkt_len(skb);
233 /* fq_codel_drop() is quite expensive, as it performs a linear search 236 /* fq_codel_drop() is quite expensive, as it performs a linear search
234 * in q->backlogs[] to find a fat flow. 237 * in q->backlogs[] to find a fat flow.
235 * So instead of dropping a single packet, drop half of its backlog 238 * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
237 */ 240 */
238 ret = fq_codel_drop(sch, q->drop_batch_size); 241 ret = fq_codel_drop(sch, q->drop_batch_size);
239 242
240 q->drop_overlimit += prev_qlen - sch->q.qlen; 243 prev_qlen -= sch->q.qlen;
244 prev_backlog -= sch->qstats.backlog;
245 q->drop_overlimit += prev_qlen;
241 if (memory_limited) 246 if (memory_limited)
242 q->drop_overmemory += prev_qlen - sch->q.qlen; 247 q->drop_overmemory += prev_qlen;
243 /* As we dropped packet(s), better let upper stack know this */
244 qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
245 prev_backlog - sch->qstats.backlog);
246 248
247 return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; 249 /* As we dropped packet(s), better let upper stack know this.
250 * If we dropped a packet for this flow, return NET_XMIT_CN,
 251 * but in this case, our parents won't increase their backlogs.
252 */
253 if (ret == idx) {
254 qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
255 prev_backlog - pkt_len);
256 return NET_XMIT_CN;
257 }
258 qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
259 return NET_XMIT_SUCCESS;
248} 260}
249 261
250/* This is the specific function called from codel_dequeue() 262/* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
649 qs.backlog = q->backlogs[idx]; 661 qs.backlog = q->backlogs[idx];
650 qs.drops = flow->dropped; 662 qs.drops = flow->dropped;
651 } 663 }
652 if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) 664 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
653 return -1; 665 return -1;
654 if (idx < q->flows_cnt) 666 if (idx < q->flows_cnt)
655 return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); 667 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
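
The restructured tail of fq_codel_enqueue() exists because of one subtlety: when the batch drop hit the very flow being enqueued (ret == idx), the function returns NET_XMIT_CN and parent qdiscs then do not account the new packet at all, so the qdisc_tree_reduce_backlog() call must exclude one packet and its byte length, which is why pkt_len is saved before fq_codel_drop() can free the skb. A user-space sketch of that arithmetic with made-up example numbers:

#include <stdio.h>

/* Illustrative stand-in for qdisc_tree_reduce_backlog(): it just reports
 * what the parent qdiscs would be told to subtract. Not kernel code. */
static void tree_reduce(unsigned int pkts, unsigned int bytes)
{
	printf("parents subtract %u packets / %u bytes\n", pkts, bytes);
}

int main(void)
{
	unsigned int dropped_pkts = 64, dropped_bytes = 96000;
	unsigned int pkt_len = 1500;	/* saved before the batch drop */
	int own_flow_hit = 1;		/* ret == idx in the hunk */

	if (own_flow_hit)		/* NET_XMIT_CN: parents never counted us */
		tree_reduce(dropped_pkts - 1, dropped_bytes - pkt_len);
	else				/* NET_XMIT_SUCCESS: full reduction */
		tree_reduce(dropped_pkts, dropped_bytes);
	return 0;
}
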
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 269dd71b3828..f9e0e9c03d0a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
49{ 49{
50 q->gso_skb = skb; 50 q->gso_skb = skb;
51 q->qstats.requeues++; 51 q->qstats.requeues++;
52 qdisc_qstats_backlog_inc(q, skb);
52 q->q.qlen++; /* it's still part of the queue */ 53 q->q.qlen++; /* it's still part of the queue */
53 __netif_schedule(q); 54 __netif_schedule(q);
54 55
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
92 txq = skb_get_tx_queue(txq->dev, skb); 93 txq = skb_get_tx_queue(txq->dev, skb);
93 if (!netif_xmit_frozen_or_stopped(txq)) { 94 if (!netif_xmit_frozen_or_stopped(txq)) {
94 q->gso_skb = NULL; 95 q->gso_skb = NULL;
96 qdisc_qstats_backlog_dec(q, skb);
95 q->q.qlen--; 97 q->q.qlen--;
96 } else 98 } else
97 skb = NULL; 99 skb = NULL;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7cc3348..1ac9f9f03fe3 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1529 q->eligible = RB_ROOT; 1529 q->eligible = RB_ROOT;
1530 INIT_LIST_HEAD(&q->droplist); 1530 INIT_LIST_HEAD(&q->droplist);
1531 qdisc_watchdog_cancel(&q->watchdog); 1531 qdisc_watchdog_cancel(&q->watchdog);
1532 sch->qstats.backlog = 0;
1532 sch->q.qlen = 0; 1533 sch->q.qlen = 0;
1533} 1534}
1534 1535
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1559 struct hfsc_sched *q = qdisc_priv(sch); 1560 struct hfsc_sched *q = qdisc_priv(sch);
1560 unsigned char *b = skb_tail_pointer(skb); 1561 unsigned char *b = skb_tail_pointer(skb);
1561 struct tc_hfsc_qopt qopt; 1562 struct tc_hfsc_qopt qopt;
1562 struct hfsc_class *cl;
1563 unsigned int i;
1564
1565 sch->qstats.backlog = 0;
1566 for (i = 0; i < q->clhash.hashsize; i++) {
1567 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1568 sch->qstats.backlog += cl->qdisc->qstats.backlog;
1569 }
1570 1563
1571 qopt.defcls = q->defcls; 1564 qopt.defcls = q->defcls;
1572 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) 1565 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1604 if (cl->qdisc->q.qlen == 1) 1597 if (cl->qdisc->q.qlen == 1)
1605 set_active(cl, qdisc_pkt_len(skb)); 1598 set_active(cl, qdisc_pkt_len(skb));
1606 1599
1600 qdisc_qstats_backlog_inc(sch, skb);
1607 sch->q.qlen++; 1601 sch->q.qlen++;
1608 1602
1609 return NET_XMIT_SUCCESS; 1603 return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
1672 1666
1673 qdisc_unthrottled(sch); 1667 qdisc_unthrottled(sch);
1674 qdisc_bstats_update(sch, skb); 1668 qdisc_bstats_update(sch, skb);
1669 qdisc_qstats_backlog_dec(sch, skb);
1675 sch->q.qlen--; 1670 sch->q.qlen--;
1676 1671
1677 return skb; 1672 return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
1695 } 1690 }
1696 cl->qstats.drops++; 1691 cl->qstats.drops++;
1697 qdisc_qstats_drop(sch); 1692 qdisc_qstats_drop(sch);
1693 sch->qstats.backlog -= len;
1698 sch->q.qlen--; 1694 sch->q.qlen--;
1699 return len; 1695 return len;
1700 } 1696 }
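
Because the hfsc enqueue/dequeue/drop/reset hunks above now keep sch->qstats.backlog exact at every operation, the loop that hfsc_dump_qdisc() used to rebuild the counter from all classes is deleted as redundant. A small stand-alone illustration of the trade, in illustrative C only: an O(classes) walk on each dump versus a counter that is already correct when read:

#include <stdio.h>

/* Deleted approach: rebuild the byte count from per-class backlogs. */
static unsigned int recompute(const unsigned int *per_class, int n)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < n; i++)	/* O(classes) on every dump */
		sum += per_class[i];
	return sum;
}

int main(void)
{
	unsigned int per_class[3] = { 3000, 0, 1500 };
	unsigned int maintained = 4500;	/* kept exact by inc/dec at each op */

	printf("recomputed=%u maintained=%u\n",
	       recompute(per_class, 3), maintained);
	return 0;
}
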
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 10adbc617905..8fe6999b642a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
27 return TC_H_MIN(classid) + 1; 27 return TC_H_MIN(classid) + 1;
28} 28}
29 29
30static bool ingress_cl_offload(u32 classid)
31{
32 return true;
33}
34
30static unsigned long ingress_bind_filter(struct Qdisc *sch, 35static unsigned long ingress_bind_filter(struct Qdisc *sch,
31 unsigned long parent, u32 classid) 36 unsigned long parent, u32 classid)
32{ 37{
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
86 .put = ingress_put, 91 .put = ingress_put,
87 .walk = ingress_walk, 92 .walk = ingress_walk,
88 .tcf_chain = ingress_find_tcf, 93 .tcf_chain = ingress_find_tcf,
94 .tcf_cl_offload = ingress_cl_offload,
89 .bind_tcf = ingress_bind_filter, 95 .bind_tcf = ingress_bind_filter,
90 .unbind_tcf = ingress_put, 96 .unbind_tcf = ingress_put,
91}; 97};
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
110 } 116 }
111} 117}
112 118
119static bool clsact_cl_offload(u32 classid)
120{
121 return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
122}
123
113static unsigned long clsact_bind_filter(struct Qdisc *sch, 124static unsigned long clsact_bind_filter(struct Qdisc *sch,
114 unsigned long parent, u32 classid) 125 unsigned long parent, u32 classid)
115{ 126{
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
158 .put = ingress_put, 169 .put = ingress_put,
159 .walk = ingress_walk, 170 .walk = ingress_walk,
160 .tcf_chain = clsact_find_tcf, 171 .tcf_chain = clsact_find_tcf,
172 .tcf_cl_offload = clsact_cl_offload,
161 .bind_tcf = clsact_bind_filter, 173 .bind_tcf = clsact_bind_filter,
162 .unbind_tcf = ingress_put, 174 .unbind_tcf = ingress_put,
163}; 175};
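
The new tcf_cl_offload callbacks answer whether a filter bound to a given pseudo-class may be offloaded to hardware: plain ingress has a single class and always says yes, while clsact exposes both an ingress and an egress minor and only the ingress one is offloadable. The check reduces to comparing minor numbers; the sketch below compiles stand-alone and borrows the macro values from include/uapi/linux/pkt_sched.h:

#include <stdbool.h>
#include <stdio.h>

/* Values as in include/uapi/linux/pkt_sched.h */
#define TC_H_MIN_MASK     0x0000FFFFU
#define TC_H_MIN(h)       ((h) & TC_H_MIN_MASK)
#define TC_H_MIN_INGRESS  0xFFF2U
#define TC_H_MIN_EGRESS   0xFFF3U

/* clsact: only filters on the ingress pseudo-class may be offloaded */
static bool clsact_cl_offload(unsigned int classid)
{
	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
}

/* plain ingress: the single class is always eligible */
static bool ingress_cl_offload(unsigned int classid)
{
	(void)classid;
	return true;
}

int main(void)
{
	printf("clsact ingress minor: %d\n", clsact_cl_offload(TC_H_MIN_INGRESS));
	printf("clsact egress minor:  %d\n", clsact_cl_offload(TC_H_MIN_EGRESS));
	printf("ingress:              %d\n", ingress_cl_offload(0));
	return 0;
}
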
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fee1b15506b2..4b0a82191bc4 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
85 85
86 ret = qdisc_enqueue(skb, qdisc); 86 ret = qdisc_enqueue(skb, qdisc);
87 if (ret == NET_XMIT_SUCCESS) { 87 if (ret == NET_XMIT_SUCCESS) {
88 qdisc_qstats_backlog_inc(sch, skb);
88 sch->q.qlen++; 89 sch->q.qlen++;
89 return NET_XMIT_SUCCESS; 90 return NET_XMIT_SUCCESS;
90 } 91 }
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
117 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); 118 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
118 if (skb) { 119 if (skb) {
119 qdisc_bstats_update(sch, skb); 120 qdisc_bstats_update(sch, skb);
121 qdisc_qstats_backlog_dec(sch, skb);
120 sch->q.qlen--; 122 sch->q.qlen--;
121 return skb; 123 return skb;
122 } 124 }
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
135 for (prio = q->bands-1; prio >= 0; prio--) { 137 for (prio = q->bands-1; prio >= 0; prio--) {
136 qdisc = q->queues[prio]; 138 qdisc = q->queues[prio];
137 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { 139 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
140 sch->qstats.backlog -= len;
138 sch->q.qlen--; 141 sch->q.qlen--;
139 return len; 142 return len;
140 } 143 }
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
151 154
152 for (prio = 0; prio < q->bands; prio++) 155 for (prio = 0; prio < q->bands; prio++)
153 qdisc_reset(q->queues[prio]); 156 qdisc_reset(q->queues[prio]);
157 sch->qstats.backlog = 0;
154 sch->q.qlen = 0; 158 sch->q.qlen = 0;
155} 159}
156 160
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8d2d8d953432..f18857febdad 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1235 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1235 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight,
1237 qdisc_pkt_len(skb)); 1237 qdisc_pkt_len(skb));
1238 if (err) 1238 if (err) {
1239 return err; 1239 cl->qstats.drops++;
1240 return qdisc_drop(skb, sch);
1241 }
1240 } 1242 }
1241 1243
1242 err = qdisc_enqueue(skb, cl->qdisc); 1244 err = qdisc_enqueue(skb, cl->qdisc);
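
The qfq hunk enforces the qdisc enqueue contract: ->enqueue() must always consume the skb, so when qfq_change_agg() fails on an oversized packet the class drop counter is bumped and the skb goes through qdisc_drop(), which frees it and returns a drop code, instead of the old path that returned the raw error with the packet still live. A user-space stand-in for that contract, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct stats { unsigned int drops; };

/* stand-in for qdisc_drop(): consume the packet and count the drop */
static int drop_pkt(char *pkt, struct stats *st)
{
	free(pkt);
	st->drops++;
	return 1;			/* a NET_XMIT_DROP-style code */
}

/* enqueue owns pkt from here on, whether it succeeds or fails */
static int enqueue(char *pkt, struct stats *st, int resize_err)
{
	if (resize_err)			/* qfq_change_agg() failed */
		return drop_pkt(pkt, st);	/* old code leaked pkt here */
	free(pkt);			/* stand-in for actually queueing it */
	return 0;			/* NET_XMIT_SUCCESS */
}

int main(void)
{
	struct stats st = { 0 };
	char *pkt = malloc(2048);	/* an oversized packet */

	enqueue(pkt, &st, 1);		/* resize failed: must not leak */
	printf("drops=%u\n", st.drops);
	return 0;
}
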
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c0e287..91578bdd378c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
97 97
98 ret = qdisc_enqueue(skb, child); 98 ret = qdisc_enqueue(skb, child);
99 if (likely(ret == NET_XMIT_SUCCESS)) { 99 if (likely(ret == NET_XMIT_SUCCESS)) {
100 qdisc_qstats_backlog_inc(sch, skb);
100 sch->q.qlen++; 101 sch->q.qlen++;
101 } else if (net_xmit_drop_count(ret)) { 102 } else if (net_xmit_drop_count(ret)) {
102 q->stats.pdrop++; 103 q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
118 skb = child->dequeue(child); 119 skb = child->dequeue(child);
119 if (skb) { 120 if (skb) {
120 qdisc_bstats_update(sch, skb); 121 qdisc_bstats_update(sch, skb);
122 qdisc_qstats_backlog_dec(sch, skb);
121 sch->q.qlen--; 123 sch->q.qlen--;
122 } else { 124 } else {
123 if (!red_is_idling(&q->vars)) 125 if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
143 if (child->ops->drop && (len = child->ops->drop(child)) > 0) { 145 if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
144 q->stats.other++; 146 q->stats.other++;
145 qdisc_qstats_drop(sch); 147 qdisc_qstats_drop(sch);
148 sch->qstats.backlog -= len;
146 sch->q.qlen--; 149 sch->q.qlen--;
147 return len; 150 return len;
148 } 151 }
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
158 struct red_sched_data *q = qdisc_priv(sch); 161 struct red_sched_data *q = qdisc_priv(sch);
159 162
160 qdisc_reset(q->qdisc); 163 qdisc_reset(q->qdisc);
164 sch->qstats.backlog = 0;
161 sch->q.qlen = 0; 165 sch->q.qlen = 0;
162 red_restart(&q->vars); 166 red_restart(&q->vars);
163} 167}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 83b90b584fae..3161e491990b 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
207 return ret; 207 return ret;
208 } 208 }
209 209
210 qdisc_qstats_backlog_inc(sch, skb);
210 sch->q.qlen++; 211 sch->q.qlen++;
211 return NET_XMIT_SUCCESS; 212 return NET_XMIT_SUCCESS;
212} 213}
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
217 unsigned int len = 0; 218 unsigned int len = 0;
218 219
219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { 220 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
221 sch->qstats.backlog -= len;
220 sch->q.qlen--; 222 sch->q.qlen--;
221 qdisc_qstats_drop(sch); 223 qdisc_qstats_drop(sch);
222 } 224 }
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
263 q->t_c = now; 265 q->t_c = now;
264 q->tokens = toks; 266 q->tokens = toks;
265 q->ptokens = ptoks; 267 q->ptokens = ptoks;
268 qdisc_qstats_backlog_dec(sch, skb);
266 sch->q.qlen--; 269 sch->q.qlen--;
267 qdisc_unthrottled(sch); 270 qdisc_unthrottled(sch);
268 qdisc_bstats_update(sch, skb); 271 qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
294 struct tbf_sched_data *q = qdisc_priv(sch); 297 struct tbf_sched_data *q = qdisc_priv(sch);
295 298
296 qdisc_reset(q->qdisc); 299 qdisc_reset(q->qdisc);
300 sch->qstats.backlog = 0;
297 sch->q.qlen = 0; 301 sch->q.qlen = 0;
298 q->t_c = ktime_get_ns(); 302 q->t_c = ktime_get_ns();
299 q->tokens = q->buffer; 303 q->tokens = q->buffer;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 06b4df9faaa1..2808d550d273 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -446,16 +446,27 @@ out_no_rpciod:
446 return ERR_PTR(err); 446 return ERR_PTR(err);
447} 447}
448 448
449struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, 449static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
450 struct rpc_xprt *xprt) 450 struct rpc_xprt *xprt)
451{ 451{
452 struct rpc_clnt *clnt = NULL; 452 struct rpc_clnt *clnt = NULL;
453 struct rpc_xprt_switch *xps; 453 struct rpc_xprt_switch *xps;
454 454
455 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 455 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
456 if (xps == NULL) 456 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
457 return ERR_PTR(-ENOMEM); 457 xps = args->bc_xprt->xpt_bc_xps;
458 458 xprt_switch_get(xps);
459 } else {
460 xps = xprt_switch_alloc(xprt, GFP_KERNEL);
461 if (xps == NULL) {
462 xprt_put(xprt);
463 return ERR_PTR(-ENOMEM);
464 }
465 if (xprt->bc_xprt) {
466 xprt_switch_get(xps);
467 xprt->bc_xprt->xpt_bc_xps = xps;
468 }
469 }
459 clnt = rpc_new_client(args, xps, xprt, NULL); 470 clnt = rpc_new_client(args, xps, xprt, NULL);
460 if (IS_ERR(clnt)) 471 if (IS_ERR(clnt))
461 return clnt; 472 return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
483 494
484 return clnt; 495 return clnt;
485} 496}
486EXPORT_SYMBOL_GPL(rpc_create_xprt);
487 497
488/** 498/**
489 * rpc_create - create an RPC client and transport with one call 499 * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
509 }; 519 };
510 char servername[48]; 520 char servername[48];
511 521
522 if (args->bc_xprt) {
523 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
524 xprt = args->bc_xprt->xpt_bc_xprt;
525 if (xprt) {
526 xprt_get(xprt);
527 return rpc_create_xprt(args, xprt);
528 }
529 }
530
512 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) 531 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
513 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; 532 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
514 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) 533 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index f5572e31d518..4f01f63102ee 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
136 /* See comment on corresponding get in xs_setup_bc_tcp(): */ 136 /* See comment on corresponding get in xs_setup_bc_tcp(): */
137 if (xprt->xpt_bc_xprt) 137 if (xprt->xpt_bc_xprt)
138 xprt_put(xprt->xpt_bc_xprt); 138 xprt_put(xprt->xpt_bc_xprt);
139 if (xprt->xpt_bc_xps)
140 xprt_switch_put(xprt->xpt_bc_xps);
139 xprt->xpt_ops->xpo_free(xprt); 141 xprt->xpt_ops->xpo_free(xprt);
140 module_put(owner); 142 module_put(owner);
141} 143}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d3e0c42361e..7e2b2fa189c3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3057 return xprt; 3057 return xprt;
3058 3058
3059 args->bc_xprt->xpt_bc_xprt = NULL; 3059 args->bc_xprt->xpt_bc_xprt = NULL;
3060 args->bc_xprt->xpt_bc_xps = NULL;
3060 xprt_put(xprt); 3061 xprt_put(xprt);
3061 ret = ERR_PTR(-EINVAL); 3062 ret = ERR_PTR(-EINVAL);
3062out_err: 3063out_err:
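
The three sunrpc hunks (clnt.c, svc_xprt.c, xprtsock.c) are one fix: a TCP back channel must share the forward channel's rpc_xprt_switch rather than allocate a second one. rpc_create_xprt(), now static and reached via the new short-circuit in rpc_create(), takes a reference on the server transport's xpt_bc_xps when it already exists, otherwise stores the freshly allocated switch there, and svc_xprt_free() / xs_setup_bc_tcp() drop or clear it symmetrically. The refcount choreography in user-space miniature; the struct is illustrative and only the get/put names mirror xprt_switch_get()/put():

#include <stdio.h>

struct xprt_switch { int refs; };

static struct xprt_switch *switch_get(struct xprt_switch *xps)
{
	xps->refs++;
	return xps;
}

static void switch_put(struct xprt_switch *xps)
{
	if (--xps->refs == 0)
		printf("switch freed exactly once\n");
}

int main(void)
{
	/* forward channel: xprt_switch_alloc() in rpc_create_xprt() */
	struct xprt_switch fwd = { .refs = 1 };

	/* xprt->bc_xprt exists: stash a reference as xpt_bc_xps */
	struct xprt_switch *bc_xps = switch_get(&fwd);

	/* back-channel client creation reuses it instead of allocating */
	struct xprt_switch *bc_clnt = switch_get(bc_xps);

	switch_put(bc_clnt);	/* back-channel client torn down */
	switch_put(bc_xps);	/* svc_xprt_free(): xprt_switch_put() */
	switch_put(&fwd);	/* forward client torn down: freed here */
	return 0;
}
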
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index f795b1dd0ccd..3ad9fab1985f 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
604 604
605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); 605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); 606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
607 strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); 607 nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
608 TIPC_MAX_LINK_NAME);
608 609
609 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, 610 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
610 &link_info, sizeof(link_info)); 611 &link_info, sizeof(link_info));
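
The tipc change replaces an unbounded strcpy() of a netlink-supplied link name into the fixed link_info.str buffer with nla_strlcpy(), which copies at most TIPC_MAX_LINK_NAME - 1 bytes and always NUL-terminates, so an oversized attribute can no longer overflow the reply. A user-space stand-in for the bounded copy; bounded_copy() is a hypothetical helper standing in for nla_strlcpy(), and the constant mirrors the kernel's uapi value:

#include <stdio.h>
#include <string.h>

#define TIPC_MAX_LINK_NAME 60	/* mirrors the kernel's uapi constant */

/* hypothetical stand-in for nla_strlcpy(): bounded, always terminated */
static void bounded_copy(char *dst, const char *src, size_t dstsize)
{
	strncpy(dst, src, dstsize - 1);
	dst[dstsize - 1] = '\0';
}

int main(void)
{
	char link_info_str[TIPC_MAX_LINK_NAME];
	char oversized[128];

	memset(oversized, 'A', sizeof(oversized) - 1);
	oversized[sizeof(oversized) - 1] = '\0';

	/* strcpy() here would write 68 bytes past the end of the buffer */
	bounded_copy(link_info_str, oversized, sizeof(link_info_str));
	printf("stored %zu bytes, truncated safely\n", strlen(link_info_str));
	return 0;
}
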
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 80aa6a3e6817..735362c26c8e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
316 struct dentry *dentry = unix_sk(s)->path.dentry; 316 struct dentry *dentry = unix_sk(s)->path.dentry;
317 317
318 if (dentry && d_backing_inode(dentry) == i) { 318 if (dentry && d_real_inode(dentry) == i) {
319 sock_hold(s); 319 sock_hold(s);
320 goto found; 320 goto found;
321 } 321 }
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
911 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); 911 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
912 if (err) 912 if (err)
913 goto fail; 913 goto fail;
914 inode = d_backing_inode(path.dentry); 914 inode = d_real_inode(path.dentry);
915 err = inode_permission(inode, MAY_WRITE); 915 err = inode_permission(inode, MAY_WRITE);
916 if (err) 916 if (err)
917 goto put_fail; 917 goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1048 goto out_up; 1048 goto out_up;
1049 } 1049 }
1050 addr->hash = UNIX_HASH_SIZE; 1050 addr->hash = UNIX_HASH_SIZE;
1051 hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); 1051 hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1052 spin_lock(&unix_table_lock); 1052 spin_lock(&unix_table_lock);
1053 u->path = u_path; 1053 u->path = u_path;
1054 list = &unix_socket_table[hash]; 1054 list = &unix_socket_table[hash];
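
All three af_unix call sites must switch from d_backing_inode() to d_real_inode() together because bind() hashes the socket under an inode number and unix_find_other()/unix_find_socket_byinode() must derive the same number, or a socket bound through an overlayfs path is never found. A user-space model of that hash agreement; UNIX_HASH_SIZE mirrors the kernel constant and the inode numbers are invented:

#include <stdio.h>

#define UNIX_HASH_SIZE 256	/* mirrors the kernel constant */

static unsigned int hash_slot(unsigned long ino)
{
	return ino & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
	unsigned long overlay_ino = 4242;	/* invented numbers */
	unsigned long real_ino = 1717;

	/* pre-fix hazard: bind() hashed one inode, lookup derived the other */
	printf("bound in slot %u, searched in slot %u\n",
	       hash_slot(overlay_ino), hash_slot(real_ino));

	/* post-fix: both sides resolve the real underlying inode and agree */
	printf("both sides use slot %u\n", hash_slot(real_ino));
	return 0;
}
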
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d25c82bc1bbe..ecca3896b9f7 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
363 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); 363 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
364 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); 364 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
365 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); 365 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
366 WARN_ON(ops->set_tx_power && !ops->get_tx_power);
367 WARN_ON(ops->set_antenna && !ops->get_antenna);
368 366
369 alloc_size = sizeof(*rdev) + sizeof_priv; 367 alloc_size = sizeof(*rdev) + sizeof_priv;
370 368
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6250b1cfcde5..dbb2738e356a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
958 return private(dev, iwr, cmd, info, handler); 958 return private(dev, iwr, cmd, info, handler);
959 } 959 }
960 /* Old driver API : call driver ioctl handler */ 960 /* Old driver API : call driver ioctl handler */
961 if (dev->netdev_ops->ndo_do_ioctl) 961 if (dev->netdev_ops->ndo_do_ioctl) {
962 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 962#ifdef CONFIG_COMPAT
963 if (info->flags & IW_REQUEST_FLAG_COMPAT) {
964 int ret = 0;
965 struct iwreq iwr_lcl;
966 struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
967
968 memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
969 iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
970 iwr_lcl.u.data.length = iwp_compat->length;
971 iwr_lcl.u.data.flags = iwp_compat->flags;
972
973 ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
974
975 iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
976 iwp_compat->length = iwr_lcl.u.data.length;
977 iwp_compat->flags = iwr_lcl.u.data.flags;
978
979 return ret;
980 } else
981#endif
982 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
983 }
963 return -EOPNOTSUPP; 984 return -EOPNOTSUPP;
964} 985}
965 986
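
The wext hunk handles a 32-bit process issuing wireless-extensions ioctls on a 64-bit kernel: struct iw_point embeds a user pointer whose size differs between the two ABIs, so the compat layout is widened into a native struct iwreq before the driver's ndo_do_ioctl() runs, and the results are narrowed back afterwards. A compilable sketch of that round-trip; the struct layouts here are simplified stand-ins, and only compat_ptr()/ptr_to_compat() name the real kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's compat and native layouts */
struct compat_iw_point { uint32_t pointer; uint16_t length, flags; };
struct iw_point { void *pointer; uint16_t length, flags; };

static void compat_to_native(struct iw_point *n, const struct compat_iw_point *c)
{
	n->pointer = (void *)(uintptr_t)c->pointer;	/* compat_ptr() */
	n->length = c->length;
	n->flags = c->flags;
}

static void native_to_compat(struct compat_iw_point *c, const struct iw_point *n)
{
	c->pointer = (uint32_t)(uintptr_t)n->pointer;	/* ptr_to_compat() */
	c->length = n->length;
	c->flags = n->flags;
}

int main(void)
{
	struct compat_iw_point c = { 0x1000, 32, 0 };
	struct iw_point n;

	compat_to_native(&n, &c);	/* before ndo_do_ioctl() */
	n.length = 16;			/* driver may update the results */
	native_to_compat(&c, &n);	/* copy the results back */
	printf("length back to compat caller: %u\n", (unsigned)c.length);
	return 0;
}
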