Diffstat (limited to 'net')
-rw-r--r-- net/batman-adv/bat_v_elp.c | 3
-rw-r--r-- net/batman-adv/hard-interface.c | 5
-rw-r--r-- net/batman-adv/soft-interface.c | 2
-rw-r--r-- net/core/filter.c | 2
-rw-r--r-- net/core/skmsg.c | 3
-rw-r--r-- net/dccp/ccid.h | 4
-rw-r--r-- net/dsa/master.c | 4
-rw-r--r-- net/dsa/slave.c | 17
-rw-r--r-- net/ipv4/ip_gre.c | 7
-rw-r--r-- net/ipv6/ip6_gre.c | 7
-rw-r--r-- net/ipv6/netfilter.c | 4
-rw-r--r-- net/ipv6/seg6_iptunnel.c | 2
-rw-r--r-- net/ipv6/sit.c | 3
-rw-r--r-- net/l2tp/l2tp_core.c | 9
-rw-r--r-- net/l2tp/l2tp_core.h | 20
-rw-r--r-- net/l2tp/l2tp_ip.c | 3
-rw-r--r-- net/l2tp/l2tp_ip6.c | 3
-rw-r--r-- net/mac80211/tx.c | 12
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 16
-rw-r--r-- net/netfilter/nf_tables_api.c | 85
-rw-r--r-- net/netfilter/nft_compat.c | 62
-rw-r--r-- net/netfilter/nft_dynset.c | 18
-rw-r--r-- net/netfilter/nft_immediate.c | 6
-rw-r--r-- net/netfilter/nft_lookup.c | 18
-rw-r--r-- net/netfilter/nft_objref.c | 18
-rw-r--r-- net/rds/bind.c | 6
-rw-r--r-- net/rxrpc/recvmsg.c | 3
-rw-r--r-- net/sched/cls_flower.c | 6
-rw-r--r-- net/sctp/socket.c | 4
-rw-r--r-- net/sctp/stream.c | 20
-rw-r--r-- net/smc/af_smc.c | 11
-rw-r--r-- net/smc/smc_cdc.c | 21
-rw-r--r-- net/smc/smc_cdc.h | 34
-rw-r--r-- net/smc/smc_clc.c | 2
-rw-r--r-- net/smc/smc_close.c | 9
-rw-r--r-- net/smc/smc_core.c | 6
-rw-r--r-- net/smc/smc_core.h | 20
-rw-r--r-- net/smc/smc_ib.c | 6
-rw-r--r-- net/smc/smc_llc.c | 3
-rw-r--r-- net/smc/smc_pnet.c | 2
-rw-r--r-- net/smc/smc_tx.c | 64
-rw-r--r-- net/smc/smc_wr.c | 46
-rw-r--r-- net/smc/smc_wr.h | 1
-rw-r--r-- net/socket.c | 82
-rw-r--r-- net/vmw_vsock/virtio_transport.c | 29
-rw-r--r-- net/wireless/ap.c | 2
-rw-r--r-- net/wireless/core.h | 2
-rw-r--r-- net/wireless/sme.c | 2
48 files changed, 474 insertions, 240 deletions
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f099eb8..ef0dec20c7d8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
 	ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
+	/* free the TID stats immediately */
+	cfg80211_sinfo_release_content(&sinfo);
+
 	dev_put(real_netdev);
 	if (ret == -ENOENT) {
 		/* Node is not associated anymore! It would be
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 508f4416dfc9..415d494cbe22 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -20,7 +20,6 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 	parent_dev = __dev_get_by_index((struct net *)parent_net,
 					dev_get_iflink(net_dev));
 	/* if we got a NULL parent_dev there is something broken.. */
-	if (WARN(!parent_dev, "Cannot find parent device"))
+	if (!parent_dev) {
+		pr_err("Cannot find parent device\n");
 		return false;
+	}
 
 	if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
 		return false;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5db5a0a4c959..b85ca809e509 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 
 	netif_trans_update(soft_iface);
 	vid = batadv_get_vid(skb, 0);
+
+	skb_reset_mac_header(skb);
 	ethhdr = eth_hdr(skb);
 
 	switch (ntohs(ethhdr->h_proto)) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 7559d6835ecb..7a54dc11ac2d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		/* Only some socketops are supported */
 		switch (optname) {
 		case SO_RCVBUF:
+			val = min_t(u32, val, sysctl_rmem_max);
 			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 			break;
 		case SO_SNDBUF:
+			val = min_t(u32, val, sysctl_wmem_max);
 			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 			break;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index d6d5c20d7044..8c826603bf36 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
 	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
 
 	/* No sk_callback_lock since already detached. */
-	if (psock->parser.enabled)
-		strp_done(&psock->parser.strp);
+	strp_done(&psock->parser.strp);
 
 	cancel_work_sync(&psock->work);
 
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a47b5c..baaaeb2b2c42 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
 					   u8 pkt, u8 opt, u8 *val, u8 len)
 {
-	if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+	if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
 		return 0;
 	return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
 }
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
 					   u8 pkt, u8 opt, u8 *val, u8 len)
 {
-	if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+	if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
 		return 0;
 	return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
 }
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 71bb15f491c8..54f5551fb799 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
 	rtnl_unlock();
 }
 
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
 	int ret;
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 	wmb();
 
 	dev->dsa_ptr = cpu_dp;
+	lockdep_set_class(&dev->addr_list_lock,
+			  &dsa_master_addr_list_lock_key);
 
 	ret = dsa_master_ethtool_setup(dev);
 	if (ret)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3fcc1d01615..a1c9fe155057 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *master = dsa_slave_to_master(dev);
-
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
-	if (change & IFF_PROMISC)
-		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(master,
+					 dev->flags & IFF_ALLMULTI ? 1 : -1);
+		if (change & IFF_PROMISC)
+			dev_set_promiscuity(master,
+					    dev->flags & IFF_PROMISC ? 1 : -1);
+	}
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev && !dp->pl)
+	if (!dev->phydev || !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->set_mac_eee)
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev && !dp->pl)
+	if (!dev->phydev || !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->get_mac_eee)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 20a64fe6254b..3978f807fa8b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1455,12 +1455,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
+	    !t->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4416368dbd49..801a9a0c217e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct __ip6_tnl_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
+	    !p->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0bc351..6d0b1f3e927b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 	struct sock *sk = sk_to_full_sk(skb->sk);
 	unsigned int hh_len;
 	struct dst_entry *dst;
+	int strict = (ipv6_addr_type(&iph->daddr) &
+		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
 	struct flowi6 fl6 = {
 		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-			rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+			strict ? skb_dst(skb)->dev->ifindex : 0,
 		.flowi6_mark = skb->mark,
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7e1e27..ee5403cbe655 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	} else {
 		ip6_flow_hdr(hdr, 0, flowlabel);
 		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	}
 
 	hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1e03305c0549..e8a1dabef803 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 	}
 
 	err = 0;
-	if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+	if (__in6_dev_get(skb->dev) &&
+	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
 		goto out;
 
 	if (t->parms.iph.daddr == 0)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d435696a..fed6becc5daf 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S		0x40000000
 #define L2TP_SL_SEQ_MASK	0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ	10
-#define L2TP_HDR_SIZE_NOSEQ	6
+#define L2TP_HDR_SIZE_MAX	14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS	0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 	__skb_pull(skb, sizeof(struct udphdr));
 
 	/* Short packet? */
-	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: recv short packet (len=%d)\n",
 			  tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 		goto error;
 	}
 
+	if (tunnel->version == L2TP_HDR_VER_3 &&
+	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto error;
+
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe94d389..b2ce90260c35 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
 }
 #endif
 
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+					       unsigned char **ptr, unsigned char **optr)
+{
+	int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+	if (opt_len > 0) {
+		int off = *ptr - *optr;
+
+		if (!pskb_may_pull(skb, off + opt_len))
+			return -1;
+
+		if (skb->data != *optr) {
+			*optr = skb->data;
+			*ptr = skb->data + off;
+		}
+	}
+
+	return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)				\
 do {									\
 	if (((ptr)->debug) & (type))					\
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86d4dcc..d4c60523c549 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4a0b0c..0ae6899edac0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f170d6c6629a..928f13a208b0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
 			       int head_need, bool may_encrypt)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_hdr *hdr;
+	bool enc_tailroom;
 	int tail_need = 0;
 
-	if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+	hdr = (struct ieee80211_hdr *) skb->data;
+	enc_tailroom = may_encrypt &&
+		       (sdata->crypto_tx_tailroom_needed_cnt ||
+			ieee80211_is_mgmt(hdr->frame_control));
+
+	if (enc_tailroom) {
 		tail_need = IEEE80211_ENCRYPT_TAILROOM;
 		tail_need -= skb_tailroom(skb);
 		tail_need = max_t(int, tail_need, 0);
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
 
 	if (skb_cloned(skb) &&
 	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
-	     !skb_clone_writable(skb, ETH_HLEN) ||
-	     (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
 		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
 	else if (head_need || tail_need)
 		I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 741b533148ba..db4d46332e86 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 		}
 
 		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+				continue;
+
 			NF_CT_STAT_INC_ATOMIC(net, found);
 			rcu_read_unlock();
 			return 1;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fb07f6cfc719..5a92f23f179f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
 	kfree(trans);
 }
 
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	struct net *net = ctx->net;
+	struct nft_trans *trans;
+
+	if (!nft_set_is_anonymous(set))
+		return;
+
+	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+		if (trans->msg_type == NFT_MSG_NEWSET &&
+		    nft_trans_set(trans) == set) {
+			nft_trans_set_bound(trans) = true;
+			break;
+		}
+	}
+}
+
 static int nf_tables_register_hook(struct net *net,
 				   const struct nft_table *table,
 				   struct nft_chain *chain)
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
 	return err;
 }
 
-/* either expr ops provide both activate/deactivate, or neither */
-static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
-{
-	if (!ops)
-		return true;
-
-	if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
-		return false;
-
-	return true;
-}
-
 static void nft_rule_expr_activate(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
-				     struct nft_rule *rule)
+				     struct nft_rule *rule,
+				     enum nft_trans_phase phase)
 {
 	struct nft_expr *expr;
 
 	expr = nft_expr_first(rule);
 	while (expr != nft_expr_last(rule) && expr->ops) {
 		if (expr->ops->deactivate)
-			expr->ops->deactivate(ctx, expr);
+			expr->ops->deactivate(ctx, expr, phase);
 
 		expr = nft_expr_next(expr);
 	}
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
 		nft_trans_destroy(trans);
 		return err;
 	}
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
 
 	return 0;
 }
@@ -1929,9 +1935,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
  */
 int nft_register_expr(struct nft_expr_type *type)
 {
-	if (!nft_expr_check_ops(type->ops))
-		return -EINVAL;
-
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
 	if (type->family == NFPROTO_UNSPEC)
 		list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2079,10 +2082,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
 			err = PTR_ERR(ops);
 			goto err1;
 		}
-		if (!nft_expr_check_ops(ops)) {
-			err = -EINVAL;
-			goto err1;
-		}
 	} else
 		ops = type->ops;
 
@@ -2511,7 +2510,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 static void nf_tables_rule_release(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
 	nf_tables_rule_destroy(ctx, rule);
 }
 
@@ -3708,39 +3707,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 bind:
 	binding->chain = ctx->chain;
 	list_add_tail_rcu(&binding->list, &set->bindings);
+	nft_set_trans_bind(ctx, set);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding)
-{
-	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-	    nft_is_active(ctx->net, set))
-		list_add_tail_rcu(&set->list, &ctx->table->sets);
-
-	list_add_tail_rcu(&binding->list, &set->bindings);
-}
-EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
-
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding)
+			  struct nft_set_binding *binding, bool event)
 {
 	list_del_rcu(&binding->list);
 
-	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-	    nft_is_active(ctx->net, set))
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
 		list_del_rcu(&set->list);
+		if (event)
+			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+					     GFP_KERNEL);
+	}
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
 {
-	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-	    nft_is_active(ctx->net, set)) {
-		nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		nft_set_destroy(set);
-	}
 }
 EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
 
@@ -6535,6 +6525,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			nf_tables_rule_notify(&trans->ctx,
 					      nft_trans_rule(trans),
 					      NFT_MSG_DELRULE);
+			nft_rule_expr_deactivate(&trans->ctx,
+						 nft_trans_rule(trans),
+						 NFT_TRANS_COMMIT);
 			break;
 		case NFT_MSG_NEWSET:
 			nft_clear(net, nft_trans_set(trans));
@@ -6621,7 +6614,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
 		break;
 	case NFT_MSG_NEWSET:
-		nft_set_destroy(nft_trans_set(trans));
+		if (!nft_trans_set_bound(trans))
+			nft_set_destroy(nft_trans_set(trans));
 		break;
 	case NFT_MSG_NEWSETELEM:
 		nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6682,7 +6676,9 @@ static int __nf_tables_abort(struct net *net)
 		case NFT_MSG_NEWRULE:
 			trans->ctx.chain->use--;
 			list_del_rcu(&nft_trans_rule(trans)->list);
-			nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+			nft_rule_expr_deactivate(&trans->ctx,
+						 nft_trans_rule(trans),
+						 NFT_TRANS_ABORT);
 			break;
 		case NFT_MSG_DELRULE:
 			trans->ctx.chain->use++;
@@ -6692,7 +6688,8 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			list_del_rcu(&nft_trans_set(trans)->list);
+			if (!nft_trans_set_bound(trans))
+				list_del_rcu(&nft_trans_set(trans)->list);
 			break;
 		case NFT_MSG_DELSET:
 			trans->ctx.table->use++;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 5eb269428832..fe64df848365 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net)
 	return net_generic(net, nft_compat_net_id);
 }
 
+static void nft_xt_get(struct nft_xt *xt)
+{
+	/* refcount_inc() warns on 0 -> 1 transition, but we can't
+	 * init the reference count to 1 in .select_ops -- we can't
+	 * undo such an increase when another expression inside the same
+	 * rule fails afterwards.
+	 */
+	if (xt->listcnt == 0)
+		refcount_set(&xt->refcnt, 1);
+	else
+		refcount_inc(&xt->refcnt);
+
+	xt->listcnt++;
+}
+
 static bool nft_xt_put(struct nft_xt *xt)
 {
 	if (refcount_dec_and_test(&xt->refcnt)) {
@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		return -EINVAL;
 
 	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	refcount_inc(&nft_xt->refcnt);
+	nft_xt_get(nft_xt);
 	return 0;
 }
 
@@ -504,7 +519,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		return ret;
 
 	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	refcount_inc(&nft_xt->refcnt);
+	nft_xt_get(nft_xt);
 	return 0;
 }
 
@@ -558,41 +573,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	__nft_match_destroy(ctx, expr, nft_expr_priv(expr));
 }
 
-static void nft_compat_activate(const struct nft_ctx *ctx,
-				const struct nft_expr *expr,
-				struct list_head *h)
-{
-	struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
-
-	if (xt->listcnt == 0)
-		list_add(&xt->head, h);
-
-	xt->listcnt++;
-}
-
-static void nft_compat_activate_mt(const struct nft_ctx *ctx,
-				   const struct nft_expr *expr)
-{
-	struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
-
-	nft_compat_activate(ctx, expr, &cn->nft_match_list);
-}
-
-static void nft_compat_activate_tg(const struct nft_ctx *ctx,
-				   const struct nft_expr *expr)
-{
-	struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
-
-	nft_compat_activate(ctx, expr, &cn->nft_target_list);
-}
-
 static void nft_compat_deactivate(const struct nft_ctx *ctx,
-				  const struct nft_expr *expr)
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
 {
 	struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
 
-	if (--xt->listcnt == 0)
-		list_del_init(&xt->head);
+	if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
+		if (--xt->listcnt == 0)
+			list_del_init(&xt->head);
+	}
 }
 
 static void
@@ -848,7 +838,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 	nft_match->ops.eval = nft_match_eval;
 	nft_match->ops.init = nft_match_init;
 	nft_match->ops.destroy = nft_match_destroy;
-	nft_match->ops.activate = nft_compat_activate_mt;
 	nft_match->ops.deactivate = nft_compat_deactivate;
 	nft_match->ops.dump = nft_match_dump;
 	nft_match->ops.validate = nft_match_validate;
@@ -866,7 +855,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 
 	nft_match->ops.size = matchsize;
 
-	nft_match->listcnt = 1;
+	nft_match->listcnt = 0;
 	list_add(&nft_match->head, &cn->nft_match_list);
 
 	return &nft_match->ops;
@@ -953,7 +942,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 	nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
 	nft_target->ops.init = nft_target_init;
 	nft_target->ops.destroy = nft_target_destroy;
-	nft_target->ops.activate = nft_compat_activate_tg;
 	nft_target->ops.deactivate = nft_compat_deactivate;
 	nft_target->ops.dump = nft_target_dump;
 	nft_target->ops.validate = nft_target_validate;
@@ -964,7 +952,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 	else
 		nft_target->ops.eval = nft_target_eval_xt;
 
-	nft_target->listcnt = 1;
+	nft_target->listcnt = 0;
 	list_add(&nft_target->head, &cn->nft_target_list);
 
 	return &nft_target->ops;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 07d4efd3d851..f1172f99752b 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -235,20 +235,17 @@ err1:
 	return err;
 }
 
-static void nft_dynset_activate(const struct nft_ctx *ctx,
-				const struct nft_expr *expr)
-{
-	struct nft_dynset *priv = nft_expr_priv(expr);
-
-	nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_dynset_deactivate(const struct nft_ctx *ctx,
-				  const struct nft_expr *expr)
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	if (phase == NFT_TRANS_PREPARE)
+		return;
+
+	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+			     phase == NFT_TRANS_COMMIT);
 }
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
 	.eval		= nft_dynset_eval,
 	.init		= nft_dynset_init,
 	.destroy	= nft_dynset_destroy,
-	.activate	= nft_dynset_activate,
 	.deactivate	= nft_dynset_deactivate,
 	.dump		= nft_dynset_dump,
 };
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 0777a93211e2..3f6d1d2a6281 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
-				     const struct nft_expr *expr)
+				     const struct nft_expr *expr,
+				     enum nft_trans_phase phase)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 
+	if (phase == NFT_TRANS_COMMIT)
+		return;
+
 	return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 227b2b15a19c..14496da5141d 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
 	return 0;
 }
 
-static void nft_lookup_activate(const struct nft_ctx *ctx,
-				const struct nft_expr *expr)
-{
-	struct nft_lookup *priv = nft_expr_priv(expr);
-
-	nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_lookup_deactivate(const struct nft_ctx *ctx,
-				  const struct nft_expr *expr)
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	if (phase == NFT_TRANS_PREPARE)
+		return;
+
+	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+			     phase == NFT_TRANS_COMMIT);
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
 	.eval		= nft_lookup_eval,
 	.init		= nft_lookup_init,
-	.activate	= nft_lookup_activate,
 	.deactivate	= nft_lookup_deactivate,
 	.destroy	= nft_lookup_destroy,
 	.dump		= nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index a3185ca2a3a9..ae178e914486 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -155,20 +155,17 @@ nla_put_failure:
 	return -1;
 }
 
-static void nft_objref_map_activate(const struct nft_ctx *ctx,
-				    const struct nft_expr *expr)
-{
-	struct nft_objref_map *priv = nft_expr_priv(expr);
-
-	nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
-				      const struct nft_expr *expr)
+				      const struct nft_expr *expr,
+				      enum nft_trans_phase phase)
 {
 	struct nft_objref_map *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	if (phase == NFT_TRANS_PREPARE)
+		return;
+
+	nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+			     phase == NFT_TRANS_COMMIT);
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
 	.eval		= nft_objref_map_eval,
 	.init		= nft_objref_map_init,
-	.activate	= nft_objref_map_activate,
 	.deactivate	= nft_objref_map_deactivate,
 	.destroy	= nft_objref_map_destroy,
 	.dump		= nft_objref_map_dump,
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6788a3..17c9d9f0c848 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
 	__rds_create_bind_key(key, addr, port, scope_id);
 	rcu_read_lock();
 	rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
-	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
-		rds_sock_addref(rs);
-	else
+	if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+		   !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
 		rs = NULL;
+
 	rcu_read_unlock();
 
 	rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
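The rds/bind.c hunk above replaces an unconditional rds_sock_addref() with refcount_inc_not_zero(), so a lookup done under rcu_read_lock() only takes a reference when the socket's refcount has not already dropped to zero. Below is a minimal userspace sketch of that "get a reference unless it is already zero" pattern, using C11 atomics rather than the kernel's refcount_t and RCU; struct obj and obj_get_unless_zero() are hypothetical names introduced only for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;	/* 0 means the object is being torn down */
};

/* Take a reference only if the count is still non-zero: a concurrent
 * release may already have dropped the last reference, in which case
 * the lookup must fail instead of resurrecting the object.
 */
static bool obj_get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	printf("got ref: %d\n", obj_get_unless_zero(&o));	/* prints 1 */
	atomic_store(&o.refcnt, 0);				/* last ref gone */
	printf("got ref: %d\n", obj_get_unless_zero(&o));	/* prints 0 */
	return 0;
}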
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index eaf19ebaa964..3f7bb11f3290 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@ error_requeue_call:
 	}
 error_no_call:
 	release_sock(&rx->sk);
+error_trace:
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
 	return ret;
 
@@ -604,7 +605,7 @@ wait_interrupted:
 wait_error:
 	finish_wait(sk_sleep(&rx->sk), &wait);
 	call = NULL;
-	goto error_no_call;
+	goto error_trace;
 }
 
 /**
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6aa57fbbbaf..12ca9d13db83 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (!tc_skip_hw(fnew->flags)) {
 		err = fl_hw_replace_filter(tp, fnew, extack);
 		if (err)
-			goto errout_mask;
+			goto errout_mask_ht;
 	}
 
 	if (!tc_in_hw(fnew->flags))
@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	kfree(mask);
 	return 0;
 
+errout_mask_ht:
+	rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+			       fnew->mask->filter_ht_params);
+
 errout_mask:
 	fl_mask_put(head, fnew->mask, false);
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f93c3cf9e567..65d6d04546ae 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_transport *transport = NULL;
 	struct sctp_sndrcvinfo _sinfo, *sinfo;
-	struct sctp_association *asoc;
+	struct sctp_association *asoc, *tmp;
 	struct sctp_cmsgs cmsgs;
 	union sctp_addr *daddr;
 	bool new = false;
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 
 	/* SCTP_SENDALL process */
 	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
-		list_for_each_entry(asoc, &ep->asocs, asocs) {
+		list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
 			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
 							msg_len);
 			if (err == 0)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 80e0ae5534ec..f24633114dfd 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
 	}
 }
 
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+{
+	size_t index = 0;
+
+	while (count--) {
+		if (elem == flex_array_get(fa, index))
+			break;
+		index++;
+	}
+
+	return index;
+}
+
 /* Migrates chunks from stream queues to new stream queues if needed,
  * but not across associations. Also, removes those chunks to streams
  * higher than the new max.
@@ -147,6 +160,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
 
 	if (stream->out) {
 		fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+		if (stream->out_curr) {
+			size_t index = fa_index(stream->out, stream->out_curr,
+						stream->outcnt);
+
+			BUG_ON(index == stream->outcnt);
+			stream->out_curr = flex_array_get(out, index);
+		}
 		fa_free(stream->out);
 	}
 
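The two sctp/stream.c hunks above fix a stale-pointer problem: stream->out_curr points into the old flex_array, so after the streams are copied into a newly allocated array the cursor must be re-derived from its index before the old array is freed. A standalone sketch of the same index-then-repoint idea with a plain malloc'ed array (illustrative only, not the kernel flex_array API; grow_keep_cursor() is a hypothetical helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow an array while keeping an external "current element" pointer valid:
 * record the element's index in the old array, copy the contents, then
 * re-derive the pointer from the new base before freeing the old array.
 */
static int *grow_keep_cursor(int *old, size_t oldcnt, size_t newcnt, int **curr)
{
	int *new = calloc(newcnt, sizeof(*new));

	if (!new)
		return NULL;
	memcpy(new, old, oldcnt * sizeof(*old));
	if (*curr) {
		size_t index = *curr - old;	/* position in the old array */

		*curr = new + index;		/* re-point into the new array */
	}
	free(old);				/* old pointer would now dangle */
	return new;
}

int main(void)
{
	int *arr = calloc(4, sizeof(*arr));
	int *curr = &arr[2];

	arr[2] = 42;
	arr = grow_keep_cursor(arr, 4, 8, &curr);
	printf("%d\n", *curr);	/* still 42, read via the new array */
	free(arr);
	return 0;
}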
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c4e56602e0c6..b04a813fc865 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	smc = smc_sk(sk);
 	lock_sock(sk);
+	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+		/* socket was connected before, no more data to read */
+		rc = 0;
+		goto out;
+	}
 	if ((sk->sk_state == SMC_INIT) ||
 	    (sk->sk_state == SMC_LISTEN) ||
 	    (sk->sk_state == SMC_CLOSED))
@@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
 
 	smc = smc_sk(sk);
 	lock_sock(sk);
-
+	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+		/* socket was connected before, no more data to read */
+		rc = 0;
+		goto out;
+	}
 	if (sk->sk_state == SMC_INIT ||
 	    sk->sk_state == SMC_LISTEN ||
 	    sk->sk_state == SMC_CLOSED)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index db83332ac1c8..a712c9f8699b 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -21,13 +21,6 @@
 
 /********************************** send *************************************/
 
-struct smc_cdc_tx_pend {
-	struct smc_connection	*conn;		/* socket connection */
-	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
-	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
-	u16			ctrl_seq;	/* conn. tx sequence # */
-};
-
 /* handler for send/transmission completion of a CDC msg */
 static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 			       struct smc_link *link,
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 
 int smc_cdc_get_free_slot(struct smc_connection *conn,
 			  struct smc_wr_buf **wr_buf,
+			  struct smc_rdma_wr **wr_rdma_buf,
 			  struct smc_cdc_tx_pend **pend)
 {
 	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
 	int rc;
 
 	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+				     wr_rdma_buf,
 				     (struct smc_wr_tx_pend_priv **)pend);
 	if (!conn->alert_token_local)
 		/* abnormal termination */
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 		     struct smc_wr_buf *wr_buf,
 		     struct smc_cdc_tx_pend *pend)
 {
+	union smc_host_cursor cfed;
 	struct smc_link *link;
 	int rc;
 
@@ -107,10 +103,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
 	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
 			    &conn->local_tx_ctrl, conn);
+	smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
 	if (!rc)
-		smc_curs_copy(&conn->rx_curs_confirmed,
-			      &conn->local_tx_ctrl.cons, conn);
+		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
 
 	return rc;
 }
@@ -121,11 +117,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 	struct smc_wr_buf *wr_buf;
 	int rc;
 
-	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
+	rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
 	if (rc)
 		return rc;
 
-	return smc_cdc_msg_send(conn, wr_buf, pend);
+	spin_lock_bh(&conn->send_lock);
+	rc = smc_cdc_msg_send(conn, wr_buf, pend);
+	spin_unlock_bh(&conn->send_lock);
+	return rc;
 }
 
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index b5bfe38c7f9b..271e2524dc8f 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
 #endif
 }
 
-/* calculate cursor difference between old and new, where old <= new */
+/* calculate cursor difference between old and new, where old <= new and
+ * difference cannot exceed size
+ */
 static inline int smc_curs_diff(unsigned int size,
 				union smc_host_cursor *old,
 				union smc_host_cursor *new)
@@ -185,6 +187,28 @@ static inline int smc_curs_comp(unsigned int size,
 	return smc_curs_diff(size, old, new);
 }
 
+/* calculate cursor difference between old and new, where old <= new and
+ * difference may exceed size
+ */
+static inline int smc_curs_diff_large(unsigned int size,
+				      union smc_host_cursor *old,
+				      union smc_host_cursor *new)
+{
+	if (old->wrap < new->wrap)
+		return min_t(int,
+			     (size - old->count) + new->count +
+			     (new->wrap - old->wrap - 1) * size,
+			     size);
+
+	if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
+		return min_t(int,
+			     (size - old->count) + new->count +
+			     (new->wrap + 0xffff - old->wrap) * size,
+			     size);
+
+	return max_t(int, 0, (new->count - old->count));
+}
+
 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
 					  union smc_host_cursor *local,
 					  struct smc_connection *conn)
@@ -270,10 +294,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
 		smcr_cdc_msg_to_host(local, peer, conn);
 }
 
-struct smc_cdc_tx_pend;
+struct smc_cdc_tx_pend {
+	struct smc_connection	*conn;		/* socket connection */
+	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
+	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
+	u16			ctrl_seq;	/* conn. tx sequence # */
+};
 
 int smc_cdc_get_free_slot(struct smc_connection *conn,
 			  struct smc_wr_buf **wr_buf,
+			  struct smc_rdma_wr **wr_rdma_buf,
 			  struct smc_cdc_tx_pend **pend);
 void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
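The smc_curs_diff_large() helper added in the smc_cdc.h hunks above handles cursor differences that may span more than one buffer wrap and clamps the result to the buffer size. The following is a self-contained userspace sketch of the same wrap-around arithmetic on a plain struct; it is illustrative only (the kernel version operates on union smc_host_cursor and uses min_t/max_t), and struct cursor and curs_diff_large() are hypothetical names.

#include <stdio.h>

struct cursor {
	unsigned int wrap;	/* number of times the ring buffer wrapped */
	unsigned int count;	/* byte offset within the current wrap */
};

/* Difference between old and new cursors (old <= new), clamped to size,
 * mirroring the logic of smc_curs_diff_large() above.
 */
static int curs_diff_large(unsigned int size, const struct cursor *old,
			   const struct cursor *new)
{
	if (old->wrap < new->wrap) {
		unsigned int diff = (size - old->count) + new->count +
				    (new->wrap - old->wrap - 1) * size;

		return diff < size ? (int)diff : (int)size;
	}
	if (old->wrap > new->wrap) {	/* wrap counter itself wrapped */
		unsigned int diff = (size - old->count) + new->count +
				    (new->wrap + 0xffff - old->wrap) * size;

		return diff < size ? (int)diff : (int)size;
	}
	return new->count > old->count ? (int)(new->count - old->count) : 0;
}

int main(void)
{
	struct cursor old = { .wrap = 1, .count = 900 };
	struct cursor new = { .wrap = 2, .count = 100 };

	/* 1024-byte ring: 124 bytes left in wrap 1 plus 100 bytes in wrap 2 */
	printf("%d\n", curs_diff_large(1024, &old, &new));	/* prints 224 */
	return 0;
}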
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 776e9dfc915d..d53fd588d1f5 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 	vec.iov_len = sizeof(struct smc_clc_msg_decline);
 	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
 			     sizeof(struct smc_clc_msg_decline));
-	if (len < sizeof(struct smc_clc_msg_decline))
+	if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
 		len = -EPROTO;
 	return len > 0 ? 0 : len;
 }
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ea2b87f29469..e39cadda1bf5 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work)
 
 	switch (sk->sk_state) {
 	case SMC_INIT:
-		if (atomic_read(&conn->bytes_to_rcv) ||
-		    (rxflags->peer_done_writing &&
-		     !smc_cdc_rxed_any_close(conn))) {
-			sk->sk_state = SMC_APPCLOSEWAIT1;
-		} else {
-			sk->sk_state = SMC_CLOSED;
-			sock_put(sk); /* passive closing */
-		}
+		sk->sk_state = SMC_APPCLOSEWAIT1;
 		break;
 	case SMC_ACTIVE:
 		sk->sk_state = SMC_APPCLOSEWAIT1;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 35c1cdc93e1c..aa1c551cee81 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
 {
 	struct smc_link_group *lgr = conn->lgr;
 
+	if (!lgr)
+		return;
 	write_lock_bh(&lgr->conns_lock);
 	if (conn->alert_token_local) {
 		__smc_lgr_unregister_conn(conn);
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
300 conn->sndbuf_desc->used = 0; 302 conn->sndbuf_desc->used = 0;
301 if (conn->rmb_desc) { 303 if (conn->rmb_desc) {
302 if (!conn->rmb_desc->regerr) { 304 if (!conn->rmb_desc->regerr) {
303 conn->rmb_desc->used = 0;
304 if (!lgr->is_smcd) { 305 if (!lgr->is_smcd) {
305 /* unregister rmb with peer */ 306 /* unregister rmb with peer */
306 smc_llc_do_delete_rkey( 307 smc_llc_do_delete_rkey(
307 &lgr->lnk[SMC_SINGLE_LINK], 308 &lgr->lnk[SMC_SINGLE_LINK],
308 conn->rmb_desc); 309 conn->rmb_desc);
309 } 310 }
311 conn->rmb_desc->used = 0;
310 } else { 312 } else {
311 /* buf registration failed, reuse not possible */ 313 /* buf registration failed, reuse not possible */
312 write_lock_bh(&lgr->rmbs_lock); 314 write_lock_bh(&lgr->rmbs_lock);
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
628 local_contact = SMC_REUSE_CONTACT; 630 local_contact = SMC_REUSE_CONTACT;
629 conn->lgr = lgr; 631 conn->lgr = lgr;
630 smc_lgr_register_conn(conn); /* add smc conn to lgr */ 632 smc_lgr_register_conn(conn); /* add smc conn to lgr */
633 if (delayed_work_pending(&lgr->free_work))
634 cancel_delayed_work(&lgr->free_work);
631 write_unlock_bh(&lgr->conns_lock); 635 write_unlock_bh(&lgr->conns_lock);
632 break; 636 break;
633 } 637 }
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index b00287989a3d..8806d2afa6ed 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -52,6 +52,24 @@ enum smc_wr_reg_state {
52 FAILED /* ib_wr_reg_mr response: failure */ 52 FAILED /* ib_wr_reg_mr response: failure */
53}; 53};
54 54
55struct smc_rdma_sge { /* sges for RDMA writes */
56 struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
57};
58
59#define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per
60 * message send
61 */
62
63struct smc_rdma_sges { /* sges per message send */
64 struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES];
65};
66
67struct smc_rdma_wr { /* work requests per message
68 * send
69 */
70 struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES];
71};
72
55struct smc_link { 73struct smc_link {
56 struct smc_ib_device *smcibdev; /* ib-device */ 74 struct smc_ib_device *smcibdev; /* ib-device */
57 u8 ibport; /* port - values 1 | 2 */ 75 u8 ibport; /* port - values 1 | 2 */
@@ -64,6 +82,8 @@ struct smc_link {
64 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ 82 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */
65 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ 83 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */
66 struct ib_sge *wr_tx_sges; /* WR send gather meta data */ 84 struct ib_sge *wr_tx_sges; /* WR send gather meta data */
85 struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
86 struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */
67 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ 87 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */
68 /* above four vectors have wr_tx_cnt elements and use the same index */ 88 /* above four vectors have wr_tx_cnt elements and use the same index */
69 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ 89 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
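Review note: the new smc_core.h structs give every send slot its own preallocated pair of RDMA-write work requests, each with its own small gather list, instead of building an ib_rdma_wr on the stack per transfer. A rough user-space sketch of how the arrays nest and how a slot index plus chunk index selects one WR and its SGEs; the ib_sge/ib_rdma_wr types are only modelled here, and the values of SMC_IB_MAX_SEND_SGE (2) and SMC_WR_BUF_CNT (16) are assumptions, not taken from this hunk:

#include <stdint.h>
#include <stdio.h>

#define MAX_SEND_SGE	2	/* assumed value of SMC_IB_MAX_SEND_SGE */
#define MAX_RDMA_WRITES	2	/* SMC_MAX_RDMA_WRITES from the hunk above */
#define WR_BUF_CNT	16	/* assumed send-slot count (SMC_WR_BUF_CNT) */

struct fake_sge { uint64_t addr; uint32_t length; uint32_t lkey; };
struct fake_rdma_wr { struct fake_sge *sg_list; int num_sge; };

struct rdma_sges { struct fake_sge sge[MAX_SEND_SGE]; };
struct rdma_sges_per_send { struct rdma_sges chunk[MAX_RDMA_WRITES]; };
struct rdma_wrs_per_send { struct fake_rdma_wr wr[MAX_RDMA_WRITES]; };

static struct rdma_sges_per_send tx_rdma_sges[WR_BUF_CNT];
static struct rdma_wrs_per_send tx_rdmas[WR_BUF_CNT];

int main(void)
{
	/* wiring done once at link init, as smc_wr_init_sge() does below */
	for (int i = 0; i < WR_BUF_CNT; i++)
		for (int c = 0; c < MAX_RDMA_WRITES; c++)
			tx_rdmas[i].wr[c].sg_list = tx_rdma_sges[i].chunk[c].sge;

	/* a sender holding slot 5 fills destination chunk 1, source segment 0 */
	tx_rdmas[5].wr[1].sg_list[0].addr = 0x1000;
	tx_rdmas[5].wr[1].sg_list[0].length = 512;
	tx_rdmas[5].wr[1].num_sge = 1;

	printf("per-link footprint: %zu bytes of SGEs, %zu bytes of WRs\n",
	       sizeof(tx_rdma_sges), sizeof(tx_rdmas));
	return 0;
}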
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index e519ef29c0ff..76487a16934e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
289 289
290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) 290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
291{ 291{
292 struct smc_ib_device *smcibdev = 292 struct smc_link *lnk = (struct smc_link *)priv;
293 (struct smc_ib_device *)ibevent->device; 293 struct smc_ib_device *smcibdev = lnk->smcibdev;
294 u8 port_idx; 294 u8 port_idx;
295 295
296 switch (ibevent->event) { 296 switch (ibevent->event) {
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
298 case IB_EVENT_GID_CHANGE: 298 case IB_EVENT_GID_CHANGE:
299 case IB_EVENT_PORT_ERR: 299 case IB_EVENT_PORT_ERR:
300 case IB_EVENT_QP_ACCESS_ERR: 300 case IB_EVENT_QP_ACCESS_ERR:
301 port_idx = ibevent->element.port_num - 1; 301 port_idx = ibevent->element.qp->port - 1;
302 set_bit(port_idx, &smcibdev->port_event_mask); 302 set_bit(port_idx, &smcibdev->port_event_mask);
303 schedule_work(&smcibdev->port_event_work); 303 schedule_work(&smcibdev->port_event_work);
304 break; 304 break;
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a6d3623d06f4..4fd60c522802 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link,
166{ 166{
167 int rc; 167 int rc;
168 168
169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); 169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
170 pend);
170 if (rc < 0) 171 if (rc < 0)
171 return rc; 172 return rc;
172 BUILD_BUG_ON_MSG( 173 BUILD_BUG_ON_MSG(
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7cb3e4f07c10..632c3109dee5 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -27,7 +27,7 @@
27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { 27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
28 [SMC_PNETID_NAME] = { 28 [SMC_PNETID_NAME] = {
29 .type = NLA_NUL_STRING, 29 .type = NLA_NUL_STRING,
30 .len = SMC_MAX_PNETID_LEN - 1 30 .len = SMC_MAX_PNETID_LEN
31 }, 31 },
32 [SMC_PNETID_ETHNAME] = { 32 [SMC_PNETID_ETHNAME] = {
33 .type = NLA_NUL_STRING, 33 .type = NLA_NUL_STRING,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index d8366ed51757..f93f3580c100 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; 165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
166 166
167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { 167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
168 if (send_done)
169 return send_done;
168 rc = smc_tx_wait(smc, msg->msg_flags); 170 rc = smc_tx_wait(smc, msg->msg_flags);
169 if (rc) { 171 if (rc)
170 if (send_done)
171 return send_done;
172 goto out_err; 172 goto out_err;
173 }
174 continue; 173 continue;
175 } 174 }
176 175
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
267 266
268/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ 267/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
269static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, 268static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
270 int num_sges, struct ib_sge sges[]) 269 int num_sges, struct ib_rdma_wr *rdma_wr)
271{ 270{
272 struct smc_link_group *lgr = conn->lgr; 271 struct smc_link_group *lgr = conn->lgr;
273 struct ib_rdma_wr rdma_wr;
274 struct smc_link *link; 272 struct smc_link *link;
275 int rc; 273 int rc;
276 274
277 memset(&rdma_wr, 0, sizeof(rdma_wr));
278 link = &lgr->lnk[SMC_SINGLE_LINK]; 275 link = &lgr->lnk[SMC_SINGLE_LINK];
279 rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); 276 rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
280 rdma_wr.wr.sg_list = sges; 277 rdma_wr->wr.num_sge = num_sges;
281 rdma_wr.wr.num_sge = num_sges; 278 rdma_wr->remote_addr =
282 rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
283 rdma_wr.remote_addr =
284 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + 279 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
285 /* RMBE within RMB */ 280 /* RMBE within RMB */
286 conn->tx_off + 281 conn->tx_off +
287 /* offset within RMBE */ 282 /* offset within RMBE */
288 peer_rmbe_offset; 283 peer_rmbe_offset;
289 rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; 284 rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
290 rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); 285 rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
291 if (rc) { 286 if (rc) {
292 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; 287 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
293 smc_lgr_terminate(lgr); 288 smc_lgr_terminate(lgr);
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
314/* SMC-R helper for smc_tx_rdma_writes() */ 309/* SMC-R helper for smc_tx_rdma_writes() */
315static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, 310static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
316 size_t src_off, size_t src_len, 311 size_t src_off, size_t src_len,
317 size_t dst_off, size_t dst_len) 312 size_t dst_off, size_t dst_len,
313 struct smc_rdma_wr *wr_rdma_buf)
318{ 314{
319 dma_addr_t dma_addr = 315 dma_addr_t dma_addr =
320 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); 316 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
321 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
322 int src_len_sum = src_len, dst_len_sum = dst_len; 317 int src_len_sum = src_len, dst_len_sum = dst_len;
323 struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
324 int sent_count = src_off; 318 int sent_count = src_off;
325 int srcchunk, dstchunk; 319 int srcchunk, dstchunk;
326 int num_sges; 320 int num_sges;
327 int rc; 321 int rc;
328 322
329 for (dstchunk = 0; dstchunk < 2; dstchunk++) { 323 for (dstchunk = 0; dstchunk < 2; dstchunk++) {
324 struct ib_sge *sge =
325 wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
326
330 num_sges = 0; 327 num_sges = 0;
331 for (srcchunk = 0; srcchunk < 2; srcchunk++) { 328 for (srcchunk = 0; srcchunk < 2; srcchunk++) {
332 sges[srcchunk].addr = dma_addr + src_off; 329 sge[srcchunk].addr = dma_addr + src_off;
333 sges[srcchunk].length = src_len; 330 sge[srcchunk].length = src_len;
334 sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
335 num_sges++; 331 num_sges++;
336 332
337 src_off += src_len; 333 src_off += src_len;
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
344 src_len = dst_len - src_len; /* remainder */ 340 src_len = dst_len - src_len; /* remainder */
345 src_len_sum += src_len; 341 src_len_sum += src_len;
346 } 342 }
347 rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); 343 rc = smc_tx_rdma_write(conn, dst_off, num_sges,
344 &wr_rdma_buf->wr_tx_rdma[dstchunk]);
348 if (rc) 345 if (rc)
349 return rc; 346 return rc;
350 if (dst_len_sum == len) 347 if (dst_len_sum == len)
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
403/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; 400/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
404 * usable snd_wnd as max transmit 401 * usable snd_wnd as max transmit
405 */ 402 */
406static int smc_tx_rdma_writes(struct smc_connection *conn) 403static int smc_tx_rdma_writes(struct smc_connection *conn,
404 struct smc_rdma_wr *wr_rdma_buf)
407{ 405{
408 size_t len, src_len, dst_off, dst_len; /* current chunk values */ 406 size_t len, src_len, dst_off, dst_len; /* current chunk values */
409 union smc_host_cursor sent, prep, prod, cons; 407 union smc_host_cursor sent, prep, prod, cons;
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
464 dst_off, dst_len); 462 dst_off, dst_len);
465 else 463 else
466 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, 464 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
467 dst_off, dst_len); 465 dst_off, dst_len, wr_rdma_buf);
468 if (rc) 466 if (rc)
469 return rc; 467 return rc;
470 468
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
485static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 483static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
486{ 484{
487 struct smc_cdc_producer_flags *pflags; 485 struct smc_cdc_producer_flags *pflags;
486 struct smc_rdma_wr *wr_rdma_buf;
488 struct smc_cdc_tx_pend *pend; 487 struct smc_cdc_tx_pend *pend;
489 struct smc_wr_buf *wr_buf; 488 struct smc_wr_buf *wr_buf;
490 int rc; 489 int rc;
491 490
492 spin_lock_bh(&conn->send_lock); 491 rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
493 rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
494 if (rc < 0) { 492 if (rc < 0) {
495 if (rc == -EBUSY) { 493 if (rc == -EBUSY) {
496 struct smc_sock *smc = 494 struct smc_sock *smc =
497 container_of(conn, struct smc_sock, conn); 495 container_of(conn, struct smc_sock, conn);
498 496
499 if (smc->sk.sk_err == ECONNABORTED) { 497 if (smc->sk.sk_err == ECONNABORTED)
500 rc = sock_error(&smc->sk); 498 return sock_error(&smc->sk);
501 goto out_unlock;
502 }
503 rc = 0; 499 rc = 0;
504 if (conn->alert_token_local) /* connection healthy */ 500 if (conn->alert_token_local) /* connection healthy */
505 mod_delayed_work(system_wq, &conn->tx_work, 501 mod_delayed_work(system_wq, &conn->tx_work,
506 SMC_TX_WORK_DELAY); 502 SMC_TX_WORK_DELAY);
507 } 503 }
508 goto out_unlock; 504 return rc;
509 } 505 }
510 506
507 spin_lock_bh(&conn->send_lock);
511 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { 508 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
512 rc = smc_tx_rdma_writes(conn); 509 rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
513 if (rc) { 510 if (rc) {
514 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], 511 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
515 (struct smc_wr_tx_pend_priv *)pend); 512 (struct smc_wr_tx_pend_priv *)pend);
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
536 533
537 spin_lock_bh(&conn->send_lock); 534 spin_lock_bh(&conn->send_lock);
538 if (!pflags->urg_data_present) 535 if (!pflags->urg_data_present)
539 rc = smc_tx_rdma_writes(conn); 536 rc = smc_tx_rdma_writes(conn, NULL);
540 if (!rc) 537 if (!rc)
541 rc = smcd_cdc_msg_send(conn); 538 rc = smcd_cdc_msg_send(conn);
542 539
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
598 if (to_confirm > conn->rmbe_update_limit) { 595 if (to_confirm > conn->rmbe_update_limit) {
599 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); 596 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
600 sender_free = conn->rmb_desc->len - 597 sender_free = conn->rmb_desc->len -
601 smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); 598 smc_curs_diff_large(conn->rmb_desc->len,
599 &cfed, &prod);
602 } 600 }
603 601
604 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || 602 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
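Review note: smcr_tx_rdma_writes() copies from a circular send buffer into the peer's circular RMBE, so both sides may wrap. The visible loops bound this at two destination chunks (dstchunk) with up to two source segments each (srcchunk), which is why each send slot now carries its own small set of RDMA WRs and gather entries. A stand-alone sketch of the underlying step, splitting a circular-buffer range into at most two linear segments (a generic helper, not the kernel code):

#include <stdio.h>
#include <stddef.h>

struct seg { size_t off; size_t len; };

/* split [start, start+len) in a ring of buf_size bytes into 1 or 2 segments */
static int split_circular(size_t buf_size, size_t start, size_t len,
			  struct seg seg[2])
{
	if (start + len <= buf_size) {
		seg[0].off = start;
		seg[0].len = len;
		return 1;
	}
	seg[0].off = start;
	seg[0].len = buf_size - start;
	seg[1].off = 0;
	seg[1].len = len - seg[0].len;
	return 2;
}

int main(void)
{
	struct seg s[2];
	int n = split_circular(4096, 3800, 600, s);	/* wraps: 296 + 304 bytes */

	for (int i = 0; i < n; i++)
		printf("segment %d: offset %zu, length %zu\n", i, s[i].off, s[i].len);
	return 0;
}

Applying this once to the destination window and once per destination chunk to the source window gives the 2 x 2 worst case that the preallocated wr_tx_rdmas arrays are sized for.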
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index c2694750a6a8..253aa75dc2b6 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
160 * @link: Pointer to smc_link used to later send the message. 160 * @link: Pointer to smc_link used to later send the message.
161 * @handler: Send completion handler function pointer. 161 * @handler: Send completion handler function pointer.
162 * @wr_buf: Out value returns pointer to message buffer. 162 * @wr_buf: Out value returns pointer to message buffer.
163 * @wr_rdma_buf: Out value returns pointer to rdma work request.
163 * @wr_pend_priv: Out value returns pointer serving as handler context. 164 * @wr_pend_priv: Out value returns pointer serving as handler context.
164 * 165 *
165 * Return: 0 on success, or -errno on error. 166 * Return: 0 on success, or -errno on error.
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
167int smc_wr_tx_get_free_slot(struct smc_link *link, 168int smc_wr_tx_get_free_slot(struct smc_link *link,
168 smc_wr_tx_handler handler, 169 smc_wr_tx_handler handler,
169 struct smc_wr_buf **wr_buf, 170 struct smc_wr_buf **wr_buf,
171 struct smc_rdma_wr **wr_rdma_buf,
170 struct smc_wr_tx_pend_priv **wr_pend_priv) 172 struct smc_wr_tx_pend_priv **wr_pend_priv)
171{ 173{
172 struct smc_wr_tx_pend *wr_pend; 174 struct smc_wr_tx_pend *wr_pend;
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
204 wr_ib = &link->wr_tx_ibs[idx]; 206 wr_ib = &link->wr_tx_ibs[idx];
205 wr_ib->wr_id = wr_id; 207 wr_ib->wr_id = wr_id;
206 *wr_buf = &link->wr_tx_bufs[idx]; 208 *wr_buf = &link->wr_tx_bufs[idx];
209 if (wr_rdma_buf)
210 *wr_rdma_buf = &link->wr_tx_rdmas[idx];
207 *wr_pend_priv = &wr_pend->priv; 211 *wr_pend_priv = &wr_pend->priv;
208 return 0; 212 return 0;
209} 213}
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link,
218 u32 idx = pend->idx; 222 u32 idx = pend->idx;
219 223
220 /* clear the full struct smc_wr_tx_pend including .priv */ 224 /* clear the full struct smc_wr_tx_pend including .priv */
221 memset(&link->wr_tx_pends[pend->idx], 0, 225 memset(&link->wr_tx_pends[idx], 0,
222 sizeof(link->wr_tx_pends[pend->idx])); 226 sizeof(link->wr_tx_pends[idx]));
223 memset(&link->wr_tx_bufs[pend->idx], 0, 227 memset(&link->wr_tx_bufs[idx], 0,
224 sizeof(link->wr_tx_bufs[pend->idx])); 228 sizeof(link->wr_tx_bufs[idx]));
225 test_and_clear_bit(idx, link->wr_tx_mask); 229 test_and_clear_bit(idx, link->wr_tx_mask);
226 return 1; 230 return 1;
227 } 231 }
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk)
465 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; 469 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
466 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; 470 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
467 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; 471 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
472 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
473 lnk->roce_pd->local_dma_lkey;
474 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
475 lnk->roce_pd->local_dma_lkey;
476 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
477 lnk->roce_pd->local_dma_lkey;
478 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
479 lnk->roce_pd->local_dma_lkey;
468 lnk->wr_tx_ibs[i].next = NULL; 480 lnk->wr_tx_ibs[i].next = NULL;
469 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; 481 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
470 lnk->wr_tx_ibs[i].num_sge = 1; 482 lnk->wr_tx_ibs[i].num_sge = 1;
471 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; 483 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
472 lnk->wr_tx_ibs[i].send_flags = 484 lnk->wr_tx_ibs[i].send_flags =
473 IB_SEND_SIGNALED | IB_SEND_SOLICITED; 485 IB_SEND_SIGNALED | IB_SEND_SOLICITED;
486 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
487 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
488 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
489 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
490 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
491 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
474 } 492 }
475 for (i = 0; i < lnk->wr_rx_cnt; i++) { 493 for (i = 0; i < lnk->wr_rx_cnt; i++) {
476 lnk->wr_rx_sges[i].addr = 494 lnk->wr_rx_sges[i].addr =
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
521 lnk->wr_tx_mask = NULL; 539 lnk->wr_tx_mask = NULL;
522 kfree(lnk->wr_tx_sges); 540 kfree(lnk->wr_tx_sges);
523 lnk->wr_tx_sges = NULL; 541 lnk->wr_tx_sges = NULL;
542 kfree(lnk->wr_tx_rdma_sges);
543 lnk->wr_tx_rdma_sges = NULL;
524 kfree(lnk->wr_rx_sges); 544 kfree(lnk->wr_rx_sges);
525 lnk->wr_rx_sges = NULL; 545 lnk->wr_rx_sges = NULL;
546 kfree(lnk->wr_tx_rdmas);
547 lnk->wr_tx_rdmas = NULL;
526 kfree(lnk->wr_rx_ibs); 548 kfree(lnk->wr_rx_ibs);
527 lnk->wr_rx_ibs = NULL; 549 lnk->wr_rx_ibs = NULL;
528 kfree(lnk->wr_tx_ibs); 550 kfree(lnk->wr_tx_ibs);
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
552 GFP_KERNEL); 574 GFP_KERNEL);
553 if (!link->wr_rx_ibs) 575 if (!link->wr_rx_ibs)
554 goto no_mem_wr_tx_ibs; 576 goto no_mem_wr_tx_ibs;
577 link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
578 sizeof(link->wr_tx_rdmas[0]),
579 GFP_KERNEL);
580 if (!link->wr_tx_rdmas)
581 goto no_mem_wr_rx_ibs;
582 link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
583 sizeof(link->wr_tx_rdma_sges[0]),
584 GFP_KERNEL);
585 if (!link->wr_tx_rdma_sges)
586 goto no_mem_wr_tx_rdmas;
555 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), 587 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
556 GFP_KERNEL); 588 GFP_KERNEL);
557 if (!link->wr_tx_sges) 589 if (!link->wr_tx_sges)
558 goto no_mem_wr_rx_ibs; 590 goto no_mem_wr_tx_rdma_sges;
559 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, 591 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
560 sizeof(link->wr_rx_sges[0]), 592 sizeof(link->wr_rx_sges[0]),
561 GFP_KERNEL); 593 GFP_KERNEL);
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges:
579 kfree(link->wr_rx_sges); 611 kfree(link->wr_rx_sges);
580no_mem_wr_tx_sges: 612no_mem_wr_tx_sges:
581 kfree(link->wr_tx_sges); 613 kfree(link->wr_tx_sges);
614no_mem_wr_tx_rdma_sges:
615 kfree(link->wr_tx_rdma_sges);
616no_mem_wr_tx_rdmas:
617 kfree(link->wr_tx_rdmas);
582no_mem_wr_rx_ibs: 618no_mem_wr_rx_ibs:
583 kfree(link->wr_rx_ibs); 619 kfree(link->wr_rx_ibs);
584no_mem_wr_tx_ibs: 620no_mem_wr_tx_ibs:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 1d85bb14fd6f..09bf32fd3959 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev);
85 85
86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, 86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
87 struct smc_wr_buf **wr_buf, 87 struct smc_wr_buf **wr_buf,
88 struct smc_rdma_wr **wrs,
88 struct smc_wr_tx_pend_priv **wr_pend_priv); 89 struct smc_wr_tx_pend_priv **wr_pend_priv);
89int smc_wr_tx_put_slot(struct smc_link *link, 90int smc_wr_tx_put_slot(struct smc_link *link,
90 struct smc_wr_tx_pend_priv *wr_pend_priv); 91 struct smc_wr_tx_pend_priv *wr_pend_priv);
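Review note: smc_wr_tx_get_free_slot() grows an optional out-parameter; CDC callers want the slot's preallocated RDMA WRs back, while the LLC caller in the smc_llc.c hunk simply passes NULL and the implementation only dereferences it when non-NULL. A trivial stand-alone illustration of that optional-out-pointer idiom (all names here are made up):

#include <stdio.h>

/* returns a buffer id; optionally also hands back an auxiliary resource */
static int get_free_slot(int *buf_id, int **aux)
{
	static int aux_pool[16];

	*buf_id = 7;			/* pretend slot 7 was free */
	if (aux)			/* only fill the extra out-param if asked */
		*aux = &aux_pool[7];
	return 0;
}

int main(void)
{
	int id, *aux;

	get_free_slot(&id, &aux);	/* CDC-style caller: wants both */
	get_free_slot(&id, NULL);	/* LLC-style caller: no RDMA WRs needed */
	printf("got slot %d\n", id);
	return 0;
}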
diff --git a/net/socket.c b/net/socket.c
index e89884e2197b..d80d87a395ea 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
941EXPORT_SYMBOL(dlci_ioctl_set); 941EXPORT_SYMBOL(dlci_ioctl_set);
942 942
943static long sock_do_ioctl(struct net *net, struct socket *sock, 943static long sock_do_ioctl(struct net *net, struct socket *sock,
944 unsigned int cmd, unsigned long arg, 944 unsigned int cmd, unsigned long arg)
945 unsigned int ifreq_size)
946{ 945{
947 int err; 946 int err;
948 void __user *argp = (void __user *)arg; 947 void __user *argp = (void __user *)arg;
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
968 } else { 967 } else {
969 struct ifreq ifr; 968 struct ifreq ifr;
970 bool need_copyout; 969 bool need_copyout;
971 if (copy_from_user(&ifr, argp, ifreq_size)) 970 if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
972 return -EFAULT; 971 return -EFAULT;
973 err = dev_ioctl(net, cmd, &ifr, &need_copyout); 972 err = dev_ioctl(net, cmd, &ifr, &need_copyout);
974 if (!err && need_copyout) 973 if (!err && need_copyout)
975 if (copy_to_user(argp, &ifr, ifreq_size)) 974 if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
976 return -EFAULT; 975 return -EFAULT;
977 } 976 }
978 return err; 977 return err;
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1071 err = open_related_ns(&net->ns, get_net_ns); 1070 err = open_related_ns(&net->ns, get_net_ns);
1072 break; 1071 break;
1073 default: 1072 default:
1074 err = sock_do_ioctl(net, sock, cmd, arg, 1073 err = sock_do_ioctl(net, sock, cmd, arg);
1075 sizeof(struct ifreq));
1076 break; 1074 break;
1077 } 1075 }
1078 return err; 1076 return err;
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2780 int err; 2778 int err;
2781 2779
2782 set_fs(KERNEL_DS); 2780 set_fs(KERNEL_DS);
2783 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, 2781 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2784 sizeof(struct compat_ifreq));
2785 set_fs(old_fs); 2782 set_fs(old_fs);
2786 if (!err) 2783 if (!err)
2787 err = compat_put_timeval(&ktv, up); 2784 err = compat_put_timeval(&ktv, up);
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2797 int err; 2794 int err;
2798 2795
2799 set_fs(KERNEL_DS); 2796 set_fs(KERNEL_DS);
2800 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, 2797 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2801 sizeof(struct compat_ifreq));
2802 set_fs(old_fs); 2798 set_fs(old_fs);
2803 if (!err) 2799 if (!err)
2804 err = compat_put_timespec(&kts, up); 2800 err = compat_put_timespec(&kts, up);
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
2994 return dev_ioctl(net, cmd, &ifreq, NULL); 2990 return dev_ioctl(net, cmd, &ifreq, NULL);
2995} 2991}
2996 2992
2993static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
2994 unsigned int cmd,
2995 struct compat_ifreq __user *uifr32)
2996{
2997 struct ifreq __user *uifr;
2998 int err;
2999
3000 /* Handle the fact that while struct ifreq has the same *layout* on
3001 * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
3002 * which are handled elsewhere, it still has different *size* due to
3003 * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
3004 * resulting in struct ifreq being 32 and 40 bytes respectively).
3005 * As a result, if the struct happens to be at the end of a page and
3006 * the next page isn't readable/writable, we get a fault. To prevent
3007 * that, copy back and forth to the full size.
3008 */
3009
3010 uifr = compat_alloc_user_space(sizeof(*uifr));
3011 if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
3012 return -EFAULT;
3013
3014 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
3015
3016 if (!err) {
3017 switch (cmd) {
3018 case SIOCGIFFLAGS:
3019 case SIOCGIFMETRIC:
3020 case SIOCGIFMTU:
3021 case SIOCGIFMEM:
3022 case SIOCGIFHWADDR:
3023 case SIOCGIFINDEX:
3024 case SIOCGIFADDR:
3025 case SIOCGIFBRDADDR:
3026 case SIOCGIFDSTADDR:
3027 case SIOCGIFNETMASK:
3028 case SIOCGIFPFLAGS:
3029 case SIOCGIFTXQLEN:
3030 case SIOCGMIIPHY:
3031 case SIOCGMIIREG:
3032 case SIOCGIFNAME:
3033 if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
3034 err = -EFAULT;
3035 break;
3036 }
3037 }
3038 return err;
3039}
3040
2997static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 3041static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
2998 struct compat_ifreq __user *uifr32) 3042 struct compat_ifreq __user *uifr32)
2999{ 3043{
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
3109 } 3153 }
3110 3154
3111 set_fs(KERNEL_DS); 3155 set_fs(KERNEL_DS);
3112 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, 3156 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
3113 sizeof(struct compat_ifreq));
3114 set_fs(old_fs); 3157 set_fs(old_fs);
3115 3158
3116out: 3159out:
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3210 case SIOCSIFTXQLEN: 3253 case SIOCSIFTXQLEN:
3211 case SIOCBRADDIF: 3254 case SIOCBRADDIF:
3212 case SIOCBRDELIF: 3255 case SIOCBRDELIF:
3256 case SIOCGIFNAME:
3213 case SIOCSIFNAME: 3257 case SIOCSIFNAME:
3214 case SIOCGMIIPHY: 3258 case SIOCGMIIPHY:
3215 case SIOCGMIIREG: 3259 case SIOCGMIIREG:
3216 case SIOCSMIIREG: 3260 case SIOCSMIIREG:
3217 case SIOCSARP:
3218 case SIOCGARP:
3219 case SIOCDARP:
3220 case SIOCATMARK:
3221 case SIOCBONDENSLAVE: 3261 case SIOCBONDENSLAVE:
3222 case SIOCBONDRELEASE: 3262 case SIOCBONDRELEASE:
3223 case SIOCBONDSETHWADDR: 3263 case SIOCBONDSETHWADDR:
3224 case SIOCBONDCHANGEACTIVE: 3264 case SIOCBONDCHANGEACTIVE:
3225 case SIOCGIFNAME: 3265 return compat_ifreq_ioctl(net, sock, cmd, argp);
3226 return sock_do_ioctl(net, sock, cmd, arg, 3266
3227 sizeof(struct compat_ifreq)); 3267 case SIOCSARP:
3268 case SIOCGARP:
3269 case SIOCDARP:
3270 case SIOCATMARK:
3271 return sock_do_ioctl(net, sock, cmd, arg);
3228 } 3272 }
3229 3273
3230 return -ENOIOCTLCMD; 3274 return -ENOIOCTLCMD;
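Review note: the comment in the new compat_ifreq_ioctl() above is the whole story: struct ifreq is 32 bytes for 32-bit userland but 40 bytes in the kernel's native view, purely because of ifreq::ifru_ifmap, so reading sizeof(struct ifreq) straight from a compat pointer can run past the end of a mapped page. A rough stand-alone model of the two layouts (field names abbreviated, padding per the usual natural-alignment rules; this is an illustration, not the uapi definitions):

#include <stdint.h>
#include <stdio.h>

struct ifmap32 {
	uint32_t mem_start, mem_end;
	uint16_t base_addr;
	uint8_t  irq, dma, port;
};				/* 13 bytes of data, padded to 16 */

struct ifmap64 {
	uint64_t mem_start, mem_end;
	uint16_t base_addr;
	uint8_t  irq, dma, port;
};				/* 21 bytes of data, padded to 24 */

struct ifreq32 { char name[16]; union { struct ifmap32 map; char data[16]; } u; };
struct ifreq64 { char name[16]; union { struct ifmap64 map; char data[16]; } u; };

int main(void)
{
	printf("32-bit ifreq: %zu bytes\n", sizeof(struct ifreq32)); /* 32 */
	printf("64-bit ifreq: %zu bytes\n", sizeof(struct ifreq64)); /* 40 */
	return 0;
}

Bouncing through a full-sized buffer obtained with compat_alloc_user_space(), as the helper does, keeps the extra 8 bytes inside memory the kernel knows is valid.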
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9e8744..15eb5d3d4750 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
75{ 75{
76 struct virtio_vsock *vsock = virtio_vsock_get(); 76 struct virtio_vsock *vsock = virtio_vsock_get();
77 77
78 if (!vsock)
79 return VMADDR_CID_ANY;
80
78 return vsock->guest_cid; 81 return vsock->guest_cid;
79} 82}
80 83
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
584 587
585 virtio_vsock_update_guest_cid(vsock); 588 virtio_vsock_update_guest_cid(vsock);
586 589
587 ret = vsock_core_init(&virtio_transport.transport);
588 if (ret < 0)
589 goto out_vqs;
590
591 vsock->rx_buf_nr = 0; 590 vsock->rx_buf_nr = 0;
592 vsock->rx_buf_max_nr = 0; 591 vsock->rx_buf_max_nr = 0;
593 atomic_set(&vsock->queued_replies, 0); 592 atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
618 mutex_unlock(&the_virtio_vsock_mutex); 617 mutex_unlock(&the_virtio_vsock_mutex);
619 return 0; 618 return 0;
620 619
621out_vqs:
622 vsock->vdev->config->del_vqs(vsock->vdev);
623out: 620out:
624 kfree(vsock); 621 kfree(vsock);
625 mutex_unlock(&the_virtio_vsock_mutex); 622 mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
637 flush_work(&vsock->event_work); 634 flush_work(&vsock->event_work);
638 flush_work(&vsock->send_pkt_work); 635 flush_work(&vsock->send_pkt_work);
639 636
637 /* Reset all connected sockets when the device disappear */
638 vsock_for_each_connected_socket(virtio_vsock_reset_sock);
639
640 vdev->config->reset(vdev); 640 vdev->config->reset(vdev);
641 641
642 mutex_lock(&vsock->rx_lock); 642 mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
669 669
670 mutex_lock(&the_virtio_vsock_mutex); 670 mutex_lock(&the_virtio_vsock_mutex);
671 the_virtio_vsock = NULL; 671 the_virtio_vsock = NULL;
672 vsock_core_exit();
673 mutex_unlock(&the_virtio_vsock_mutex); 672 mutex_unlock(&the_virtio_vsock_mutex);
674 673
675 vdev->config->del_vqs(vdev); 674 vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
702 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); 701 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
703 if (!virtio_vsock_workqueue) 702 if (!virtio_vsock_workqueue)
704 return -ENOMEM; 703 return -ENOMEM;
704
705 ret = register_virtio_driver(&virtio_vsock_driver); 705 ret = register_virtio_driver(&virtio_vsock_driver);
706 if (ret) 706 if (ret)
707 destroy_workqueue(virtio_vsock_workqueue); 707 goto out_wq;
708
709 ret = vsock_core_init(&virtio_transport.transport);
710 if (ret)
711 goto out_vdr;
712
713 return 0;
714
715out_vdr:
716 unregister_virtio_driver(&virtio_vsock_driver);
717out_wq:
718 destroy_workqueue(virtio_vsock_workqueue);
708 return ret; 719 return ret;
720
709} 721}
710 722
711static void __exit virtio_vsock_exit(void) 723static void __exit virtio_vsock_exit(void)
712{ 724{
725 vsock_core_exit();
713 unregister_virtio_driver(&virtio_vsock_driver); 726 unregister_virtio_driver(&virtio_vsock_driver);
714 destroy_workqueue(virtio_vsock_workqueue); 727 destroy_workqueue(virtio_vsock_workqueue);
715} 728}
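Review note: the virtio_vsock_init() rework above moves vsock_core_init() out of probe and into module init, and unwinds in reverse order when a later step fails (out_vdr unregisters the driver, out_wq destroys the workqueue). A generic sketch of that ordered-init/rollback shape, with made-up step names standing in for the real calls:

#include <stdio.h>

static int  step_a(void) { puts("A up");   return 0; }
static void undo_a(void) { puts("A down"); }
static int  step_b(void) { puts("B up");   return 0; }
static void undo_b(void) { puts("B down"); }
static int  step_c(void) { puts("C up");   return 0; }

static int init_all(void)
{
	int ret;

	ret = step_a();			/* e.g. alloc_workqueue() */
	if (ret)
		return ret;
	ret = step_b();			/* e.g. register_virtio_driver() */
	if (ret)
		goto out_a;
	ret = step_c();			/* e.g. vsock_core_init() */
	if (ret)
		goto out_b;
	return 0;

out_b:
	undo_b();			/* undo only what already succeeded */
out_a:
	undo_a();
	return ret;
}

int main(void) { return init_all(); }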
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 882d97bdc6bf..550ac9d827fe 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
41 cfg80211_sched_dfs_chan_update(rdev); 41 cfg80211_sched_dfs_chan_update(rdev);
42 } 42 }
43 43
44 schedule_work(&cfg80211_disconnect_work);
45
44 return err; 46 return err;
45} 47}
46 48
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c5d6f3418601..f6b40563dc63 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, 445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
446 u32 center_freq_khz, u32 bw_khz); 446 u32 center_freq_khz, u32 bw_khz);
447 447
448extern struct work_struct cfg80211_disconnect_work;
449
448/** 450/**
449 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable 451 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
450 * @wiphy: the wiphy to validate against 452 * @wiphy: the wiphy to validate against
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f741d8376a46..7d34cb884840 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
667 rtnl_unlock(); 667 rtnl_unlock();
668} 668}
669 669
670static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); 670DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
671 671
672 672
673/* 673/*
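Review note: the three wireless hunks cooperate: sme.c drops the static from the DECLARE_WORK() definition, core.h adds the extern declaration, and __cfg80211_stop_ap() can then schedule_work() the shared item. Collapsed into a single translation unit with stand-in types, the pattern is simply a shared extern declaration plus one non-static definition:

#include <stdio.h>

struct work_item { void (*fn)(void); };

/* core.h equivalent: shared declaration visible to every user */
extern struct work_item disconnect_work;

/* ap.c equivalent: queues the shared item */
static void stop_ap(void)
{
	disconnect_work.fn();		/* stands in for schedule_work() */
}

/* sme.c equivalent: the single non-static definition */
static void do_disconnect(void) { puts("disconnect"); }
struct work_item disconnect_work = { .fn = do_disconnect };

int main(void)
{
	stop_ap();
	return 0;
}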