path: root/net
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-03-23 17:05:10 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-03-23 17:05:10 -0400
commit     5617c05d44ebd7b75973b5b31a9f2e55e5882d3f (patch)
tree       7250f4b5690c141115a13afdb21c12cc720ae79c /net
parent     22db87ba6bbefcb12493ae9c7be2037f5f12639d (diff)
parent     25cd9721c2b16ee0d775e36ec3af31f392003f80 (diff)
Merge tag 'fixes-for-v4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus
Felipe writes:

usb: fixes for v4.11-rc4

f_acm got an endianness fix by Oliver Neukum. This has been around for a long time but it's finally fixed.

f_hid learned that it should never access hidg->req without first grabbing the spinlock.

Roger Quadros fixed two bugs in the f_uvc function driver.

Janusz Dziedzic fixed a very peculiar bug with EP0, one that's rather difficult to trigger. When we're dealing with bounced EP0 requests, we should delay unmap until after ->complete() is called.

UDC class got a use-after-free fix.
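The f_acm fix itself is not shown here; as general background, USB puts multi-byte fields on the wire in little-endian order, so such values must be converted explicitly rather than stored in CPU byte order. A minimal userspace sketch of that conversion, assuming glibc's <endian.h> helpers (the struct below is purely illustrative, not the gadget driver's own):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct line_coding_wire {               /* illustrative wire-format struct */
        uint32_t dwDTERate;             /* little-endian on the wire */
        uint8_t  bCharFormat;
        uint8_t  bParityType;
        uint8_t  bDataBits;
} __attribute__((packed));

int main(void)
{
        struct line_coding_wire lc;
        uint32_t rate = 115200;                 /* CPU byte order */
        const uint8_t *raw = (const uint8_t *)&lc.dwDTERate;

        lc.dwDTERate   = htole32(rate);         /* convert before sending */
        lc.bCharFormat = 0;
        lc.bParityType = 0;
        lc.bDataBits   = 8;

        printf("wire bytes: %02x %02x %02x %02x\n",
               raw[0], raw[1], raw[2], raw[3]);
        printf("read back:  %u\n", le32toh(lc.dwDTERate));
        return 0;
}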
Diffstat (limited to 'net')
-rw-r--r--  net/atm/svc.c                     5
-rw-r--r--  net/ax25/af_ax25.c                3
-rw-r--r--  net/bluetooth/l2cap_sock.c        2
-rw-r--r--  net/bluetooth/rfcomm/sock.c       3
-rw-r--r--  net/bluetooth/sco.c               2
-rw-r--r--  net/bridge/br_input.c             1
-rw-r--r--  net/bridge/br_netfilter_hooks.c   21
-rw-r--r--  net/core/dev.c                    1
-rw-r--r--  net/core/net-sysfs.c              6
-rw-r--r--  net/core/skbuff.c                 30
-rw-r--r--  net/core/sock.c                   106
-rw-r--r--  net/dccp/ccids/ccid2.c            1
-rw-r--r--  net/dccp/ipv4.c                   3
-rw-r--r--  net/dccp/ipv6.c                   8
-rw-r--r--  net/dccp/minisocks.c              24
-rw-r--r--  net/decnet/af_decnet.c            5
-rw-r--r--  net/ipv4/af_inet.c                9
-rw-r--r--  net/ipv4/inet_connection_sock.c   2
-rw-r--r--  net/ipv4/ip_output.c              2
-rw-r--r--  net/ipv4/tcp_ipv4.c               10
-rw-r--r--  net/ipv4/tcp_timer.c              6
-rw-r--r--  net/ipv6/af_inet6.c               10
-rw-r--r--  net/ipv6/ip6_fib.c                2
-rw-r--r--  net/ipv6/ip6_offload.c            4
-rw-r--r--  net/ipv6/ip6_output.c             9
-rw-r--r--  net/ipv6/ip6_vti.c                8
-rw-r--r--  net/ipv6/route.c                  11
-rw-r--r--  net/ipv6/tcp_ipv6.c               8
-rw-r--r--  net/irda/af_irda.c                5
-rw-r--r--  net/iucv/af_iucv.c                2
-rw-r--r--  net/llc/af_llc.c                  4
-rw-r--r--  net/mpls/af_mpls.c                4
-rw-r--r--  net/netrom/af_netrom.c            3
-rw-r--r--  net/nfc/llcp_sock.c               2
-rw-r--r--  net/phonet/pep.c                  6
-rw-r--r--  net/phonet/socket.c               4
-rw-r--r--  net/rds/connection.c              1
-rw-r--r--  net/rds/ib_cm.c                   47
-rw-r--r--  net/rds/rds.h                     6
-rw-r--r--  net/rds/tcp.c                     38
-rw-r--r--  net/rds/tcp.h                     2
-rw-r--r--  net/rds/tcp_listen.c              11
-rw-r--r--  net/rose/af_rose.c                3
-rw-r--r--  net/rxrpc/input.c                 27
-rw-r--r--  net/rxrpc/recvmsg.c               4
-rw-r--r--  net/rxrpc/sendmsg.c               49
-rw-r--r--  net/sched/act_connmark.c          3
-rw-r--r--  net/sched/act_skbmod.c            1
-rw-r--r--  net/sctp/ipv6.c                   5
-rw-r--r--  net/sctp/protocol.c               5
-rw-r--r--  net/sctp/socket.c                 4
-rw-r--r--  net/smc/af_smc.c                  2
-rw-r--r--  net/socket.c                      5
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c       3
-rw-r--r--  net/tipc/socket.c                 8
-rw-r--r--  net/unix/af_unix.c                5
-rw-r--r--  net/vmw_vsock/af_vsock.c          3
-rw-r--r--  net/x25/af_x25.c                  3
-rw-r--r--  net/xfrm/xfrm_policy.c            19
59 files changed, 348 insertions, 238 deletions
diff --git a/net/atm/svc.c b/net/atm/svc.c
index db9794ec61d8..5589de7086af 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -318,7 +318,8 @@ out:
318 return error; 318 return error;
319} 319}
320 320
321static int svc_accept(struct socket *sock, struct socket *newsock, int flags) 321static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
322 bool kern)
322{ 323{
323 struct sock *sk = sock->sk; 324 struct sock *sk = sock->sk;
324 struct sk_buff *skb; 325 struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
329 330
330 lock_sock(sk); 331 lock_sock(sk);
331 332
332 error = svc_create(sock_net(sk), newsock, 0, 0); 333 error = svc_create(sock_net(sk), newsock, 0, kern);
333 if (error) 334 if (error)
334 goto out; 335 goto out;
335 336
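This hunk is the first of many below with the same shape: each protocol's accept() gains a bool kern argument so the newly created child socket inherits the kernel-internal vs. userspace nature of its listener instead of a hard-coded 0 (here, svc_create() now receives kern). A purely illustrative userspace sketch of threading such a flag through an ops table (all names below are made up):

#include <stdbool.h>
#include <stdio.h>

struct conn {
        bool kern;                      /* kernel-internal or user socket */
};

struct proto_ops {
        int (*accept)(struct conn *parent, struct conn *child,
                      int flags, bool kern);
};

static int demo_accept(struct conn *parent, struct conn *child,
                       int flags, bool kern)
{
        (void)parent; (void)flags;
        child->kern = kern;             /* child inherits the caller's context */
        return 0;
}

static const struct proto_ops demo_ops = { .accept = demo_accept };

int main(void)
{
        struct conn listener = { .kern = false }, child;

        demo_ops.accept(&listener, &child, 0, listener.kern);
        printf("child is %s socket\n", child.kern ? "a kernel" : "a user");
        return 0;
}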
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a8e42cedf1db..b7c486752b3a 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1320,7 +1320,8 @@ out_release:
1320 return err; 1320 return err;
1321} 1321}
1322 1322
1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) 1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
1324 bool kern)
1324{ 1325{
1325 struct sk_buff *skb; 1326 struct sk_buff *skb;
1326 struct sock *newsk; 1327 struct sock *newsk;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f307b145ea54..507b80d59dec 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -301,7 +301,7 @@ done:
301} 301}
302 302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, 303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
304 int flags) 304 int flags, bool kern)
305{ 305{
306 DEFINE_WAIT_FUNC(wait, woken_wake_function); 306 DEFINE_WAIT_FUNC(wait, woken_wake_function);
307 struct sock *sk = sock->sk, *nsk; 307 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa1a814ceddc..ac3c650cb234 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -471,7 +471,8 @@ done:
471 return err; 471 return err;
472} 472}
473 473
474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) 474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
475 bool kern)
475{ 476{
476 DEFINE_WAIT_FUNC(wait, woken_wake_function); 477 DEFINE_WAIT_FUNC(wait, woken_wake_function);
477 struct sock *sk = sock->sk, *nsk; 478 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e4e9a2da1e7e..728e0c8dc8e7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -627,7 +627,7 @@ done:
627} 627}
628 628
629static int sco_sock_accept(struct socket *sock, struct socket *newsock, 629static int sco_sock_accept(struct socket *sock, struct socket *newsock,
630 int flags) 630 int flags, bool kern)
631{ 631{
632 DEFINE_WAIT_FUNC(wait, woken_wake_function); 632 DEFINE_WAIT_FUNC(wait, woken_wake_function);
633 struct sock *sk = sock->sk, *ch; 633 struct sock *sk = sock->sk, *ch;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 236f34244dbe..013f2290bfa5 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
30static int 30static int
31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) 31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
32{ 32{
33 br_drop_fake_rtable(skb);
33 return netif_receive_skb(skb); 34 return netif_receive_skb(skb);
34} 35}
35 36
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 95087e6e8258..fa87fbd62bb7 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
521} 521}
522 522
523 523
524/* PF_BRIDGE/LOCAL_IN ************************************************/
525/* The packet is locally destined, which requires a real
526 * dst_entry, so detach the fake one. On the way up, the
527 * packet would pass through PRE_ROUTING again (which already
528 * took place when the packet entered the bridge), but we
529 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
530 * prevent this from happening. */
531static unsigned int br_nf_local_in(void *priv,
532 struct sk_buff *skb,
533 const struct nf_hook_state *state)
534{
535 br_drop_fake_rtable(skb);
536 return NF_ACCEPT;
537}
538
539/* PF_BRIDGE/FORWARD *************************************************/ 524/* PF_BRIDGE/FORWARD *************************************************/
540static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 525static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
541{ 526{
@@ -908,12 +893,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
908 .priority = NF_BR_PRI_BRNF, 893 .priority = NF_BR_PRI_BRNF,
909 }, 894 },
910 { 895 {
911 .hook = br_nf_local_in,
912 .pf = NFPROTO_BRIDGE,
913 .hooknum = NF_BR_LOCAL_IN,
914 .priority = NF_BR_PRI_BRNF,
915 },
916 {
917 .hook = br_nf_forward_ip, 896 .hook = br_nf_forward_ip,
918 .pf = NFPROTO_BRIDGE, 897 .pf = NFPROTO_BRIDGE,
919 .hooknum = NF_BR_FORWARD, 898 .hooknum = NF_BR_FORWARD,
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d..7869ae3837ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
1304{ 1304{
1305 rtnl_lock(); 1305 rtnl_lock();
1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1307 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1307 rtnl_unlock(); 1308 rtnl_unlock();
1308} 1309}
1309EXPORT_SYMBOL(netdev_notify_peers); 1310EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3945821e9c1f..65ea0ff4017c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
953 while (--i >= new_num) { 953 while (--i >= new_num) {
954 struct kobject *kobj = &dev->_rx[i].kobj; 954 struct kobject *kobj = &dev->_rx[i].kobj;
955 955
956 if (!list_empty(&dev_net(dev)->exit_list)) 956 if (!atomic_read(&dev_net(dev)->count))
957 kobj->uevent_suppress = 1; 957 kobj->uevent_suppress = 1;
958 if (dev->sysfs_rx_queue_group) 958 if (dev->sysfs_rx_queue_group)
959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); 959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
1371 while (--i >= new_num) { 1371 while (--i >= new_num) {
1372 struct netdev_queue *queue = dev->_tx + i; 1372 struct netdev_queue *queue = dev->_tx + i;
1373 1373
1374 if (!list_empty(&dev_net(dev)->exit_list)) 1374 if (!atomic_read(&dev_net(dev)->count))
1375 queue->kobj.uevent_suppress = 1; 1375 queue->kobj.uevent_suppress = 1;
1376#ifdef CONFIG_BQL 1376#ifdef CONFIG_BQL
1377 sysfs_remove_group(&queue->kobj, &dql_group); 1377 sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
1558{ 1558{
1559 struct device *dev = &(ndev->dev); 1559 struct device *dev = &(ndev->dev);
1560 1560
1561 if (!list_empty(&dev_net(ndev)->exit_list)) 1561 if (!atomic_read(&dev_net(ndev)->count))
1562 dev_set_uevent_suppress(dev, 1); 1562 dev_set_uevent_suppress(dev, 1);
1563 1563
1564 kobject_get(&dev->kobj); 1564 kobject_get(&dev->kobj);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3557958e9bf..cd4ba8c6b609 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3828,13 +3828,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
3828 if (!skb_may_tx_timestamp(sk, false)) 3828 if (!skb_may_tx_timestamp(sk, false))
3829 return; 3829 return;
3830 3830
3831 /* take a reference to prevent skb_orphan() from freeing the socket */ 3831 /* Take a reference to prevent skb_orphan() from freeing the socket,
3832 sock_hold(sk); 3832 * but only if the socket refcount is not zero.
3833 3833 */
3834 *skb_hwtstamps(skb) = *hwtstamps; 3834 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3835 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3835 *skb_hwtstamps(skb) = *hwtstamps;
3836 3836 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
3837 sock_put(sk); 3837 sock_put(sk);
3838 }
3838} 3839}
3839EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3840EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3840 3841
@@ -3893,7 +3894,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3893{ 3894{
3894 struct sock *sk = skb->sk; 3895 struct sock *sk = skb->sk;
3895 struct sock_exterr_skb *serr; 3896 struct sock_exterr_skb *serr;
3896 int err; 3897 int err = 1;
3897 3898
3898 skb->wifi_acked_valid = 1; 3899 skb->wifi_acked_valid = 1;
3899 skb->wifi_acked = acked; 3900 skb->wifi_acked = acked;
@@ -3903,14 +3904,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3903 serr->ee.ee_errno = ENOMSG; 3904 serr->ee.ee_errno = ENOMSG;
3904 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3905 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3905 3906
3906 /* take a reference to prevent skb_orphan() from freeing the socket */ 3907 /* Take a reference to prevent skb_orphan() from freeing the socket,
3907 sock_hold(sk); 3908 * but only if the socket refcount is not zero.
3908 3909 */
3909 err = sock_queue_err_skb(sk, skb); 3910 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3911 err = sock_queue_err_skb(sk, skb);
3912 sock_put(sk);
3913 }
3910 if (err) 3914 if (err)
3911 kfree_skb(skb); 3915 kfree_skb(skb);
3912
3913 sock_put(sk);
3914} 3916}
3915EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3917EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3916 3918
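The skbuff.c change above replaces an unconditional sock_hold() with atomic_inc_not_zero(&sk->sk_refcnt): the reference is taken, and the timestamp or wifi ack delivered, only if the socket is still live. A standalone C11 sketch of that increment-only-if-nonzero pattern (the struct and helpers below are hypothetical, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int refcnt;
};

/* Take a reference only if at least one reference still exists. */
static bool obj_get_not_zero(struct obj *o)
{
        int old = atomic_load(&o->refcnt);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
                        return true;
                /* the failed CAS reloaded 'old'; loop and retry */
        }
        return false;
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                printf("last reference dropped; object may be freed\n");
}

int main(void)
{
        struct obj o;

        atomic_init(&o.refcnt, 1);
        if (obj_get_not_zero(&o)) {     /* cf. atomic_inc_not_zero() */
                /* ... deliver the timestamp or error skb ... */
                obj_put(&o);            /* cf. sock_put() */
        }
        obj_put(&o);                    /* drop the initial reference */
        return 0;
}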
diff --git a/net/core/sock.c b/net/core/sock.c
index f6fd79f33097..a96d5f7a5734 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);
197 197
198/* 198/*
199 * Each address family might have different locking rules, so we have 199 * Each address family might have different locking rules, so we have
200 * one slock key per address family: 200 * one slock key per address family and separate keys for internal and
201 * userspace sockets.
201 */ 202 */
202static struct lock_class_key af_family_keys[AF_MAX]; 203static struct lock_class_key af_family_keys[AF_MAX];
204static struct lock_class_key af_family_kern_keys[AF_MAX];
203static struct lock_class_key af_family_slock_keys[AF_MAX]; 205static struct lock_class_key af_family_slock_keys[AF_MAX];
206static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
204 207
205/* 208/*
206 * Make lock validator output more readable. (we pre-construct these 209 * Make lock validator output more readable. (we pre-construct these
207 * strings build-time, so that runtime initialization of socket 210 * strings build-time, so that runtime initialization of socket
208 * locks is fast): 211 * locks is fast):
209 */ 212 */
213
214#define _sock_locks(x) \
215 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
216 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
217 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
218 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
219 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
220 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
221 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
222 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
223 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
224 x "27" , x "28" , x "AF_CAN" , \
225 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
226 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
227 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
228 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
229 x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
230
210static const char *const af_family_key_strings[AF_MAX+1] = { 231static const char *const af_family_key_strings[AF_MAX+1] = {
211 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 232 _sock_locks("sk_lock-")
212 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
213 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
214 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
215 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
216 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
217 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
218 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
219 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
220 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
225 "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
226}; 233};
227static const char *const af_family_slock_key_strings[AF_MAX+1] = { 234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
228 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 235 _sock_locks("slock-")
229 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
230 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
231 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
232 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
233 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
234 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
235 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
236 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
237 "slock-27" , "slock-28" , "slock-AF_CAN" ,
238 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
239 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
240 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
241 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
242 "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
243}; 236};
244static const char *const af_family_clock_key_strings[AF_MAX+1] = { 237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
245 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 238 _sock_locks("clock-")
246 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 239};
247 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 240
248 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , 241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
249 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , 242 _sock_locks("k-sk_lock-")
250 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , 243};
251 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , 244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
252 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , 245 _sock_locks("k-slock-")
253 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 246};
254 "clock-27" , "clock-28" , "clock-AF_CAN" , 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
255 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 248 _sock_locks("k-clock-")
256 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
257 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
258 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
259 "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
260}; 249};
261 250
262/* 251/*
@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
264 * so split the lock classes by using a per-AF key: 253 * so split the lock classes by using a per-AF key:
265 */ 254 */
266static struct lock_class_key af_callback_keys[AF_MAX]; 255static struct lock_class_key af_callback_keys[AF_MAX];
256static struct lock_class_key af_kern_callback_keys[AF_MAX];
267 257
268/* Take into consideration the size of the struct sk_buff overhead in the 258/* Take into consideration the size of the struct sk_buff overhead in the
269 * determination of these values, since that is non-constant across 259 * determination of these values, since that is non-constant across
@@ -1293,7 +1283,16 @@ lenout:
1293 */ 1283 */
1294static inline void sock_lock_init(struct sock *sk) 1284static inline void sock_lock_init(struct sock *sk)
1295{ 1285{
1296 sock_lock_init_class_and_name(sk, 1286 if (sk->sk_kern_sock)
1287 sock_lock_init_class_and_name(
1288 sk,
1289 af_family_kern_slock_key_strings[sk->sk_family],
1290 af_family_kern_slock_keys + sk->sk_family,
1291 af_family_kern_key_strings[sk->sk_family],
1292 af_family_kern_keys + sk->sk_family);
1293 else
1294 sock_lock_init_class_and_name(
1295 sk,
1297 af_family_slock_key_strings[sk->sk_family], 1296 af_family_slock_key_strings[sk->sk_family],
1298 af_family_slock_keys + sk->sk_family, 1297 af_family_slock_keys + sk->sk_family,
1299 af_family_key_strings[sk->sk_family], 1298 af_family_key_strings[sk->sk_family],
@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1399 * why we need sk_prot_creator -acme 1398 * why we need sk_prot_creator -acme
1400 */ 1399 */
1401 sk->sk_prot = sk->sk_prot_creator = prot; 1400 sk->sk_prot = sk->sk_prot_creator = prot;
1401 sk->sk_kern_sock = kern;
1402 sock_lock_init(sk); 1402 sock_lock_init(sk);
1403 sk->sk_net_refcnt = kern ? 0 : 1; 1403 sk->sk_net_refcnt = kern ? 0 : 1;
1404 if (likely(sk->sk_net_refcnt)) 1404 if (likely(sk->sk_net_refcnt))
@@ -2277,7 +2277,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2277} 2277}
2278EXPORT_SYMBOL(sock_no_socketpair); 2278EXPORT_SYMBOL(sock_no_socketpair);
2279 2279
2280int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 2280int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2281 bool kern)
2281{ 2282{
2282 return -EOPNOTSUPP; 2283 return -EOPNOTSUPP;
2283} 2284}
@@ -2481,7 +2482,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2481 } 2482 }
2482 2483
2483 rwlock_init(&sk->sk_callback_lock); 2484 rwlock_init(&sk->sk_callback_lock);
2484 lockdep_set_class_and_name(&sk->sk_callback_lock, 2485 if (sk->sk_kern_sock)
2486 lockdep_set_class_and_name(
2487 &sk->sk_callback_lock,
2488 af_kern_callback_keys + sk->sk_family,
2489 af_family_kern_clock_key_strings[sk->sk_family]);
2490 else
2491 lockdep_set_class_and_name(
2492 &sk->sk_callback_lock,
2485 af_callback_keys + sk->sk_family, 2493 af_callback_keys + sk->sk_family,
2486 af_family_clock_key_strings[sk->sk_family]); 2494 af_family_clock_key_strings[sk->sk_family]);
2487 2495
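The new _sock_locks() macro above relies on C's adjacent-string-literal concatenation: the same list of address-family names is emitted several times, each with a different prefix, instead of maintaining separate hand-written tables for the user and kernel lockdep keys. A tiny standalone version of the trick, shortened to three families:

#include <stdio.h>

#define _names(x) \
        x "AF_UNSPEC", x "AF_UNIX", x "AF_INET"

static const char *const user_keys[] = { _names("sk_lock-") };
static const char *const kern_keys[] = { _names("k-sk_lock-") };

int main(void)
{
        printf("%s / %s\n", user_keys[2], kern_keys[2]);
        /* prints: sk_lock-AF_INET / k-sk_lock-AF_INET */
        return 0;
}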
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
749 for (i = 0; i < hc->tx_seqbufc; i++) 749 for (i = 0; i < hc->tx_seqbufc; i++)
750 kfree(hc->tx_seqbuf[i]); 750 kfree(hc->tx_seqbuf[i]);
751 hc->tx_seqbufc = 0; 751 hc->tx_seqbufc = 0;
752 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
752} 753}
753 754
754static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 755static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 409d0cfd3447..b99168b0fabf 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
289 289
290 switch (type) { 290 switch (type) {
291 case ICMP_REDIRECT: 291 case ICMP_REDIRECT:
292 dccp_do_redirect(skb, sk); 292 if (!sock_owned_by_user(sk))
293 dccp_do_redirect(skb, sk);
293 goto out; 294 goto out;
294 case ICMP_SOURCE_QUENCH: 295 case ICMP_SOURCE_QUENCH:
295 /* Just silently ignore these. */ 296 /* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 233b57367758..d9b6a4e403e7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
122 np = inet6_sk(sk); 122 np = inet6_sk(sk);
123 123
124 if (type == NDISC_REDIRECT) { 124 if (type == NDISC_REDIRECT) {
125 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 125 if (!sock_owned_by_user(sk)) {
126 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
126 127
127 if (dst) 128 if (dst)
128 dst->ops->redirect(dst, sk, skb); 129 dst->ops->redirect(dst, sk, skb);
130 }
129 goto out; 131 goto out;
130 } 132 }
131 133
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index e267e6f4c9a5..abd07a443219 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
142 struct dccp_request_sock *dreq = dccp_rsk(req); 142 struct dccp_request_sock *dreq = dccp_rsk(req);
143 bool own_req; 143 bool own_req;
144 144
145 /* TCP/DCCP listeners became lockless.
146 * DCCP stores complex state in its request_sock, so we need
147 * a protection for them, now this code runs without being protected
148 * by the parent (listener) lock.
149 */
150 spin_lock_bh(&dreq->dreq_lock);
151
145 /* Check for retransmitted REQUEST */ 152 /* Check for retransmitted REQUEST */
146 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 153 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
147 154
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
156 inet_rtx_syn_ack(sk, req); 163 inet_rtx_syn_ack(sk, req);
157 } 164 }
158 /* Network Duplicate, discard packet */ 165 /* Network Duplicate, discard packet */
159 return NULL; 166 goto out;
160 } 167 }
161 168
162 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 169 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
182 189
183 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 190 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
184 req, &own_req); 191 req, &own_req);
185 if (!child) 192 if (child) {
186 goto listen_overflow; 193 child = inet_csk_complete_hashdance(sk, child, req, own_req);
187 194 goto out;
188 return inet_csk_complete_hashdance(sk, child, req, own_req); 195 }
189 196
190listen_overflow:
191 dccp_pr_debug("listen_overflow!\n");
192 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; 197 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
193drop: 198drop:
194 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) 199 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
195 req->rsk_ops->send_reset(sk, skb); 200 req->rsk_ops->send_reset(sk, skb);
196 201
197 inet_csk_reqsk_queue_drop(sk, req); 202 inet_csk_reqsk_queue_drop(sk, req);
198 return NULL; 203out:
204 spin_unlock_bh(&dreq->dreq_lock);
205 return child;
199} 206}
200 207
201EXPORT_SYMBOL_GPL(dccp_check_req); 208EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
246{ 253{
247 struct dccp_request_sock *dreq = dccp_rsk(req); 254 struct dccp_request_sock *dreq = dccp_rsk(req);
248 255
256 spin_lock_init(&dreq->dreq_lock);
249 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; 257 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
250 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); 258 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
251 inet_rsk(req)->acked = 0; 259 inet_rsk(req)->acked = 0;
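The comment in the dccp_check_req() hunk is the key point: once the listener runs lockless, the complex per-request DCCP state needs its own protection, so dreq_lock is initialized in dccp_reqsk_init() and held across the whole check. A userspace sketch of that per-request-lock shape, assuming hypothetical names:

#include <pthread.h>
#include <stdio.h>

struct request {
        pthread_mutex_t lock;           /* plays the role of dreq_lock */
        int state;
};

static void request_init(struct request *req)
{
        pthread_mutex_init(&req->lock, NULL);   /* cf. spin_lock_init() */
        req->state = 0;
}

static void request_process(struct request *req)
{
        pthread_mutex_lock(&req->lock);         /* cf. spin_lock_bh() */
        /* ... examine and update per-request state safely ... */
        req->state++;
        pthread_mutex_unlock(&req->lock);       /* cf. spin_unlock_bh() */
}

int main(void)
{
        struct request req;

        request_init(&req);
        request_process(&req);
        printf("state=%d\n", req.state);
        return 0;
}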
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index e6e79eda9763..7de5b40a5d0d 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1070 return skb == NULL ? ERR_PTR(err) : skb; 1070 return skb == NULL ? ERR_PTR(err) : skb;
1071} 1071}
1072 1072
1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags) 1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1074 bool kern)
1074{ 1075{
1075 struct sock *sk = sock->sk, *newsk; 1076 struct sock *sk = sock->sk, *newsk;
1076 struct sk_buff *skb = NULL; 1077 struct sk_buff *skb = NULL;
@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1099 1100
1100 cb = DN_SKB_CB(skb); 1101 cb = DN_SKB_CB(skb);
1101 sk->sk_ack_backlog--; 1102 sk->sk_ack_backlog--;
1102 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0); 1103 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1103 if (newsk == NULL) { 1104 if (newsk == NULL) {
1104 release_sock(sk); 1105 release_sock(sk);
1105 kfree_skb(skb); 1106 kfree_skb(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 602d40f43687..6b1fc6e4278e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
689 * Accept a pending connection. The TCP layer now gives BSD semantics. 689 * Accept a pending connection. The TCP layer now gives BSD semantics.
690 */ 690 */
691 691
692int inet_accept(struct socket *sock, struct socket *newsock, int flags) 692int inet_accept(struct socket *sock, struct socket *newsock, int flags,
693 bool kern)
693{ 694{
694 struct sock *sk1 = sock->sk; 695 struct sock *sk1 = sock->sk;
695 int err = -EINVAL; 696 int err = -EINVAL;
696 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err); 697 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
697 698
698 if (!sk2) 699 if (!sk2)
699 goto do_err; 700 goto do_err;
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
1487 int proto = iph->protocol; 1488 int proto = iph->protocol;
1488 int err = -ENOSYS; 1489 int err = -ENOSYS;
1489 1490
1490 if (skb->encapsulation) 1491 if (skb->encapsulation) {
1492 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1491 skb_set_inner_network_header(skb, nhoff); 1493 skb_set_inner_network_header(skb, nhoff);
1494 }
1492 1495
1493 csum_replace2(&iph->check, iph->tot_len, newlen); 1496 csum_replace2(&iph->check, iph->tot_len, newlen);
1494 iph->tot_len = newlen; 1497 iph->tot_len = newlen;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4d5980ade3b..5e313c1ac94f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
424/* 424/*
425 * This will accept the next outstanding connection. 425 * This will accept the next outstanding connection.
426 */ 426 */
427struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) 427struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
428{ 428{
429 struct inet_connection_sock *icsk = inet_csk(sk); 429 struct inet_connection_sock *icsk = inet_csk(sk);
430 struct request_sock_queue *queue = &icsk->icsk_accept_queue; 430 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 737ce826d7ec..7a3fd25e8913 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
966 cork->length += length; 966 cork->length += length;
967 if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && 967 if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
968 (sk->sk_protocol == IPPROTO_UDP) && 968 (sk->sk_protocol == IPPROTO_UDP) &&
969 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 969 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
970 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { 970 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
971 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 971 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
972 hh_len, fragheaderlen, transhdrlen, 972 hh_len, fragheaderlen, transhdrlen,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9a89b8deafae..575e19dcc017 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
279 */ 279 */
280void tcp_v4_mtu_reduced(struct sock *sk) 280void tcp_v4_mtu_reduced(struct sock *sk)
281{ 281{
282 struct dst_entry *dst;
283 struct inet_sock *inet = inet_sk(sk); 282 struct inet_sock *inet = inet_sk(sk);
284 u32 mtu = tcp_sk(sk)->mtu_info; 283 struct dst_entry *dst;
284 u32 mtu;
285 285
286 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
287 return;
288 mtu = tcp_sk(sk)->mtu_info;
286 dst = inet_csk_update_pmtu(sk, mtu); 289 dst = inet_csk_update_pmtu(sk, mtu);
287 if (!dst) 290 if (!dst)
288 return; 291 return;
@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
428 431
429 switch (type) { 432 switch (type) {
430 case ICMP_REDIRECT: 433 case ICMP_REDIRECT:
431 do_redirect(icmp_skb, sk); 434 if (!sock_owned_by_user(sk))
435 do_redirect(icmp_skb, sk);
432 goto out; 436 goto out;
433 case ICMP_SOURCE_QUENCH: 437 case ICMP_SOURCE_QUENCH:
434 /* Just silently ignore these. */ 438 /* Just silently ignore these. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 40d893556e67..b2ab411c6d37 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
249 249
250 sk_mem_reclaim_partial(sk); 250 sk_mem_reclaim_partial(sk);
251 251
252 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 252 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
253 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
253 goto out; 254 goto out;
254 255
255 if (time_after(icsk->icsk_ack.timeout, jiffies)) { 256 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
552 struct inet_connection_sock *icsk = inet_csk(sk); 553 struct inet_connection_sock *icsk = inet_csk(sk);
553 int event; 554 int event;
554 555
555 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 556 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
557 !icsk->icsk_pending)
556 goto out; 558 goto out;
557 559
558 if (time_after(icsk->icsk_timeout, jiffies)) { 560 if (time_after(icsk->icsk_timeout, jiffies)) {
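The tcp_timer.c hunks swap a single-state comparison for a mask test: (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN) works because each TCPF_* flag is the one-hot encoding of its TCP_* state, so several states can be rejected in one expression. A small standalone illustration of the idiom (the states and values below are invented, not the kernel's):

#include <stdio.h>

enum state { ST_ESTABLISHED = 1, ST_LISTEN = 2, ST_CLOSE = 3 };

#define STF_ESTABLISHED (1 << ST_ESTABLISHED)
#define STF_LISTEN      (1 << ST_LISTEN)
#define STF_CLOSE       (1 << ST_CLOSE)

static int timer_should_bail(enum state s)
{
        /* true when the state is any member of the mask */
        return (1 << s) & (STF_CLOSE | STF_LISTEN);
}

int main(void)
{
        printf("LISTEN bails:      %d\n", timer_should_bail(ST_LISTEN) != 0);
        printf("ESTABLISHED bails: %d\n", timer_should_bail(ST_ESTABLISHED) != 0);
        return 0;
}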
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04db40620ea6..a9a9553ee63d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
920 err = register_pernet_subsys(&inet6_net_ops); 920 err = register_pernet_subsys(&inet6_net_ops);
921 if (err) 921 if (err)
922 goto register_pernet_fail; 922 goto register_pernet_fail;
923 err = icmpv6_init();
924 if (err)
925 goto icmp_fail;
926 err = ip6_mr_init(); 923 err = ip6_mr_init();
927 if (err) 924 if (err)
928 goto ipmr_fail; 925 goto ipmr_fail;
926 err = icmpv6_init();
927 if (err)
928 goto icmp_fail;
929 err = ndisc_init(); 929 err = ndisc_init();
930 if (err) 930 if (err)
931 goto ndisc_fail; 931 goto ndisc_fail;
@@ -1061,10 +1061,10 @@ igmp_fail:
1061 ndisc_cleanup(); 1061 ndisc_cleanup();
1062ndisc_fail: 1062ndisc_fail:
1063 ip6_mr_cleanup(); 1063 ip6_mr_cleanup();
1064ipmr_fail:
1065 icmpv6_cleanup();
1066icmp_fail: 1064icmp_fail:
1067 unregister_pernet_subsys(&inet6_net_ops); 1065 unregister_pernet_subsys(&inet6_net_ops);
1066ipmr_fail:
1067 icmpv6_cleanup();
1068register_pernet_fail: 1068register_pernet_fail:
1069 sock_unregister(PF_INET6); 1069 sock_unregister(PF_INET6);
1070 rtnl_unregister_all(PF_INET6); 1070 rtnl_unregister_all(PF_INET6);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e4266746e4a2..d4bf2c68a545 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -923,6 +923,8 @@ add:
923 ins = &rt->dst.rt6_next; 923 ins = &rt->dst.rt6_next;
924 iter = *ins; 924 iter = *ins;
925 while (iter) { 925 while (iter) {
926 if (iter->rt6i_metric > rt->rt6i_metric)
927 break;
926 if (rt6_qualify_for_ecmp(iter)) { 928 if (rt6_qualify_for_ecmp(iter)) {
927 *ins = iter->dst.rt6_next; 929 *ins = iter->dst.rt6_next;
928 fib6_purge_rt(iter, fn, info->nl_net); 930 fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 0838e6d01d2e..93e58a5e1837 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
294 struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); 294 struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
295 int err = -ENOSYS; 295 int err = -ENOSYS;
296 296
297 if (skb->encapsulation) 297 if (skb->encapsulation) {
298 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
298 skb_set_inner_network_header(skb, nhoff); 299 skb_set_inner_network_header(skb, nhoff);
300 }
299 301
300 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); 302 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
301 303
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 528b3c1f3fde..58f6288e9ba5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,13 +768,14 @@ slow_path:
768 * Fragment the datagram. 768 * Fragment the datagram.
769 */ 769 */
770 770
771 *prevhdr = NEXTHDR_FRAGMENT;
772 troom = rt->dst.dev->needed_tailroom; 771 troom = rt->dst.dev->needed_tailroom;
773 772
774 /* 773 /*
775 * Keep copying data until we run out. 774 * Keep copying data until we run out.
776 */ 775 */
777 while (left > 0) { 776 while (left > 0) {
777 u8 *fragnexthdr_offset;
778
778 len = left; 779 len = left;
779 /* IF: it doesn't fit, use 'mtu' - the data space left */ 780 /* IF: it doesn't fit, use 'mtu' - the data space left */
780 if (len > mtu) 781 if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
819 */ 820 */
820 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); 821 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
821 822
823 fragnexthdr_offset = skb_network_header(frag);
824 fragnexthdr_offset += prevhdr - skb_network_header(skb);
825 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
826
822 /* 827 /*
823 * Build fragment header. 828 * Build fragment header.
824 */ 829 */
@@ -1385,7 +1390,7 @@ emsgsize:
1385 if ((((length + fragheaderlen) > mtu) || 1390 if ((((length + fragheaderlen) > mtu) ||
1386 (skb && skb_is_gso(skb))) && 1391 (skb && skb_is_gso(skb))) &&
1387 (sk->sk_protocol == IPPROTO_UDP) && 1392 (sk->sk_protocol == IPPROTO_UDP) &&
1388 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 1393 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
1389 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1394 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1390 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1395 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1391 hh_len, fragheaderlen, exthdrlen, 1396 hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 644ba59fbd9d..3d8a3b63b4fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
485 if (!skb->ignore_df && skb->len > mtu) { 485 if (!skb->ignore_df && skb->len > mtu) {
486 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); 486 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
487 487
488 if (skb->protocol == htons(ETH_P_IPV6)) 488 if (skb->protocol == htons(ETH_P_IPV6)) {
489 if (mtu < IPV6_MIN_MTU)
490 mtu = IPV6_MIN_MTU;
491
489 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 492 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
490 else 493 } else {
491 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 494 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
492 htonl(mtu)); 495 htonl(mtu));
496 }
493 497
494 return -EMSGSIZE; 498 return -EMSGSIZE;
495 } 499 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 229bfcc451ef..35c58b669ebd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
3299 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ 3299 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
3300 + NLA_ALIGN(sizeof(struct rtnexthop)) 3300 + NLA_ALIGN(sizeof(struct rtnexthop))
3301 + nla_total_size(16) /* RTA_GATEWAY */ 3301 + nla_total_size(16) /* RTA_GATEWAY */
3302 + nla_total_size(4) /* RTA_OIF */
3303 + lwtunnel_get_encap_size(rt->dst.lwtstate); 3302 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3304 3303
3305 nexthop_len *= rt->rt6i_nsiblings; 3304 nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
3323} 3322}
3324 3323
3325static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, 3324static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3326 unsigned int *flags) 3325 unsigned int *flags, bool skip_oif)
3327{ 3326{
3328 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { 3327 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
3329 *flags |= RTNH_F_LINKDOWN; 3328 *flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3336 goto nla_put_failure; 3335 goto nla_put_failure;
3337 } 3336 }
3338 3337
3339 if (rt->dst.dev && 3338 /* not needed for multipath encoding b/c it has a rtnexthop struct */
3339 if (!skip_oif && rt->dst.dev &&
3340 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 3340 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3341 goto nla_put_failure; 3341 goto nla_put_failure;
3342 3342
@@ -3350,6 +3350,7 @@ nla_put_failure:
3350 return -EMSGSIZE; 3350 return -EMSGSIZE;
3351} 3351}
3352 3352
3353/* add multipath next hop */
3353static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) 3354static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
3354{ 3355{
3355 struct rtnexthop *rtnh; 3356 struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
3362 rtnh->rtnh_hops = 0; 3363 rtnh->rtnh_hops = 0;
3363 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; 3364 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
3364 3365
3365 if (rt6_nexthop_info(skb, rt, &flags) < 0) 3366 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
3366 goto nla_put_failure; 3367 goto nla_put_failure;
3367 3368
3368 rtnh->rtnh_flags = flags; 3369 rtnh->rtnh_flags = flags;
@@ -3515,7 +3516,7 @@ static int rt6_fill_node(struct net *net,
3515 3516
3516 nla_nest_end(skb, mp); 3517 nla_nest_end(skb, mp);
3517 } else { 3518 } else {
3518 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0) 3519 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
3519 goto nla_put_failure; 3520 goto nla_put_failure;
3520 } 3521 }
3521 3522
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60a5295a7de6..49fa2e8c3fa9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
391 np = inet6_sk(sk); 391 np = inet6_sk(sk);
392 392
393 if (type == NDISC_REDIRECT) { 393 if (type == NDISC_REDIRECT) {
394 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 394 if (!sock_owned_by_user(sk)) {
395 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
395 396
396 if (dst) 397 if (dst)
397 dst->ops->redirect(dst, sk, skb); 398 dst->ops->redirect(dst, sk, skb);
399 }
398 goto out; 400 goto out;
399 } 401 }
400 402
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 81adc29a448d..8d77ad5cadaf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -828,7 +828,8 @@ out:
828 * Wait for incoming connection 828 * Wait for incoming connection
829 * 829 *
830 */ 830 */
831static int irda_accept(struct socket *sock, struct socket *newsock, int flags) 831static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
832 bool kern)
832{ 833{
833 struct sock *sk = sock->sk; 834 struct sock *sk = sock->sk;
834 struct irda_sock *new, *self = irda_sk(sk); 835 struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
836 struct sk_buff *skb = NULL; 837 struct sk_buff *skb = NULL;
837 int err; 838 int err;
838 839
839 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); 840 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
840 if (err) 841 if (err)
841 return err; 842 return err;
842 843
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 89bbde1081ce..84de7b6326dc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -938,7 +938,7 @@ done:
938 938
939/* Accept a pending connection */ 939/* Accept a pending connection */
940static int iucv_sock_accept(struct socket *sock, struct socket *newsock, 940static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
941 int flags) 941 int flags, bool kern)
942{ 942{
943 DECLARE_WAITQUEUE(wait, current); 943 DECLARE_WAITQUEUE(wait, current);
944 struct sock *sk = sock->sk, *nsk; 944 struct sock *sk = sock->sk, *nsk;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 06186d608a27..cb4fff785cbf 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
641 * @sock: Socket which connections arrive on. 641 * @sock: Socket which connections arrive on.
642 * @newsock: Socket to move incoming connection to. 642 * @newsock: Socket to move incoming connection to.
643 * @flags: User specified operational flags. 643 * @flags: User specified operational flags.
644 * @kern: If the socket is kernel internal
644 * 645 *
645 * Accept a new incoming connection. 646 * Accept a new incoming connection.
646 * Returns 0 upon success, negative otherwise. 647 * Returns 0 upon success, negative otherwise.
647 */ 648 */
648static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) 649static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
650 bool kern)
649{ 651{
650 struct sock *sk = sock->sk, *newsk; 652 struct sock *sk = sock->sk, *newsk;
651 struct llc_sock *llc, *newllc; 653 struct llc_sock *llc, *newllc;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3818686182b2..33211f9a2656 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1288,7 +1288,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
1288 /* fall through */ 1288 /* fall through */
1289 case NETDEV_CHANGE: 1289 case NETDEV_CHANGE:
1290 nh->nh_flags |= RTNH_F_LINKDOWN; 1290 nh->nh_flags |= RTNH_F_LINKDOWN;
1291 ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; 1291 if (event != NETDEV_UNREGISTER)
1292 ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
1292 break; 1293 break;
1293 } 1294 }
1294 if (event == NETDEV_UNREGISTER) 1295 if (event == NETDEV_UNREGISTER)
@@ -2028,6 +2029,7 @@ static void mpls_net_exit(struct net *net)
2028 for (index = 0; index < platform_labels; index++) { 2029 for (index = 0; index < platform_labels; index++) {
2029 struct mpls_route *rt = rtnl_dereference(platform_label[index]); 2030 struct mpls_route *rt = rtnl_dereference(platform_label[index]);
2030 RCU_INIT_POINTER(platform_label[index], NULL); 2031 RCU_INIT_POINTER(platform_label[index], NULL);
2032 mpls_notify_route(net, index, rt, NULL, NULL);
2031 mpls_rt_free(rt); 2033 mpls_rt_free(rt);
2032 } 2034 }
2033 rtnl_unlock(); 2035 rtnl_unlock();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bbf4526b885..ebf16f7f9089 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -765,7 +765,8 @@ out_release:
765 return err; 765 return err;
766} 766}
767 767
768static int nr_accept(struct socket *sock, struct socket *newsock, int flags) 768static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
769 bool kern)
769{ 770{
770 struct sk_buff *skb; 771 struct sk_buff *skb;
771 struct sock *newsk; 772 struct sock *newsk;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 879885b31cce..2ffb18e73df6 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
441} 441}
442 442
443static int llcp_sock_accept(struct socket *sock, struct socket *newsock, 443static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
444 int flags) 444 int flags, bool kern)
445{ 445{
446 DECLARE_WAITQUEUE(wait, current); 446 DECLARE_WAITQUEUE(wait, current);
447 struct sock *sk = sock->sk, *new_sk; 447 struct sock *sk = sock->sk, *new_sk;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 222bedcd9575..e81537991ddf 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
772 sock_put(sk); 772 sock_put(sk);
773} 773}
774 774
775static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) 775static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
776 bool kern)
776{ 777{
777 struct pep_sock *pn = pep_sk(sk), *newpn; 778 struct pep_sock *pn = pep_sk(sk), *newpn;
778 struct sock *newsk = NULL; 779 struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
846 } 847 }
847 848
848 /* Create a new to-be-accepted sock */ 849 /* Create a new to-be-accepted sock */
849 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0); 850 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
851 kern);
850 if (!newsk) { 852 if (!newsk) {
851 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); 853 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
852 err = -ENOBUFS; 854 err = -ENOBUFS;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index a6c8da3ee893..64634e3ec2fc 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -305,7 +305,7 @@ out:
305} 305}
306 306
307static int pn_socket_accept(struct socket *sock, struct socket *newsock, 307static int pn_socket_accept(struct socket *sock, struct socket *newsock,
308 int flags) 308 int flags, bool kern)
309{ 309{
310 struct sock *sk = sock->sk; 310 struct sock *sk = sock->sk;
311 struct sock *newsk; 311 struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
314 if (unlikely(sk->sk_state != TCP_LISTEN)) 314 if (unlikely(sk->sk_state != TCP_LISTEN))
315 return -EINVAL; 315 return -EINVAL;
316 316
317 newsk = sk->sk_prot->accept(sk, flags, &err); 317 newsk = sk->sk_prot->accept(sk, flags, &err, kern);
318 if (!newsk) 318 if (!newsk)
319 return err; 319 return err;
320 320
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 0e04dcceb1d4..1fa75ab7b733 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn)
429 */ 429 */
430 rds_cong_remove_conn(conn); 430 rds_cong_remove_conn(conn);
431 431
432 put_net(conn->c_net);
432 kmem_cache_free(rds_conn_slab, conn); 433 kmem_cache_free(rds_conn_slab, conn);
433 434
434 spin_lock_irqsave(&rds_conn_lock, flags); 435 spin_lock_irqsave(&rds_conn_lock, flags);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ce3775abc6e7..1c38d2c7caa8 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
442 ic->i_send_cq = NULL; 442 ic->i_send_cq = NULL;
443 ibdev_put_vector(rds_ibdev, ic->i_scq_vector); 443 ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
444 rdsdebug("ib_create_cq send failed: %d\n", ret); 444 rdsdebug("ib_create_cq send failed: %d\n", ret);
445 goto out; 445 goto rds_ibdev_out;
446 } 446 }
447 447
448 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); 448 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
456 ic->i_recv_cq = NULL; 456 ic->i_recv_cq = NULL;
457 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); 457 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
458 rdsdebug("ib_create_cq recv failed: %d\n", ret); 458 rdsdebug("ib_create_cq recv failed: %d\n", ret);
459 goto out; 459 goto send_cq_out;
460 } 460 }
461 461
462 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); 462 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
463 if (ret) { 463 if (ret) {
464 rdsdebug("ib_req_notify_cq send failed: %d\n", ret); 464 rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
465 goto out; 465 goto recv_cq_out;
466 } 466 }
467 467
468 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); 468 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
469 if (ret) { 469 if (ret) {
470 rdsdebug("ib_req_notify_cq recv failed: %d\n", ret); 470 rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
471 goto out; 471 goto recv_cq_out;
472 } 472 }
473 473
474 /* XXX negotiate max send/recv with remote? */ 474 /* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
494 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); 494 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
495 if (ret) { 495 if (ret) {
496 rdsdebug("rdma_create_qp failed: %d\n", ret); 496 rdsdebug("rdma_create_qp failed: %d\n", ret);
497 goto out; 497 goto recv_cq_out;
498 } 498 }
499 499
500 ic->i_send_hdrs = ib_dma_alloc_coherent(dev, 500 ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
504 if (!ic->i_send_hdrs) { 504 if (!ic->i_send_hdrs) {
505 ret = -ENOMEM; 505 ret = -ENOMEM;
506 rdsdebug("ib_dma_alloc_coherent send failed\n"); 506 rdsdebug("ib_dma_alloc_coherent send failed\n");
507 goto out; 507 goto qp_out;
508 } 508 }
509 509
510 ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, 510 ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
514 if (!ic->i_recv_hdrs) { 514 if (!ic->i_recv_hdrs) {
515 ret = -ENOMEM; 515 ret = -ENOMEM;
516 rdsdebug("ib_dma_alloc_coherent recv failed\n"); 516 rdsdebug("ib_dma_alloc_coherent recv failed\n");
517 goto out; 517 goto send_hdrs_dma_out;
518 } 518 }
519 519
520 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), 520 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
522 if (!ic->i_ack) { 522 if (!ic->i_ack) {
523 ret = -ENOMEM; 523 ret = -ENOMEM;
524 rdsdebug("ib_dma_alloc_coherent ack failed\n"); 524 rdsdebug("ib_dma_alloc_coherent ack failed\n");
525 goto out; 525 goto recv_hdrs_dma_out;
526 } 526 }
527 527
528 ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work), 528 ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
530 if (!ic->i_sends) { 530 if (!ic->i_sends) {
531 ret = -ENOMEM; 531 ret = -ENOMEM;
532 rdsdebug("send allocation failed\n"); 532 rdsdebug("send allocation failed\n");
533 goto out; 533 goto ack_dma_out;
534 } 534 }
535 535
536 ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work), 536 ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
538 if (!ic->i_recvs) { 538 if (!ic->i_recvs) {
539 ret = -ENOMEM; 539 ret = -ENOMEM;
540 rdsdebug("recv allocation failed\n"); 540 rdsdebug("recv allocation failed\n");
541 goto out; 541 goto sends_out;
542 } 542 }
543 543
544 rds_ib_recv_init_ack(ic); 544 rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
546 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, 546 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
547 ic->i_send_cq, ic->i_recv_cq); 547 ic->i_send_cq, ic->i_recv_cq);
548 548
549out: 549 return ret;
550
551sends_out:
552 vfree(ic->i_sends);
553ack_dma_out:
554 ib_dma_free_coherent(dev, sizeof(struct rds_header),
555 ic->i_ack, ic->i_ack_dma);
556recv_hdrs_dma_out:
557 ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
558 sizeof(struct rds_header),
559 ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
560send_hdrs_dma_out:
561 ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
562 sizeof(struct rds_header),
563 ic->i_send_hdrs, ic->i_send_hdrs_dma);
564qp_out:
565 rdma_destroy_qp(ic->i_cm_id);
566recv_cq_out:
567 if (!ib_destroy_cq(ic->i_recv_cq))
568 ic->i_recv_cq = NULL;
569send_cq_out:
570 if (!ib_destroy_cq(ic->i_send_cq))
571 ic->i_send_cq = NULL;
572rds_ibdev_out:
573 rds_ib_remove_conn(rds_ibdev, conn);
550 rds_ib_dev_put(rds_ibdev); 574 rds_ib_dev_put(rds_ibdev);
575
551 return ret; 576 return ret;
552} 577}
553 578
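The rds_ib_setup_qp() rewrite above replaces a single catch-all out: label with a ladder of labels, one per acquired resource, so an error at any step unwinds exactly what was set up so far, in reverse order. A generic, self-contained sketch of that goto-unwind style (the resources here are plain allocations, purely for illustration):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto free_a;
        c = malloc(16);
        if (!c)
                goto free_b;

        /* Success: in real code the resources would be kept alive here;
         * for this demo, release them again before returning.
         */
        free(c);
        free(b);
        free(a);
        return 0;

free_b:
        free(b);
free_a:
        free(a);
out:
        return -1;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}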
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 39518ef7af4d..82d38ccf5e8b 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -147,7 +147,7 @@ struct rds_connection {
147 147
148 /* Protocol version */ 148 /* Protocol version */
149 unsigned int c_version; 149 unsigned int c_version;
150 possible_net_t c_net; 150 struct net *c_net;
151 151
152 struct list_head c_map_item; 152 struct list_head c_map_item;
153 unsigned long c_map_queued; 153 unsigned long c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
162static inline 162static inline
163struct net *rds_conn_net(struct rds_connection *conn) 163struct net *rds_conn_net(struct rds_connection *conn)
164{ 164{
165 return read_pnet(&conn->c_net); 165 return conn->c_net;
166} 166}
167 167
168static inline 168static inline
169void rds_conn_net_set(struct rds_connection *conn, struct net *net) 169void rds_conn_net_set(struct rds_connection *conn, struct net *net)
170{ 170{
171 write_pnet(&conn->c_net, net); 171 conn->c_net = get_net(net);
172} 172}
173 173
174#define RDS_FLAG_CONG_BITMAP 0x01 174#define RDS_FLAG_CONG_BITMAP 0x01
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index a973d3b4dff0..225690076773 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
 	 * we do need to clean up the listen socket here.
 	 */
 	if (rtn->rds_tcp_listen_sock) {
-		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		struct socket *lsock = rtn->rds_tcp_listen_sock;
+
 		rtn->rds_tcp_listen_sock = NULL;
-		flush_work(&rtn->rds_tcp_accept_w);
+		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	}
 }
 
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
 	struct rds_tcp_connection *tc, *_tc;
 	LIST_HEAD(tmp_list);
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
 	rtn->rds_tcp_listen_sock = NULL;
-	flush_work(&rtn->rds_tcp_accept_w);
+	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+		struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
 		if (net != c_net || !tc->t_sock)
 			continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
 void *rds_tcp_listen_sock_def_readable(struct net *net)
 {
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
+
+	if (!lsock)
+		return NULL;
 
-	return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+	return lsock->sk->sk_user_data;
 }
 
 static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+		struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
 		if (net != c_net || !tc->t_sock)
 			continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
 		goto out;
 	}
 
-	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
-	if (ret) {
-		pr_warn("could not register rds_tcp_dev_notifier\n");
+	ret = rds_tcp_recv_init();
+	if (ret)
 		goto out_slab;
-	}
 
 	ret = register_pernet_subsys(&rds_tcp_net_ops);
 	if (ret)
-		goto out_notifier;
+		goto out_recv;
 
-	ret = rds_tcp_recv_init();
-	if (ret)
+	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+	if (ret) {
+		pr_warn("could not register rds_tcp_dev_notifier\n");
 		goto out_pernet;
+	}
 
 	rds_trans_register(&rds_tcp_transport);
 
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
 
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
-out_notifier:
-	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
-		pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_recv:
+	rds_tcp_recv_exit();
out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
out:
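The rds_tcp_init() hunks reorder module setup so that the netdevice notifier, the only step whose callback can fire asynchronously, is registered last, and the error labels unwind in exact reverse order of setup. A compilable sketch of that shape, with ex_* stubs standing in for the slab, pernet, and notifier registrations (none of these names are from the patch):

/* Sketch: register externally visible hooks last, unwind in reverse order. */
static int ex_alloc_caches(void)       { return 0; }
static void ex_free_caches(void)       { }
static int ex_register_pernet(void)    { return 0; }
static void ex_unregister_pernet(void) { }
static int ex_register_notifier(void)  { return 0; }

static int ex_init(void)
{
	int ret;

	ret = ex_alloc_caches();	/* purely internal resources first */
	if (ret)
		return ret;

	ret = ex_register_pernet();	/* still no external callbacks possible */
	if (ret)
		goto out_caches;

	ret = ex_register_notifier();	/* last: callbacks may now fire safely */
	if (ret)
		goto out_pernet;

	return 0;

out_pernet:
	ex_unregister_pernet();
out_caches:
	ex_free_caches();
	return ret;
}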
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc8906576..56ea6620fcf9 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
 struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
 void rds_tcp_listen_data_ready(struct sock *sk);
 int rds_tcp_accept_one(struct socket *sock);
 int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 67d0929c7d3d..507678853e6c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
 
 	new_sock->type = sock->type;
 	new_sock->ops = sock->ops;
-	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
 	if (ret < 0)
 		goto out;
 
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 	 * before it has been accepted and the accepter has set up their
 	 * data_ready.. we only want to queue listen work for our listening
 	 * socket
+	 *
+	 * (*ready)() may be null if we are racing with netns delete, and
+	 * the listen socket is being torn down.
 	 */
 	if (sk->sk_state == TCP_LISTEN)
 		rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 
out:
 	read_unlock_bh(&sk->sk_callback_lock);
-	ready(sk);
+	if (ready)
+		ready(sk);
 }
 
 struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
 	return NULL;
 }
 
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
 {
 	struct sock *sk;
 
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
 
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
+	flush_work(acceptor);
 	sock_release(sock);
 }
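The tcp_listen.c hunks make teardown flush the acceptor work item before the listen socket is released, and make the data_ready path tolerate a NULL saved callback while that teardown is racing with it. The ordering is a generic "quiesce, then free" pattern; a hedged kernel-style sketch with illustrative names (not the RDS ones):

/* Sketch: detach from callbacks, wait for in-flight work, only then free. */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct listener_like {
	struct work_struct accept_work;
	void *resource;
};

static void listener_like_stop(struct listener_like *l)
{
	/* Callers have already cleared the published pointer to this
	 * listener, so no new work can be queued after this point.
	 */
	flush_work(&l->accept_work);	/* wait for any accept work running now */

	kfree(l->resource);		/* safe only once the work has finished */
}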
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b8a1df2c9785..4a9729257023 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -871,7 +871,8 @@ out_release:
 	return err;
 }
 
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sk_buff *skb;
 	struct sock *newsk;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9f4cfa25af7c..18b2ad8be8e2 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u16 skew)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	enum rxrpc_call_state state;
 	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int ix;
 	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 	_proto("Rx DATA %%%u { #%u f=%02x }",
 	       sp->hdr.serial, seq, sp->hdr.flags);
 
-	if (call->state >= RXRPC_CALL_COMPLETE)
+	state = READ_ONCE(call->state);
+	if (state >= RXRPC_CALL_COMPLETE)
 		return;
 
 	/* Received data implicitly ACKs all of the request packets we sent
 	 * when we're acting as a client.
 	 */
-	if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
-	     call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
 	    !rxrpc_receiving_reply(call))
 		return;
 
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_peer *peer;
 	unsigned int mtu;
+	bool wake = false;
 	u32 rwind = ntohl(ackinfo->rwind);
 
 	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
 	       rwind, ntohl(ackinfo->jumbo_max));
 
-	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
-	call->tx_winsize = rwind;
+	if (call->tx_winsize != rwind) {
+		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+		if (rwind > call->tx_winsize)
+			wake = true;
+		call->tx_winsize = rwind;
+	}
+
 	if (call->cong_ssthresh > rwind)
 		call->cong_ssthresh = rwind;
 
@@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 		spin_unlock_bh(&peer->lock);
 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
 	}
+
+	if (wake)
+		wake_up(&call->waitq);
 }
 
 /*
@@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 		return rxrpc_proto_abort("AK0", call, 0);
 
 	/* Ignore ACKs unless we are or have just been transmitting. */
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_CLIENT_SEND_REQUEST:
 	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
 	case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
 					  struct rxrpc_call *call)
 {
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		rxrpc_call_completed(call);
 		break;
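The rxrpc hunks snapshot call->state once with READ_ONCE() and test the local copy, instead of re-reading a field that another context may change between comparisons; the snapshot keeps every branch consistent with a single observed value. A standalone sketch of that snapshot-then-test pattern, with made-up types that only stand in for the rxrpc ones:

/* Sketch: read a racily-updated state once, then reason about the copy. */
#include <linux/compiler.h>

enum call_state_like { STATE_SETUP, STATE_RUNNING, STATE_COMPLETE };

struct call_like {
	enum call_state_like state;	/* may be written by another CPU */
};

static bool call_like_may_send(struct call_like *call)
{
	/* One marked read; later comparisons all see the same value even if
	 * call->state changes concurrently.
	 */
	enum call_state_like state = READ_ONCE(call->state);

	if (state >= STATE_COMPLETE)
		return false;
	return state == STATE_RUNNING;
}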
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6491ca46a03f..3e2f1a8e9c5b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -527,7 +527,7 @@ try_again:
 		msg->msg_namelen = len;
 	}
 
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_SERVER_ACCEPTING:
 		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
 		break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 
 	mutex_lock(&call->user_mutex);
 
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 	case RXRPC_CALL_SERVER_ACK_REQUEST:
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bc2d3dcff9de..97ab214ca411 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	__releases(&rx->sk.sk_lock.slock)
 {
+	enum rxrpc_call_state state;
 	enum rxrpc_command cmd;
 	struct rxrpc_call *call;
 	unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 			return PTR_ERR(call);
 		/* ... and we have the call lock. */
 	} else {
-		ret = -EBUSY;
-		if (call->state == RXRPC_CALL_UNINITIALISED ||
-		    call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
-		    call->state == RXRPC_CALL_SERVER_PREALLOC ||
-		    call->state == RXRPC_CALL_SERVER_SECURING ||
-		    call->state == RXRPC_CALL_SERVER_ACCEPTING)
+		switch (READ_ONCE(call->state)) {
+		case RXRPC_CALL_UNINITIALISED:
+		case RXRPC_CALL_CLIENT_AWAIT_CONN:
+		case RXRPC_CALL_SERVER_PREALLOC:
+		case RXRPC_CALL_SERVER_SECURING:
+		case RXRPC_CALL_SERVER_ACCEPTING:
+			ret = -EBUSY;
 			goto error_release_sock;
+		default:
+			break;
+		}
 
 		ret = mutex_lock_interruptible(&call->user_mutex);
 		release_sock(&rx->sk);
@@ -542,10 +547,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		}
 	}
 
+	state = READ_ONCE(call->state);
 	_debug("CALL %d USR %lx ST %d on CONN %p",
-	       call->debug_id, call->user_call_ID, call->state, call->conn);
+	       call->debug_id, call->user_call_ID, state, call->conn);
 
-	if (call->state >= RXRPC_CALL_COMPLETE) {
+	if (state >= RXRPC_CALL_COMPLETE) {
 		/* it's too late for this call */
 		ret = -ESHUTDOWN;
 	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
@@ -555,12 +561,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	} else if (cmd != RXRPC_CMD_SEND_DATA) {
 		ret = -EINVAL;
 	} else if (rxrpc_is_client_call(call) &&
-		   call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
 		/* request phase complete for this client call */
 		ret = -EPROTO;
 	} else if (rxrpc_is_service_call(call) &&
-		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
 		/* Reply phase not begun or not complete for service call. */
 		ret = -EPROTO;
 	} else {
@@ -605,14 +611,21 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
 	_debug("CALL %d USR %lx ST %d on CONN %p",
 	       call->debug_id, call->user_call_ID, call->state, call->conn);
 
-	if (call->state >= RXRPC_CALL_COMPLETE) {
-		ret = -ESHUTDOWN; /* it's too late for this call */
-	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
-		ret = -EPROTO; /* request phase complete for this client call */
-	} else {
+	switch (READ_ONCE(call->state)) {
+	case RXRPC_CALL_CLIENT_SEND_REQUEST:
+	case RXRPC_CALL_SERVER_ACK_REQUEST:
+	case RXRPC_CALL_SERVER_SEND_REPLY:
 		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+		break;
+	case RXRPC_CALL_COMPLETE:
+		read_lock_bh(&call->state_lock);
+		ret = -call->error;
+		read_unlock_bh(&call->state_lock);
+		break;
+	default:
+		/* Request phase complete for this client call */
+		ret = -EPROTO;
+		break;
 	}
 
 	mutex_unlock(&call->user_mutex);
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ab8062909962..f9bb43c25697 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 	if (ret < 0)
 		return ret;
 
+	if (!tb[TCA_CONNMARK_PARMS])
+		return -EINVAL;
+
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(tn, parm->index, a, bind)) {
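The act_connmark fix adds the check that a required netlink attribute was actually supplied before nla_data() dereferences it: parsing against a policy fills the tb[] table only for attributes that were present, so an absent attribute leaves a NULL slot. A hedged sketch of the same validation, with made-up attribute names and the pre-4.12 nla_parse_nested() signature assumed:

/* Sketch: reject a request whose mandatory attribute is missing. */
#include <net/netlink.h>

enum { EX_ATTR_UNSPEC, EX_ATTR_PARMS, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static int ex_parse(struct nlattr *nla, const struct nla_policy *policy)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int ret;

	ret = nla_parse_nested(tb, EX_ATTR_MAX, nla, policy);
	if (ret < 0)
		return ret;

	if (!tb[EX_ATTR_PARMS])		/* attribute may simply be absent */
		return -EINVAL;

	/* only now is nla_data(tb[EX_ATTR_PARMS]) safe to use */
	return 0;
}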
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3b7074e23024..c736627f8f4a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 
 	return skb->len;
nla_put_failure:
-	rcu_read_unlock();
 	nlmsg_trim(skb, b);
 	return -1;
 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 063baac5b9fe..961ee59f696a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 
 /* Create and initialize a new sk for the socket to be returned by accept(). */
 static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
-					     struct sctp_association *asoc)
+					     struct sctp_association *asoc,
+					     bool kern)
 {
 	struct sock *newsk;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
 	struct ipv6_txoptions *opt;
 
-	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
+	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
 	if (!newsk)
 		goto out;
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1b6d4574d2b0..989a900383b5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
 
 /* Create and initialize a new sk for the socket returned by accept(). */
 static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
-					     struct sctp_association *asoc)
+					     struct sctp_association *asoc,
+					     bool kern)
 {
 	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
-				      sk->sk_prot, 0);
+				      sk->sk_prot, kern);
 	struct inet_sock *newinet;
 
 	if (!newsk)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6f0a9be50f50..0f378ea2ae38 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4116,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
  * descriptor will be returned from accept() to represent the newly
  * formed association.
  */
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
 {
 	struct sctp_sock *sp;
 	struct sctp_endpoint *ep;
@@ -4151,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 	 */
 	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
 
-	newsk = sp->pf->create_accept_sk(sk, asoc);
+	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
 	if (!newsk) {
 		error = -ENOMEM;
 		goto out;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 85837ab90e89..093803786eac 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -944,7 +944,7 @@ out:
 }
 
 static int smc_accept(struct socket *sock, struct socket *new_sock,
-		      int flags)
+		      int flags, bool kern)
 {
 	struct sock *sk = sock->sk, *nsk;
 	DECLARE_WAITQUEUE(wait, current);
diff --git a/net/socket.c b/net/socket.c
index 2c1e8677ff2d..e034fe4164be 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1506,7 +1506,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
 	if (err)
 		goto out_fd;
 
-	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+	err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
 	if (err < 0)
 		goto out_fd;
 
@@ -1731,6 +1731,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 	/* We assume all kernel code knows the size of sockaddr_storage */
 	msg.msg_namelen = 0;
 	msg.msg_iocb = NULL;
+	msg.msg_flags = 0;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 	err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3239,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 	if (err < 0)
 		goto done;
 
-	err = sock->ops->accept(sock, *newsock, flags);
+	err = sock->ops->accept(sock, *newsock, flags, true);
 	if (err < 0) {
 		sock_release(*newsock);
 		*newsock = NULL;
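With the new argument, user-space accept4() passes kern = false while kernel_accept() passes true, and protocols that allocate the child socket in their accept handler can forward the flag to sk_alloc(), as the sctp and tipc hunks above do. A hedged sketch of how a protocol might thread the flag through; exproto_* is invented and this is not any in-tree handler:

/* Sketch: forwarding the accept() kern flag into sk_alloc(). */
#include <linux/net.h>
#include <net/sock.h>

static int exproto_accept(struct socket *sock, struct socket *newsock,
			  int flags, bool kern)
{
	struct sock *newsk;

	newsk = sk_alloc(sock_net(sock->sk), sock->sk->sk_family,
			 GFP_KERNEL, sock->sk->sk_prot, kern);
	if (!newsk)
		return -ENOMEM;

	sock_init_data(newsock, newsk);	/* attach the new sock to newsock */
	sock_graft(newsk, newsock);
	return 0;
}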
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 81cd31acf690..3b332b395045 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	struct ib_cq *sendcq, *recvcq;
 	int rc;
 
-	max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+			RPCRDMA_MAX_SEND_SGES);
 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
 		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
 		return -ENOMEM;
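The verbs.c hunk swaps min() for min_t(), presumably because the two operands do not share a type and the kernel's type-checked min() rejects or warns on mixed-type comparisons; min_t() casts both sides to the named type before comparing. A plain-C sketch of what min_t(unsigned int, a, b) amounts to here:

/* Sketch: the comparison min_t(unsigned int, ...) performs. */
static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;	/* both operands already coerced to one type */
}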
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43e4045e72bc..7130e73bd42c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern);
 static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 			   struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
  *
  * Returns 0 on success, errno otherwise
  */
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern)
 {
 	struct sock *new_sk, *sk = sock->sk;
 	struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
 
 	buf = skb_peek(&sk->sk_receive_queue);
 
-	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
 	if (res)
 		goto exit;
 	security_sk_clone(sock->sk, new_sock->sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ee37b390260a..928691c43408 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
 static int unix_stream_connect(struct socket *, struct sockaddr *,
 			       int addr_len, int flags);
 static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
 static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
 		set_bit(SOCK_PASSSEC, &new->flags);
 }
 
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *tsk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9192ead66751..9f770f33c100 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1250,7 +1250,8 @@ out:
 	return err;
 }
 
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+			bool kern)
 {
 	struct sock *listener;
 	int err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fd28a49dbe8f..8b911c29860e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 	return rc;
 }
 
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *newsk;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0806dccdf507..236cbbc0ab9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
 }
 
 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
-						 const struct flowi *fl)
+						 const struct flowi *fl, u16 family)
 {
 	struct xfrm_policy *pol;
 
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
 again:
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl,
-						 sk->sk_family);
+		bool match = xfrm_selector_match(&pol->selector, fl, family);
 		int err = 0;
 
 		if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 	sk = sk_const_to_full_sk(sk);
 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
 		num_pols = 1;
-		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
 		err = xfrm_expand_policies(fl, family, pols,
 					   &num_pols, &num_xfrms);
 		if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 	pol = NULL;
 	sk = sk_to_full_sk(sk);
 	if (sk && sk->sk_policy[dir]) {
-		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
 		if (IS_ERR(pol)) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
 			return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
 {
 	int rv;
 
+	/* Initialize the per-net locks here */
+	spin_lock_init(&net->xfrm.xfrm_state_lock);
+	spin_lock_init(&net->xfrm.xfrm_policy_lock);
+	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
 	rv = xfrm_statistics_init(net);
 	if (rv < 0)
 		goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
 	if (rv < 0)
 		goto out;
 
-	/* Initialize the per-net locks here */
-	spin_lock_init(&net->xfrm.xfrm_state_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_lock);
-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
 	return 0;
 
 out:
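The final xfrm hunks move the per-netns lock initialization to the top of xfrm_net_init(), so the locks exist before any later init step, or its error-unwind path, could take them. The general rule is to set up synchronization primitives before anything that might use them; a small sketch with illustrative names, not the xfrm structures:

/* Sketch: initialize locks before steps whose error paths may take them. */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct ex_pernet {
	spinlock_t obj_lock;
	struct mutex cfg_mutex;
	int table_size;
};

static int ex_pernet_init(struct ex_pernet *p)
{
	/* Locks first: later steps and their cleanup may lock them. */
	spin_lock_init(&p->obj_lock);
	mutex_init(&p->cfg_mutex);

	p->table_size = 0;	/* remaining setup can now fail safely */
	return 0;
}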