Diffstat (limited to 'net')
-rw-r--r--	net/Makefile	4
-rw-r--r--	net/batman-adv/unicast.c	15
-rw-r--r--	net/bluetooth/l2cap.c	1
-rw-r--r--	net/bluetooth/rfcomm/tty.c	2
-rw-r--r--	net/bridge/Kconfig	1
-rw-r--r--	net/bridge/br_input.c	2
-rw-r--r--	net/bridge/br_multicast.c	42
-rw-r--r--	net/bridge/br_private.h	3
-rw-r--r--	net/caif/chnl_net.c	4
-rw-r--r--	net/ceph/messenger.c	133
-rw-r--r--	net/ceph/pagevec.c	18
-rw-r--r--	net/core/dev.c	48
-rw-r--r--	net/core/dev_addr_lists.c	2
-rw-r--r--	net/core/pktgen.c	2
-rw-r--r--	net/dcb/dcbnl.c	11
-rw-r--r--	net/dccp/input.c	7
-rw-r--r--	net/dns_resolver/dns_key.c	20
-rw-r--r--	net/ipv4/devinet.c	32
-rw-r--r--	net/ipv4/inet_timewait_sock.c	2
-rw-r--r--	net/ipv4/ip_gre.c	3
-rw-r--r--	net/ipv4/ipip.c	2
-rw-r--r--	net/ipv4/route.c	1
-rw-r--r--	net/ipv4/tcp_input.c	5
-rw-r--r--	net/ipv4/tcp_output.c	2
-rw-r--r--	net/ipv6/ip6_tunnel.c	1
-rw-r--r--	net/ipv6/netfilter/ip6t_LOG.c	2
-rw-r--r--	net/ipv6/route.c	22
-rw-r--r--	net/ipv6/sit.c	2
-rw-r--r--	net/mac80211/cfg.c	2
-rw-r--r--	net/mac80211/ieee80211_i.h	2
-rw-r--r--	net/mac80211/iface.c	1
-rw-r--r--	net/mac80211/mlme.c	6
-rw-r--r--	net/mac80211/status.c	7
-rw-r--r--	net/mac80211/tx.c	2
-rw-r--r--	net/mac80211/util.c	2
-rw-r--r--	net/netfilter/core.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	4
-rw-r--r--	net/netfilter/nf_conntrack_core.c	11
-rw-r--r--	net/netfilter/nf_log.c	4
-rw-r--r--	net/netfilter/nf_tproxy_core.c	27
-rw-r--r--	net/netfilter/xt_TPROXY.c	22
-rw-r--r--	net/netfilter/xt_socket.c	13
-rw-r--r--	net/netlink/af_netlink.c	18
-rw-r--r--	net/rds/ib_send.c	5
-rw-r--r--	net/rds/loop.c	11
-rw-r--r--	net/rxrpc/ar-input.c	1
-rw-r--r--	net/rxrpc/ar-key.c	8
-rw-r--r--	net/sched/sch_generic.c	1
-rw-r--r--	net/sctp/sm_make_chunk.c	10
-rw-r--r--	net/sunrpc/sched.c	75
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	1
-rw-r--r--	net/sunrpc/xprtsock.c	3
-rw-r--r--	net/unix/af_unix.c	17
-rw-r--r--	net/wireless/wext-compat.c	4
-rw-r--r--	net/x25/x25_facilities.c	28
-rw-r--r--	net/x25/x25_in.c	14
-rw-r--r--	net/x25/x25_link.c	5
-rw-r--r--	net/xfrm/xfrm_policy.c	7
58 files changed, 484 insertions(+), 219 deletions(-)
diff --git a/net/Makefile b/net/Makefile
index a3330ebe2c53..a51d9465e628 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
 obj-$(CONFIG_INET) += ipv4/
 obj-$(CONFIG_XFRM) += xfrm/
 obj-$(CONFIG_UNIX) += unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y += ipv6/
-endif
+obj-$(CONFIG_NET) += ipv6/
 obj-$(CONFIG_PACKET) += packet/
 obj-$(CONFIG_NET_KEY) += key/
 obj-$(CONFIG_BRIDGE) += bridge/
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index ee41fef04b21..d1a611322549 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
 		skb = tfp->skb;
 	}
 
+	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+		goto err;
+
 	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-		/* free buffered skb, skb will be freed later */
-		kfree_skb(tfp->skb);
-		return NULL;
-	}
+	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+		goto err;
 
 	/* move free entry to end */
 	tfp->skb = NULL;
@@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
 	unicast_packet->packet_type = BAT_UNICAST;
 
 	return skb;
+
+err:
+	/* free buffered skb, skb will be freed later */
+	kfree_skb(tfp->skb);
+	return NULL;
 }
 
 static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 7550abb0c96a..675614e38e14 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
 				result = L2CAP_CR_SEC_BLOCK;
 			else
 				result = L2CAP_CR_BAD_PSM;
+			sk->sk_state = BT_DISCONN;
 
 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
 			break;
 		}
 
+		tty_unlock();
 		schedule();
+		tty_lock();
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
 	tristate "802.1d Ethernet Bridging"
 	select LLC
 	select STP
+	depends on IPV6 || IPV6=n
 	---help---
 	  If you say Y here, then your Linux box will be able to act as an
 	  Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1b776f..88e4aa9cb1f9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21acb34..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-	if (ipv6_addr_is_multicast(addr) &&
-	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
 		return 1;
 	return 0;
 }
@@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data)
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
@@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
 			mod_timer(&mp->timer, jiffies);
 
@@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	eth = eth_hdr(skb);
 
 	memcpy(eth->h_source, br->dev->dev_addr, 6);
-	ipv6_eth_mc_map(group, eth->h_dest);
 	eth->h_proto = htons(ETH_P_IPV6);
 	skb_put(skb, sizeof(*eth));
 
@@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
-	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+	ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+			   &ip6h->saddr);
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
 	hopopt = (u8 *)(ip6h + 1);
 	hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
@@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return 0;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
-	br_group.proto = htons(ETH_P_IP);
+	br_group.proto = htons(ETH_P_IPV6);
 
 	return br_multicast_add_group(br, port, &br_group);
 }
@@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
 		nsrcs = skb_header_pointer(skb,
 					   len + offsetof(struct mld2_grec,
-							  grec_mca),
+							  grec_nsrcs),
 					   sizeof(_nsrcs), &_nsrcs);
 		if (!nsrcs)
 			return -EINVAL;
 
 		if (!pskb_may_pull(skb,
 				   len + sizeof(*grec) +
-				   sizeof(struct in6_addr) * (*nsrcs)))
+				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
 			return -EINVAL;
 
 		grec = (struct mld2_grec *)(skb->data + len);
-		len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+		len += sizeof(*grec) +
+		       sizeof(struct in6_addr) * ntohs(*nsrcs);
 
 		/* We treat these as MLDv1 reports for now. */
 		switch (grec->grec_type) {
@@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
 	max_delay *= br->multicast_last_member_count;
 
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		goto out;
 
 	max_delay *= br->multicast_last_member_count;
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 			 br->multicast_last_member_interval;
 
 	if (!port) {
-		if (!hlist_unhashed(&mp->mglist) &&
+		if (mp->mglist &&
 		    (timer_pending(&mp->timer) ?
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac7734bfc..4e1b620b6be6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
 struct net_bridge_mdb_entry
 {
 	struct hlist_node hlist[2];
-	struct hlist_node mglist;
 	struct net_bridge *br;
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head rcu;
 	struct timer_list timer;
 	struct timer_list query_timer;
 	struct br_ip addr;
+	bool mglist;
 	u32 queries_sent;
 };
 
@@ -238,7 +238,6 @@ struct net_bridge
 	spinlock_t multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head router_list;
-	struct hlist_head mglist;
 
 	struct timer_list multicast_router_timer;
 	struct timer_list multicast_querier_timer;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index fa9dab372b68..6008d6dc18a0 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev)
 	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
 	priv->flowenabled = false;
 
-	ASSERT_RTNL();
 	init_waitqueue_head(&priv->netmgmt_wq);
-	list_add(&priv->list_field, &chnl_net_list);
 }
 
 
@@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
 	ret = register_netdevice(dev);
 	if (ret)
 		pr_warn("device rtml registration failed\n");
+	else
+		list_add(&caifdev->list_field, &chnl_net_list);
 	return ret;
 }
458 458
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..05f357828a2f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 {
 	struct kvec iov = {buf, len};
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
-	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 /*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 			    size_t kvlen, size_t len, int more)
 {
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
 	if (more)
 		msg.msg_flags |= MSG_MORE;
 	else
 		msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
 
-	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 
@@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
 		ceph_msg_put(con->out_msg);
 		con->out_msg = NULL;
 	}
-	con->out_keepalive_pending = false;
 	con->in_seq = 0;
 	con->in_seq_acked = 0;
 }
@@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 		    (msg->pages || msg->pagelist || msg->bio || in_trail))
 			kunmap(page);
 
+		if (ret == -EAGAIN)
+			ret = 0;
 		if (ret <= 0)
 			goto out;
 
@@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
 		     con->auth_retry);
 		if (con->auth_retry == 2) {
 			con->error_msg = "connect authorization failure";
-			reset_connection(con);
-			set_bit(CLOSED, &con->state);
 			return -1;
 		}
 		con->auth_retry = 1;
@@ -1705,14 +1712,6 @@ more:
 
 	/* open the socket first? */
 	if (con->sock == NULL) {
-		/*
-		 * if we were STANDBY and are reconnecting _this_
-		 * connection, bump connect_seq now. Always bump
-		 * global_seq.
-		 */
-		if (test_and_clear_bit(STANDBY, &con->state))
-			con->connect_seq++;
-
 		prepare_write_banner(msgr, con);
 		prepare_write_connect(msgr, con, 1);
 		prepare_read_banner(con);
@@ -1737,16 +1736,12 @@ more_kvec:
 	if (con->out_skip) {
 		ret = write_partial_skip(con);
 		if (ret <= 0)
-			goto done;
-		if (ret < 0) {
-			dout("try_write write_partial_skip err %d\n", ret);
-			goto done;
-		}
+			goto out;
 	}
 	if (con->out_kvec_left) {
 		ret = write_partial_kvec(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 	}
 
 	/* msg pages? */
@@ -1761,11 +1756,11 @@ more_kvec:
 		if (ret == 1)
 			goto more_kvec; /* we need to send the footer, too! */
 		if (ret == 0)
-			goto done;
+			goto out;
 		if (ret < 0) {
 			dout("try_write write_partial_msg_pages err %d\n",
 			     ret);
-			goto done;
+			goto out;
 		}
 	}
 
@@ -1789,10 +1784,9 @@ do_next:
 	/* Nothing to do! */
 	clear_bit(WRITE_PENDING, &con->state);
 	dout("try_write nothing else to write.\n");
-done:
 	ret = 0;
 out:
-	dout("try_write done on %p\n", con);
+	dout("try_write done on %p ret %d\n", con, ret);
 	return ret;
 }
 
@@ -1821,19 +1815,17 @@ more:
 			dout("try_read connecting\n");
 			ret = read_partial_banner(con);
 			if (ret <= 0)
-				goto done;
-			if (process_banner(con) < 0) {
-				ret = -1;
 				goto out;
-			}
+			ret = process_banner(con);
+			if (ret < 0)
+				goto out;
 		}
 		ret = read_partial_connect(con);
 		if (ret <= 0)
-			goto done;
-		if (process_connect(con) < 0) {
-			ret = -1;
 			goto out;
-		}
+		ret = process_connect(con);
+		if (ret < 0)
+			goto out;
 		goto more;
 	}
 
@@ -1848,7 +1840,7 @@ more:
 		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
 		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		con->in_base_pos += ret;
 		if (con->in_base_pos)
 			goto more;
@@ -1859,7 +1851,7 @@ more:
 		 */
 		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		dout("try_read got tag %d\n", (int)con->in_tag);
 		switch (con->in_tag) {
 		case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@ more:
 			break;
 		case CEPH_MSGR_TAG_CLOSE:
 			set_bit(CLOSED, &con->state); /* fixme */
-			goto done;
+			goto out;
 		default:
 			goto bad_tag;
 		}
@@ -1882,13 +1874,12 @@ more:
 			case -EBADMSG:
 				con->error_msg = "bad crc";
 				ret = -EIO;
-				goto out;
+				break;
 			case -EIO:
 				con->error_msg = "io error";
-				goto out;
-			default:
-				goto done;
+				break;
 			}
+			goto out;
 		}
 		if (con->in_tag == CEPH_MSGR_TAG_READY)
 			goto more;
@@ -1898,15 +1889,13 @@ more:
 	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
 		ret = read_partial_ack(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		process_ack(con);
 		goto more;
 	}
 
-done:
-	ret = 0;
 out:
-	dout("try_read done on %p\n", con);
+	dout("try_read done on %p ret %d\n", con, ret);
 	return ret;
 
 bad_tag:
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
 						   work.work);
 
 	mutex_lock(&con->mutex);
+	if (test_and_clear_bit(BACKOFF, &con->state)) {
+		dout("con_work %p backing off\n", con);
+		if (queue_delayed_work(ceph_msgr_wq, &con->work,
+				       round_jiffies_relative(con->delay))) {
+			dout("con_work %p backoff %lu\n", con, con->delay);
+			mutex_unlock(&con->mutex);
+			return;
+		} else {
+			con->ops->put(con);
+			dout("con_work %p FAILED to back off %lu\n", con,
+			     con->delay);
+		}
+	}
 
+	if (test_bit(STANDBY, &con->state)) {
+		dout("con_work %p STANDBY\n", con);
+		goto done;
+	}
 	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
 		dout("con_work CLOSED\n");
 		con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
 	/* Requeue anything that hasn't been acked */
 	list_splice_init(&con->out_sent, &con->out_queue);
 
-	/* If there are no messages in the queue, place the connection
-	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
-	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-		dout("fault setting STANDBY\n");
+	/* If there are no messages queued or keepalive pending, place
+	 * the connection in a STANDBY state */
+	if (list_empty(&con->out_queue) &&
+	    !test_bit(KEEPALIVE_PENDING, &con->state)) {
+		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+		clear_bit(WRITE_PENDING, &con->state);
 		set_bit(STANDBY, &con->state);
 	} else {
 		/* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
 			con->delay = BASE_DELAY_INTERVAL;
 		else if (con->delay < MAX_DELAY_INTERVAL)
 			con->delay *= 2;
-		dout("fault queueing %p delay %lu\n", con, con->delay);
 		con->ops->get(con);
 		if (queue_delayed_work(ceph_msgr_wq, &con->work,
-				       round_jiffies_relative(con->delay)) == 0)
+				       round_jiffies_relative(con->delay))) {
+			dout("fault queued %p delay %lu\n", con, con->delay);
+		} else {
 			con->ops->put(con);
+			dout("fault failed to queue %p delay %lu, backoff\n",
+			     con, con->delay);
+			/*
+			 * In many cases we see a socket state change
+			 * while con_work is running and end up
+			 * queuing (non-delayed) work, such that we
+			 * can't backoff with a delay. Set a flag so
+			 * that when con_work restarts we schedule the
+			 * delay then.
+			 */
+			set_bit(BACKOFF, &con->state);
+		}
 	}
 
 out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);
 
+static void clear_standby(struct ceph_connection *con)
+{
+	/* come back from STANDBY? */
+	if (test_and_clear_bit(STANDBY, &con->state)) {
+		mutex_lock(&con->mutex);
+		dout("clear_standby %p and ++connect_seq\n", con);
+		con->connect_seq++;
+		WARN_ON(test_bit(WRITE_PENDING, &con->state));
+		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+		mutex_unlock(&con->mutex);
+	}
+}
+
 /*
  * Queue up an outgoing message on the given connection.
  */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 
 	/* if there wasn't anything waiting to send before, queue
 	 * new work */
+	clear_standby(con);
 	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
 }
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
  */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+	dout("con_keepalive %p\n", con);
+	clear_standby(con);
 	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
 	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e64c69f..cd9c21df87d1 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
 					  int num_pages, bool write_page)
 {
 	struct page **pages;
-	int rc;
+	int got = 0;
+	int rc = 0;
 
 	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
 	down_read(&current->mm->mmap_sem);
-	rc = get_user_pages(current, current->mm, (unsigned long)data,
-			    num_pages, write_page, 0, pages, NULL);
+	while (got < num_pages) {
+		rc = get_user_pages(current, current->mm,
+		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+		    num_pages - got, write_page, 0, pages + got, NULL);
+		if (rc < 0)
+			break;
+		BUG_ON(rc == 0);
+		got += rc;
+	}
 	up_read(&current->mm->mmap_sem);
-	if (rc < num_pages)
+	if (rc < 0)
 		goto fail;
 	return pages;
 
 fail:
-	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+	ceph_put_page_vector(pages, got, false);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index b6d0bf875a8e..6561021d22d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
 void dev_load(struct net *net, const char *name)
 {
 	struct net_device *dev;
+	int no_module;
 
 	rcu_read_lock();
 	dev = dev_get_by_name_rcu(net, name);
 	rcu_read_unlock();
 
-	if (!dev && capable(CAP_NET_ADMIN))
-		request_module("%s", name);
+	no_module = !dev;
+	if (no_module && capable(CAP_NET_ADMIN))
+		no_module = request_module("netdev-%s", name);
+	if (no_module && capable(CAP_SYS_MODULE)) {
+		if (!request_module("%s", name))
+			pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+	}
 }
 EXPORT_SYMBOL(dev_load);
 
@@ -1280,10 +1288,13 @@ static int __dev_close_many(struct list_head *head)
 
 static int __dev_close(struct net_device *dev)
 {
+	int retval;
 	LIST_HEAD(single);
 
 	list_add(&dev->unreg_list, &single);
-	return __dev_close_many(&single);
+	retval = __dev_close_many(&single);
+	list_del(&single);
+	return retval;
 }
 
 int dev_close_many(struct list_head *head)
@@ -1325,7 +1336,7 @@ int dev_close(struct net_device *dev)
 
 	list_add(&dev->unreg_list, &single);
 	dev_close_many(&single);
-
+	list_del(&single);
 	return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -5063,6 +5074,7 @@ static void rollback_registered(struct net_device *dev)
 
 	list_add(&dev->unreg_list, &single);
 	rollback_registered_many(&single);
+	list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5660,30 +5672,35 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
+	dev->gso_max_size = GSO_MAX_SIZE;
+
+	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+	dev->ethtool_ntuple_list.count = 0;
+	INIT_LIST_HEAD(&dev->napi_list);
+	INIT_LIST_HEAD(&dev->unreg_list);
+	INIT_LIST_HEAD(&dev->link_watch_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
+	setup(dev);
+
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
-		goto free_pcpu;
+		goto free_all;
 
 #ifdef CONFIG_RPS
 	dev->num_rx_queues = rxqs;
 	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
-		goto free_pcpu;
+		goto free_all;
 #endif
 
-	dev->gso_max_size = GSO_MAX_SIZE;
-
-	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
-	dev->ethtool_ntuple_list.count = 0;
-	INIT_LIST_HEAD(&dev->napi_list);
-	INIT_LIST_HEAD(&dev->unreg_list);
-	INIT_LIST_HEAD(&dev->link_watch_list);
-	dev->priv_flags = IFF_XMIT_DST_RELEASE;
-	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
 
+free_all:
+	free_netdev(dev);
+	return NULL;
+
 free_pcpu:
 	free_percpu(dev->pcpu_refcnt);
 	kfree(dev->_tx);
@@ -6211,6 +6228,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
+	list_del(&dev_kill_list);
 	rtnl_unlock();
 }
 
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 
 	list_for_each_entry(ha, &from_list->list, list) {
 		type = addr_type ? addr_type : ha->type;
-		__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+		__hw_addr_del(to_list, ha->addr, addr_len, type);
 	}
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..b5bada92f637 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
 				    pkt_dev->started_at);
 	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
 
-	p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
 		     (unsigned long long)ktime_to_us(elapsed),
 		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
 		     (unsigned long long)ktime_to_us(idle),
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 6b03f561caec..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
 	dcb->cmd = DCB_CMD_GAPP;
 
 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	if (!app_nest)
+		goto out_cancel;
+
 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
@@ -1190,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 			goto err;
 	}
 
-	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 		err = ops->ieee_setpfc(netdev, pfc);
 		if (err)
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp);
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 	}
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, new);
+	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		/* Caller (dccp_v4_do_rcv) will send Reset */
 		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
 		return 1;
+	} else if (sk->sk_state == DCCP_CLOSED) {
+		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+		return 1;
 	}
 
 	if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}
 
 	switch (sk->sk_state) {
-	case DCCP_CLOSED:
-		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-		return 1;
-
 	case DCCP_REQUESTING:
 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
 		if (queued >= 0)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
 	size_t result_len = 0;
 	const char *data = _data, *end, *opt;
 
-	kenter("%%%d,%s,'%s',%zu",
-	       key->serial, key->description, data, datalen);
+	kenter("%%%d,%s,'%*.*s',%zu",
+	       key->serial, key->description,
+	       (int)datalen, (int)datalen, data, datalen);
 
 	if (datalen <= 1 || !data || data[datalen - 1] != '\0')
 		return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 		seq_printf(m, ": %u", key->datalen);
 }
 
+/*
+ * read the DNS data
+ * - the key's semaphore is read-locked
+ */
+static long dns_resolver_read(const struct key *key,
+			      char __user *buffer, size_t buflen)
+{
+	if (key->type_data.x[0])
+		return key->type_data.x[0];
+
+	return user_read(key, buffer, buflen);
+}
+
 struct key_type key_type_dns_resolver = {
 	.name = "dns_resolver",
 	.instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
 	.revoke = user_revoke,
 	.destroy = user_destroy,
 	.describe = dns_resolver_describe,
-	.read = user_read,
+	.read = dns_resolver_read,
 };
 
 static int __init init_dns_resolver(void)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b337bd..036652c8166d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 		     ifap = &ifa->ifa_next) {
 			if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
 			    sin_orig.sin_addr.s_addr ==
-					ifa->ifa_address) {
+					ifa->ifa_local) {
 				break; /* found */
 			}
 		}
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
 	return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+					struct in_device *in_dev)
+
+{
+	struct in_ifaddr *ifa = in_dev->ifa_list;
+
+	if (!ifa)
+		return;
+
+	arp_send(ARPOP_REQUEST, ETH_P_ARP,
+		 ifa->ifa_local, dev,
+		 ifa->ifa_local, NULL,
+		 dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 		}
 		ip_mc_up(in_dev);
 		/* fall through */
-	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_CHANGEADDR:
+		if (!IN_DEV_ARP_NOTIFY(in_dev))
+			break;
+		/* fall through */
+	case NETDEV_NOTIFY_PEERS:
 		/* Send gratuitous ARP to notify of link change */
-		if (IN_DEV_ARP_NOTIFY(in_dev)) {
-			struct in_ifaddr *ifa = in_dev->ifa_list;
-
-			if (ifa)
-				arp_send(ARPOP_REQUEST, ETH_P_ARP,
-					 ifa->ifa_address, dev,
-					 ifa->ifa_address, NULL,
-					 dev->dev_addr, NULL);
-		}
+		inetdev_send_gratuitous_arp(dev, in_dev);
 		break;
 	case NETDEV_DOWN:
 		ip_mc_down(in_dev);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909cf701..3c8dfa16614d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@ restart:
 			}
 
 			rcu_read_unlock();
+			local_bh_disable();
 			inet_twsk_deschedule(tw, twdr);
+			local_bh_enable();
 			inet_twsk_put(tw);
 			goto restart_rcu;
 		}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e34e49..d1d0e2c256fc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			.fl4_dst = dst,
 			.fl4_src = tiph->saddr,
 			.fl4_tos = RT_TOS(tos),
+			.proto = IPPROTO_GRE,
 			.fl_gre_key = tunnel->parms.o_key
 		};
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -1764,4 +1765,4 @@ module_exit(ipgre_fini);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS("gre0");
+MODULE_ALIAS_NETDEV("gre0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..a5f58e7cbb26 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("tunl0");
+MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 788a3e74834e..6ed6603c2f6d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 	.destroy = ipv4_dst_destroy,
 	.check = ipv4_blackhole_dst_check,
 	.default_mtu = ipv4_blackhole_default_mtu,
+	.default_advmss = ipv4_default_advmss,
 	.update_pmtu = ipv4_rt_blackhole_update_pmtu,
 };
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb7f82ebf4a3..65f6c0406245 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 	}
 
 	/* D-SACK for already forgotten data... Do dumb counting. */
-	if (dup_sack &&
+	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
 	    !after(end_seq_0, prior_snd_una) &&
 	    after(end_seq_0, tp->undo_marker))
 		tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
-		if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		if (tp->undo_marker && tp->undo_retrans &&
+		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-		tp->undo_retrans++;
+		tp->undo_retrans += tcp_skb_pcount(skb);
 
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..e528a42a52be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..de338037a736 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@ fallback:
 		if (p != NULL) {
 			sb_add(m, "%02x", *p++);
 			for (i = 1; i < len; i++)
-				sb_add(m, ":%02x", p[i]);
+				sb_add(m, ":%02x", *p++);
 		}
 		sb_add(m, " ");
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1c29f95695de..e7db7014e89f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
 	.destroy = ip6_dst_destroy,
 	.check = ip6_dst_check,
 	.default_mtu = ip6_blackhole_default_mtu,
+	.default_advmss = ip6_default_advmss,
 	.update_pmtu = ip6_rt_blackhole_update_pmtu,
 };
 
@@ -738,8 +739,10 @@ restart:
 
 	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-	else
+	else if (!(rt->dst.flags & DST_HOST))
 		nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
+	else
+		goto out2;
 
 	dst_release(&rt->dst);
 	rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2556,14 +2559,16 @@ static
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
 			      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
-	int delay = net->ipv6.sysctl.flush_delay;
-	if (write) {
-		proc_dointvec(ctl, write, buffer, lenp, ppos);
-		fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-		return 0;
-	} else
+	struct net *net;
+	int delay;
+	if (!write)
 		return -EINVAL;
+
+	net = (struct net *)ctl->extra1;
+	delay = net->ipv6.sysctl.flush_delay;
+	proc_dointvec(ctl, write, buffer, lenp, ppos);
+	fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+	return 0;
 }
 
 ctl_table ipv6_route_table_template[] = {
@@ -2650,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 
 	if (table) {
 		table[0].data = &net->ipv6.sysctl.flush_delay;
+		table[0].extra1 = net;
 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..d2c16e10f650 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sit0");
+MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4bc8a9250cfd..9cd73b11506e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1822,6 +1822,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 		*cookie ^= 2;
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
 		local->hw_roc_skb = skb;
+		local->hw_roc_skb_for_status = skb;
 		mutex_unlock(&local->mtx);
 
 		return 0;
@@ -1875,6 +1876,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
 		if (ret == 0) {
 			kfree_skb(local->hw_roc_skb);
 			local->hw_roc_skb = NULL;
+			local->hw_roc_skb_for_status = NULL;
 		}
 
 		mutex_unlock(&local->mtx);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c47d7c0e48a4..533fd32f49ff 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -953,7 +953,7 @@ struct ieee80211_local {
 
 	struct ieee80211_channel *hw_roc_channel;
 	struct net_device *hw_roc_dev;
-	struct sk_buff *hw_roc_skb;
+	struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
 	struct work_struct hw_roc_start, hw_roc_done;
 	enum nl80211_channel_type hw_roc_channel_type;
 	unsigned int hw_roc_duration;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba456744e..7a10a8d1b2d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	}
 	mutex_unlock(&local->iflist_mtx);
 	unregister_netdevice_many(&unreg_list);
+	list_del(&unreg_list);
 }
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e33746..c9ceb4d57ab0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
 	if (is_multicast_ether_addr(hdr->addr1))
 		return;
 
+	/*
+	 * In case we receive frames after disassociation.
+	 */
+	if (!sdata->u.mgd.associated)
+		return;
+
 	ieee80211_sta_reset_conn_monitor(sdata);
 }
 
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 38a797217a91..071ac95c4aa0 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -323,6 +323,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
323 323
324 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 324 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
325 struct ieee80211_work *wk; 325 struct ieee80211_work *wk;
326 u64 cookie = (unsigned long)skb;
326 327
327 rcu_read_lock(); 328 rcu_read_lock();
328 list_for_each_entry_rcu(wk, &local->work_list, list) { 329 list_for_each_entry_rcu(wk, &local->work_list, list) {
@@ -334,8 +335,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
334 break; 335 break;
335 } 336 }
336 rcu_read_unlock(); 337 rcu_read_unlock();
338 if (local->hw_roc_skb_for_status == skb) {
339 cookie = local->hw_roc_cookie ^ 2;
340 local->hw_roc_skb_for_status = NULL;
341 }
337 cfg80211_mgmt_tx_status( 342 cfg80211_mgmt_tx_status(
338 skb->dev, (unsigned long) skb, skb->data, skb->len, 343 skb->dev, cookie, skb->data, skb->len,
339 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 344 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
340 } 345 }
341 346
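Reviewer note on the mac80211 hunks above: cfg.c records the off-channel frame in the new hw_roc_skb_for_status field, and status.c compares the completed skb against it so the TX-status event carries the remain-on-channel cookie userspace was given rather than the skb address. A small userspace sketch of that bookkeeping pattern (names here are illustrative, not the mac80211 API):

/* Sketch: remember which buffer belongs to the off-channel request so the
 * completion path can report the matching cookie instead of a generic one. */
#include <stdint.h>
#include <stdio.h>

struct buf { int id; };

static struct buf *hw_roc_buf;          /* buffer queued for remain-on-channel */
static uint64_t hw_roc_cookie = 0x1234;

static void tx_submit_offchannel(struct buf *b)
{
    hw_roc_buf = b;                     /* tag it for the status path */
}

static void tx_status(struct buf *b)
{
    uint64_t cookie = (uintptr_t)b;     /* default: the buffer's own address */

    if (b == hw_roc_buf) {              /* off-channel frame: report the cookie
                                         * userspace was originally handed */
        cookie = hw_roc_cookie ^ 2;
        hw_roc_buf = NULL;
    }
    printf("tx status for cookie %#llx\n", (unsigned long long)cookie);
}

int main(void)
{
    struct buf frame = { .id = 1 };

    tx_submit_offchannel(&frame);
    tx_status(&frame);
    return 0;
}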
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b64b42bc774b..b0beaa58246b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1547,7 +1547,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1547 skb_orphan(skb); 1547 skb_orphan(skb);
1548 } 1548 }
1549 1549
1550 if (skb_header_cloned(skb)) 1550 if (skb_cloned(skb))
1551 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1551 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1552 else if (head_need || tail_need) 1552 else if (head_need || tail_need)
1553 I802_DEBUG_INC(local->tx_expand_skb_head); 1553 I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700abffa..d036597aabbe 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1210 switch (sdata->vif.type) { 1210 switch (sdata->vif.type) {
1211 case NL80211_IFTYPE_STATION: 1211 case NL80211_IFTYPE_STATION:
1212 changed |= BSS_CHANGED_ASSOC; 1212 changed |= BSS_CHANGED_ASSOC;
1213 mutex_lock(&sdata->u.mgd.mtx);
1213 ieee80211_bss_info_change_notify(sdata, changed); 1214 ieee80211_bss_info_change_notify(sdata, changed);
1215 mutex_unlock(&sdata->u.mgd.mtx);
1214 break; 1216 break;
1215 case NL80211_IFTYPE_ADHOC: 1217 case NL80211_IFTYPE_ADHOC:
1216 changed |= BSS_CHANGED_IBSS; 1218 changed |= BSS_CHANGED_IBSS;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe290c04..4aa614b8a96a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head,
133 133
134 /* Optimization: we don't need to hold module 134 /* Optimization: we don't need to hold module
135 reference here, since function can't sleep. --RR */ 135 reference here, since function can't sleep. --RR */
136repeat:
136 verdict = elem->hook(hook, skb, indev, outdev, okfn); 137 verdict = elem->hook(hook, skb, indev, outdev, okfn);
137 if (verdict != NF_ACCEPT) { 138 if (verdict != NF_ACCEPT) {
138#ifdef CONFIG_NETFILTER_DEBUG 139#ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
145#endif 146#endif
146 if (verdict != NF_REPEAT) 147 if (verdict != NF_REPEAT)
147 return verdict; 148 return verdict;
148 *i = (*i)->prev; 149 goto repeat;
149 } 150 }
150 } 151 }
151 return NF_ACCEPT; 152 return NF_ACCEPT;
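The nf_iterate() hunk replaces the old trick of stepping the hook iterator backwards on NF_REPEAT with a goto back to the invocation, so the same hook is simply re-run. A minimal userspace sketch of that control flow, assuming a simplified handler table rather than the real netfilter hook list:

/* Sketch: re-run the same handler on a REPEAT verdict instead of stepping
 * an iterator backwards. Simplified stand-in for nf_iterate(). */
#include <stdio.h>

enum verdict { ACCEPT, DROP, REPEAT };

typedef enum verdict (*hook_fn)(int *pkt);

static enum verdict mangle_once(int *pkt)
{
    if (*pkt % 2) {         /* ask to be re-run until the value is even */
        (*pkt)++;
        return REPEAT;
    }
    return ACCEPT;
}

static enum verdict run_hooks(hook_fn *hooks, int n, int *pkt)
{
    for (int i = 0; i < n; i++) {
repeat:
        switch (hooks[i](pkt)) {
        case ACCEPT:
            break;          /* fall through to the next hook */
        case REPEAT:
            goto repeat;    /* re-invoke the same hook */
        default:
            return DROP;    /* any other verdict ends the traversal */
        }
    }
    return ACCEPT;
}

int main(void)
{
    hook_fn hooks[] = { mangle_once };
    int pkt = 3;

    printf("verdict=%d pkt=%d\n", run_hooks(hooks, 1, &pkt), pkt);
    return 0;
}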
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..ba98e1308f3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
808 dest->u_threshold = udest->u_threshold; 808 dest->u_threshold = udest->u_threshold;
809 dest->l_threshold = udest->l_threshold; 809 dest->l_threshold = udest->l_threshold;
810 810
811 spin_lock(&dest->dst_lock); 811 spin_lock_bh(&dest->dst_lock);
812 ip_vs_dst_reset(dest); 812 ip_vs_dst_reset(dest);
813 spin_unlock(&dest->dst_lock); 813 spin_unlock_bh(&dest->dst_lock);
814 814
815 if (add) 815 if (add)
816 ip_vs_new_estimator(&dest->stats); 816 ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e61511929c66..84f4fcc5884b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -942,8 +942,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
942 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 942 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
943 nf_conntrack_event_cache(IPCT_REPLY, ct); 943 nf_conntrack_event_cache(IPCT_REPLY, ct);
944out: 944out:
945 if (tmpl) 945 if (tmpl) {
946 nf_ct_put(tmpl); 946 /* Special case: we have to repeat this hook, assign the
947 * template again to this packet. We assume that this packet
948 * has no conntrack assigned. This is used by nf_ct_tcp. */
949 if (ret == NF_REPEAT)
950 skb->nfct = (struct nf_conntrack *)tmpl;
951 else
952 nf_ct_put(tmpl);
953 }
947 954
948 return ret; 955 return ret;
949} 956}
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..91816998ed86 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
85 85
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
87{ 87{
88 if (pf >= ARRAY_SIZE(nf_loggers))
89 return -EINVAL;
88 mutex_lock(&nf_log_mutex); 90 mutex_lock(&nf_log_mutex);
89 if (__find_logger(pf, logger->name) == NULL) { 91 if (__find_logger(pf, logger->name) == NULL) {
90 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
98 100
99void nf_log_unbind_pf(u_int8_t pf) 101void nf_log_unbind_pf(u_int8_t pf)
100{ 102{
103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return;
101 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
102 rcu_assign_pointer(nf_loggers[pf], NULL); 106 rcu_assign_pointer(nf_loggers[pf], NULL);
103 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
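nf_log_bind_pf() and nf_log_unbind_pf() now validate the protocol-family index against the size of nf_loggers before using it as an array subscript. A small sketch of the guard, assuming a fixed-size table indexed by a caller-supplied family number:

/* Sketch: reject out-of-range indices before touching a fixed-size table. */
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *loggers[13];     /* stand-in for the per-family logger table */

static int bind_logger(unsigned int pf, const char *name)
{
    if (pf >= ARRAY_SIZE(loggers))
        return -EINVAL;             /* would otherwise index past the array */
    loggers[pf] = name;
    return 0;
}

int main(void)
{
    printf("%d\n", bind_logger(2, "ipt_LOG"));  /* 0 */
    printf("%d\n", bind_logger(99, "bogus"));   /* -EINVAL */
    return 0;
}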
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87befb04c0..474d621cbc2e 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
28 skb->destructor = NULL; 28 skb->destructor = NULL;
29 29
30 if (sk) 30 if (sk)
31 nf_tproxy_put_sock(sk); 31 sock_put(sk);
32} 32}
33 33
34/* consumes sk */ 34/* consumes sk */
35int 35void
36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
37{ 37{
38 bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 38 /* assigning tw sockets complicates things; most
39 inet_twsk(sk)->tw_transparent : 39 * skb->sk->X checks would have to test sk->sk_state first */
40 inet_sk(sk)->transparent; 40 if (sk->sk_state == TCP_TIME_WAIT) {
41 41 inet_twsk_put(inet_twsk(sk));
42 if (transparent) { 42 return;
43 skb_orphan(skb); 43 }
44 skb->sk = sk; 44
45 skb->destructor = nf_tproxy_destructor; 45 skb_orphan(skb);
46 return 1; 46 skb->sk = sk;
47 } else 47 skb->destructor = nf_tproxy_destructor;
48 nf_tproxy_put_sock(sk);
49
50 return 0;
51} 48}
52EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); 49EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
53 50
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f47a2a..dcfd57eb9d02 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
33#include <net/netfilter/nf_tproxy_core.h> 33#include <net/netfilter/nf_tproxy_core.h>
34#include <linux/netfilter/xt_TPROXY.h> 34#include <linux/netfilter/xt_TPROXY.h>
35 35
36static bool tproxy_sk_is_transparent(struct sock *sk)
37{
38 if (sk->sk_state != TCP_TIME_WAIT) {
39 if (inet_sk(sk)->transparent)
40 return true;
41 sock_put(sk);
42 } else {
43 if (inet_twsk(sk)->tw_transparent)
44 return true;
45 inet_twsk_put(inet_twsk(sk));
46 }
47 return false;
48}
49
36static inline __be32 50static inline __be32
37tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) 51tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
38{ 52{
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
141 skb->dev, NFT_LOOKUP_LISTENER); 155 skb->dev, NFT_LOOKUP_LISTENER);
142 156
143 /* NOTE: assign_sock consumes our sk reference */ 157 /* NOTE: assign_sock consumes our sk reference */
144 if (sk && nf_tproxy_assign_sock(skb, sk)) { 158 if (sk && tproxy_sk_is_transparent(sk)) {
145 /* This should be in a separate target, but we don't do multiple 159 /* This should be in a separate target, but we don't do multiple
146 targets on the same rule yet */ 160 targets on the same rule yet */
147 skb->mark = (skb->mark & ~mark_mask) ^ mark_value; 161 skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
149 pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", 163 pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
150 iph->protocol, &iph->daddr, ntohs(hp->dest), 164 iph->protocol, &iph->daddr, ntohs(hp->dest),
151 &laddr, ntohs(lport), skb->mark); 165 &laddr, ntohs(lport), skb->mark);
166
167 nf_tproxy_assign_sock(skb, sk);
152 return NF_ACCEPT; 168 return NF_ACCEPT;
153 } 169 }
154 170
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
306 par->in, NFT_LOOKUP_LISTENER); 322 par->in, NFT_LOOKUP_LISTENER);
307 323
308 /* NOTE: assign_sock consumes our sk reference */ 324 /* NOTE: assign_sock consumes our sk reference */
309 if (sk && nf_tproxy_assign_sock(skb, sk)) { 325 if (sk && tproxy_sk_is_transparent(sk)) {
310 /* This should be in a separate target, but we don't do multiple 326 /* This should be in a separate target, but we don't do multiple
311 targets on the same rule yet */ 327 targets on the same rule yet */
312 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; 328 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
314 pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", 330 pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
315 tproto, &iph->saddr, ntohs(hp->source), 331 tproto, &iph->saddr, ntohs(hp->source),
316 laddr, ntohs(lport), skb->mark); 332 laddr, ntohs(lport), skb->mark);
333
334 nf_tproxy_assign_sock(skb, sk);
317 return NF_ACCEPT; 335 return NF_ACCEPT;
318 } 336 }
319 337
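The TPROXY target now decides transparency in tproxy_sk_is_transparent() and, when the looked-up socket is not usable, drops the reference with the routine that matches the socket's state (a TIME_WAIT minisocket is put differently from a full socket). A userspace sketch of the underlying pattern, releasing an object through the destructor that matches its variant; the names below are illustrative, not the kernel API:

/* Sketch: pick the release routine based on the object's state, mirroring
 * the split between dropping a full socket and a TIME_WAIT minisocket. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum sk_state { SK_ESTABLISHED, SK_TIME_WAIT };

struct fake_sock {
    enum sk_state state;
    bool transparent;
};

static void full_sock_put(struct fake_sock *sk)
{
    printf("releasing full socket\n");
    free(sk);
}

static void timewait_put(struct fake_sock *sk)
{
    printf("releasing time-wait minisocket\n");
    free(sk);
}

/* Returns true (and keeps the reference) only for transparent sockets. */
static bool sk_is_transparent(struct fake_sock *sk)
{
    if (sk->transparent)
        return true;
    if (sk->state == SK_TIME_WAIT)
        timewait_put(sk);
    else
        full_sock_put(sk);
    return false;
}

int main(void)
{
    struct fake_sock *sk = malloc(sizeof(*sk));

    if (!sk)
        return 1;
    sk->state = SK_TIME_WAIT;
    sk->transparent = false;
    if (!sk_is_transparent(sk))
        printf("not transparent, reference dropped\n");
    return 0;
}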
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae838303..9cc46356b577 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
35#include <net/netfilter/nf_conntrack.h> 35#include <net/netfilter/nf_conntrack.h>
36#endif 36#endif
37 37
38static void
39xt_socket_put_sk(struct sock *sk)
40{
41 if (sk->sk_state == TCP_TIME_WAIT)
42 inet_twsk_put(inet_twsk(sk));
43 else
44 sock_put(sk);
45}
46
38static int 47static int
39extract_icmp4_fields(const struct sk_buff *skb, 48extract_icmp4_fields(const struct sk_buff *skb,
40 u8 *protocol, 49 u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
164 (sk->sk_state == TCP_TIME_WAIT && 173 (sk->sk_state == TCP_TIME_WAIT &&
165 inet_twsk(sk)->tw_transparent)); 174 inet_twsk(sk)->tw_transparent));
166 175
167 nf_tproxy_put_sock(sk); 176 xt_socket_put_sk(sk);
168 177
169 if (wildcard || !transparent) 178 if (wildcard || !transparent)
170 sk = NULL; 179 sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
298 (sk->sk_state == TCP_TIME_WAIT && 307 (sk->sk_state == TCP_TIME_WAIT &&
299 inet_twsk(sk)->tw_transparent)); 308 inet_twsk(sk)->tw_transparent));
300 309
301 nf_tproxy_put_sock(sk); 310 xt_socket_put_sk(sk);
302 311
303 if (wildcard || !transparent) 312 if (wildcard || !transparent)
304 sk = NULL; 313 sk = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 int noblock = flags&MSG_DONTWAIT; 1407 int noblock = flags&MSG_DONTWAIT;
1408 size_t copied; 1408 size_t copied;
1409 struct sk_buff *skb, *data_skb; 1409 struct sk_buff *skb, *data_skb;
1410 int err; 1410 int err, ret;
1411 1411
1412 if (flags&MSG_OOB) 1412 if (flags&MSG_OOB)
1413 return -EOPNOTSUPP; 1413 return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1470 1470
1471 skb_free_datagram(sk, skb); 1471 skb_free_datagram(sk, skb);
1472 1472
1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1474 netlink_dump(sk); 1474 ret = netlink_dump(sk);
1475 if (ret) {
1476 sk->sk_err = ret;
1477 sk->sk_error_report(sk);
1478 }
1479 }
1475 1480
1476 scm_recv(sock, msg, siocb->scm, flags); 1481 scm_recv(sock, msg, siocb->scm, flags);
1477out: 1482out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1736 struct netlink_callback *cb; 1741 struct netlink_callback *cb;
1737 struct sock *sk; 1742 struct sock *sk;
1738 struct netlink_sock *nlk; 1743 struct netlink_sock *nlk;
1744 int ret;
1739 1745
1740 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1746 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1741 if (cb == NULL) 1747 if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1764 nlk->cb = cb; 1770 nlk->cb = cb;
1765 mutex_unlock(nlk->cb_mutex); 1771 mutex_unlock(nlk->cb_mutex);
1766 1772
1767 netlink_dump(sk); 1773 ret = netlink_dump(sk);
1774
1768 sock_put(sk); 1775 sock_put(sk);
1769 1776
1777 if (ret)
1778 return ret;
1779
1770 /* We successfully started a dump, by returning -EINTR we 1780 /* We successfully started a dump, by returning -EINTR we
1771 * signal not to send ACK even if it was requested. 1781 * signal not to send ACK even if it was requested.
1772 */ 1782 */
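netlink_dump() can now fail, and both call sites propagate the failure: netlink_recvmsg() surfaces it through the socket error machinery and netlink_dump_start() returns it instead of claiming the dump started. A short userspace sketch of the general idea, reporting a deferred step's error to the consumer rather than discarding it (stand-in types, not the netlink API):

/* Sketch: propagate a background step's error instead of discarding it. */
#include <errno.h>
#include <stdio.h>

struct fake_sock {
    int sk_err;                             /* pending error on the socket */
    void (*sk_error_report)(struct fake_sock *sk);
};

static void report_error(struct fake_sock *sk)
{
    fprintf(stderr, "socket error %d pending\n", sk->sk_err);
}

/* Stand-in for the dump step: may fail, e.g. for lack of buffer space. */
static int do_dump(int fail)
{
    return fail ? -ENOBUFS : 0;
}

static void recv_path(struct fake_sock *sk, int fail)
{
    int ret = do_dump(fail);

    if (ret) {                              /* previously ignored */
        sk->sk_err = -ret;
        sk->sk_error_report(sk);
    }
}

int main(void)
{
    struct fake_sock sk = { .sk_error_report = report_error };

    recv_path(&sk, 1);
    return 0;
}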
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c421bc..c47a511f203d 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
551 if (conn->c_loopback 551 if (conn->c_loopback
552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
554 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 554 scat = &rm->data.op_sg[sg];
555 ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
556 ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
557 return ret;
555 } 558 }
556 559
557 /* FIXME we may overallocate here */ 560 /* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d483b17..bca6761a3ca2 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
61 unsigned int hdr_off, unsigned int sg, 61 unsigned int hdr_off, unsigned int sg,
62 unsigned int off) 62 unsigned int off)
63{ 63{
64 struct scatterlist *sgp = &rm->data.op_sg[sg];
65 int ret = sizeof(struct rds_header) +
66 be32_to_cpu(rm->m_inc.i_hdr.h_len);
67
64 /* Do not send cong updates to loopback */ 68 /* Do not send cong updates to loopback */
65 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 69 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
66 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 70 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
67 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 71 ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
72 goto out;
68 } 73 }
69 74
70 BUG_ON(hdr_off || sg || off); 75 BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
80 NULL); 85 NULL);
81 86
82 rds_inc_put(&rm->m_inc); 87 rds_inc_put(&rm->m_inc);
83 88out:
84 return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); 89 return ret;
85} 90}
86 91
87/* 92/*
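Both this rds_loop_xmit() hunk and the rds_ib_xmit() hunk above clamp the reported congestion-map transmit length with min_t(), so the caller never advances its transmit offset past the current scatterlist entry. A tiny sketch of the clamp:

/* Sketch: never report more progress than the current buffer can hold. */
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

struct seg { int length; };

static int xmit_report(const struct seg *sg, int hdr_len, int payload,
                       int already_sent)
{
    int ret = hdr_len + payload;

    /* Clamp to what is left in this segment, since the caller advances
     * its transmit offset by the value we return. */
    return min_t(int, ret, sg->length - already_sent);
}

int main(void)
{
    struct seg sg = { .length = 4096 };

    printf("%d\n", xmit_report(&sg, 48, 8192, 0));  /* 4096, not 8240 */
    return 0;
}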
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
423 goto protocol_error; 423 goto protocol_error;
424 } 424 }
425 425
426 case RXRPC_PACKET_TYPE_ACKALL:
426 case RXRPC_PACKET_TYPE_ACK: 427 case RXRPC_PACKET_TYPE_ACK:
427 /* ACK processing is done in process context */ 428 /* ACK processing is done in process context */
428 read_lock_bh(&call->state_lock); 429 read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
89 return ret; 89 return ret;
90 90
91 plen -= sizeof(*token); 91 plen -= sizeof(*token);
92 token = kmalloc(sizeof(*token), GFP_KERNEL); 92 token = kzalloc(sizeof(*token), GFP_KERNEL);
93 if (!token) 93 if (!token)
94 return -ENOMEM; 94 return -ENOMEM;
95 95
96 token->kad = kmalloc(plen, GFP_KERNEL); 96 token->kad = kzalloc(plen, GFP_KERNEL);
97 if (!token->kad) { 97 if (!token->kad) {
98 kfree(token); 98 kfree(token);
99 return -ENOMEM; 99 return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
731 goto error; 731 goto error;
732 732
733 ret = -ENOMEM; 733 ret = -ENOMEM;
734 token = kmalloc(sizeof(*token), GFP_KERNEL); 734 token = kzalloc(sizeof(*token), GFP_KERNEL);
735 if (!token) 735 if (!token)
736 goto error; 736 goto error;
737 token->kad = kmalloc(plen, GFP_KERNEL); 737 token->kad = kzalloc(plen, GFP_KERNEL);
738 if (!token->kad) 738 if (!token->kad)
739 goto error_free; 739 goto error_free;
740 740
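The rxrpc key-parsing paths switch kmalloc() to kzalloc() so the token structures start out zeroed; fields the parser never writes cannot leak stale heap contents. A userspace analogue using calloc():

/* Sketch: zero allocations for structures that are only partially filled in,
 * so unwritten fields cannot expose stale heap data. */
#include <stdio.h>
#include <stdlib.h>

struct token {
    unsigned int kvno;
    char session_key[8];
    char *ticket;           /* may stay NULL if the blob carries no ticket */
};

int main(void)
{
    /* calloc() plays the role of kzalloc(): every byte starts at zero. */
    struct token *tok = calloc(1, sizeof(*tok));

    if (!tok)
        return 1;
    tok->kvno = 2;          /* the parser fills in only some fields */
    printf("ticket=%p key[0]=%d\n", (void *)tok->ticket, tok->session_key[0]);
    free(tok);
    return 0;
}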
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..1bc698039ae2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev)
839 839
840 list_add(&dev->unreg_list, &single); 840 list_add(&dev->unreg_list, &single);
841 dev_deactivate_many(&single); 841 dev_deactivate_many(&single);
842 list_del(&single);
842} 843}
843 844
844static void dev_init_scheduler_queue(struct net_device *dev, 845static void dev_init_scheduler_queue(struct net_device *dev,
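dev_deactivate() chains the device onto a stack-local list head and now unlinks it again with list_del(&single) before that head goes out of scope; otherwise dev->unreg_list would keep pointing into a dead stack frame. The iface.c hunk above applies the same fix after unregister_netdevice_many(). A minimal intrusive-list sketch of why the unlink matters (simplified list primitives, not the kernel's):

/* Sketch: unlink a stack-local list head before it goes out of scope, so the
 * entries threaded onto it do not keep pointers into dead stack memory. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

static void list_del(struct list_head *entry)
{
    entry->next->prev = entry->prev;
    entry->prev->next = entry->next;
    entry->next = entry->prev = entry;  /* simplified: leave it self-linked */
}

struct device { struct list_head unreg_list; };

static void deactivate(struct device *dev)
{
    struct list_head single = LIST_HEAD_INIT(single);

    list_add(&dev->unreg_list, &single);
    /* ... dev_deactivate_many(&single) would walk the list here ... */
    list_del(&single);  /* unlink the on-stack head: without this,
                         * dev->unreg_list still points at 'single',
                         * which is about to go out of scope */
}

int main(void)
{
    struct device dev;

    deactivate(&dev);
    printf("unreg_list no longer references the stack: %d\n",
           dev.unreg_list.next == &dev.unreg_list);
    return 0;
}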
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0962ca..b23428f3c0dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
2029 *errp = sctp_make_op_error_fixed(asoc, chunk); 2029 *errp = sctp_make_op_error_fixed(asoc, chunk);
2030 2030
2031 if (*errp) { 2031 if (*errp) {
2032 sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, 2032 if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
2033 WORD_ROUND(ntohs(param.p->length))); 2033 WORD_ROUND(ntohs(param.p->length))))
2034 sctp_addto_chunk_fixed(*errp, 2034 sctp_addto_chunk_fixed(*errp,
2035 WORD_ROUND(ntohs(param.p->length)), 2035 WORD_ROUND(ntohs(param.p->length)),
2036 param.v); 2036 param.v);
2037 } else { 2037 } else {
2038 /* If there is no memory for generating the ERROR 2038 /* If there is no memory for generating the ERROR
2039 * report as specified, an ABORT will be triggered 2039 * report as specified, an ABORT will be triggered
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..59e599498e37 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
252 252
253/* 253/*
254 * Mark an RPC call as having completed by clearing the 'active' bit 254 * Mark an RPC call as having completed by clearing the 'active' bit
255 * and then waking up all tasks that were sleeping.
255 */ 256 */
256static void rpc_mark_complete_task(struct rpc_task *task) 257static int rpc_complete_task(struct rpc_task *task)
257{ 258{
258 smp_mb__before_clear_bit(); 259 void *m = &task->tk_runstate;
260 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
261 struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
262 unsigned long flags;
263 int ret;
264
265 spin_lock_irqsave(&wq->lock, flags);
259 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 266 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
260 smp_mb__after_clear_bit(); 267 ret = atomic_dec_and_test(&task->tk_count);
261 wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); 268 if (waitqueue_active(wq))
269 __wake_up_locked_key(wq, TASK_NORMAL, &k);
270 spin_unlock_irqrestore(&wq->lock, flags);
271 return ret;
262} 272}
263 273
264/* 274/*
265 * Allow callers to wait for completion of an RPC call 275 * Allow callers to wait for completion of an RPC call
276 *
277 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
278 * to enforce taking of the wq->lock and hence avoid races with
279 * rpc_complete_task().
266 */ 280 */
267int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) 281int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
268{ 282{
269 if (action == NULL) 283 if (action == NULL)
270 action = rpc_wait_bit_killable; 284 action = rpc_wait_bit_killable;
271 return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, 285 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
272 action, TASK_KILLABLE); 286 action, TASK_KILLABLE);
273} 287}
274EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); 288EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
857 rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); 871 rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
858} 872}
859 873
860void rpc_put_task(struct rpc_task *task) 874static void rpc_release_resources_task(struct rpc_task *task)
861{ 875{
862 if (!atomic_dec_and_test(&task->tk_count))
863 return;
864 /* Release resources */
865 if (task->tk_rqstp) 876 if (task->tk_rqstp)
866 xprt_release(task); 877 xprt_release(task);
867 if (task->tk_msg.rpc_cred) 878 if (task->tk_msg.rpc_cred)
868 put_rpccred(task->tk_msg.rpc_cred); 879 put_rpccred(task->tk_msg.rpc_cred);
869 rpc_task_release_client(task); 880 rpc_task_release_client(task);
870 if (task->tk_workqueue != NULL) { 881}
882
883static void rpc_final_put_task(struct rpc_task *task,
884 struct workqueue_struct *q)
885{
886 if (q != NULL) {
871 INIT_WORK(&task->u.tk_work, rpc_async_release); 887 INIT_WORK(&task->u.tk_work, rpc_async_release);
872 queue_work(task->tk_workqueue, &task->u.tk_work); 888 queue_work(q, &task->u.tk_work);
873 } else 889 } else
874 rpc_free_task(task); 890 rpc_free_task(task);
875} 891}
892
893static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
894{
895 if (atomic_dec_and_test(&task->tk_count)) {
896 rpc_release_resources_task(task);
897 rpc_final_put_task(task, q);
898 }
899}
900
901void rpc_put_task(struct rpc_task *task)
902{
903 rpc_do_put_task(task, NULL);
904}
876EXPORT_SYMBOL_GPL(rpc_put_task); 905EXPORT_SYMBOL_GPL(rpc_put_task);
877 906
907void rpc_put_task_async(struct rpc_task *task)
908{
909 rpc_do_put_task(task, task->tk_workqueue);
910}
911EXPORT_SYMBOL_GPL(rpc_put_task_async);
912
878static void rpc_release_task(struct rpc_task *task) 913static void rpc_release_task(struct rpc_task *task)
879{ 914{
880 dprintk("RPC: %5u release task\n", task->tk_pid); 915 dprintk("RPC: %5u release task\n", task->tk_pid);
881 916
882 BUG_ON (RPC_IS_QUEUED(task)); 917 BUG_ON (RPC_IS_QUEUED(task));
883 918
884 /* Wake up anyone who is waiting for task completion */ 919 rpc_release_resources_task(task);
885 rpc_mark_complete_task(task);
886 920
887 rpc_put_task(task); 921 /*
922 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
923 * so it should be safe to use task->tk_count as a test for whether
924 * or not any other processes still hold references to our rpc_task.
925 */
926 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
927 /* Wake up anyone who may be waiting for task completion */
928 if (!rpc_complete_task(task))
929 return;
930 } else {
931 if (!atomic_dec_and_test(&task->tk_count))
932 return;
933 }
934 rpc_final_put_task(task, task->tk_workqueue);
888} 935}
889 936
890int rpciod_up(void) 937int rpciod_up(void)
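The sunrpc/sched.c rework closes a wakeup race: rpc_complete_task() clears RPC_TASK_ACTIVE and wakes waiters while holding the bit-waitqueue lock, and the waiter goes through out_of_line_wait_on_bit() so it checks the bit under that same lock. A pthread sketch of the idea, clearing the flag and signalling under one lock while the waiter re-checks under it:

/* Sketch: complete-and-wake under the same lock the waiter uses, so the
 * wakeup cannot be lost between the waiter's check and its sleep. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int task_active = 1;

static void *complete_task(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&lock);
    task_active = 0;                        /* clear the "active" flag ... */
    pthread_cond_broadcast(&done_cv);       /* ... and wake waiters atomically
                                             * with respect to their check */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, complete_task, NULL);

    pthread_mutex_lock(&lock);
    while (task_active)                     /* re-check under the lock */
        pthread_cond_wait(&done_cv, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    printf("task completed\n");
    return 0;
}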
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9df1eadc912a..1a10dcd999ea 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1335 p, 0, length, DMA_FROM_DEVICE); 1335 p, 0, length, DMA_FROM_DEVICE);
1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { 1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1337 put_page(p); 1337 put_page(p);
1338 svc_rdma_put_context(ctxt, 1);
1338 return; 1339 return;
1339 } 1340 }
1340 atomic_inc(&xprt->sc_dma_used); 1341 atomic_inc(&xprt->sc_dma_used);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c431f5a57960..be96d429b475 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1631 } 1631 }
1632 xs_reclassify_socket(family, sock); 1632 xs_reclassify_socket(family, sock);
1633 1633
1634 if (xs_bind(transport, sock)) { 1634 err = xs_bind(transport, sock);
1635 if (err) {
1635 sock_release(sock); 1636 sock_release(sock);
1636 goto out; 1637 goto out;
1637 } 1638 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..437a99e560e1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1724 1724
1725 msg->msg_namelen = 0; 1725 msg->msg_namelen = 0;
1726 1726
1727 mutex_lock(&u->readlock); 1727 err = mutex_lock_interruptible(&u->readlock);
1728 if (err) {
1729 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1730 goto out;
1731 }
1728 1732
1729 skb = skb_recv_datagram(sk, flags, noblock, &err); 1733 skb = skb_recv_datagram(sk, flags, noblock, &err);
1730 if (!skb) { 1734 if (!skb) {
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1864 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1868 memset(&tmp_scm, 0, sizeof(tmp_scm));
1865 } 1869 }
1866 1870
1867 mutex_lock(&u->readlock); 1871 err = mutex_lock_interruptible(&u->readlock);
1872 if (err) {
1873 err = sock_intr_errno(timeo);
1874 goto out;
1875 }
1868 1876
1869 do { 1877 do {
1870 int chunk; 1878 int chunk;
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1895 1903
1896 timeo = unix_stream_data_wait(sk, timeo); 1904 timeo = unix_stream_data_wait(sk, timeo);
1897 1905
1898 if (signal_pending(current)) { 1906 if (signal_pending(current)
1907 || mutex_lock_interruptible(&u->readlock)) {
1899 err = sock_intr_errno(timeo); 1908 err = sock_intr_errno(timeo);
1900 goto out; 1909 goto out;
1901 } 1910 }
1902 mutex_lock(&u->readlock); 1911
1903 continue; 1912 continue;
1904 unlock: 1913 unlock:
1905 unix_state_unlock(sk); 1914 unix_state_unlock(sk);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4e4cd5..d112f038edf0 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
802 return freq; 802 return freq;
803 if (freq == 0) 803 if (freq == 0)
804 return -EINVAL; 804 return -EINVAL;
805 wdev_lock(wdev);
806 mutex_lock(&rdev->devlist_mtx); 805 mutex_lock(&rdev->devlist_mtx);
806 wdev_lock(wdev);
807 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 807 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
808 mutex_unlock(&rdev->devlist_mtx);
809 wdev_unlock(wdev); 808 wdev_unlock(wdev);
809 mutex_unlock(&rdev->devlist_mtx);
810 return err; 810 return err;
811 default: 811 default:
812 return -EOPNOTSUPP; 812 return -EOPNOTSUPP;
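cfg80211_wext_siwfreq() now takes rdev->devlist_mtx before wdev_lock() and releases them in reverse, so this path uses the same acquisition order as the rest of cfg80211; two paths taking the same pair of locks in opposite orders is the classic AB-BA deadlock. A pthread sketch of keeping one global order (strictly, only the acquisition order matters for deadlock avoidance; releasing in reverse is convention):

/* Sketch: always acquire devlist-then-wdev on every path that needs both. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlist_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wdev_mtx = PTHREAD_MUTEX_INITIALIZER;

static void set_freq(int freq)
{
    pthread_mutex_lock(&devlist_mtx);   /* outer lock first ... */
    pthread_mutex_lock(&wdev_mtx);      /* ... then the per-device lock */
    printf("tuning to %d MHz\n", freq);
    pthread_mutex_unlock(&wdev_mtx);    /* release in reverse order */
    pthread_mutex_unlock(&devlist_mtx);
}

int main(void)
{
    set_freq(2437);
    return 0;
}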
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8f6420..406207515b5e 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
27#include <net/sock.h> 27#include <net/sock.h>
28#include <net/x25.h> 28#include <net/x25.h>
29 29
30/* 30/**
31 * Parse a set of facilities into the facilities structures. Unrecognised 31 * x25_parse_facilities - Parse facilities from skb into the facilities structs
32 * facilities are written to the debug log file. 32 *
33 * @skb: sk_buff to parse
34 * @facilities: Regular facilities, updated as facilities are found
35 * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
36 * @vc_fac_mask: mask is updated with all facilities found
37 *
38 * Return codes:
39 * -1 - Parsing error, caller should drop call and clean up
40 * 0 - Parse OK, this skb has no facilities
41 * >0 - Parse OK, returns the length of the facilities header
42 *
33 */ 43 */
34int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, 44int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 45 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
62 switch (*p & X25_FAC_CLASS_MASK) { 72 switch (*p & X25_FAC_CLASS_MASK) {
63 case X25_FAC_CLASS_A: 73 case X25_FAC_CLASS_A:
64 if (len < 2) 74 if (len < 2)
65 return 0; 75 return -1;
66 switch (*p) { 76 switch (*p) {
67 case X25_FAC_REVERSE: 77 case X25_FAC_REVERSE:
68 if((p[1] & 0x81) == 0x81) { 78 if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
107 break; 117 break;
108 case X25_FAC_CLASS_B: 118 case X25_FAC_CLASS_B:
109 if (len < 3) 119 if (len < 3)
110 return 0; 120 return -1;
111 switch (*p) { 121 switch (*p) {
112 case X25_FAC_PACKET_SIZE: 122 case X25_FAC_PACKET_SIZE:
113 facilities->pacsize_in = p[1]; 123 facilities->pacsize_in = p[1];
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
130 break; 140 break;
131 case X25_FAC_CLASS_C: 141 case X25_FAC_CLASS_C:
132 if (len < 4) 142 if (len < 4)
133 return 0; 143 return -1;
134 printk(KERN_DEBUG "X.25: unknown facility %02X, " 144 printk(KERN_DEBUG "X.25: unknown facility %02X, "
135 "values %02X, %02X, %02X\n", 145 "values %02X, %02X, %02X\n",
136 p[0], p[1], p[2], p[3]); 146 p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
139 break; 149 break;
140 case X25_FAC_CLASS_D: 150 case X25_FAC_CLASS_D:
141 if (len < p[1] + 2) 151 if (len < p[1] + 2)
142 return 0; 152 return -1;
143 switch (*p) { 153 switch (*p) {
144 case X25_FAC_CALLING_AE: 154 case X25_FAC_CALLING_AE:
145 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) 155 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
146 return 0; 156 return -1;
147 dte_facs->calling_len = p[2]; 157 dte_facs->calling_len = p[2];
148 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); 158 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
149 *vc_fac_mask |= X25_MASK_CALLING_AE; 159 *vc_fac_mask |= X25_MASK_CALLING_AE;
150 break; 160 break;
151 case X25_FAC_CALLED_AE: 161 case X25_FAC_CALLED_AE:
152 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) 162 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
153 return 0; 163 return -1;
154 dte_facs->called_len = p[2]; 164 dte_facs->called_len = p[2];
155 memcpy(dte_facs->called_ae, &p[3], p[1] - 1); 165 memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
156 *vc_fac_mask |= X25_MASK_CALLED_AE; 166 *vc_fac_mask |= X25_MASK_CALLED_AE;
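x25_parse_facilities() now distinguishes "nothing to parse" (0) from "malformed facilities" (-1), and every truncated-length check returns the error rather than 0, as the new kernel-doc spells out. A compact sketch of a bounds-checked parser using the same three-way return convention (a simplified length-prefixed format, not the real X.25 facility classes):

/* Sketch: a bounds-checked parser that returns -1 on malformed input, 0 when
 * there is nothing to parse, and the number of bytes consumed otherwise. */
#include <stdio.h>

static int parse_facilities(const unsigned char *p, int len)
{
    int used = 0;

    if (len == 0)
        return 0;               /* nothing present */

    while (len > 0) {
        int flen;

        if (len < 2)
            return -1;          /* not even a type and length byte */
        flen = 2 + p[1];        /* type, length byte, value */
        if (flen > len)
            return -1;          /* value truncated: caller should clear
                                 * the call rather than keep parsing */
        /* ... interpret p[0] and the value bytes here ... */
        p += flen;
        len -= flen;
        used += flen;
    }
    return used;
}

int main(void)
{
    unsigned char good[] = { 0x42, 0x02, 0xaa, 0xbb };
    unsigned char bad[]  = { 0x42, 0x10, 0xaa };    /* claims more than present */

    printf("%d %d %d\n", parse_facilities(good, sizeof(good)),
           parse_facilities(bad, sizeof(bad)), parse_facilities(good, 0));
    return 0;
}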
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f022be69..15de65f04719 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
91{ 91{
92 struct x25_address source_addr, dest_addr; 92 struct x25_address source_addr, dest_addr;
93 int len; 93 int len;
94 struct x25_sock *x25 = x25_sk(sk);
94 95
95 switch (frametype) { 96 switch (frametype) {
96 case X25_CALL_ACCEPTED: { 97 case X25_CALL_ACCEPTED: {
97 struct x25_sock *x25 = x25_sk(sk);
98 98
99 x25_stop_timer(sk); 99 x25_stop_timer(sk);
100 x25->condition = 0x00; 100 x25->condition = 0x00;
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
113 &dest_addr); 113 &dest_addr);
114 if (len > 0) 114 if (len > 0)
115 skb_pull(skb, len); 115 skb_pull(skb, len);
116 else if (len < 0)
117 goto out_clear;
116 118
117 len = x25_parse_facilities(skb, &x25->facilities, 119 len = x25_parse_facilities(skb, &x25->facilities,
118 &x25->dte_facilities, 120 &x25->dte_facilities,
119 &x25->vc_facil_mask); 121 &x25->vc_facil_mask);
120 if (len > 0) 122 if (len > 0)
121 skb_pull(skb, len); 123 skb_pull(skb, len);
122 else 124 else if (len < 0)
123 return -1; 125 goto out_clear;
124 /* 126 /*
125 * Copy any Call User Data. 127 * Copy any Call User Data.
126 */ 128 */
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
144 } 146 }
145 147
146 return 0; 148 return 0;
149
150out_clear:
151 x25_write_internal(sk, X25_CLEAR_REQUEST);
152 x25->state = X25_STATE_2;
153 x25_start_t23timer(sk);
154 return 0;
147} 155}
148 156
149/* 157/*
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4cbc942f762a..21306928d47f 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -396,9 +396,12 @@ void __exit x25_link_free(void)
396 write_lock_bh(&x25_neigh_list_lock); 396 write_lock_bh(&x25_neigh_list_lock);
397 397
398 list_for_each_safe(entry, tmp, &x25_neigh_list) { 398 list_for_each_safe(entry, tmp, &x25_neigh_list) {
399 struct net_device *dev;
400
399 nb = list_entry(entry, struct x25_neigh, node); 401 nb = list_entry(entry, struct x25_neigh, node);
402 dev = nb->dev;
400 __x25_remove_neigh(nb); 403 __x25_remove_neigh(nb);
401 dev_put(nb->dev); 404 dev_put(dev);
402 } 405 }
403 write_unlock_bh(&x25_neigh_list_lock); 406 write_unlock_bh(&x25_neigh_list_lock);
404} 407}
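x25_link_free() copies nb->dev into a local before __x25_remove_neigh(); if removing the neighbour frees it, reading nb->dev afterwards for dev_put() is a use-after-free. A tiny sketch of the save-then-release pattern:

/* Sketch: grab the field you still need before releasing the object that
 * owns it, instead of reading through a possibly-freed pointer afterwards. */
#include <stdio.h>
#include <stdlib.h>

struct net_device { int refcnt; };
struct neigh { struct net_device *dev; };

static void dev_put(struct net_device *dev)
{
    dev->refcnt--;
}

static void remove_neigh(struct neigh *nb)
{
    free(nb);               /* after this, nb->dev must not be touched */
}

int main(void)
{
    struct net_device eth0 = { .refcnt = 1 };
    struct neigh *nb = malloc(sizeof(*nb));
    struct net_device *dev;

    if (!nb)
        return 1;
    nb->dev = &eth0;

    dev = nb->dev;          /* save the pointer first ... */
    remove_neigh(nb);       /* ... then tear the neighbour down ... */
    dev_put(dev);           /* ... and drop the device reference safely */

    printf("refcnt=%d\n", eth0.refcnt);
    return 0;
}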
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef404c794..6459588befc3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1340 default: 1340 default:
1341 BUG(); 1341 BUG();
1342 } 1342 }
1343 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); 1343 xdst = dst_alloc(dst_ops);
1344 xfrm_policy_put_afinfo(afinfo); 1344 xfrm_policy_put_afinfo(afinfo);
1345 1345
1346 xdst->flo.ops = &xfrm_bundle_fc_ops; 1346 if (likely(xdst))
1347 xdst->flo.ops = &xfrm_bundle_fc_ops;
1348 else
1349 xdst = ERR_PTR(-ENOBUFS);
1347 1350
1348 return xdst; 1351 return xdst;
1349} 1352}
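xfrm_alloc_dst() no longer dereferences the dst_alloc() result unconditionally: the flow-cache ops pointer is set only on success, and a NULL result becomes ERR_PTR(-ENOBUFS) for the caller. A short sketch of checking an allocation before touching it, using an errno-style return as the userspace counterpart of ERR_PTR():

/* Sketch: only initialize the object if the allocation succeeded, and hand
 * the caller a distinguishable error indication otherwise. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct bundle {
    const char *ops;        /* stand-in for the bundle's flow-cache ops */
};

/* Returns a bundle on success or NULL with errno set. */
static struct bundle *alloc_bundle(int simulate_failure)
{
    struct bundle *b = simulate_failure ? NULL : malloc(sizeof(*b));

    if (b)                  /* touch the object only if it exists */
        b->ops = "bundle-flow-ops";
    else
        errno = ENOBUFS;
    return b;
}

int main(void)
{
    struct bundle *b = alloc_bundle(1);

    if (!b)
        printf("allocation failed: %d\n", errno);
    else
        free(b);
    return 0;
}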