Diffstat (limited to 'net')
-rw-r--r--  net/Makefile | 4
-rw-r--r--  net/batman-adv/unicast.c | 15
-rw-r--r--  net/batman-adv/vis.c | 14
-rw-r--r--  net/bluetooth/hci_conn.c | 16
-rw-r--r--  net/bluetooth/hci_core.c | 4
-rw-r--r--  net/bluetooth/hci_event.c | 9
-rw-r--r--  net/bluetooth/l2cap.c | 85
-rw-r--r--  net/bluetooth/rfcomm/core.c | 3
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 2
-rw-r--r--  net/bridge/Kconfig | 1
-rw-r--r--  net/bridge/br_fdb.c | 4
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 42
-rw-r--r--  net/bridge/br_private.h | 3
-rw-r--r--  net/caif/chnl_net.c | 4
-rw-r--r--  net/ceph/messenger.c | 133
-rw-r--r--  net/ceph/pagevec.c | 18
-rw-r--r--  net/core/dev.c | 56
-rw-r--r--  net/core/dev_addr_lists.c | 2
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/pktgen.c | 2
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/core/skbuff.c | 9
-rw-r--r--  net/dcb/dcbnl.c | 24
-rw-r--r--  net/dccp/input.c | 7
-rw-r--r--  net/dns_resolver/dns_key.c | 20
-rw-r--r--  net/dsa/dsa.c | 2
-rw-r--r--  net/econet/af_econet.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 16
-rw-r--r--  net/ipv4/arp.c | 11
-rw-r--r--  net/ipv4/devinet.c | 32
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 2
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ipip.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 76
-rw-r--r--  net/ipv4/netfilter/arpt_mangle.c | 6
-rw-r--r--  net/ipv4/raw.c | 19
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 81
-rw-r--r--  net/ipv6/ip6_tunnel.c | 1
-rw-r--r--  net/ipv6/ip6mr.c | 75
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 2
-rw-r--r--  net/ipv6/raw.c | 19
-rw-r--r--  net/ipv6/route.c | 37
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 9
-rw-r--r--  net/ipv6/xfrm6_policy.c | 6
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 1
-rw-r--r--  net/mac80211/mlme.c | 6
-rw-r--r--  net/mac80211/status.c | 7
-rw-r--r--  net/mac80211/tx.c | 5
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/netfilter/core.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 1
-rw-r--r--  net/netfilter/nf_log.c | 4
-rw-r--r--  net/netfilter/nf_tproxy_core.c | 27
-rw-r--r--  net/netfilter/xt_TPROXY.c | 22
-rw-r--r--  net/netfilter/xt_iprange.c | 16
-rw-r--r--  net/netfilter/xt_socket.c | 13
-rw-r--r--  net/netlink/af_netlink.c | 18
-rw-r--r--  net/rds/ib_send.c | 5
-rw-r--r--  net/rds/loop.c | 11
-rw-r--r--  net/rxrpc/ar-input.c | 1
-rw-r--r--  net/rxrpc/ar-key.c | 8
-rw-r--r--  net/sched/sch_cbq.c | 3
-rw-r--r--  net/sched/sch_drr.c | 2
-rw-r--r--  net/sched/sch_dsmark.c | 2
-rw-r--r--  net/sched/sch_fifo.c | 5
-rw-r--r--  net/sched/sch_generic.c | 1
-rw-r--r--  net/sched/sch_hfsc.c | 2
-rw-r--r--  net/sched/sch_htb.c | 12
-rw-r--r--  net/sched/sch_multiq.c | 2
-rw-r--r--  net/sched/sch_netem.c | 3
-rw-r--r--  net/sched/sch_prio.c | 2
-rw-r--r--  net/sched/sch_red.c | 11
-rw-r--r--  net/sched/sch_sfq.c | 5
-rw-r--r--  net/sched/sch_tbf.c | 2
-rw-r--r--  net/sched/sch_teql.c | 3
-rw-r--r--  net/sctp/sm_make_chunk.c | 10
-rw-r--r--  net/sunrpc/sched.c | 75
-rw-r--r--  net/sunrpc/svcsock.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 1
-rw-r--r--  net/sunrpc/xprtsock.c | 3
-rw-r--r--  net/unix/af_unix.c | 19
-rw-r--r--  net/unix/garbage.c | 2
-rw-r--r--  net/wireless/wext-compat.c | 4
-rw-r--r--  net/x25/x25_facilities.c | 28
-rw-r--r--  net/x25/x25_in.c | 14
-rw-r--r--  net/x25/x25_link.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 7
100 files changed, 893 insertions(+), 419 deletions(-)
diff --git a/net/Makefile b/net/Makefile
index a3330ebe2c53..a51d9465e628 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
 obj-$(CONFIG_INET)	+= ipv4/
 obj-$(CONFIG_XFRM)	+= xfrm/
 obj-$(CONFIG_UNIX)	+= unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y			+= ipv6/
-endif
+obj-$(CONFIG_NET)	+= ipv6/
 obj-$(CONFIG_PACKET)	+= packet/
 obj-$(CONFIG_NET_KEY)	+= key/
 obj-$(CONFIG_BRIDGE)	+= bridge/
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index ee41fef04b21..d1a611322549 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
 		skb = tfp->skb;
 	}
 
+	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+		goto err;
+
 	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-		/* free buffered skb, skb will be freed later */
-		kfree_skb(tfp->skb);
-		return NULL;
-	}
+	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+		goto err;
 
 	/* move free entry to end */
 	tfp->skb = NULL;
@@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
 	unicast_packet->packet_type = BAT_UNICAST;
 
 	return skb;
+
+err:
+	/* free buffered skb, skb will be freed later */
+	kfree_skb(tfp->skb);
+	return NULL;
 }
 
 static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cd4c4231fa48..de1022cacaf7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -64,6 +64,7 @@ static void free_info(struct kref *ref)
 
 	spin_unlock_bh(&bat_priv->vis_list_lock);
 	kfree_skb(info->skb_packet);
+	kfree(info);
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
@@ -268,10 +269,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
 			buff_pos += sprintf(buff + buff_pos, "%pM,",
 					    entry->addr);
 
-			for (i = 0; i < packet->entries; i++)
+			for (j = 0; j < packet->entries; j++)
 				buff_pos += vis_data_read_entry(
 						buff + buff_pos,
-						&entries[i],
+						&entries[j],
 						entry->addr,
 						entry->primary);
 
@@ -444,7 +445,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 			     info);
 	if (hash_added < 0) {
 		/* did not work (for some reason) */
-		kref_put(&old_info->refcount, free_info);
+		kref_put(&info->refcount, free_info);
 		info = NULL;
 	}
 
@@ -815,7 +816,7 @@ static void send_vis_packets(struct work_struct *work)
 		container_of(work, struct delayed_work, work);
 	struct bat_priv *bat_priv =
 		container_of(delayed_work, struct bat_priv, vis_work);
-	struct vis_info *info, *temp;
+	struct vis_info *info;
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	purge_vis_packets(bat_priv);
@@ -825,8 +826,9 @@ static void send_vis_packets(struct work_struct *work)
 		send_list_add(bat_priv, bat_priv->my_vis_info);
 	}
 
-	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
-				 send_list) {
+	while (!list_empty(&bat_priv->vis_send_list)) {
+		info = list_first_entry(&bat_priv->vis_send_list,
+					typeof(*info), send_list);
 
 		kref_get(&info->refcount);
 		spin_unlock_bh(&bat_priv->vis_hash_lock);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6b90a4191734..99cd8d9d891b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -379,14 +379,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 	hci_conn_hold(acl);
 
 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
-		acl->sec_level = sec_level;
+		acl->sec_level = BT_SECURITY_LOW;
+		acl->pending_sec_level = sec_level;
 		acl->auth_type = auth_type;
 		hci_acl_connect(acl);
-	} else {
-		if (acl->sec_level < sec_level)
-			acl->sec_level = sec_level;
-		if (acl->auth_type < auth_type)
-			acl->auth_type = auth_type;
 	}
 
 	if (type == ACL_LINK)
@@ -442,11 +438,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
 	BT_DBG("conn %p", conn);
 
+	if (conn->pending_sec_level > sec_level)
+		sec_level = conn->pending_sec_level;
+
 	if (sec_level > conn->sec_level)
-		conn->sec_level = sec_level;
+		conn->pending_sec_level = sec_level;
 	else if (conn->link_mode & HCI_LM_AUTH)
 		return 1;
 
+	/* Make sure we preserve an existing MITM requirement*/
+	auth_type |= (conn->auth_type & 0x01);
+
 	conn->auth_type = auth_type;
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b602d881fd7..9c4541bc488a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1011,6 +1011,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
 	destroy_workqueue(hdev->workqueue);
 
+	hci_dev_lock_bh(hdev);
+	hci_blacklist_clear(hdev);
+	hci_dev_unlock_bh(hdev);
+
 	__hci_dev_put(hdev);
 
 	return 0;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 38100170d380..a290854fdaa6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -692,13 +692,13 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
 	if (conn->state != BT_CONFIG || !conn->out)
 		return 0;
 
-	if (conn->sec_level == BT_SECURITY_SDP)
+	if (conn->pending_sec_level == BT_SECURITY_SDP)
 		return 0;
 
 	/* Only request authentication for SSP connections or non-SSP
 	 * devices with sec_level HIGH */
 	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
-				conn->sec_level != BT_SECURITY_HIGH)
+				conn->pending_sec_level != BT_SECURITY_HIGH)
 		return 0;
 
 	return 1;
@@ -1095,9 +1095,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (conn) {
-		if (!ev->status)
+		if (!ev->status) {
 			conn->link_mode |= HCI_LM_AUTH;
-		else
+			conn->sec_level = conn->pending_sec_level;
+		} else
 			conn->sec_level = BT_SECURITY_LOW;
 
 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c791fcda7b2d..675614e38e14 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -305,33 +305,44 @@ static void l2cap_chan_del(struct sock *sk, int err)
 	}
 }
 
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-	__u8 auth_type;
+	if (sk->sk_type == SOCK_RAW) {
+		switch (l2cap_pi(sk)->sec_level) {
+		case BT_SECURITY_HIGH:
+			return HCI_AT_DEDICATED_BONDING_MITM;
+		case BT_SECURITY_MEDIUM:
+			return HCI_AT_DEDICATED_BONDING;
+		default:
+			return HCI_AT_NO_BONDING;
+		}
+	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
 
-	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-			auth_type = HCI_AT_NO_BONDING_MITM;
+			return HCI_AT_NO_BONDING_MITM;
 		else
-			auth_type = HCI_AT_NO_BONDING;
-
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+			return HCI_AT_NO_BONDING;
 	} else {
 		switch (l2cap_pi(sk)->sec_level) {
 		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_GENERAL_BONDING_MITM;
-			break;
+			return HCI_AT_GENERAL_BONDING_MITM;
 		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_GENERAL_BONDING;
-			break;
+			return HCI_AT_GENERAL_BONDING;
 		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
+			return HCI_AT_NO_BONDING;
 		}
 	}
+}
+
+/* Service level security */
+static inline int l2cap_check_security(struct sock *sk)
+{
+	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	__u8 auth_type;
+
+	auth_type = l2cap_get_auth_type(sk);
 
 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
 				 auth_type);
@@ -848,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
 				result = L2CAP_CR_SEC_BLOCK;
 			else
 				result = L2CAP_CR_BAD_PSM;
+			sk->sk_state = BT_DISCONN;
 
 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
@@ -1068,39 +1080,7 @@ static int l2cap_do_connect(struct sock *sk)
 
 	err = -ENOMEM;
 
-	if (sk->sk_type == SOCK_RAW) {
-		switch (l2cap_pi(sk)->sec_level) {
-		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
-			break;
-		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_DEDICATED_BONDING;
-			break;
-		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
-		}
-	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-			auth_type = HCI_AT_NO_BONDING_MITM;
-		else
-			auth_type = HCI_AT_NO_BONDING;
-
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-	} else {
-		switch (l2cap_pi(sk)->sec_level) {
-		case BT_SECURITY_HIGH:
-			auth_type = HCI_AT_GENERAL_BONDING_MITM;
-			break;
-		case BT_SECURITY_MEDIUM:
-			auth_type = HCI_AT_GENERAL_BONDING;
-			break;
-		default:
-			auth_type = HCI_AT_NO_BONDING;
-			break;
-		}
-	}
+	auth_type = l2cap_get_auth_type(sk);
 
 	hcon = hci_connect(hdev, ACL_LINK, dst,
 			   l2cap_pi(sk)->sec_level, auth_type);
@@ -1127,7 +1107,8 @@ static int l2cap_do_connect(struct sock *sk)
 		if (sk->sk_type != SOCK_SEQPACKET &&
 				sk->sk_type != SOCK_STREAM) {
 			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
+			if (l2cap_check_security(sk))
+				sk->sk_state = BT_CONNECTED;
 		} else
 			l2cap_do_start(sk);
 	}
@@ -1893,8 +1874,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 	if (pi->mode == L2CAP_MODE_STREAMING) {
 		l2cap_streaming_send(sk);
 	} else {
-		if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
-				pi->conn_state && L2CAP_CONN_WAIT_F) {
+		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+				(pi->conn_state & L2CAP_CONN_WAIT_F)) {
 			err = len;
 			break;
 		}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ff8aaa736650..6b83776534fb 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,7 +1164,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
 			 * initiator rfcomm_process_rx already calls
 			 * rfcomm_session_put() */
 			if (s->sock->sk->sk_state != BT_CLOSED)
-				rfcomm_session_put(s);
+				if (list_empty(&s->dlcs))
+					rfcomm_session_put(s);
 			break;
 		}
 	}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
 			break;
 		}
 
+		tty_unlock();
 		schedule();
+		tty_lock();
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
 	tristate "802.1d Ethernet Bridging"
 	select LLC
 	select STP
+	depends on IPV6 || IPV6=n
 	---help---
 	  If you say Y here, then your Linux box will be able to act as an
 	  Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2872393b2939..88485cc74dc3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -328,12 +328,12 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
-		hlist_add_head_rcu(&fdb->hlist, head);
-
 		fdb->dst = source;
 		fdb->is_local = is_local;
 		fdb->is_static = is_local;
 		fdb->ageing_timer = jiffies;
+
+		hlist_add_head_rcu(&fdb->hlist, head);
 	}
 	return fdb;
 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1b776f..88e4aa9cb1f9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21acb34..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-	if (ipv6_addr_is_multicast(addr) &&
-	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
 		return 1;
 	return 0;
 }
@@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data)
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
@@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
 			mod_timer(&mp->timer, jiffies);
 
@@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	eth = eth_hdr(skb);
 
 	memcpy(eth->h_source, br->dev->dev_addr, 6);
-	ipv6_eth_mc_map(group, eth->h_dest);
 	eth->h_proto = htons(ETH_P_IPV6);
 	skb_put(skb, sizeof(*eth));
 
@@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
-	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+	ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+			   &ip6h->saddr);
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
 	hopopt = (u8 *)(ip6h + 1);
 	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
@@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return 0;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
-	br_group.proto = htons(ETH_P_IP);
+	br_group.proto = htons(ETH_P_IPV6);
 
 	return br_multicast_add_group(br, port, &br_group);
 }
@@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
 		nsrcs = skb_header_pointer(skb,
 					   len + offsetof(struct mld2_grec,
-							  grec_mca),
+							  grec_nsrcs),
 					   sizeof(_nsrcs), &_nsrcs);
 		if (!nsrcs)
 			return -EINVAL;
 
 		if (!pskb_may_pull(skb,
 				   len + sizeof(*grec) +
-				   sizeof(struct in6_addr) * (*nsrcs)))
+				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
 			return -EINVAL;
 
 		grec = (struct mld2_grec *)(skb->data + len);
-		len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+		len += sizeof(*grec) +
+		       sizeof(struct in6_addr) * ntohs(*nsrcs);
 
 		/* We treat these as MLDv1 reports for now. */
 		switch (grec->grec_type) {
@@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
 	max_delay *= br->multicast_last_member_count;
 
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		goto out;
 
 	max_delay *= br->multicast_last_member_count;
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 		     br->multicast_last_member_interval;
 
 	if (!port) {
-		if (!hlist_unhashed(&mp->mglist) &&
+		if (mp->mglist &&
 		    (timer_pending(&mp->timer) ?
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac7734bfc..4e1b620b6be6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
 struct net_bridge_mdb_entry
 {
 	struct hlist_node hlist[2];
-	struct hlist_node mglist;
 	struct net_bridge *br;
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head rcu;
 	struct timer_list timer;
 	struct timer_list query_timer;
 	struct br_ip addr;
+	bool mglist;
 	u32 queries_sent;
 };
 
@@ -238,7 +238,6 @@ struct net_bridge
 	spinlock_t multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head router_list;
-	struct hlist_head mglist;
 
 	struct timer_list multicast_router_timer;
 	struct timer_list multicast_querier_timer;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index fa9dab372b68..6008d6dc18a0 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev)
 	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
 	priv->flowenabled = false;
 
-	ASSERT_RTNL();
 	init_waitqueue_head(&priv->netmgmt_wq);
-	list_add(&priv->list_field, &chnl_net_list);
 }
 
 
@@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
 	ret = register_netdevice(dev);
 	if (ret)
 		pr_warn("device rtml registration failed\n");
+	else
+		list_add(&caifdev->list_field, &chnl_net_list);
 	return ret;
 }
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..05f357828a2f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 {
 	struct kvec iov = {buf, len};
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
-	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 /*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 		     size_t kvlen, size_t len, int more)
 {
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+	int r;
 
 	if (more)
 		msg.msg_flags |= MSG_MORE;
 	else
 		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
 
-	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+	if (r == -EAGAIN)
+		r = 0;
+	return r;
 }
 
 
@@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
 		ceph_msg_put(con->out_msg);
 		con->out_msg = NULL;
 	}
-	con->out_keepalive_pending = false;
 	con->in_seq = 0;
 	con->in_seq_acked = 0;
 }
@@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 		    (msg->pages || msg->pagelist || msg->bio || in_trail))
 			kunmap(page);
 
+		if (ret == -EAGAIN)
+			ret = 0;
 		if (ret <= 0)
 			goto out;
 
@@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
 			con->auth_retry);
 		if (con->auth_retry == 2) {
 			con->error_msg = "connect authorization failure";
-			reset_connection(con);
-			set_bit(CLOSED, &con->state);
 			return -1;
 		}
 		con->auth_retry = 1;
@@ -1705,14 +1712,6 @@ more:
 
 	/* open the socket first? */
 	if (con->sock == NULL) {
-		/*
-		 * if we were STANDBY and are reconnecting _this_
-		 * connection, bump connect_seq now. Always bump
-		 * global_seq.
-		 */
-		if (test_and_clear_bit(STANDBY, &con->state))
-			con->connect_seq++;
-
 		prepare_write_banner(msgr, con);
 		prepare_write_connect(msgr, con, 1);
 		prepare_read_banner(con);
@@ -1737,16 +1736,12 @@ more_kvec:
 	if (con->out_skip) {
 		ret = write_partial_skip(con);
 		if (ret <= 0)
-			goto done;
-		if (ret < 0) {
-			dout("try_write write_partial_skip err %d\n", ret);
-			goto done;
-		}
+			goto out;
 	}
 	if (con->out_kvec_left) {
 		ret = write_partial_kvec(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 	}
 
 	/* msg pages? */
@@ -1761,11 +1756,11 @@ more_kvec:
 		if (ret == 1)
 			goto more_kvec;  /* we need to send the footer, too! */
 		if (ret == 0)
-			goto done;
+			goto out;
 		if (ret < 0) {
 			dout("try_write write_partial_msg_pages err %d\n",
 			     ret);
-			goto done;
+			goto out;
 		}
 	}
 
@@ -1789,10 +1784,9 @@ do_next:
 	/* Nothing to do! */
 	clear_bit(WRITE_PENDING, &con->state);
 	dout("try_write nothing else to write.\n");
-done:
 	ret = 0;
 out:
-	dout("try_write done on %p\n", con);
+	dout("try_write done on %p ret %d\n", con, ret);
 	return ret;
 }
 
@@ -1821,19 +1815,17 @@ more:
 			dout("try_read connecting\n");
 			ret = read_partial_banner(con);
 			if (ret <= 0)
-				goto done;
-			if (process_banner(con) < 0) {
-				ret = -1;
 				goto out;
-			}
+			ret = process_banner(con);
+			if (ret < 0)
+				goto out;
 		}
 		ret = read_partial_connect(con);
 		if (ret <= 0)
-			goto done;
-		if (process_connect(con) < 0) {
-			ret = -1;
 			goto out;
-		}
+		ret = process_connect(con);
+		if (ret < 0)
+			goto out;
 		goto more;
 	}
 
@@ -1848,7 +1840,7 @@ more:
 		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
 		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		con->in_base_pos += ret;
 		if (con->in_base_pos)
 			goto more;
@@ -1859,7 +1851,7 @@ more:
 		 */
 		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		dout("try_read got tag %d\n", (int)con->in_tag);
 		switch (con->in_tag) {
 		case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@ more:
 			break;
 		case CEPH_MSGR_TAG_CLOSE:
 			set_bit(CLOSED, &con->state);   /* fixme */
-			goto done;
+			goto out;
 		default:
 			goto bad_tag;
 		}
@@ -1882,13 +1874,12 @@ more:
 		case -EBADMSG:
 			con->error_msg = "bad crc";
 			ret = -EIO;
-			goto out;
+			break;
 		case -EIO:
 			con->error_msg = "io error";
-			goto out;
-		default:
-			goto done;
+			break;
 		}
+		goto out;
 	}
 	if (con->in_tag == CEPH_MSGR_TAG_READY)
 		goto more;
@@ -1898,15 +1889,13 @@ more:
 	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
 		ret = read_partial_ack(con);
 		if (ret <= 0)
-			goto done;
+			goto out;
 		process_ack(con);
 		goto more;
 	}
 
-done:
-	ret = 0;
 out:
-	dout("try_read done on %p\n", con);
+	dout("try_read done on %p ret %d\n", con, ret);
 	return ret;
 
 bad_tag:
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
 						   work.work);
 
 	mutex_lock(&con->mutex);
+	if (test_and_clear_bit(BACKOFF, &con->state)) {
+		dout("con_work %p backing off\n", con);
+		if (queue_delayed_work(ceph_msgr_wq, &con->work,
+				       round_jiffies_relative(con->delay))) {
+			dout("con_work %p backoff %lu\n", con, con->delay);
+			mutex_unlock(&con->mutex);
+			return;
+		} else {
+			con->ops->put(con);
+			dout("con_work %p FAILED to back off %lu\n", con,
+			     con->delay);
+		}
+	}
 
+	if (test_bit(STANDBY, &con->state)) {
+		dout("con_work %p STANDBY\n", con);
+		goto done;
+	}
 	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
 		dout("con_work CLOSED\n");
 		con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
 	/* Requeue anything that hasn't been acked */
 	list_splice_init(&con->out_sent, &con->out_queue);
 
-	/* If there are no messages in the queue, place the connection
-	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
-	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-		dout("fault setting STANDBY\n");
+	/* If there are no messages queued or keepalive pending, place
+	 * the connection in a STANDBY state */
+	if (list_empty(&con->out_queue) &&
+	    !test_bit(KEEPALIVE_PENDING, &con->state)) {
+		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+		clear_bit(WRITE_PENDING, &con->state);
 		set_bit(STANDBY, &con->state);
 	} else {
 		/* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
 			con->delay = BASE_DELAY_INTERVAL;
 		else if (con->delay < MAX_DELAY_INTERVAL)
 			con->delay *= 2;
-		dout("fault queueing %p delay %lu\n", con, con->delay);
 		con->ops->get(con);
 		if (queue_delayed_work(ceph_msgr_wq, &con->work,
-				       round_jiffies_relative(con->delay)) == 0)
+				       round_jiffies_relative(con->delay))) {
+			dout("fault queued %p delay %lu\n", con, con->delay);
+		} else {
 			con->ops->put(con);
+			dout("fault failed to queue %p delay %lu, backoff\n",
+			     con, con->delay);
+			/*
+			 * In many cases we see a socket state change
+			 * while con_work is running and end up
+			 * queuing (non-delayed) work, such that we
+			 * can't backoff with a delay. Set a flag so
+			 * that when con_work restarts we schedule the
+			 * delay then.
+			 */
+			set_bit(BACKOFF, &con->state);
+		}
 	}
 
 out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);
 
+static void clear_standby(struct ceph_connection *con)
+{
+	/* come back from STANDBY? */
+	if (test_and_clear_bit(STANDBY, &con->state)) {
+		mutex_lock(&con->mutex);
+		dout("clear_standby %p and ++connect_seq\n", con);
+		con->connect_seq++;
+		WARN_ON(test_bit(WRITE_PENDING, &con->state));
+		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+		mutex_unlock(&con->mutex);
+	}
+}
+
 /*
  * Queue up an outgoing message on the given connection.
  */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 
 	/* if there wasn't anything waiting to send before, queue
 	 * new work */
+	clear_standby(con);
 	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
 }
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
  */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+	dout("con_keepalive %p\n", con);
+	clear_standby(con);
 	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
 	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e64c69f..cd9c21df87d1 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
 					  int num_pages, bool write_page)
 {
 	struct page **pages;
-	int rc;
+	int got = 0;
+	int rc = 0;
 
 	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
 	down_read(&current->mm->mmap_sem);
-	rc = get_user_pages(current, current->mm, (unsigned long)data,
-			    num_pages, write_page, 0, pages, NULL);
+	while (got < num_pages) {
+		rc = get_user_pages(current, current->mm,
+		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+		    num_pages - got, write_page, 0, pages + got, NULL);
+		if (rc < 0)
+			break;
+		BUG_ON(rc == 0);
+		got += rc;
+	}
 	up_read(&current->mm->mmap_sem);
-	if (rc < num_pages)
+	if (rc < 0)
 		goto fail;
 	return pages;
 
 fail:
-	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+	ceph_put_page_vector(pages, got, false);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7c6a46f80372..6561021d22d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@ EXPORT_SYMBOL(dev_get_by_index);
  *	@ha: hardware address
  *
  *	Search for an interface by MAC address. Returns NULL if the device
- *	is not found or a pointer to the device. The caller must hold RCU
+ *	is not found or a pointer to the device.
+ *	The caller must hold RCU or RTNL.
  *	The returned device has not had its ref count increased
  *	and the caller must therefore be careful about locking
  *
@@ -1113,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
 void dev_load(struct net *net, const char *name)
 {
 	struct net_device *dev;
+	int no_module;
 
 	rcu_read_lock();
 	dev = dev_get_by_name_rcu(net, name);
 	rcu_read_unlock();
 
-	if (!dev && capable(CAP_NET_ADMIN))
-		request_module("%s", name);
+	no_module = !dev;
+	if (no_module && capable(CAP_NET_ADMIN))
+		no_module = request_module("netdev-%s", name);
+	if (no_module && capable(CAP_SYS_MODULE)) {
+		if (!request_module("%s", name))
+			pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+	}
 }
 EXPORT_SYMBOL(dev_load);
 
@@ -1279,10 +1288,13 @@ static int __dev_close_many(struct list_head *head)
 
 static int __dev_close(struct net_device *dev)
 {
+	int retval;
 	LIST_HEAD(single);
 
 	list_add(&dev->unreg_list, &single);
-	return __dev_close_many(&single);
+	retval = __dev_close_many(&single);
+	list_del(&single);
+	return retval;
 }
 
 int dev_close_many(struct list_head *head)
@@ -1324,7 +1336,7 @@ int dev_close(struct net_device *dev)
 
 	list_add(&dev->unreg_list, &single);
 	dev_close_many(&single);
-
+	list_del(&single);
 	return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -2562,7 +2574,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 
 	map = rcu_dereference(rxqueue->rps_map);
 	if (map) {
-		if (map->len == 1) {
+		if (map->len == 1 &&
+		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
@@ -3423,6 +3436,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 	skb->vlan_tci = 0;
+	skb->dev = napi->dev;
+	skb->skb_iif = 0;
 
 	napi->skb = skb;
 }
@@ -5059,6 +5074,7 @@ static void rollback_registered(struct net_device *dev)
 
 	list_add(&dev->unreg_list, &single);
 	rollback_registered_many(&single);
+	list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -5656,30 +5672,35 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
+	dev->gso_max_size = GSO_MAX_SIZE;
+
+	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+	dev->ethtool_ntuple_list.count = 0;
+	INIT_LIST_HEAD(&dev->napi_list);
+	INIT_LIST_HEAD(&dev->unreg_list);
+	INIT_LIST_HEAD(&dev->link_watch_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
+	setup(dev);
+
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
-		goto free_pcpu;
+		goto free_all;
 
 #ifdef CONFIG_RPS
 	dev->num_rx_queues = rxqs;
 	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
-		goto free_pcpu;
+		goto free_all;
 #endif
 
-	dev->gso_max_size = GSO_MAX_SIZE;
-
-	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
-	dev->ethtool_ntuple_list.count = 0;
-	INIT_LIST_HEAD(&dev->napi_list);
-	INIT_LIST_HEAD(&dev->unreg_list);
-	INIT_LIST_HEAD(&dev->link_watch_list);
-	dev->priv_flags = IFF_XMIT_DST_RELEASE;
-	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
 
+free_all:
+	free_netdev(dev);
+	return NULL;
+
 free_pcpu:
 	free_percpu(dev->pcpu_refcnt);
 	kfree(dev->_tx);
@@ -6207,6 +6228,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
+	list_del(&dev_kill_list);
 	rtnl_unlock();
 }
 
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 
 	list_for_each_entry(ha, &from_list->list, list) {
 		type = addr_type ? addr_type : ha->type;
-		__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+		__hw_addr_del(to_list, ha->addr, addr_len, type);
 	}
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 17741782a345..ff2302910b5e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vmalloc(reglen);
+	regbuf = vzalloc(reglen);
 	if (!regbuf)
 		return -ENOMEM;
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..b5bada92f637 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
 			pkt_dev->started_at);
 	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
 
-	p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
 		     (unsigned long long)ktime_to_us(elapsed),
 		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
 		     (unsigned long long)ktime_to_us(idle),
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..2d65c6bb24c1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,8 +1121,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 				return -EOPNOTSUPP;
 
 			if (af_ops->validate_link_af) {
-				err = af_ops->validate_link_af(dev,
-							       tb[IFLA_AF_SPEC]);
+				err = af_ops->validate_link_af(dev, af);
 				if (err < 0)
 					return err;
 			}
@@ -1672,6 +1671,9 @@ replay:
 			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
 
 		dest_net = rtnl_link_get_net(net, tb);
+		if (IS_ERR(dest_net))
+			return PTR_ERR(dest_net);
+
 		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
 
 		if (IS_ERR(dev))
diff --git a/net/core/scm.c b/net/core/scm.c
index bbe454450801..4c1ef026d695 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -95,7 +95,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 		int fd = fdp[i];
 		struct file *file;
 
-		if (fd < 0 || !(file = fget(fd)))
+		if (fd < 0 || !(file = fget_raw(fd)))
 			return -EBADF;
 		*fpp++ = file;
 		fpl->count++;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d31bb36ae0dc..d883dcc78b6b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo = skb_shinfo(skb);
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
 	if (fclone) {
 		struct sk_buff *child = skb + 1;
@@ -2744,8 +2745,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 merge:
 	if (offset > headlen) {
-		skbinfo->frags[0].page_offset += offset - headlen;
-		skbinfo->frags[0].size -= offset - headlen;
+		unsigned int eat = offset - headlen;
+
+		skbinfo->frags[0].page_offset += eat;
+		skbinfo->frags[0].size -= eat;
+		skb->data_len -= eat;
+		skb->len -= eat;
 		offset = headlen;
 	}
 
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d900ab99814a..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -583,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
 	u8 up, idtype;
 	int ret = -EINVAL;
 
-	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+	if (!tb[DCB_ATTR_APP])
 		goto out;
 
 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
 		goto out;
 
 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
-	up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+	if (netdev->dcbnl_ops->getapp) {
+		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+	} else {
+		struct dcb_app app = {
+			.selector = idtype,
+			.protocol = id,
+		};
+		up = dcb_getapp(netdev, &app);
+	}
 
 	/* send this back */
 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -617,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
 	dcb->cmd = DCB_CMD_GAPP;
 
 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	if (!app_nest)
+		goto out_cancel;
+
 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
@@ -1181,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 		goto err;
 	}
 
-	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 		err = ops->ieee_setpfc(netdev, pfc);
 		if (err)
@@ -1604,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp);
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
@@ -1635,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 	}
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, new);
+	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
614 /* Caller (dccp_v4_do_rcv) will send Reset */ 614 /* Caller (dccp_v4_do_rcv) will send Reset */
615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
616 return 1; 616 return 1;
617 } else if (sk->sk_state == DCCP_CLOSED) {
618 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
619 return 1;
617 } 620 }
618 621
619 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
668 } 671 }
669 672
670 switch (sk->sk_state) { 673 switch (sk->sk_state) {
671 case DCCP_CLOSED:
672 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
673 return 1;
674
675 case DCCP_REQUESTING: 674 case DCCP_REQUESTING:
676 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 675 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
677 if (queued >= 0) 676 if (queued >= 0)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
67 size_t result_len = 0; 67 size_t result_len = 0;
68 const char *data = _data, *end, *opt; 68 const char *data = _data, *end, *opt;
69 69
70 kenter("%%%d,%s,'%s',%zu", 70 kenter("%%%d,%s,'%*.*s',%zu",
71 key->serial, key->description, data, datalen); 71 key->serial, key->description,
72 (int)datalen, (int)datalen, data, datalen);
72 73
73 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
74 return -EINVAL; 75 return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
217 seq_printf(m, ": %u", key->datalen); 218 seq_printf(m, ": %u", key->datalen);
218} 219}
219 220
221/*
222 * read the DNS data
223 * - the key's semaphore is read-locked
224 */
225static long dns_resolver_read(const struct key *key,
226 char __user *buffer, size_t buflen)
227{
228 if (key->type_data.x[0])
229 return key->type_data.x[0];
230
231 return user_read(key, buffer, buflen);
232}
233
220struct key_type key_type_dns_resolver = { 234struct key_type key_type_dns_resolver = {
221 .name = "dns_resolver", 235 .name = "dns_resolver",
222 .instantiate = dns_resolver_instantiate, 236 .instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
224 .revoke = user_revoke, 238 .revoke = user_revoke,
225 .destroy = user_destroy, 239 .destroy = user_destroy,
226 .describe = dns_resolver_describe, 240 .describe = dns_resolver_describe,
227 .read = user_read, 241 .read = dns_resolver_read,
228}; 242};
229 243
230static int __init init_dns_resolver(void) 244static int __init init_dns_resolver(void)
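Note on the dns_resolver_instantiate() hunk above: the debug print switches to "%*.*s" so the payload, which need not be NUL-terminated, is printed with an explicit length bound. A small standalone sketch of the same format usage, with a made-up buffer:

#include <stdio.h>

int main(void)
{
        /* Made-up payload that is deliberately not NUL-terminated. */
        const char data[] = { '1', '9', '2', '.', '0', '.', '2', '.', '1' };
        size_t datalen = sizeof(data);

        /* "%*.*s" consumes width and precision as int arguments, so the
         * print is bounded to datalen bytes even without a terminator. */
        printf("payload '%*.*s', %zu bytes\n",
               (int)datalen, (int)datalen, data, datalen);
        return 0;
}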
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a74e1f4..3fb14b7c13cf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
428} 428}
429module_exit(dsa_cleanup_module); 429module_exit(dsa_cleanup_module);
430 430
431MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>") 431MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
432MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips"); 432MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
433MODULE_LICENSE("GPL"); 433MODULE_LICENSE("GPL");
434MODULE_ALIAS("platform:dsa"); 434MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a586b4..0c2826337919 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
265static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, 265static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
266 struct msghdr *msg, size_t len) 266 struct msghdr *msg, size_t len)
267{ 267{
268 struct sock *sk = sock->sk;
269 struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name; 268 struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
270 struct net_device *dev; 269 struct net_device *dev;
271 struct ec_addr addr; 270 struct ec_addr addr;
272 int err; 271 int err;
273 unsigned char port, cb; 272 unsigned char port, cb;
274#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) 273#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
274 struct sock *sk = sock->sk;
275 struct sk_buff *skb; 275 struct sk_buff *skb;
276 struct ec_cb *eb; 276 struct ec_cb *eb;
277#endif 277#endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
488 488
489error_free_buf: 489error_free_buf:
490 vfree(userbuf); 490 vfree(userbuf);
491error:
491#else 492#else
492 err = -EPROTOTYPE; 493 err = -EPROTOTYPE;
493#endif 494#endif
494 error:
495 mutex_unlock(&econet_mutex); 495 mutex_unlock(&econet_mutex);
496 496
497 return err; 497 return err;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b61107df6c..45b89d7bda5a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
880} 880}
881EXPORT_SYMBOL(inet_ioctl); 881EXPORT_SYMBOL(inet_ioctl);
882 882
883#ifdef CONFIG_COMPAT
884int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
885{
886 struct sock *sk = sock->sk;
887 int err = -ENOIOCTLCMD;
888
889 if (sk->sk_prot->compat_ioctl)
890 err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
891
892 return err;
893}
894#endif
895
883const struct proto_ops inet_stream_ops = { 896const struct proto_ops inet_stream_ops = {
884 .family = PF_INET, 897 .family = PF_INET,
885 .owner = THIS_MODULE, 898 .owner = THIS_MODULE,
@@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
903#ifdef CONFIG_COMPAT 916#ifdef CONFIG_COMPAT
904 .compat_setsockopt = compat_sock_common_setsockopt, 917 .compat_setsockopt = compat_sock_common_setsockopt,
905 .compat_getsockopt = compat_sock_common_getsockopt, 918 .compat_getsockopt = compat_sock_common_getsockopt,
919 .compat_ioctl = inet_compat_ioctl,
906#endif 920#endif
907}; 921};
908EXPORT_SYMBOL(inet_stream_ops); 922EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
929#ifdef CONFIG_COMPAT 943#ifdef CONFIG_COMPAT
930 .compat_setsockopt = compat_sock_common_setsockopt, 944 .compat_setsockopt = compat_sock_common_setsockopt,
931 .compat_getsockopt = compat_sock_common_getsockopt, 945 .compat_getsockopt = compat_sock_common_getsockopt,
946 .compat_ioctl = inet_compat_ioctl,
932#endif 947#endif
933}; 948};
934EXPORT_SYMBOL(inet_dgram_ops); 949EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
959#ifdef CONFIG_COMPAT 974#ifdef CONFIG_COMPAT
960 .compat_setsockopt = compat_sock_common_setsockopt, 975 .compat_setsockopt = compat_sock_common_setsockopt,
961 .compat_getsockopt = compat_sock_common_getsockopt, 976 .compat_getsockopt = compat_sock_common_getsockopt,
977 .compat_ioctl = inet_compat_ioctl,
962#endif 978#endif
963}; 979};
964 980
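Note on the new inet_compat_ioctl() above: it forwards to the protocol's compat_ioctl handler when one is registered and otherwise reports the command as unhandled. A minimal sketch of that optional-callback pattern; the struct name and the ENOIOCTLCMD value here are illustrative placeholders, not the kernel definitions:

#include <stdio.h>

#define ENOIOCTLCMD_DEMO 515    /* placeholder for the kernel's ENOIOCTLCMD */

/* Hypothetical protocol ops with an optional 32-bit-compat ioctl handler. */
struct proto_sketch {
        int (*compat_ioctl)(unsigned int cmd, unsigned long arg);
};

static int compat_ioctl_dispatch(const struct proto_sketch *prot,
                                 unsigned int cmd, unsigned long arg)
{
        /* Same shape as the new inet_compat_ioctl(): call through only
         * when the protocol actually provides a handler. */
        if (prot->compat_ioctl)
                return prot->compat_ioctl(cmd, arg);
        return -ENOIOCTLCMD_DEMO;
}

static int demo_handler(unsigned int cmd, unsigned long arg)
{
        printf("handled cmd=%u arg=%lu\n", cmd, arg);
        return 0;
}

int main(void)
{
        struct proto_sketch with    = { .compat_ioctl = demo_handler };
        struct proto_sketch without = { .compat_ioctl = NULL };

        printf("with handler:    %d\n", compat_ioctl_dispatch(&with, 1, 2));
        printf("without handler: %d\n", compat_ioctl_dispatch(&without, 1, 2));
        return 0;
}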
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 04c8b69fd426..7927589813b5 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 if (__in_dev_get_rcu(dev)) { 1020 if (__in_dev_get_rtnl(dev)) {
1021 IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on); 1021 IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
1022 return 0; 1022 return 0;
1023 } 1023 }
1024 return -ENXIO; 1024 return -ENXIO;
1025} 1025}
1026 1026
1027/* must be called with rcu_read_lock() */
1028static int arp_req_set_public(struct net *net, struct arpreq *r, 1027static int arp_req_set_public(struct net *net, struct arpreq *r,
1029 struct net_device *dev) 1028 struct net_device *dev)
1030{ 1029{
@@ -1233,10 +1232,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1233 if (!(r.arp_flags & ATF_NETMASK)) 1232 if (!(r.arp_flags & ATF_NETMASK))
1234 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1233 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
1235 htonl(0xFFFFFFFFUL); 1234 htonl(0xFFFFFFFFUL);
1236 rcu_read_lock(); 1235 rtnl_lock();
1237 if (r.arp_dev[0]) { 1236 if (r.arp_dev[0]) {
1238 err = -ENODEV; 1237 err = -ENODEV;
1239 dev = dev_get_by_name_rcu(net, r.arp_dev); 1238 dev = __dev_get_by_name(net, r.arp_dev);
1240 if (dev == NULL) 1239 if (dev == NULL)
1241 goto out; 1240 goto out;
1242 1241
@@ -1263,7 +1262,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1263 break; 1262 break;
1264 } 1263 }
1265out: 1264out:
1266 rcu_read_unlock(); 1265 rtnl_unlock();
1267 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r))) 1266 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
1268 err = -EFAULT; 1267 err = -EFAULT;
1269 return err; 1268 return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b337bd..036652c8166d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
670 ifap = &ifa->ifa_next) { 670 ifap = &ifa->ifa_next) {
671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) && 671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
672 sin_orig.sin_addr.s_addr == 672 sin_orig.sin_addr.s_addr ==
673 ifa->ifa_address) { 673 ifa->ifa_local) {
674 break; /* found */ 674 break; /* found */
675 } 675 }
676 } 676 }
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
1030 return mtu >= 68; 1030 return mtu >= 68;
1031} 1031}
1032 1032
1033static void inetdev_send_gratuitous_arp(struct net_device *dev,
1034 struct in_device *in_dev)
1035
1036{
1037 struct in_ifaddr *ifa = in_dev->ifa_list;
1038
1039 if (!ifa)
1040 return;
1041
1042 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1043 ifa->ifa_local, dev,
1044 ifa->ifa_local, NULL,
1045 dev->dev_addr, NULL);
1046}
1047
1033/* Called only under RTNL semaphore */ 1048/* Called only under RTNL semaphore */
1034 1049
1035static int inetdev_event(struct notifier_block *this, unsigned long event, 1050static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1082 } 1097 }
1083 ip_mc_up(in_dev); 1098 ip_mc_up(in_dev);
1084 /* fall through */ 1099 /* fall through */
1085 case NETDEV_NOTIFY_PEERS:
1086 case NETDEV_CHANGEADDR: 1100 case NETDEV_CHANGEADDR:
1101 if (!IN_DEV_ARP_NOTIFY(in_dev))
1102 break;
1103 /* fall through */
1104 case NETDEV_NOTIFY_PEERS:
1087 /* Send gratuitous ARP to notify of link change */ 1105 /* Send gratuitous ARP to notify of link change */
1088 if (IN_DEV_ARP_NOTIFY(in_dev)) { 1106 inetdev_send_gratuitous_arp(dev, in_dev);
1089 struct in_ifaddr *ifa = in_dev->ifa_list;
1090
1091 if (ifa)
1092 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1093 ifa->ifa_address, dev,
1094 ifa->ifa_address, NULL,
1095 dev->dev_addr, NULL);
1096 }
1097 break; 1107 break;
1098 case NETDEV_DOWN: 1108 case NETDEV_DOWN:
1099 ip_mc_down(in_dev); 1109 ip_mc_down(in_dev);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909cf701..3c8dfa16614d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@ restart:
505 } 505 }
506 506
507 rcu_read_unlock(); 507 rcu_read_unlock();
508 local_bh_disable();
508 inet_twsk_deschedule(tw, twdr); 509 inet_twsk_deschedule(tw, twdr);
510 local_bh_enable();
509 inet_twsk_put(tw); 511 inet_twsk_put(tw);
510 goto restart_rcu; 512 goto restart_rcu;
511 } 513 }
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc85751c74..a96e65674ac3 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -475,7 +475,7 @@ static int cleanup_once(unsigned long ttl)
475struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) 475struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
476{ 476{
477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 struct inet_peer_base *base = family_to_base(AF_INET); 478 struct inet_peer_base *base = family_to_base(daddr->family);
479 struct inet_peer *p; 479 struct inet_peer *p;
480 480
481 /* Look up for the address quickly, lockless. 481 /* Look up for the address quickly, lockless.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e34e49..d1d0e2c256fc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
775 .fl4_dst = dst, 775 .fl4_dst = dst,
776 .fl4_src = tiph->saddr, 776 .fl4_src = tiph->saddr,
777 .fl4_tos = RT_TOS(tos), 777 .fl4_tos = RT_TOS(tos),
778 .proto = IPPROTO_GRE,
778 .fl_gre_key = tunnel->parms.o_key 779 .fl_gre_key = tunnel->parms.o_key
779 }; 780 };
780 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 781 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -1764,4 +1765,4 @@ module_exit(ipgre_fini);
1764MODULE_LICENSE("GPL"); 1765MODULE_LICENSE("GPL");
1765MODULE_ALIAS_RTNL_LINK("gre"); 1766MODULE_ALIAS_RTNL_LINK("gre");
1766MODULE_ALIAS_RTNL_LINK("gretap"); 1767MODULE_ALIAS_RTNL_LINK("gretap");
1767MODULE_ALIAS("gre0"); 1768MODULE_ALIAS_NETDEV("gre0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..a5f58e7cbb26 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
913module_init(ipip_init); 913module_init(ipip_init);
914module_exit(ipip_fini); 914module_exit(ipip_fini);
915MODULE_LICENSE("GPL"); 915MODULE_LICENSE("GPL");
916MODULE_ALIAS("tunl0"); 916MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9afd73e0..8b65a12654e7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
60#include <linux/notifier.h> 60#include <linux/notifier.h>
61#include <linux/if_arp.h> 61#include <linux/if_arp.h>
62#include <linux/netfilter_ipv4.h> 62#include <linux/netfilter_ipv4.h>
63#include <linux/compat.h>
63#include <net/ipip.h> 64#include <net/ipip.h>
64#include <net/checksum.h> 65#include <net/checksum.h>
65#include <net/netlink.h> 66#include <net/netlink.h>
@@ -1434,6 +1435,81 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1434 } 1435 }
1435} 1436}
1436 1437
1438#ifdef CONFIG_COMPAT
1439struct compat_sioc_sg_req {
1440 struct in_addr src;
1441 struct in_addr grp;
1442 compat_ulong_t pktcnt;
1443 compat_ulong_t bytecnt;
1444 compat_ulong_t wrong_if;
1445};
1446
1447struct compat_sioc_vif_req {
1448 vifi_t vifi; /* Which iface */
1449 compat_ulong_t icount;
1450 compat_ulong_t ocount;
1451 compat_ulong_t ibytes;
1452 compat_ulong_t obytes;
1453};
1454
1455int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1456{
1457 struct compat_sioc_sg_req sr;
1458 struct compat_sioc_vif_req vr;
1459 struct vif_device *vif;
1460 struct mfc_cache *c;
1461 struct net *net = sock_net(sk);
1462 struct mr_table *mrt;
1463
1464 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1465 if (mrt == NULL)
1466 return -ENOENT;
1467
1468 switch (cmd) {
1469 case SIOCGETVIFCNT:
1470 if (copy_from_user(&vr, arg, sizeof(vr)))
1471 return -EFAULT;
1472 if (vr.vifi >= mrt->maxvif)
1473 return -EINVAL;
1474 read_lock(&mrt_lock);
1475 vif = &mrt->vif_table[vr.vifi];
1476 if (VIF_EXISTS(mrt, vr.vifi)) {
1477 vr.icount = vif->pkt_in;
1478 vr.ocount = vif->pkt_out;
1479 vr.ibytes = vif->bytes_in;
1480 vr.obytes = vif->bytes_out;
1481 read_unlock(&mrt_lock);
1482
1483 if (copy_to_user(arg, &vr, sizeof(vr)))
1484 return -EFAULT;
1485 return 0;
1486 }
1487 read_unlock(&mrt_lock);
1488 return -EADDRNOTAVAIL;
1489 case SIOCGETSGCNT:
1490 if (copy_from_user(&sr, arg, sizeof(sr)))
1491 return -EFAULT;
1492
1493 rcu_read_lock();
1494 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1495 if (c) {
1496 sr.pktcnt = c->mfc_un.res.pkt;
1497 sr.bytecnt = c->mfc_un.res.bytes;
1498 sr.wrong_if = c->mfc_un.res.wrong_if;
1499 rcu_read_unlock();
1500
1501 if (copy_to_user(arg, &sr, sizeof(sr)))
1502 return -EFAULT;
1503 return 0;
1504 }
1505 rcu_read_unlock();
1506 return -EADDRNOTAVAIL;
1507 default:
1508 return -ENOIOCTLCMD;
1509 }
1510}
1511#endif
1512
1437 1513
1438static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1514static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1439{ 1515{
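Note on the compat_sioc_*_req structures added above: they exist because the native SIOCGETVIFCNT/SIOCGETSGCNT structures carry unsigned long counters, whose size differs between a 64-bit kernel and 32-bit userspace. A standalone sketch (struct names are hypothetical) showing why the layouts diverge:

#include <stdio.h>
#include <stdint.h>

/* Native layout: counters are unsigned long (8 bytes on a 64-bit kernel). */
struct vif_req_native {
        unsigned short vifi;
        unsigned long icount, ocount, ibytes, obytes;
};

/* 32-bit style layout: fixed 32-bit counters, the role compat_ulong_t
 * plays in the patch. */
struct vif_req_compat32 {
        unsigned short vifi;
        uint32_t icount, ocount, ibytes, obytes;
};

int main(void)
{
        /* On a typical LP64 build this prints 40 vs 20 bytes, which is why
         * a 64-bit kernel cannot hand the native struct to 32-bit callers. */
        printf("native:   %zu bytes\n", sizeof(struct vif_req_native));
        printf("compat32: %zu bytes\n", sizeof(struct vif_req_compat32));
        return 0;
}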
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b8ddcc480ed9..a5e52a9f0a12 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par)
60 60
61 if (mangle->flags & ~ARPT_MANGLE_MASK || 61 if (mangle->flags & ~ARPT_MANGLE_MASK ||
62 !(mangle->flags & ARPT_MANGLE_MASK)) 62 !(mangle->flags & ARPT_MANGLE_MASK))
63 return false; 63 return -EINVAL;
64 64
65 if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && 65 if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
66 mangle->target != XT_CONTINUE) 66 mangle->target != XT_CONTINUE)
67 return false; 67 return -EINVAL;
68 return true; 68 return 0;
69} 69}
70 70
71static struct xt_target arpt_mangle_reg __read_mostly = { 71static struct xt_target arpt_mangle_reg __read_mostly = {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab786e81..6390ba299b3d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
76#include <linux/seq_file.h> 76#include <linux/seq_file.h>
77#include <linux/netfilter.h> 77#include <linux/netfilter.h>
78#include <linux/netfilter_ipv4.h> 78#include <linux/netfilter_ipv4.h>
79#include <linux/compat.h>
79 80
80static struct raw_hashinfo raw_v4_hashinfo = { 81static struct raw_hashinfo raw_v4_hashinfo = {
81 .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), 82 .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
838 } 839 }
839} 840}
840 841
842#ifdef CONFIG_COMPAT
843static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
844{
845 switch (cmd) {
846 case SIOCOUTQ:
847 case SIOCINQ:
848 return -ENOIOCTLCMD;
849 default:
850#ifdef CONFIG_IP_MROUTE
851 return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
852#else
853 return -ENOIOCTLCMD;
854#endif
855 }
856}
857#endif
858
841struct proto raw_prot = { 859struct proto raw_prot = {
842 .name = "RAW", 860 .name = "RAW",
843 .owner = THIS_MODULE, 861 .owner = THIS_MODULE,
@@ -860,6 +878,7 @@ struct proto raw_prot = {
860#ifdef CONFIG_COMPAT 878#ifdef CONFIG_COMPAT
861 .compat_setsockopt = compat_raw_setsockopt, 879 .compat_setsockopt = compat_raw_setsockopt,
862 .compat_getsockopt = compat_raw_getsockopt, 880 .compat_getsockopt = compat_raw_getsockopt,
881 .compat_ioctl = compat_raw_ioctl,
863#endif 882#endif
864}; 883};
865 884
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 351dc4e85242..6ed6603c2f6d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2707,6 +2707,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
2707 return NULL; 2707 return NULL;
2708} 2708}
2709 2709
2710static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2711{
2712 return 0;
2713}
2714
2710static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 2715static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2711{ 2716{
2712} 2717}
@@ -2716,6 +2721,8 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2716 .protocol = cpu_to_be16(ETH_P_IP), 2721 .protocol = cpu_to_be16(ETH_P_IP),
2717 .destroy = ipv4_dst_destroy, 2722 .destroy = ipv4_dst_destroy,
2718 .check = ipv4_blackhole_dst_check, 2723 .check = ipv4_blackhole_dst_check,
2724 .default_mtu = ipv4_blackhole_default_mtu,
2725 .default_advmss = ipv4_default_advmss,
2719 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2726 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2720}; 2727};
2721 2728
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2549b29b062d..65f6c0406245 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
1222 } 1222 }
1223 1223
1224 /* D-SACK for already forgotten data... Do dumb counting. */ 1224 /* D-SACK for already forgotten data... Do dumb counting. */
1225 if (dup_sack && 1225 if (dup_sack && tp->undo_marker && tp->undo_retrans &&
1226 !after(end_seq_0, prior_snd_una) && 1226 !after(end_seq_0, prior_snd_una) &&
1227 after(end_seq_0, tp->undo_marker)) 1227 after(end_seq_0, tp->undo_marker))
1228 tp->undo_retrans--; 1228 tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1299 1299
1300 /* Account D-SACK for retransmitted packet. */ 1300 /* Account D-SACK for retransmitted packet. */
1301 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1301 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1302 if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1302 if (tp->undo_marker && tp->undo_retrans &&
1303 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
1303 tp->undo_retrans--; 1304 tp->undo_retrans--;
1304 if (sacked & TCPCB_SACKED_ACKED) 1305 if (sacked & TCPCB_SACKED_ACKED)
1305 state->reord = min(fack_count, state->reord); 1306 state->reord = min(fack_count, state->reord);
@@ -4399,7 +4400,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4399 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4400 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
4400 tp->ucopy.len -= chunk; 4401 tp->ucopy.len -= chunk;
4401 tp->copied_seq += chunk; 4402 tp->copied_seq += chunk;
4402 eaten = (chunk == skb->len && !th->fin); 4403 eaten = (chunk == skb->len);
4403 tcp_rcv_space_adjust(sk); 4404 tcp_rcv_space_adjust(sk);
4404 } 4405 }
4405 local_bh_disable(); 4406 local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 856f68466d49..02f583b3744a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,7 +1994,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1994 } 1994 }
1995 req = req->dl_next; 1995 req = req->dl_next;
1996 } 1996 }
1997 st->offset = 0;
1998 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) 1997 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1999 break; 1998 break;
2000get_req: 1999get_req:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2162 if (!tp->retrans_stamp) 2162 if (!tp->retrans_stamp)
2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2164 2164
2165 tp->undo_retrans++; 2165 tp->undo_retrans += tcp_skb_pcount(skb);
2166 2166
2167 /* snd_nxt is stored to detect loss of retransmitted segment, 2167 /* snd_nxt is stored to detect loss of retransmitted segment,
2168 * see tcp_input.c tcp_sacktag_write_queue(). 2168 * see tcp_input.c tcp_sacktag_write_queue().
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 24a1cf110d80..fd6782e3a038 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2661,14 +2661,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2661 struct net *net = dev_net(dev); 2661 struct net *net = dev_net(dev);
2662 struct inet6_dev *idev; 2662 struct inet6_dev *idev;
2663 struct inet6_ifaddr *ifa; 2663 struct inet6_ifaddr *ifa;
2664 LIST_HEAD(keep_list); 2664 int state, i;
2665 int state;
2666 2665
2667 ASSERT_RTNL(); 2666 ASSERT_RTNL();
2668 2667
2669 /* Flush routes if device is being removed or it is not loopback */ 2668 rt6_ifdown(net, dev);
2670 if (how || !(dev->flags & IFF_LOOPBACK)) 2669 neigh_ifdown(&nd_tbl, dev);
2671 rt6_ifdown(net, dev);
2672 2670
2673 idev = __in6_dev_get(dev); 2671 idev = __in6_dev_get(dev);
2674 if (idev == NULL) 2672 if (idev == NULL)
@@ -2689,6 +2687,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2689 2687
2690 } 2688 }
2691 2689
2690 /* Step 2: clear hash table */
2691 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
2692 struct hlist_head *h = &inet6_addr_lst[i];
2693 struct hlist_node *n;
2694
2695 spin_lock_bh(&addrconf_hash_lock);
2696 restart:
2697 hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
2698 if (ifa->idev == idev) {
2699 hlist_del_init_rcu(&ifa->addr_lst);
2700 addrconf_del_timer(ifa);
2701 goto restart;
2702 }
2703 }
2704 spin_unlock_bh(&addrconf_hash_lock);
2705 }
2706
2692 write_lock_bh(&idev->lock); 2707 write_lock_bh(&idev->lock);
2693 2708
2694 /* Step 2: clear flags for stateless addrconf */ 2709 /* Step 2: clear flags for stateless addrconf */
@@ -2722,52 +2737,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2722 struct inet6_ifaddr, if_list); 2737 struct inet6_ifaddr, if_list);
2723 addrconf_del_timer(ifa); 2738 addrconf_del_timer(ifa);
2724 2739
2725 /* If just doing link down, and address is permanent 2740 list_del(&ifa->if_list);
2726 and not link-local, then retain it. */
2727 if (!how &&
2728 (ifa->flags&IFA_F_PERMANENT) &&
2729 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2730 list_move_tail(&ifa->if_list, &keep_list);
2731
2732 /* If not doing DAD on this address, just keep it. */
2733 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
2734 idev->cnf.accept_dad <= 0 ||
2735 (ifa->flags & IFA_F_NODAD))
2736 continue;
2737 2741
2738 /* If it was tentative already, no need to notify */ 2742 write_unlock_bh(&idev->lock);
2739 if (ifa->flags & IFA_F_TENTATIVE)
2740 continue;
2741 2743
2742 /* Flag it for later restoration when link comes up */ 2744 spin_lock_bh(&ifa->state_lock);
2743 ifa->flags |= IFA_F_TENTATIVE; 2745 state = ifa->state;
2744 ifa->state = INET6_IFADDR_STATE_DAD; 2746 ifa->state = INET6_IFADDR_STATE_DEAD;
2745 } else { 2747 spin_unlock_bh(&ifa->state_lock);
2746 list_del(&ifa->if_list);
2747
2748 /* clear hash table */
2749 spin_lock_bh(&addrconf_hash_lock);
2750 hlist_del_init_rcu(&ifa->addr_lst);
2751 spin_unlock_bh(&addrconf_hash_lock);
2752
2753 write_unlock_bh(&idev->lock);
2754 spin_lock_bh(&ifa->state_lock);
2755 state = ifa->state;
2756 ifa->state = INET6_IFADDR_STATE_DEAD;
2757 spin_unlock_bh(&ifa->state_lock);
2758
2759 if (state != INET6_IFADDR_STATE_DEAD) {
2760 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2761 atomic_notifier_call_chain(&inet6addr_chain,
2762 NETDEV_DOWN, ifa);
2763 }
2764 2748
2765 in6_ifa_put(ifa); 2749 if (state != INET6_IFADDR_STATE_DEAD) {
2766 write_lock_bh(&idev->lock); 2750 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2751 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
2767 } 2752 }
2768 } 2753 in6_ifa_put(ifa);
2769 2754
2770 list_splice(&keep_list, &idev->addr_list); 2755 write_lock_bh(&idev->lock);
2756 }
2771 2757
2772 write_unlock_bh(&idev->lock); 2758 write_unlock_bh(&idev->lock);
2773 2759
@@ -4156,8 +4142,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4156 addrconf_leave_solict(ifp->idev, &ifp->addr); 4142 addrconf_leave_solict(ifp->idev, &ifp->addr);
4157 dst_hold(&ifp->rt->dst); 4143 dst_hold(&ifp->rt->dst);
4158 4144
4159 if (ifp->state == INET6_IFADDR_STATE_DEAD && 4145 if (ip6_del_rt(ifp->rt))
4160 ip6_del_rt(ifp->rt))
4161 dst_free(&ifp->rt->dst); 4146 dst_free(&ifp->rt->dst);
4162 break; 4147 break;
4163 } 4148 }
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..e528a42a52be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
57MODULE_AUTHOR("Ville Nuorvala"); 57MODULE_AUTHOR("Ville Nuorvala");
58MODULE_DESCRIPTION("IPv6 tunneling device"); 58MODULE_DESCRIPTION("IPv6 tunneling device");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_ALIAS_NETDEV("ip6tnl0");
60 61
61#ifdef IP6_TNL_DEBUG 62#ifdef IP6_TNL_DEBUG
62#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 63#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9fab274019c0..0e1d53bcf1e0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -34,6 +34,7 @@
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/compat.h>
37#include <net/protocol.h> 38#include <net/protocol.h>
38#include <linux/skbuff.h> 39#include <linux/skbuff.h>
39#include <net/sock.h> 40#include <net/sock.h>
@@ -1804,6 +1805,80 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1804 } 1805 }
1805} 1806}
1806 1807
1808#ifdef CONFIG_COMPAT
1809struct compat_sioc_sg_req6 {
1810 struct sockaddr_in6 src;
1811 struct sockaddr_in6 grp;
1812 compat_ulong_t pktcnt;
1813 compat_ulong_t bytecnt;
1814 compat_ulong_t wrong_if;
1815};
1816
1817struct compat_sioc_mif_req6 {
1818 mifi_t mifi;
1819 compat_ulong_t icount;
1820 compat_ulong_t ocount;
1821 compat_ulong_t ibytes;
1822 compat_ulong_t obytes;
1823};
1824
1825int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1826{
1827 struct compat_sioc_sg_req6 sr;
1828 struct compat_sioc_mif_req6 vr;
1829 struct mif_device *vif;
1830 struct mfc6_cache *c;
1831 struct net *net = sock_net(sk);
1832 struct mr6_table *mrt;
1833
1834 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1835 if (mrt == NULL)
1836 return -ENOENT;
1837
1838 switch (cmd) {
1839 case SIOCGETMIFCNT_IN6:
1840 if (copy_from_user(&vr, arg, sizeof(vr)))
1841 return -EFAULT;
1842 if (vr.mifi >= mrt->maxvif)
1843 return -EINVAL;
1844 read_lock(&mrt_lock);
1845 vif = &mrt->vif6_table[vr.mifi];
1846 if (MIF_EXISTS(mrt, vr.mifi)) {
1847 vr.icount = vif->pkt_in;
1848 vr.ocount = vif->pkt_out;
1849 vr.ibytes = vif->bytes_in;
1850 vr.obytes = vif->bytes_out;
1851 read_unlock(&mrt_lock);
1852
1853 if (copy_to_user(arg, &vr, sizeof(vr)))
1854 return -EFAULT;
1855 return 0;
1856 }
1857 read_unlock(&mrt_lock);
1858 return -EADDRNOTAVAIL;
1859 case SIOCGETSGCNT_IN6:
1860 if (copy_from_user(&sr, arg, sizeof(sr)))
1861 return -EFAULT;
1862
1863 read_lock(&mrt_lock);
1864 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1865 if (c) {
1866 sr.pktcnt = c->mfc_un.res.pkt;
1867 sr.bytecnt = c->mfc_un.res.bytes;
1868 sr.wrong_if = c->mfc_un.res.wrong_if;
1869 read_unlock(&mrt_lock);
1870
1871 if (copy_to_user(arg, &sr, sizeof(sr)))
1872 return -EFAULT;
1873 return 0;
1874 }
1875 read_unlock(&mrt_lock);
1876 return -EADDRNOTAVAIL;
1877 default:
1878 return -ENOIOCTLCMD;
1879 }
1880}
1881#endif
1807 1882
1808static inline int ip6mr_forward2_finish(struct sk_buff *skb) 1883static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1809{ 1884{
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..de338037a736 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@ fallback:
410 if (p != NULL) { 410 if (p != NULL) {
411 sb_add(m, "%02x", *p++); 411 sb_add(m, "%02x", *p++);
412 for (i = 1; i < len; i++) 412 for (i = 1; i < len; i++)
413 sb_add(m, ":%02x", p[i]); 413 sb_add(m, ":%02x", *p++);
414 } 414 }
415 sb_add(m, " "); 415 sb_add(m, " ");
416 416
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c39526ba5e..c5b0915d106b 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -31,6 +31,7 @@
31#include <linux/netfilter.h> 31#include <linux/netfilter.h>
32#include <linux/netfilter_ipv6.h> 32#include <linux/netfilter_ipv6.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/compat.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/ioctls.h> 36#include <asm/ioctls.h>
36 37
@@ -1157,6 +1158,23 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1157 } 1158 }
1158} 1159}
1159 1160
1161#ifdef CONFIG_COMPAT
1162static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
1163{
1164 switch (cmd) {
1165 case SIOCOUTQ:
1166 case SIOCINQ:
1167 return -ENOIOCTLCMD;
1168 default:
1169#ifdef CONFIG_IPV6_MROUTE
1170 return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
1171#else
1172 return -ENOIOCTLCMD;
1173#endif
1174 }
1175}
1176#endif
1177
1160static void rawv6_close(struct sock *sk, long timeout) 1178static void rawv6_close(struct sock *sk, long timeout)
1161{ 1179{
1162 if (inet_sk(sk)->inet_num == IPPROTO_RAW) 1180 if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1233,7 @@ struct proto rawv6_prot = {
1215#ifdef CONFIG_COMPAT 1233#ifdef CONFIG_COMPAT
1216 .compat_setsockopt = compat_rawv6_setsockopt, 1234 .compat_setsockopt = compat_rawv6_setsockopt,
1217 .compat_getsockopt = compat_rawv6_getsockopt, 1235 .compat_getsockopt = compat_rawv6_getsockopt,
1236 .compat_ioctl = compat_rawv6_ioctl,
1218#endif 1237#endif
1219}; 1238};
1220 1239
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd0416f69..e7db7014e89f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
72#define RT6_TRACE(x...) do { ; } while (0) 72#define RT6_TRACE(x...) do { ; } while (0)
73#endif 73#endif
74 74
75#define CLONE_OFFLINK_ROUTE 0
76
77static struct rt6_info * ip6_rt_copy(struct rt6_info *ort); 75static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
78static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 76static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79static unsigned int ip6_default_advmss(const struct dst_entry *dst); 77static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -115,6 +113,11 @@ static struct dst_ops ip6_dst_ops_template = {
115 .local_out = __ip6_local_out, 113 .local_out = __ip6_local_out,
116}; 114};
117 115
116static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
117{
118 return 0;
119}
120
118static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 121static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
119{ 122{
120} 123}
@@ -124,6 +127,8 @@ static struct dst_ops ip6_dst_blackhole_ops = {
124 .protocol = cpu_to_be16(ETH_P_IPV6), 127 .protocol = cpu_to_be16(ETH_P_IPV6),
125 .destroy = ip6_dst_destroy, 128 .destroy = ip6_dst_destroy,
126 .check = ip6_dst_check, 129 .check = ip6_dst_check,
130 .default_mtu = ip6_blackhole_default_mtu,
131 .default_advmss = ip6_default_advmss,
127 .update_pmtu = ip6_rt_blackhole_update_pmtu, 132 .update_pmtu = ip6_rt_blackhole_update_pmtu,
128}; 133};
129 134
@@ -196,7 +201,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
196 in6_dev_put(idev); 201 in6_dev_put(idev);
197 } 202 }
198 if (peer) { 203 if (peer) {
199 BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
200 rt->rt6i_peer = NULL; 204 rt->rt6i_peer = NULL;
201 inet_putpeer(peer); 205 inet_putpeer(peer);
202 } 206 }
@@ -206,9 +210,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
206{ 210{
207 struct inet_peer *peer; 211 struct inet_peer *peer;
208 212
209 if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
210 return;
211
212 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create); 213 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
213 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL) 214 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
214 inet_putpeer(peer); 215 inet_putpeer(peer);
@@ -738,13 +739,10 @@ restart:
738 739
739 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 740 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
740 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); 741 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
741 else { 742 else if (!(rt->dst.flags & DST_HOST))
742#if CLONE_OFFLINK_ROUTE
743 nrt = rt6_alloc_clone(rt, &fl->fl6_dst); 743 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
744#else 744 else
745 goto out2; 745 goto out2;
746#endif
747 }
748 746
749 dst_release(&rt->dst); 747 dst_release(&rt->dst);
750 rt = nrt ? : net->ipv6.ip6_null_entry; 748 rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2561,14 +2559,16 @@ static
2561int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2559int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2562 void __user *buffer, size_t *lenp, loff_t *ppos) 2560 void __user *buffer, size_t *lenp, loff_t *ppos)
2563{ 2561{
2564 struct net *net = current->nsproxy->net_ns; 2562 struct net *net;
2565 int delay = net->ipv6.sysctl.flush_delay; 2563 int delay;
2566 if (write) { 2564 if (!write)
2567 proc_dointvec(ctl, write, buffer, lenp, ppos);
2568 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2569 return 0;
2570 } else
2571 return -EINVAL; 2565 return -EINVAL;
2566
2567 net = (struct net *)ctl->extra1;
2568 delay = net->ipv6.sysctl.flush_delay;
2569 proc_dointvec(ctl, write, buffer, lenp, ppos);
2570 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2571 return 0;
2572} 2572}
2573 2573
2574ctl_table ipv6_route_table_template[] = { 2574ctl_table ipv6_route_table_template[] = {
@@ -2655,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2655 2655
2656 if (table) { 2656 if (table) {
2657 table[0].data = &net->ipv6.sysctl.flush_delay; 2657 table[0].data = &net->ipv6.sysctl.flush_delay;
2658 table[0].extra1 = net;
2658 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2659 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2659 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2660 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2660 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2661 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..d2c16e10f650 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
1290module_init(sit_init); 1290module_init(sit_init);
1291module_exit(sit_cleanup); 1291module_exit(sit_cleanup);
1292MODULE_LICENSE("GPL"); 1292MODULE_LICENSE("GPL");
1293MODULE_ALIAS("sit0"); 1293MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fa1d8f4e0051..7cb65ef79f9c 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -15,6 +15,8 @@
15#include <net/addrconf.h> 15#include <net/addrconf.h>
16#include <net/inet_frag.h> 16#include <net/inet_frag.h>
17 17
18static struct ctl_table empty[1];
19
18static ctl_table ipv6_table_template[] = { 20static ctl_table ipv6_table_template[] = {
19 { 21 {
20 .procname = "route", 22 .procname = "route",
@@ -35,6 +37,12 @@ static ctl_table ipv6_table_template[] = {
35 .mode = 0644, 37 .mode = 0644,
36 .proc_handler = proc_dointvec 38 .proc_handler = proc_dointvec
37 }, 39 },
40 {
41 .procname = "neigh",
42 .maxlen = 0,
43 .mode = 0555,
44 .child = empty,
45 },
38 { } 46 { }
39}; 47};
40 48
@@ -152,7 +160,6 @@ static struct ctl_table_header *ip6_base;
152 160
153int ipv6_static_sysctl_register(void) 161int ipv6_static_sysctl_register(void)
154{ 162{
155 static struct ctl_table empty[1];
156 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty); 163 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
157 if (ip6_base == NULL) 164 if (ip6_base == NULL)
158 return -ENOMEM; 165 return -ENOMEM;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023ea6e4..da87428681cc 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
98 if (!xdst->u.rt6.rt6i_idev) 98 if (!xdst->u.rt6.rt6i_idev)
99 return -ENODEV; 99 return -ENODEV;
100 100
101 xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
102 if (rt->rt6i_peer)
103 atomic_inc(&rt->rt6i_peer->refcnt);
104
101 /* Sheit... I remember I did this right. Apparently, 105 /* Sheit... I remember I did this right. Apparently,
102 * it was magically lost, so this code needs audit */ 106 * it was magically lost, so this code needs audit */
103 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | 107 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
216 220
217 if (likely(xdst->u.rt6.rt6i_idev)) 221 if (likely(xdst->u.rt6.rt6i_idev))
218 in6_dev_put(xdst->u.rt6.rt6i_idev); 222 in6_dev_put(xdst->u.rt6.rt6i_idev);
223 if (likely(xdst->u.rt6.rt6i_peer))
224 inet_putpeer(xdst->u.rt6.rt6i_peer);
219 xfrm_dst_destroy(xdst); 225 xfrm_dst_destroy(xdst);
220} 226}
221 227
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4bc8a9250cfd..9cd73b11506e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1822,6 +1822,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1822 *cookie ^= 2; 1822 *cookie ^= 2;
1823 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; 1823 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
1824 local->hw_roc_skb = skb; 1824 local->hw_roc_skb = skb;
1825 local->hw_roc_skb_for_status = skb;
1825 mutex_unlock(&local->mtx); 1826 mutex_unlock(&local->mtx);
1826 1827
1827 return 0; 1828 return 0;
@@ -1875,6 +1876,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
1875 if (ret == 0) { 1876 if (ret == 0) {
1876 kfree_skb(local->hw_roc_skb); 1877 kfree_skb(local->hw_roc_skb);
1877 local->hw_roc_skb = NULL; 1878 local->hw_roc_skb = NULL;
1879 local->hw_roc_skb_for_status = NULL;
1878 } 1880 }
1879 1881
1880 mutex_unlock(&local->mtx); 1882 mutex_unlock(&local->mtx);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c47d7c0e48a4..533fd32f49ff 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -953,7 +953,7 @@ struct ieee80211_local {
953 953
954 struct ieee80211_channel *hw_roc_channel; 954 struct ieee80211_channel *hw_roc_channel;
955 struct net_device *hw_roc_dev; 955 struct net_device *hw_roc_dev;
956 struct sk_buff *hw_roc_skb; 956 struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
957 struct work_struct hw_roc_start, hw_roc_done; 957 struct work_struct hw_roc_start, hw_roc_done;
958 enum nl80211_channel_type hw_roc_channel_type; 958 enum nl80211_channel_type hw_roc_channel_type;
959 unsigned int hw_roc_duration; 959 unsigned int hw_roc_duration;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba456744e..7a10a8d1b2d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1229 } 1229 }
1230 mutex_unlock(&local->iflist_mtx); 1230 mutex_unlock(&local->iflist_mtx);
1231 unregister_netdevice_many(&unreg_list); 1231 unregister_netdevice_many(&unreg_list);
1232 list_del(&unreg_list);
1232} 1233}
1233 1234
1234static u32 ieee80211_idle_off(struct ieee80211_local *local, 1235static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e33746..c9ceb4d57ab0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1033 if (is_multicast_ether_addr(hdr->addr1)) 1033 if (is_multicast_ether_addr(hdr->addr1))
1034 return; 1034 return;
1035 1035
1036 /*
1037 * In case we receive frames after disassociation.
1038 */
1039 if (!sdata->u.mgd.associated)
1040 return;
1041
1036 ieee80211_sta_reset_conn_monitor(sdata); 1042 ieee80211_sta_reset_conn_monitor(sdata);
1037} 1043}
1038 1044
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 38a797217a91..071ac95c4aa0 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -323,6 +323,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
323 323
324 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 324 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
325 struct ieee80211_work *wk; 325 struct ieee80211_work *wk;
326 u64 cookie = (unsigned long)skb;
326 327
327 rcu_read_lock(); 328 rcu_read_lock();
328 list_for_each_entry_rcu(wk, &local->work_list, list) { 329 list_for_each_entry_rcu(wk, &local->work_list, list) {
@@ -334,8 +335,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
334 break; 335 break;
335 } 336 }
336 rcu_read_unlock(); 337 rcu_read_unlock();
338 if (local->hw_roc_skb_for_status == skb) {
339 cookie = local->hw_roc_cookie ^ 2;
340 local->hw_roc_skb_for_status = NULL;
341 }
337 cfg80211_mgmt_tx_status( 342 cfg80211_mgmt_tx_status(
338 skb->dev, (unsigned long) skb, skb->data, skb->len, 343 skb->dev, cookie, skb->data, skb->len,
339 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); 344 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
340 } 345 }
341 346
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5950e3abead9..b0beaa58246b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1547,7 +1547,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1547 skb_orphan(skb); 1547 skb_orphan(skb);
1548 } 1548 }
1549 1549
1550 if (skb_header_cloned(skb)) 1550 if (skb_cloned(skb))
1551 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1551 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1552 else if (head_need || tail_need) 1552 else if (head_need || tail_need)
1553 I802_DEBUG_INC(local->tx_expand_skb_head); 1553 I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2230,6 +2230,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2230 2230
2231 sdata = vif_to_sdata(vif); 2231 sdata = vif_to_sdata(vif);
2232 2232
2233 if (!ieee80211_sdata_running(sdata))
2234 goto out;
2235
2233 if (tim_offset) 2236 if (tim_offset)
2234 *tim_offset = 0; 2237 *tim_offset = 0;
2235 if (tim_length) 2238 if (tim_length)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700abffa..d036597aabbe 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1210 switch (sdata->vif.type) { 1210 switch (sdata->vif.type) {
1211 case NL80211_IFTYPE_STATION: 1211 case NL80211_IFTYPE_STATION:
1212 changed |= BSS_CHANGED_ASSOC; 1212 changed |= BSS_CHANGED_ASSOC;
1213 mutex_lock(&sdata->u.mgd.mtx);
1213 ieee80211_bss_info_change_notify(sdata, changed); 1214 ieee80211_bss_info_change_notify(sdata, changed);
1215 mutex_unlock(&sdata->u.mgd.mtx);
1214 break; 1216 break;
1215 case NL80211_IFTYPE_ADHOC: 1217 case NL80211_IFTYPE_ADHOC:
1216 changed |= BSS_CHANGED_IBSS; 1218 changed |= BSS_CHANGED_IBSS;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe290c04..4aa614b8a96a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head,
133 133
134 /* Optimization: we don't need to hold module 134 /* Optimization: we don't need to hold module
135 reference here, since function can't sleep. --RR */ 135 reference here, since function can't sleep. --RR */
136repeat:
136 verdict = elem->hook(hook, skb, indev, outdev, okfn); 137 verdict = elem->hook(hook, skb, indev, outdev, okfn);
137 if (verdict != NF_ACCEPT) { 138 if (verdict != NF_ACCEPT) {
138#ifdef CONFIG_NETFILTER_DEBUG 139#ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
145#endif 146#endif
146 if (verdict != NF_REPEAT) 147 if (verdict != NF_REPEAT)
147 return verdict; 148 return verdict;
148 *i = (*i)->prev; 149 goto repeat;
149 } 150 }
150 } 151 }
151 return NF_ACCEPT; 152 return NF_ACCEPT;
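Note on the nf_iterate() hunk above: instead of rewinding the hook list iterator, an NF_REPEAT verdict now jumps back to a local "repeat" label and re-runs the same hook. A tiny userspace sketch of that control flow, with a hypothetical hook that asks to be repeated exactly once:

#include <stdio.h>

enum verdict { DEMO_ACCEPT, DEMO_DROP, DEMO_REPEAT };

static int calls;

/* Hypothetical hook that asks to be re-run exactly once. */
static enum verdict demo_hook(void)
{
        return (calls++ == 0) ? DEMO_REPEAT : DEMO_ACCEPT;
}

int main(void)
{
        enum verdict v;

repeat:
        v = demo_hook();
        if (v == DEMO_REPEAT)           /* re-invoke the same hook instead of
                                         * stepping the list iterator back */
                goto repeat;

        printf("final verdict %s after %d call(s)\n",
               v == DEMO_ACCEPT ? "ACCEPT" : "DROP", calls);
        return 0;
}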
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..ba98e1308f3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
808 dest->u_threshold = udest->u_threshold; 808 dest->u_threshold = udest->u_threshold;
809 dest->l_threshold = udest->l_threshold; 809 dest->l_threshold = udest->l_threshold;
810 810
811 spin_lock(&dest->dst_lock); 811 spin_lock_bh(&dest->dst_lock);
812 ip_vs_dst_reset(dest); 812 ip_vs_dst_reset(dest);
813 spin_unlock(&dest->dst_lock); 813 spin_unlock_bh(&dest->dst_lock);
814 814
815 if (add) 815 if (add)
816 ip_vs_new_estimator(&dest->stats); 816 ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e61511929c66..84f4fcc5884b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -942,8 +942,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
942 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 942 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
943 nf_conntrack_event_cache(IPCT_REPLY, ct); 943 nf_conntrack_event_cache(IPCT_REPLY, ct);
944out: 944out:
945 if (tmpl) 945 if (tmpl) {
946 nf_ct_put(tmpl); 946 /* Special case: we have to repeat this hook, assign the
947 * template again to this packet. We assume that this packet
948 * has no conntrack assigned. This is used by nf_ct_tcp. */
949 if (ret == NF_REPEAT)
950 skb->nfct = (struct nf_conntrack *)tmpl;
951 else
952 nf_ct_put(tmpl);
953 }
947 954
948 return ret; 955 return ret;
949} 956}
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5702de35e2bb..63a1b915a7e4 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -63,6 +63,9 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
63 * this does not harm and it happens very rarely. */ 63 * this does not harm and it happens very rarely. */
64 unsigned long missed = e->missed; 64 unsigned long missed = e->missed;
65 65
66 if (!((events | missed) & e->ctmask))
67 goto out_unlock;
68
66 ret = notify->fcn(events | missed, &item); 69 ret = notify->fcn(events | missed, &item);
67 if (unlikely(ret < 0 || missed)) { 70 if (unlikely(ret < 0 || missed)) {
68 spin_lock_bh(&ct->lock); 71 spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 93297aaceb2b..eead9db6f899 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -667,6 +667,7 @@ restart:
667 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 667 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
668 cb->nlh->nlmsg_seq, 668 cb->nlh->nlmsg_seq,
669 IPCTNL_MSG_CT_NEW, ct) < 0) { 669 IPCTNL_MSG_CT_NEW, ct) < 0) {
670 nf_conntrack_get(&ct->ct_general);
670 cb->args[1] = (unsigned long)ct; 671 cb->args[1] = (unsigned long)ct;
671 goto out; 672 goto out;
672 } 673 }
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..91816998ed86 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
85 85
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
87{ 87{
88 if (pf >= ARRAY_SIZE(nf_loggers))
89 return -EINVAL;
88 mutex_lock(&nf_log_mutex); 90 mutex_lock(&nf_log_mutex);
89 if (__find_logger(pf, logger->name) == NULL) { 91 if (__find_logger(pf, logger->name) == NULL) {
90 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
98 100
99void nf_log_unbind_pf(u_int8_t pf) 101void nf_log_unbind_pf(u_int8_t pf)
100{ 102{
103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return;
101 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
102 rcu_assign_pointer(nf_loggers[pf], NULL); 106 rcu_assign_pointer(nf_loggers[pf], NULL);
103 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
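Note on the nf_log hunks above: both entry points now range-check the protocol-family index before it is used to address the per-family logger array. A trivial standalone illustration of the same guard; the table size and return value here are placeholders:

#include <stdio.h>

#define NPROTO_DEMO 13                          /* placeholder table size */

static const char *loggers[NPROTO_DEMO];        /* stand-in for the per-family table */

/* Reject out-of-range family numbers before they index the array,
 * mirroring the new pf >= ARRAY_SIZE(nf_loggers) check. */
static int bind_logger(unsigned int pf, const char *name)
{
        if (pf >= sizeof(loggers) / sizeof(loggers[0]))
                return -1;                      /* the patch returns -EINVAL */
        loggers[pf] = name;
        return 0;
}

int main(void)
{
        printf("pf=2:  %d\n", bind_logger(2, "demo"));
        printf("pf=99: %d\n", bind_logger(99, "demo"));
        return 0;
}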
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87befb04c0..474d621cbc2e 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
28 skb->destructor = NULL; 28 skb->destructor = NULL;
29 29
30 if (sk) 30 if (sk)
31 nf_tproxy_put_sock(sk); 31 sock_put(sk);
32} 32}
33 33
34/* consumes sk */ 34/* consumes sk */
35int 35void
36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
37{ 37{
38 bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 38 /* assigning tw sockets complicates things; most
39 inet_twsk(sk)->tw_transparent : 39 * skb->sk->X checks would have to test sk->sk_state first */
40 inet_sk(sk)->transparent; 40 if (sk->sk_state == TCP_TIME_WAIT) {
41 41 inet_twsk_put(inet_twsk(sk));
42 if (transparent) { 42 return;
43 skb_orphan(skb); 43 }
44 skb->sk = sk; 44
45 skb->destructor = nf_tproxy_destructor; 45 skb_orphan(skb);
46 return 1; 46 skb->sk = sk;
47 } else 47 skb->destructor = nf_tproxy_destructor;
48 nf_tproxy_put_sock(sk);
49
50 return 0;
51} 48}
52EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); 49EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
53 50
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f47a2a..dcfd57eb9d02 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+    if (sk->sk_state != TCP_TIME_WAIT) {
+        if (inet_sk(sk)->transparent)
+            return true;
+        sock_put(sk);
+    } else {
+        if (inet_twsk(sk)->tw_transparent)
+            return true;
+        inet_twsk_put(inet_twsk(sk));
+    }
+    return false;
+}
+
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                    skb->dev, NFT_LOOKUP_LISTENER);
 
     /* NOTE: assign_sock consumes our sk reference */
-    if (sk && nf_tproxy_assign_sock(skb, sk)) {
+    if (sk && tproxy_sk_is_transparent(sk)) {
         /* This should be in a separate target, but we don't do multiple
            targets on the same rule yet */
         skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
         pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
                  iph->protocol, &iph->daddr, ntohs(hp->dest),
                  &laddr, ntohs(lport), skb->mark);
+
+        nf_tproxy_assign_sock(skb, sk);
         return NF_ACCEPT;
     }
 
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
                    par->in, NFT_LOOKUP_LISTENER);
 
     /* NOTE: assign_sock consumes our sk reference */
-    if (sk && nf_tproxy_assign_sock(skb, sk)) {
+    if (sk && tproxy_sk_is_transparent(sk)) {
         /* This should be in a separate target, but we don't do multiple
            targets on the same rule yet */
         skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
         pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
                  tproto, &iph->saddr, ntohs(hp->source),
                  laddr, ntohs(lport), skb->mark);
+
+        nf_tproxy_assign_sock(skb, sk);
         return NF_ACCEPT;
     }
 
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 88f7c3511c72..73c33a42f87f 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -53,15 +53,13 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
 }
 
 static inline int
-iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
+iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
 {
     unsigned int i;
-    int r;
 
     for (i = 0; i < 4; ++i) {
-        r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
-        if (r != 0)
-            return r;
+        if (a->s6_addr32[i] != b->s6_addr32[i])
+            return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
     }
 
     return 0;
@@ -75,15 +73,15 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
     bool m;
 
     if (info->flags & IPRANGE_SRC) {
-        m  = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
-        m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
+        m  = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
+        m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
         m ^= !!(info->flags & IPRANGE_SRC_INV);
         if (m)
             return false;
     }
     if (info->flags & IPRANGE_DST) {
-        m  = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
-        m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
+        m  = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
+        m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
         m ^= !!(info->flags & IPRANGE_DST_INV);
         if (m)
             return false;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae838303..9cc46356b577 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+    if (sk->sk_state == TCP_TIME_WAIT)
+        inet_twsk_put(inet_twsk(sk));
+    else
+        sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
                      u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                        (sk->sk_state == TCP_TIME_WAIT &&
                         inet_twsk(sk)->tw_transparent));
 
-        nf_tproxy_put_sock(sk);
+        xt_socket_put_sk(sk);
 
         if (wildcard || !transparent)
             sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                        (sk->sk_state == TCP_TIME_WAIT &&
                         inet_twsk(sk)->tw_transparent));
 
-        nf_tproxy_put_sock(sk);
+        xt_socket_put_sk(sk);
 
         if (wildcard || !transparent)
             sk = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
     int noblock = flags&MSG_DONTWAIT;
     size_t copied;
     struct sk_buff *skb, *data_skb;
-    int err;
+    int err, ret;
 
     if (flags&MSG_OOB)
         return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
     skb_free_datagram(sk, skb);
 
-    if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-        netlink_dump(sk);
+    if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+        ret = netlink_dump(sk);
+        if (ret) {
+            sk->sk_err = ret;
+            sk->sk_error_report(sk);
+        }
+    }
 
     scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
     struct netlink_callback *cb;
     struct sock *sk;
     struct netlink_sock *nlk;
+    int ret;
 
     cb = kzalloc(sizeof(*cb), GFP_KERNEL);
     if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
     nlk->cb = cb;
     mutex_unlock(nlk->cb_mutex);
 
-    netlink_dump(sk);
+    ret = netlink_dump(sk);
+
     sock_put(sk);
 
+    if (ret)
+        return ret;
+
     /* We successfully started a dump, by returning -EINTR we
      * signal not to send ACK even if it was requested.
      */
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c421bc..c47a511f203d 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
     if (conn->c_loopback
         && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
         rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-        return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+        scat = &rm->data.op_sg[sg];
+        ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+        ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+        return ret;
     }
 
     /* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d483b17..bca6761a3ca2 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
               unsigned int hdr_off, unsigned int sg,
               unsigned int off)
 {
+    struct scatterlist *sgp = &rm->data.op_sg[sg];
+    int ret = sizeof(struct rds_header) +
+              be32_to_cpu(rm->m_inc.i_hdr.h_len);
+
     /* Do not send cong updates to loopback */
     if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
         rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-        return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+        ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
+        goto out;
     }
 
     BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
               NULL);
 
     rds_inc_put(&rm->m_inc);
-
-    return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
+out:
+    return ret;
 }
 
 /*
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
             goto protocol_error;
         }
 
+    case RXRPC_PACKET_TYPE_ACKALL:
     case RXRPC_PACKET_TYPE_ACK:
         /* ACK processing is done in process context */
         read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
         return ret;
 
     plen -= sizeof(*token);
-    token = kmalloc(sizeof(*token), GFP_KERNEL);
+    token = kzalloc(sizeof(*token), GFP_KERNEL);
     if (!token)
         return -ENOMEM;
 
-    token->kad = kmalloc(plen, GFP_KERNEL);
+    token->kad = kzalloc(plen, GFP_KERNEL);
     if (!token->kad) {
         kfree(token);
         return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
         goto error;
 
     ret = -ENOMEM;
-    token = kmalloc(sizeof(*token), GFP_KERNEL);
+    token = kzalloc(sizeof(*token), GFP_KERNEL);
     if (!token)
         goto error;
-    token->kad = kmalloc(plen, GFP_KERNEL);
+    token->kad = kzalloc(plen, GFP_KERNEL);
     if (!token->kad)
         goto error_free;
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c210c5d..5f63ec58942c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        qdisc_bstats_update(sch, skb);
         cbq_mark_toplevel(q, cl);
         if (!cl->next_alive)
             cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        qdisc_bstats_update(sch, skb);
         if (!cl->next_alive)
             cbq_activate_class(cl);
         return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
 
     skb = cbq_dequeue_1(sch);
     if (skb) {
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
         sch->flags &= ~TCQ_F_THROTTLED;
         return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e642eafc..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     }
 
     bstats_update(&cl->bstats, skb);
-    qdisc_bstats_update(sch, skb);
 
     sch->q.qlen++;
     return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
         skb = qdisc_dequeue_peeked(cl->qdisc);
         if (cl->qdisc->q.qlen == 0)
             list_del(&cl->alist);
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
         return skb;
     }
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd4408e..0f7bf3fdfea5 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
     if (skb == NULL)
         return NULL;
 
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen--;
 
     index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d6337e43c..d468b479aa93 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-    struct sk_buff *skb_head;
     struct fifo_sched_data *q = qdisc_priv(sch);
 
     if (likely(skb_queue_len(&sch->q) < q->limit))
         return qdisc_enqueue_tail(skb, sch);
 
     /* queue full, remove one skb to fulfill the limit */
-    skb_head = qdisc_dequeue_head(sch);
+    __qdisc_queue_drop_head(sch, &sch->q);
     sch->qstats.drops++;
-    kfree_skb(skb_head);
-
     qdisc_enqueue_tail(skb, sch);
 
     return NET_XMIT_CN;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..1bc698039ae2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev)
 
     list_add(&dev->unreg_list, &single);
     dev_deactivate_many(&single);
+    list_del(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791d4f6c..14a799de1c35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         set_active(cl, qdisc_pkt_len(skb));
 
     bstats_update(&cl->bstats, skb);
-    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
     }
 
     sch->flags &= ~TCQ_F_THROTTLED;
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen--;
 
     return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0c6836..fc12fe6f5597 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     }
 
     sch->q.qlen++;
-    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -842,7 +841,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-    struct sk_buff *skb = NULL;
+    struct sk_buff *skb;
     struct htb_sched *q = qdisc_priv(sch);
     int level;
     psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
     /* try to dequeue direct packets as high prio (!) to minimize cpu work */
     skb = __skb_dequeue(&q->direct_queue);
     if (skb != NULL) {
+ok:
+        qdisc_bstats_update(sch, skb);
         sch->flags &= ~TCQ_F_THROTTLED;
         sch->q.qlen--;
         return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
             int prio = ffz(m);
             m |= 1 << prio;
             skb = htb_dequeue_tree(q, prio, level);
-            if (likely(skb != NULL)) {
-                sch->q.qlen--;
-                sch->flags &= ~TCQ_F_THROTTLED;
-                goto fin;
-            }
+            if (likely(skb != NULL))
+                goto ok;
         }
     }
     sch->qstats.overlimits++;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da24763..436a2e75b322 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
         qdisc = q->queues[q->curband];
         skb = qdisc->dequeue(qdisc);
         if (skb) {
+            qdisc_bstats_update(sch, skb);
             sch->q.qlen--;
             return skb;
         }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce863479..6a3006b38dc5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     if (likely(ret == NET_XMIT_SUCCESS)) {
         sch->q.qlen++;
-        qdisc_bstats_update(sch, skb);
     } else if (net_xmit_drop_count(ret)) {
         sch->qstats.drops++;
     }
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
         skb->tstamp.tv64 = 0;
 #endif
         pr_debug("netem_dequeue: return skb=%p\n", skb);
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
         return skb;
     }
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
         __skb_queue_after(list, skb, nskb);
 
         sch->qstats.backlog += qdisc_pkt_len(nskb);
-        qdisc_bstats_update(sch, nskb);
 
         return NET_XMIT_SUCCESS;
     }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d49dd1..fbd710d619bf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
         struct Qdisc *qdisc = q->queues[prio];
         struct sk_buff *skb = qdisc->dequeue(qdisc);
         if (skb) {
+            qdisc_bstats_update(sch, skb);
             sch->q.qlen--;
             return skb;
         }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5a2c97..9f98dbd32d4c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
     ret = qdisc_enqueue(skb, child);
     if (likely(ret == NET_XMIT_SUCCESS)) {
-        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
     } else if (net_xmit_drop_count(ret)) {
         q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
     struct Qdisc *child = q->qdisc;
 
     skb = child->dequeue(child);
-    if (skb)
+    if (skb) {
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen--;
-    else if (!red_is_idling(&q->parms))
-        red_start_of_idle_period(&q->parms);
-
+    } else {
+        if (!red_is_idling(&q->parms))
+            red_start_of_idle_period(&q->parms);
+    }
     return skb;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53a634d..edea8cefec6c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         q->tail = slot;
         slot->allot = q->scaled_quantum;
     }
-    if (++sch->q.qlen <= q->limit) {
-        qdisc_bstats_update(sch, skb);
+    if (++sch->q.qlen <= q->limit)
         return NET_XMIT_SUCCESS;
-    }
 
     sfq_drop(sch);
     return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
     }
     skb = slot_dequeue_head(slot);
     sfq_dec(q, a);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen--;
     sch->qstats.backlog -= qdisc_pkt_len(skb);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e721811..e93165820c3f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
     }
 
     sch->q.qlen++;
-    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
             q->ptokens = ptoks;
             sch->q.qlen--;
             sch->flags &= ~TCQ_F_THROTTLED;
+            qdisc_bstats_update(sch, skb);
             return skb;
         }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 84ce48eadff4..d84e7329660f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -87,7 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
     if (q->q.qlen < dev->tx_queue_len) {
         __skb_queue_tail(&q->q, skb);
-        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
     }
 
@@ -111,6 +110,8 @@ teql_dequeue(struct Qdisc* sch)
             dat->m->slaves = sch;
             netif_wake_queue(m);
         }
+    } else {
+        qdisc_bstats_update(sch, skb);
     }
     sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
     return skb;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0962ca..b23428f3c0dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
         *errp = sctp_make_op_error_fixed(asoc, chunk);
 
     if (*errp) {
-        sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                    WORD_ROUND(ntohs(param.p->length)));
+        if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+                    WORD_ROUND(ntohs(param.p->length))))
             sctp_addto_chunk_fixed(*errp,
                     WORD_ROUND(ntohs(param.p->length)),
                     param.v);
     } else {
         /* If there is no memory for generating the ERROR
          * report as specified, an ABORT will be triggered
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..59e599498e37 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-    smp_mb__before_clear_bit();
+    void *m = &task->tk_runstate;
+    wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+    struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+    unsigned long flags;
+    int ret;
+
+    spin_lock_irqsave(&wq->lock, flags);
     clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-    smp_mb__after_clear_bit();
-    wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+    ret = atomic_dec_and_test(&task->tk_count);
+    if (waitqueue_active(wq))
+        __wake_up_locked_key(wq, TASK_NORMAL, &k);
+    spin_unlock_irqrestore(&wq->lock, flags);
+    return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
     if (action == NULL)
         action = rpc_wait_bit_killable;
-    return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+    return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
             action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
     rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-    if (!atomic_dec_and_test(&task->tk_count))
-        return;
-    /* Release resources */
     if (task->tk_rqstp)
         xprt_release(task);
     if (task->tk_msg.rpc_cred)
         put_rpccred(task->tk_msg.rpc_cred);
     rpc_task_release_client(task);
-    if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+        struct workqueue_struct *q)
+{
+    if (q != NULL) {
         INIT_WORK(&task->u.tk_work, rpc_async_release);
-        queue_work(task->tk_workqueue, &task->u.tk_work);
+        queue_work(q, &task->u.tk_work);
     } else
         rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+    if (atomic_dec_and_test(&task->tk_count)) {
+        rpc_release_resources_task(task);
+        rpc_final_put_task(task, q);
+    }
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+    rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+    rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
     dprintk("RPC: %5u release task\n", task->tk_pid);
 
     BUG_ON (RPC_IS_QUEUED(task));
 
-    /* Wake up anyone who is waiting for task completion */
-    rpc_mark_complete_task(task);
+    rpc_release_resources_task(task);
 
-    rpc_put_task(task);
+    /*
+     * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+     * so it should be safe to use task->tk_count as a test for whether
+     * or not any other processes still hold references to our rpc_task.
+     */
+    if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+        /* Wake up anyone who may be waiting for task completion */
+        if (!rpc_complete_task(task))
+            return;
+    } else {
+        if (!atomic_dec_and_test(&task->tk_count))
+            return;
+    }
+    rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7bd3bbba4710..d802e941d365 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1609,9 +1609,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
  */
 static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-    if (xprt) {
-        kfree(xprt->xpt_bc_sid);
+    if (xprt)
         kfree(container_of(xprt, struct svc_sock, sk_xprt));
-    }
 }
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9df1eadc912a..1a10dcd999ea 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                 p, 0, length, DMA_FROM_DEVICE);
     if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
         put_page(p);
+        svc_rdma_put_context(ctxt, 1);
         return;
     }
     atomic_inc(&xprt->sc_dma_used);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c431f5a57960..be96d429b475 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
     }
     xs_reclassify_socket(family, sock);
 
-    if (xs_bind(transport, sock)) {
+    err = xs_bind(transport, sock);
+    if (err) {
         sock_release(sock);
         goto out;
     }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..ba5b8c208498 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -850,7 +850,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
          * Get the parent directory, calculate the hash for last
          * component.
          */
-        err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
+        err = kern_path_parent(sunaddr->sun_path, &nd);
         if (err)
             goto out_mknod_parent;
 
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 
     msg->msg_namelen = 0;
 
-    mutex_lock(&u->readlock);
+    err = mutex_lock_interruptible(&u->readlock);
+    if (err) {
+        err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+        goto out;
+    }
 
     skb = skb_recv_datagram(sk, flags, noblock, &err);
     if (!skb) {
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
         memset(&tmp_scm, 0, sizeof(tmp_scm));
     }
 
-    mutex_lock(&u->readlock);
+    err = mutex_lock_interruptible(&u->readlock);
+    if (err) {
+        err = sock_intr_errno(timeo);
+        goto out;
+    }
 
     do {
         int chunk;
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
             timeo = unix_stream_data_wait(sk, timeo);
 
-            if (signal_pending(current)) {
+            if (signal_pending(current)
+                || mutex_lock_interruptible(&u->readlock)) {
                 err = sock_intr_errno(timeo);
                 goto out;
             }
-            mutex_lock(&u->readlock);
+
             continue;
 unlock:
             unix_state_unlock(sk);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index f89f83bf828e..b6f4b994eb35 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -104,7 +104,7 @@ struct sock *unix_get_socket(struct file *filp)
         /*
          * Socket ?
          */
-        if (S_ISSOCK(inode->i_mode)) {
+        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
             struct socket *sock = SOCKET_I(inode);
             struct sock *s = sock->sk;
 
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4e4cd5..d112f038edf0 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
             return freq;
         if (freq == 0)
             return -EINVAL;
-        wdev_lock(wdev);
         mutex_lock(&rdev->devlist_mtx);
+        wdev_lock(wdev);
         err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
-        mutex_unlock(&rdev->devlist_mtx);
         wdev_unlock(wdev);
+        mutex_unlock(&rdev->devlist_mtx);
         return err;
     default:
         return -EOPNOTSUPP;
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8f6420..406207515b5e 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
 #include <net/sock.h>
 #include <net/x25.h>
 
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- * facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilites, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ *  -1 - Parsing error, caller should drop call and clean up
+ *   0 - Parse OK, this skb has no facilities
+ *  >0 - Parse OK, returns the length of the facilities header
+ *
  */
 int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
         struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
         switch (*p & X25_FAC_CLASS_MASK) {
         case X25_FAC_CLASS_A:
             if (len < 2)
-                return 0;
+                return -1;
             switch (*p) {
             case X25_FAC_REVERSE:
                 if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
             break;
         case X25_FAC_CLASS_B:
             if (len < 3)
-                return 0;
+                return -1;
             switch (*p) {
             case X25_FAC_PACKET_SIZE:
                 facilities->pacsize_in = p[1];
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
             break;
         case X25_FAC_CLASS_C:
             if (len < 4)
-                return 0;
+                return -1;
             printk(KERN_DEBUG "X.25: unknown facility %02X, "
                    "values %02X, %02X, %02X\n",
                    p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
             break;
         case X25_FAC_CLASS_D:
             if (len < p[1] + 2)
-                return 0;
+                return -1;
             switch (*p) {
             case X25_FAC_CALLING_AE:
                 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                    return 0;
+                    return -1;
                 dte_facs->calling_len = p[2];
                 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
                 *vc_fac_mask |= X25_MASK_CALLING_AE;
                 break;
             case X25_FAC_CALLED_AE:
                 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                    return 0;
+                    return -1;
                 dte_facs->called_len = p[2];
                 memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
                 *vc_fac_mask |= X25_MASK_CALLED_AE;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f022be69..15de65f04719 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 {
     struct x25_address source_addr, dest_addr;
     int len;
+    struct x25_sock *x25 = x25_sk(sk);
 
     switch (frametype) {
         case X25_CALL_ACCEPTED: {
-            struct x25_sock *x25 = x25_sk(sk);
 
             x25_stop_timer(sk);
             x25->condition = 0x00;
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                        &dest_addr);
             if (len > 0)
                 skb_pull(skb, len);
+            else if (len < 0)
+                goto out_clear;
 
             len = x25_parse_facilities(skb, &x25->facilities,
                         &x25->dte_facilities,
                         &x25->vc_facil_mask);
             if (len > 0)
                 skb_pull(skb, len);
-            else
-                return -1;
+            else if (len < 0)
+                goto out_clear;
             /*
              * Copy any Call User Data.
              */
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
     }
 
     return 0;
+
+out_clear:
+    x25_write_internal(sk, X25_CLEAR_REQUEST);
+    x25->state = X25_STATE_2;
+    x25_start_t23timer(sk);
+    return 0;
 }
 
 /*
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4cbc942f762a..21306928d47f 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -396,9 +396,12 @@ void __exit x25_link_free(void)
     write_lock_bh(&x25_neigh_list_lock);
 
     list_for_each_safe(entry, tmp, &x25_neigh_list) {
+        struct net_device *dev;
+
         nb = list_entry(entry, struct x25_neigh, node);
+        dev = nb->dev;
         __x25_remove_neigh(nb);
-        dev_put(nb->dev);
+        dev_put(dev);
     }
     write_unlock_bh(&x25_neigh_list_lock);
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef404c794..6459588befc3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
     default:
         BUG();
     }
-    xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+    xdst = dst_alloc(dst_ops);
     xfrm_policy_put_afinfo(afinfo);
 
-    xdst->flo.ops = &xfrm_bundle_fc_ops;
+    if (likely(xdst))
+        xdst->flo.ops = &xfrm_bundle_fc_ops;
+    else
+        xdst = ERR_PTR(-ENOBUFS);
 
     return xdst;
 }