author	Stefan Richter <stefanr@s5r6.in-berlin.de>	2012-05-21 15:39:42 -0400
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2012-05-21 15:39:42 -0400
commit	0ad8c6a22d03a1598f7cc6585c65354dadca62ad (patch)
tree	1507deef3d55d5f3c71b2f76924fe1f6c6211905 /net
parent	8527f8e2934683e53405fbe876a4e6f4a0c46eb8 (diff)
parent	76e10d158efb6d4516018846f60c2ab5501900bc (diff)
Merge tag 'v3.4' with SCSI updates, needed for subsequent firewire-sbp2 changes
Linux 3.4
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_dev.c	2
-rw-r--r--	net/ax25/af_ax25.c	9
-rw-r--r--	net/bluetooth/af_bluetooth.c	2
-rw-r--r--	net/bluetooth/hci_core.c	42
-rw-r--r--	net/bluetooth/hci_event.c	14
-rw-r--r--	net/bluetooth/l2cap_core.c	8
-rw-r--r--	net/bluetooth/l2cap_sock.c	17
-rw-r--r--	net/bluetooth/mgmt.c	15
-rw-r--r--	net/bridge/br_forward.c	1
-rw-r--r--	net/bridge/br_multicast.c	81
-rw-r--r--	net/bridge/br_netfilter.c	8
-rw-r--r--	net/bridge/br_private.h	4
-rw-r--r--	net/caif/chnl_net.c	9
-rw-r--r--	net/core/dev.c	56
-rw-r--r--	net/core/drop_monitor.c	89
-rw-r--r--	net/core/net_namespace.c	33
-rw-r--r--	net/core/pktgen.c	10
-rw-r--r--	net/core/skbuff.c	4
-rw-r--r--	net/ieee802154/6lowpan.c	40
-rw-r--r--	net/ipv4/fib_trie.c	2
-rw-r--r--	net/ipv4/inet_diag.c	2
-rw-r--r--	net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c	12
-rw-r--r--	net/ipv4/tcp.c	27
-rw-r--r--	net/ipv4/tcp_input.c	23
-rw-r--r--	net/ipv4/tcp_ipv4.c	2
-rw-r--r--	net/ipv4/tcp_output.c	3
-rw-r--r--	net/ipv4/udp_diag.c	9
-rw-r--r--	net/ipv6/addrconf.c	9
-rw-r--r--	net/ipv6/ip6_fib.c	9
-rw-r--r--	net/ipv6/ndisc.c	3
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c	14
-rw-r--r--	net/ipv6/route.c	71
-rw-r--r--	net/ipv6/tcp_ipv6.c	6
-rw-r--r--	net/key/af_key.c	2
-rw-r--r--	net/l2tp/l2tp_ip.c	8
-rw-r--r--	net/mac80211/ibss.c	4
-rw-r--r--	net/mac80211/ieee80211_i.h	2
-rw-r--r--	net/mac80211/iface.c	4
-rw-r--r--	net/mac80211/mlme.c	5
-rw-r--r--	net/mac80211/rx.c	10
-rw-r--r--	net/mac80211/tx.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ip.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipport.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportip.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportnet.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_net.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netiface.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netport.c	10
-rw-r--r--	net/netfilter/ipvs/ip_vs_core.c	11
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	56
-rw-r--r--	net/netfilter/ipvs/ip_vs_ftp.c	2
-rw-r--r--	net/netfilter/ipvs/ip_vs_lblc.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_lblcr.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_proto.c	38
-rw-r--r--	net/netfilter/ipvs/ip_vs_proto_sctp.c	5
-rw-r--r--	net/netfilter/ipvs/ip_vs_proto_tcp.c	5
-rw-r--r--	net/netfilter/ipvs/ip_vs_proto_udp.c	5
-rw-r--r--	net/netfilter/nf_conntrack_core.c	2
-rw-r--r--	net/netfilter/nf_conntrack_proto_tcp.c	4
-rw-r--r--	net/netfilter/xt_CT.c	2
-rw-r--r--	net/nfc/llcp/commands.c	4
-rw-r--r--	net/openvswitch/datapath.c	29
-rw-r--r--	net/openvswitch/flow.c	3
-rw-r--r--	net/phonet/pn_dev.c	21
-rw-r--r--	net/sched/sch_gred.c	7
-rw-r--r--	net/sched/sch_netem.c	6
-rw-r--r--	net/sctp/output.c	4
-rw-r--r--	net/sctp/transport.c	17
-rw-r--r--	net/sunrpc/auth_gss/gss_mech_switch.c	7
-rw-r--r--	net/sunrpc/clnt.c	50
-rw-r--r--	net/sunrpc/rpc_pipe.c	3
-rw-r--r--	net/sunrpc/sunrpc_syms.c	17
-rw-r--r--	net/wireless/nl80211.c	31
-rw-r--r--	net/wireless/util.c	2
-rw-r--r--	net/wireless/wext-core.c	6
75 files changed, 622 insertions, 465 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb372..9757c193c86b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
158 } 158 }
159 159
160 skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); 160 skb->dev = vlan_dev_priv(dev)->real_dev;
161 len = skb->len; 161 len = skb->len;
162 if (netpoll_tx_running(dev)) 162 if (netpoll_tx_running(dev))
163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); 163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 0906c194a413..9d9a6a3edbd5 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -2011,16 +2011,17 @@ static void __exit ax25_exit(void)
2011 proc_net_remove(&init_net, "ax25_route"); 2011 proc_net_remove(&init_net, "ax25_route");
2012 proc_net_remove(&init_net, "ax25"); 2012 proc_net_remove(&init_net, "ax25");
2013 proc_net_remove(&init_net, "ax25_calls"); 2013 proc_net_remove(&init_net, "ax25_calls");
2014 ax25_rt_free();
2015 ax25_uid_free();
2016 ax25_dev_free();
2017 2014
2018 ax25_unregister_sysctl();
2019 unregister_netdevice_notifier(&ax25_dev_notifier); 2015 unregister_netdevice_notifier(&ax25_dev_notifier);
2016 ax25_unregister_sysctl();
2020 2017
2021 dev_remove_pack(&ax25_packet_type); 2018 dev_remove_pack(&ax25_packet_type);
2022 2019
2023 sock_unregister(PF_AX25); 2020 sock_unregister(PF_AX25);
2024 proto_unregister(&ax25_proto); 2021 proto_unregister(&ax25_proto);
2022
2023 ax25_rt_free();
2024 ax25_uid_free();
2025 ax25_dev_free();
2025} 2026}
2026module_exit(ax25_exit); 2027module_exit(ax25_exit);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 72eb187a5f60..6fb68a9743af 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
450 sk->sk_state == BT_CONFIG) 450 sk->sk_state == BT_CONFIG)
451 return mask; 451 return mask;
452 452
453 if (sock_writeable(sk)) 453 if (!bt_sk(sk)->suspended && sock_writeable(sk))
454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
455 else 455 else
456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e33af63a884a..d6dc44cd15b0 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -665,6 +665,11 @@ int hci_dev_open(__u16 dev)
665 665
666 hci_req_lock(hdev); 666 hci_req_lock(hdev);
667 667
668 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
669 ret = -ENODEV;
670 goto done;
671 }
672
668 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 673 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
669 ret = -ERFKILL; 674 ret = -ERFKILL;
670 goto done; 675 goto done;
@@ -1210,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1210 return NULL; 1215 return NULL;
1211} 1216}
1212 1217
1213static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1218static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1214 u8 key_type, u8 old_key_type) 1219 u8 key_type, u8 old_key_type)
1215{ 1220{
1216 /* Legacy key */ 1221 /* Legacy key */
1217 if (key_type < 0x03) 1222 if (key_type < 0x03)
1218 return 1; 1223 return true;
1219 1224
1220 /* Debug keys are insecure so don't store them persistently */ 1225 /* Debug keys are insecure so don't store them persistently */
1221 if (key_type == HCI_LK_DEBUG_COMBINATION) 1226 if (key_type == HCI_LK_DEBUG_COMBINATION)
1222 return 0; 1227 return false;
1223 1228
1224 /* Changed combination key and there's no previous one */ 1229 /* Changed combination key and there's no previous one */
1225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1230 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1226 return 0; 1231 return false;
1227 1232
1228 /* Security mode 3 case */ 1233 /* Security mode 3 case */
1229 if (!conn) 1234 if (!conn)
1230 return 1; 1235 return true;
1231 1236
1232 /* Neither local nor remote side had no-bonding as requirement */ 1237 /* Neither local nor remote side had no-bonding as requirement */
1233 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1234 return 1; 1239 return true;
1235 1240
1236 /* Local side had dedicated bonding as requirement */ 1241 /* Local side had dedicated bonding as requirement */
1237 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1238 return 1; 1243 return true;
1239 1244
1240 /* Remote side had dedicated bonding as requirement */ 1245 /* Remote side had dedicated bonding as requirement */
1241 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1242 return 1; 1247 return true;
1243 1248
1244 /* If none of the above criteria match, then don't store the key 1249 /* If none of the above criteria match, then don't store the key
1245 * persistently */ 1250 * persistently */
1246 return 0; 1251 return false;
1247} 1252}
1248 1253
1249struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) 1254struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1280,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1280 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1285 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1281{ 1286{
1282 struct link_key *key, *old_key; 1287 struct link_key *key, *old_key;
1283 u8 old_key_type, persistent; 1288 u8 old_key_type;
1289 bool persistent;
1284 1290
1285 old_key = hci_find_link_key(hdev, bdaddr); 1291 old_key = hci_find_link_key(hdev, bdaddr);
1286 if (old_key) { 1292 if (old_key) {
@@ -1323,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1323 1329
1324 mgmt_new_link_key(hdev, key, persistent); 1330 mgmt_new_link_key(hdev, key, persistent);
1325 1331
1326 if (!persistent) { 1332 if (conn)
1327 list_del(&key->list); 1333 conn->flush_key = !persistent;
1328 kfree(key);
1329 }
1330 1334
1331 return 0; 1335 return 0;
1332} 1336}
@@ -1849,6 +1853,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1849 1853
1850 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1854 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1851 1855
1856 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1857
1852 write_lock(&hci_dev_list_lock); 1858 write_lock(&hci_dev_list_lock);
1853 list_del(&hdev->list); 1859 list_del(&hdev->list);
1854 write_unlock(&hci_dev_list_lock); 1860 write_unlock(&hci_dev_list_lock);
@@ -2778,6 +2784,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2778 if (conn) { 2784 if (conn) {
2779 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2785 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2780 2786
2787 hci_dev_lock(hdev);
2788 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2789 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2790 mgmt_device_connected(hdev, &conn->dst, conn->type,
2791 conn->dst_type, 0, NULL, 0,
2792 conn->dev_class);
2793 hci_dev_unlock(hdev);
2794
2781 /* Send to upper protocol */ 2795 /* Send to upper protocol */
2782 l2cap_recv_acldata(conn, skb, flags); 2796 l2cap_recv_acldata(conn, skb, flags);
2783 return; 2797 return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..1266f78fa8e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1901 } 1901 }
1902 1902
1903 if (ev->status == 0) { 1903 if (ev->status == 0) {
1904 if (conn->type == ACL_LINK && conn->flush_key)
1905 hci_remove_link_key(hdev, &conn->dst);
1904 hci_proto_disconn_cfm(conn, ev->reason); 1906 hci_proto_disconn_cfm(conn, ev->reason);
1905 hci_conn_del(conn); 1907 hci_conn_del(conn);
1906 } 1908 }
@@ -2037,6 +2039,12 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
2037 2039
2038 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2040 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2039 2041
2042 if (ev->status && conn->state == BT_CONNECTED) {
2043 hci_acl_disconn(conn, 0x13);
2044 hci_conn_put(conn);
2045 goto unlock;
2046 }
2047
2040 if (conn->state == BT_CONFIG) { 2048 if (conn->state == BT_CONFIG) {
2041 if (!ev->status) 2049 if (!ev->status)
2042 conn->state = BT_CONNECTED; 2050 conn->state = BT_CONNECTED;
@@ -2047,6 +2055,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
2047 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2055 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2048 } 2056 }
2049 2057
2058unlock:
2050 hci_dev_unlock(hdev); 2059 hci_dev_unlock(hdev);
2051} 2060}
2052 2061
@@ -2100,7 +2109,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
2100 goto unlock; 2109 goto unlock;
2101 } 2110 }
2102 2111
2103 if (!ev->status) { 2112 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2104 struct hci_cp_remote_name_req cp; 2113 struct hci_cp_remote_name_req cp;
2105 memset(&cp, 0, sizeof(cp)); 2114 memset(&cp, 0, sizeof(cp));
2106 bacpy(&cp.bdaddr, &conn->dst); 2115 bacpy(&cp.bdaddr, &conn->dst);
@@ -2311,6 +2320,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2311 2320
2312 case HCI_OP_USER_PASSKEY_NEG_REPLY: 2321 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2313 hci_cc_user_passkey_neg_reply(hdev, skb); 2322 hci_cc_user_passkey_neg_reply(hdev, skb);
2323 break;
2314 2324
2315 case HCI_OP_LE_SET_SCAN_PARAM: 2325 case HCI_OP_LE_SET_SCAN_PARAM:
2316 hci_cc_le_set_scan_param(hdev, skb); 2326 hci_cc_le_set_scan_param(hdev, skb);
@@ -2868,7 +2878,7 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
2868 if (conn->state != BT_CONFIG) 2878 if (conn->state != BT_CONFIG)
2869 goto unlock; 2879 goto unlock;
2870 2880
2871 if (!ev->status) { 2881 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2872 struct hci_cp_remote_name_req cp; 2882 struct hci_cp_remote_name_req cp;
2873 memset(&cp, 0, sizeof(cp)); 2883 memset(&cp, 0, sizeof(cp));
2874 bacpy(&cp.bdaddr, &conn->dst); 2884 bacpy(&cp.bdaddr, &conn->dst);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b8e17e4dac8b..6f9c25b633a6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1308,6 +1308,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1308 if (chan->retry_count >= chan->remote_max_tx) { 1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan); 1310 l2cap_chan_unlock(chan);
1311 l2cap_chan_put(chan);
1311 return; 1312 return;
1312 } 1313 }
1313 1314
@@ -1316,6 +1317,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1316 1317
1317 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1318 l2cap_chan_unlock(chan); 1319 l2cap_chan_unlock(chan);
1320 l2cap_chan_put(chan);
1319} 1321}
1320 1322
1321static void l2cap_retrans_timeout(struct work_struct *work) 1323static void l2cap_retrans_timeout(struct work_struct *work)
@@ -1335,6 +1337,7 @@ static void l2cap_retrans_timeout(struct work_struct *work)
1335 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1336 1338
1337 l2cap_chan_unlock(chan); 1339 l2cap_chan_unlock(chan);
1340 l2cap_chan_put(chan);
1338} 1341}
1339 1342
1340static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1343static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -4586,6 +4589,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4586 4589
4587 if (!status && (chan->state == BT_CONNECTED || 4590 if (!status && (chan->state == BT_CONNECTED ||
4588 chan->state == BT_CONFIG)) { 4591 chan->state == BT_CONFIG)) {
4592 struct sock *sk = chan->sk;
4593
4594 bt_sk(sk)->suspended = false;
4595 sk->sk_state_change(sk);
4596
4589 l2cap_check_encryption(chan, encrypt); 4597 l2cap_check_encryption(chan, encrypt);
4590 l2cap_chan_unlock(chan); 4598 l2cap_chan_unlock(chan);
4591 continue; 4599 continue;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c4fe583b0af6..04e7c172d49c 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -82,7 +82,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
82 } 82 }
83 83
84 if (la.l2_cid) 84 if (la.l2_cid)
85 err = l2cap_add_scid(chan, la.l2_cid); 85 err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
86 else 86 else
87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); 87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
88 88
@@ -123,7 +123,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
123 if (la.l2_cid && la.l2_psm) 123 if (la.l2_cid && la.l2_psm)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); 126 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
127 &la.l2_bdaddr);
127 if (err) 128 if (err)
128 return err; 129 return err;
129 130
@@ -591,10 +592,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
591 sk->sk_state = BT_CONFIG; 592 sk->sk_state = BT_CONFIG;
592 chan->state = BT_CONFIG; 593 chan->state = BT_CONFIG;
593 594
594 /* or for ACL link, under defer_setup time */ 595 /* or for ACL link */
595 } else if (sk->sk_state == BT_CONNECT2 && 596 } else if ((sk->sk_state == BT_CONNECT2 &&
596 bt_sk(sk)->defer_setup) { 597 bt_sk(sk)->defer_setup) ||
597 err = l2cap_chan_check_security(chan); 598 sk->sk_state == BT_CONNECTED) {
599 if (!l2cap_chan_check_security(chan))
600 bt_sk(sk)->suspended = true;
601 else
602 sk->sk_state_change(sk);
598 } else { 603 } else {
599 err = -EINVAL; 604 err = -EINVAL;
600 } 605 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fcff8887131..4bb03b111122 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2523,13 +2523,18 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2523 2523
2524 if (cp->val) { 2524 if (cp->val) {
2525 type = PAGE_SCAN_TYPE_INTERLACED; 2525 type = PAGE_SCAN_TYPE_INTERLACED;
2526 acp.interval = 0x0024; /* 22.5 msec page scan interval */ 2526
2527 /* 22.5 msec page scan interval */
2528 acp.interval = __constant_cpu_to_le16(0x0024);
2527 } else { 2529 } else {
2528 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 2530 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2529 acp.interval = 0x0800; /* default 1.28 sec page scan */ 2531
2532 /* default 1.28 sec page scan */
2533 acp.interval = __constant_cpu_to_le16(0x0800);
2530 } 2534 }
2531 2535
2532 acp.window = 0x0012; /* default 11.25 msec page scan window */ 2536 /* default 11.25 msec page scan window */
2537 acp.window = __constant_cpu_to_le16(0x0012);
2533 2538
2534 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), 2539 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2535 &acp); 2540 &acp);
@@ -2879,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2879 return 0; 2884 return 0;
2880} 2885}
2881 2886
2882int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent) 2887int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
2883{ 2888{
2884 struct mgmt_ev_new_link_key ev; 2889 struct mgmt_ev_new_link_key ev;
2885 2890
@@ -2936,7 +2941,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2936 name, name_len); 2941 name, name_len);
2937 2942
2938 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0) 2943 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
2939 eir_len = eir_append_data(&ev->eir[eir_len], eir_len, 2944 eir_len = eir_append_data(ev->eir, eir_len,
2940 EIR_CLASS_OF_DEV, dev_class, 3); 2945 EIR_CLASS_OF_DEV, dev_class, 3);
2941 2946
2942 put_unaligned_le16(eir_len, &ev->eir_len); 2947 put_unaligned_le16(eir_len, &ev->eir_len);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..a2098e3de500 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
47 kfree_skb(skb); 47 kfree_skb(skb);
48 } else { 48 } else {
49 skb_push(skb, ETH_HLEN); 49 skb_push(skb, ETH_HLEN);
50 br_drop_fake_rtable(skb);
50 dev_queue_xmit(skb); 51 dev_queue_xmit(skb);
51 } 52 }
52 53
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae9220b..27ca25ed7021 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
241 hlist_del_rcu(&mp->hlist[mdb->ver]); 241 hlist_del_rcu(&mp->hlist[mdb->ver]);
242 mdb->size--; 242 mdb->size--;
243 243
244 del_timer(&mp->query_timer);
245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 244 call_rcu_bh(&mp->rcu, br_multicast_free_group);
246 245
247out: 246out:
@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
271 rcu_assign_pointer(*pp, p->next); 270 rcu_assign_pointer(*pp, p->next);
272 hlist_del_init(&p->mglist); 271 hlist_del_init(&p->mglist);
273 del_timer(&p->timer); 272 del_timer(&p->timer);
274 del_timer(&p->query_timer);
275 call_rcu_bh(&p->rcu, br_multicast_free_pg); 273 call_rcu_bh(&p->rcu, br_multicast_free_pg);
276 274
277 if (!mp->ports && !mp->mglist && 275 if (!mp->ports && !mp->mglist &&
@@ -507,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
507 return NULL; 505 return NULL;
508} 506}
509 507
510static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
511{
512 struct net_bridge *br = mp->br;
513 struct sk_buff *skb;
514
515 skb = br_multicast_alloc_query(br, &mp->addr);
516 if (!skb)
517 goto timer;
518
519 netif_rx(skb);
520
521timer:
522 if (++mp->queries_sent < br->multicast_last_member_count)
523 mod_timer(&mp->query_timer,
524 jiffies + br->multicast_last_member_interval);
525}
526
527static void br_multicast_group_query_expired(unsigned long data)
528{
529 struct net_bridge_mdb_entry *mp = (void *)data;
530 struct net_bridge *br = mp->br;
531
532 spin_lock(&br->multicast_lock);
533 if (!netif_running(br->dev) || !mp->mglist ||
534 mp->queries_sent >= br->multicast_last_member_count)
535 goto out;
536
537 br_multicast_send_group_query(mp);
538
539out:
540 spin_unlock(&br->multicast_lock);
541}
542
543static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
544{
545 struct net_bridge_port *port = pg->port;
546 struct net_bridge *br = port->br;
547 struct sk_buff *skb;
548
549 skb = br_multicast_alloc_query(br, &pg->addr);
550 if (!skb)
551 goto timer;
552
553 br_deliver(port, skb);
554
555timer:
556 if (++pg->queries_sent < br->multicast_last_member_count)
557 mod_timer(&pg->query_timer,
558 jiffies + br->multicast_last_member_interval);
559}
560
561static void br_multicast_port_group_query_expired(unsigned long data)
562{
563 struct net_bridge_port_group *pg = (void *)data;
564 struct net_bridge_port *port = pg->port;
565 struct net_bridge *br = port->br;
566
567 spin_lock(&br->multicast_lock);
568 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
569 pg->queries_sent >= br->multicast_last_member_count)
570 goto out;
571
572 br_multicast_send_port_group_query(pg);
573
574out:
575 spin_unlock(&br->multicast_lock);
576}
577
578static struct net_bridge_mdb_entry *br_multicast_get_group( 508static struct net_bridge_mdb_entry *br_multicast_get_group(
579 struct net_bridge *br, struct net_bridge_port *port, 509 struct net_bridge *br, struct net_bridge_port *port,
580 struct br_ip *group, int hash) 510 struct br_ip *group, int hash)
@@ -690,8 +620,6 @@ rehash:
690 mp->addr = *group; 620 mp->addr = *group;
691 setup_timer(&mp->timer, br_multicast_group_expired, 621 setup_timer(&mp->timer, br_multicast_group_expired,
692 (unsigned long)mp); 622 (unsigned long)mp);
693 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
694 (unsigned long)mp);
695 623
696 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 624 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
697 mdb->size++; 625 mdb->size++;
@@ -746,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
746 hlist_add_head(&p->mglist, &port->mglist); 674 hlist_add_head(&p->mglist, &port->mglist);
747 setup_timer(&p->timer, br_multicast_port_group_expired, 675 setup_timer(&p->timer, br_multicast_port_group_expired,
748 (unsigned long)p); 676 (unsigned long)p);
749 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
750 (unsigned long)p);
751 677
752 rcu_assign_pointer(*pp, p); 678 rcu_assign_pointer(*pp, p);
753 679
@@ -1291,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1291 time_after(mp->timer.expires, time) : 1217 time_after(mp->timer.expires, time) :
1292 try_to_del_timer_sync(&mp->timer) >= 0)) { 1218 try_to_del_timer_sync(&mp->timer) >= 0)) {
1293 mod_timer(&mp->timer, time); 1219 mod_timer(&mp->timer, time);
1294
1295 mp->queries_sent = 0;
1296 mod_timer(&mp->query_timer, now);
1297 } 1220 }
1298 1221
1299 goto out; 1222 goto out;
@@ -1310,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1310 time_after(p->timer.expires, time) : 1233 time_after(p->timer.expires, time) :
1311 try_to_del_timer_sync(&p->timer) >= 0)) { 1234 try_to_del_timer_sync(&p->timer) >= 0)) {
1312 mod_timer(&p->timer, time); 1235 mod_timer(&p->timer, time);
1313
1314 p->queries_sent = 0;
1315 mod_timer(&p->query_timer, now);
1316 } 1236 }
1317 1237
1318 break; 1238 break;
@@ -1681,7 +1601,6 @@ void br_multicast_stop(struct net_bridge *br)
1681 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1601 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
1682 hlist[ver]) { 1602 hlist[ver]) {
1683 del_timer(&mp->timer); 1603 del_timer(&mp->timer);
1684 del_timer(&mp->query_timer);
1685 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1604 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1686 } 1605 }
1687 } 1606 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..d7f49b63ab0f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
156 rt->dst.dev = br->dev; 156 rt->dst.dev = br->dev;
157 rt->dst.path = &rt->dst; 157 rt->dst.path = &rt->dst;
158 dst_init_metrics(&rt->dst, br_dst_default_metrics, true); 158 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
159 rt->dst.flags = DST_NOXFRM | DST_NOPEER; 159 rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
160 rt->dst.ops = &fake_dst_ops; 160 rt->dst.ops = &fake_dst_ops;
161} 161}
162 162
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
694 const struct net_device *out, 694 const struct net_device *out,
695 int (*okfn)(struct sk_buff *)) 695 int (*okfn)(struct sk_buff *))
696{ 696{
697 struct rtable *rt = skb_rtable(skb); 697 br_drop_fake_rtable(skb);
698
699 if (rt && rt == bridge_parent_rtable(in))
700 skb_dst_drop(skb);
701
702 return NF_ACCEPT; 698 return NF_ACCEPT;
703} 699}
704 700
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63ad7a8..e1d882257877 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@ struct net_bridge_port_group {
82 struct hlist_node mglist; 82 struct hlist_node mglist;
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84 struct timer_list timer; 84 struct timer_list timer;
85 struct timer_list query_timer;
86 struct br_ip addr; 85 struct br_ip addr;
87 u32 queries_sent;
88}; 86};
89 87
90struct net_bridge_mdb_entry 88struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
94 struct net_bridge_port_group __rcu *ports; 92 struct net_bridge_port_group __rcu *ports;
95 struct rcu_head rcu; 93 struct rcu_head rcu;
96 struct timer_list timer; 94 struct timer_list timer;
97 struct timer_list query_timer;
98 struct br_ip addr; 95 struct br_ip addr;
99 bool mglist; 96 bool mglist;
100 u32 queries_sent;
101}; 97};
102 98
103struct net_bridge_mdb_htable 99struct net_bridge_mdb_htable
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 20618dd3088b..d09340e1523f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
103 skb->protocol = htons(ETH_P_IPV6); 103 skb->protocol = htons(ETH_P_IPV6);
104 break; 104 break;
105 default: 105 default:
106 kfree_skb(skb);
106 priv->netdev->stats.rx_errors++; 107 priv->netdev->stats.rx_errors++;
107 return -EINVAL; 108 return -EINVAL;
108 } 109 }
@@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
220 221
221 if (skb->len > priv->netdev->mtu) { 222 if (skb->len > priv->netdev->mtu) {
222 pr_warn("Size of skb exceeded MTU\n"); 223 pr_warn("Size of skb exceeded MTU\n");
224 kfree_skb(skb);
223 dev->stats.tx_errors++; 225 dev->stats.tx_errors++;
224 return -ENOSPC; 226 return NETDEV_TX_OK;
225 } 227 }
226 228
227 if (!priv->flowenabled) { 229 if (!priv->flowenabled) {
228 pr_debug("dropping packets flow off\n"); 230 pr_debug("dropping packets flow off\n");
231 kfree_skb(skb);
229 dev->stats.tx_dropped++; 232 dev->stats.tx_dropped++;
230 return NETDEV_TX_BUSY; 233 return NETDEV_TX_OK;
231 } 234 }
232 235
233 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) 236 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
@@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
242 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 245 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
243 if (result) { 246 if (result) {
244 dev->stats.tx_dropped++; 247 dev->stats.tx_dropped++;
245 return result; 248 return NETDEV_TX_OK;
246 } 249 }
247 250
248 /* Update statistics. */ 251 /* Update statistics. */
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453b2803..99e1d759f41e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
1409 * register_netdevice_notifier(). The notifier is unlinked into the 1409 * register_netdevice_notifier(). The notifier is unlinked into the
1410 * kernel structures and may then be reused. A negative errno code 1410 * kernel structures and may then be reused. A negative errno code
1411 * is returned on a failure. 1411 * is returned on a failure.
1412 *
1413 * After unregistering unregister and down device events are synthesized
1414 * for all devices on the device list to the removed notifier to remove
1415 * the need for special case cleanup code.
1412 */ 1416 */
1413 1417
1414int unregister_netdevice_notifier(struct notifier_block *nb) 1418int unregister_netdevice_notifier(struct notifier_block *nb)
1415{ 1419{
1420 struct net_device *dev;
1421 struct net *net;
1416 int err; 1422 int err;
1417 1423
1418 rtnl_lock(); 1424 rtnl_lock();
1419 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1425 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1426 if (err)
1427 goto unlock;
1428
1429 for_each_net(net) {
1430 for_each_netdev(net, dev) {
1431 if (dev->flags & IFF_UP) {
1432 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1433 nb->notifier_call(nb, NETDEV_DOWN, dev);
1434 }
1435 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1436 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1437 }
1438 }
1439unlock:
1420 rtnl_unlock(); 1440 rtnl_unlock();
1421 return err; 1441 return err;
1422} 1442}
@@ -1597,10 +1617,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1597 return NET_RX_DROP; 1617 return NET_RX_DROP;
1598 } 1618 }
1599 skb->skb_iif = 0; 1619 skb->skb_iif = 0;
1600 skb_set_dev(skb, dev); 1620 skb->dev = dev;
1621 skb_dst_drop(skb);
1601 skb->tstamp.tv64 = 0; 1622 skb->tstamp.tv64 = 0;
1602 skb->pkt_type = PACKET_HOST; 1623 skb->pkt_type = PACKET_HOST;
1603 skb->protocol = eth_type_trans(skb, dev); 1624 skb->protocol = eth_type_trans(skb, dev);
1625 skb->mark = 0;
1626 secpath_reset(skb);
1627 nf_reset(skb);
1604 return netif_rx(skb); 1628 return netif_rx(skb);
1605} 1629}
1606EXPORT_SYMBOL_GPL(dev_forward_skb); 1630EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1849,36 +1873,6 @@ void netif_device_attach(struct net_device *dev)
1849} 1873}
1850EXPORT_SYMBOL(netif_device_attach); 1874EXPORT_SYMBOL(netif_device_attach);
1851 1875
1852/**
1853 * skb_dev_set -- assign a new device to a buffer
1854 * @skb: buffer for the new device
1855 * @dev: network device
1856 *
1857 * If an skb is owned by a device already, we have to reset
1858 * all data private to the namespace a device belongs to
1859 * before assigning it a new device.
1860 */
1861#ifdef CONFIG_NET_NS
1862void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1863{
1864 skb_dst_drop(skb);
1865 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1866 secpath_reset(skb);
1867 nf_reset(skb);
1868 skb_init_secmark(skb);
1869 skb->mark = 0;
1870 skb->priority = 0;
1871 skb->nf_trace = 0;
1872 skb->ipvs_property = 0;
1873#ifdef CONFIG_NET_SCHED
1874 skb->tc_index = 0;
1875#endif
1876 }
1877 skb->dev = dev;
1878}
1879EXPORT_SYMBOL(skb_set_dev);
1880#endif /* CONFIG_NET_NS */
1881
1882static void skb_warn_bad_offload(const struct sk_buff *skb) 1876static void skb_warn_bad_offload(const struct sk_buff *skb)
1883{ 1877{
1884 static const netdev_features_t null_features = 0; 1878 static const netdev_features_t null_features = 0;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
42 * netlink alerts 42 * netlink alerts
43 */ 43 */
44static int trace_state = TRACE_OFF; 44static int trace_state = TRACE_OFF;
45static DEFINE_SPINLOCK(trace_state_lock); 45static DEFINE_MUTEX(trace_state_mutex);
46 46
47struct per_cpu_dm_data { 47struct per_cpu_dm_data {
48 struct work_struct dm_alert_work; 48 struct work_struct dm_alert_work;
49 struct sk_buff *skb; 49 struct sk_buff __rcu *skb;
50 atomic_t dm_hit_count; 50 atomic_t dm_hit_count;
51 struct timer_list send_timer; 51 struct timer_list send_timer;
52 int cpu;
52}; 53};
53 54
54struct dm_hw_stat_delta { 55struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
79 size_t al; 80 size_t al;
80 struct net_dm_alert_msg *msg; 81 struct net_dm_alert_msg *msg;
81 struct nlattr *nla; 82 struct nlattr *nla;
83 struct sk_buff *skb;
84 struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
82 85
83 al = sizeof(struct net_dm_alert_msg); 86 al = sizeof(struct net_dm_alert_msg);
84 al += dm_hit_limit * sizeof(struct net_dm_drop_point); 87 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
85 al += sizeof(struct nlattr); 88 al += sizeof(struct nlattr);
86 89
87 data->skb = genlmsg_new(al, GFP_KERNEL); 90 skb = genlmsg_new(al, GFP_KERNEL);
88 genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, 91
89 0, NET_DM_CMD_ALERT); 92 if (skb) {
90 nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); 93 genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
91 msg = nla_data(nla); 94 0, NET_DM_CMD_ALERT);
92 memset(msg, 0, al); 95 nla = nla_reserve(skb, NLA_UNSPEC,
93 atomic_set(&data->dm_hit_count, dm_hit_limit); 96 sizeof(struct net_dm_alert_msg));
97 msg = nla_data(nla);
98 memset(msg, 0, al);
99 } else
100 schedule_work_on(data->cpu, &data->dm_alert_work);
101
102 /*
103 * Don't need to lock this, since we are guaranteed to only
104 * run this on a single cpu at a time.
105 * Note also that we only update data->skb if the old and new skb
106 * pointers don't match. This ensures that we don't continually call
107 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
108 */
109 if (skb != oskb) {
110 rcu_assign_pointer(data->skb, skb);
111
112 synchronize_rcu();
113
114 atomic_set(&data->dm_hit_count, dm_hit_limit);
115 }
116
94} 117}
95 118
96static void send_dm_alert(struct work_struct *unused) 119static void send_dm_alert(struct work_struct *unused)
97{ 120{
98 struct sk_buff *skb; 121 struct sk_buff *skb;
99 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 122 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
123
124 WARN_ON_ONCE(data->cpu != smp_processor_id());
100 125
101 /* 126 /*
102 * Grab the skb we're about to send 127 * Grab the skb we're about to send
103 */ 128 */
104 skb = data->skb; 129 skb = rcu_dereference_protected(data->skb, 1);
105 130
106 /* 131 /*
107 * Replace it with a new one 132 * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
111 /* 136 /*
112 * Ship it! 137 * Ship it!
113 */ 138 */
114 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 139 if (skb)
140 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
115 141
142 put_cpu_var(dm_cpu_data);
116} 143}
117 144
118/* 145/*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
123 */ 150 */
124static void sched_send_work(unsigned long unused) 151static void sched_send_work(unsigned long unused)
125{ 152{
126 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 153 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
154
155 schedule_work_on(smp_processor_id(), &data->dm_alert_work);
127 156
128 schedule_work(&data->dm_alert_work); 157 put_cpu_var(dm_cpu_data);
129} 158}
130 159
131static void trace_drop_common(struct sk_buff *skb, void *location) 160static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
134 struct nlmsghdr *nlh; 163 struct nlmsghdr *nlh;
135 struct nlattr *nla; 164 struct nlattr *nla;
136 int i; 165 int i;
137 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 166 struct sk_buff *dskb;
167 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
168
169
170 rcu_read_lock();
171 dskb = rcu_dereference(data->skb);
138 172
173 if (!dskb)
174 goto out;
139 175
140 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { 176 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
141 /* 177 /*
@@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
144 goto out; 180 goto out;
145 } 181 }
146 182
147 nlh = (struct nlmsghdr *)data->skb->data; 183 nlh = (struct nlmsghdr *)dskb->data;
148 nla = genlmsg_data(nlmsg_data(nlh)); 184 nla = genlmsg_data(nlmsg_data(nlh));
149 msg = nla_data(nla); 185 msg = nla_data(nla);
150 for (i = 0; i < msg->entries; i++) { 186 for (i = 0; i < msg->entries; i++) {
151 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 187 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
152 msg->points[i].count++; 188 msg->points[i].count++;
189 atomic_inc(&data->dm_hit_count);
153 goto out; 190 goto out;
154 } 191 }
155 } 192 }
@@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
157 /* 194 /*
158 * We need to create a new entry 195 * We need to create a new entry
159 */ 196 */
160 __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); 197 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
161 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); 198 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
162 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); 199 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
163 msg->points[msg->entries].count = 1; 200 msg->points[msg->entries].count = 1;
@@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
169 } 206 }
170 207
171out: 208out:
209 rcu_read_unlock();
210 put_cpu_var(dm_cpu_data);
172 return; 211 return;
173} 212}
174 213
@@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state)
213 struct dm_hw_stat_delta *new_stat = NULL; 252 struct dm_hw_stat_delta *new_stat = NULL;
214 struct dm_hw_stat_delta *temp; 253 struct dm_hw_stat_delta *temp;
215 254
216 spin_lock(&trace_state_lock); 255 mutex_lock(&trace_state_mutex);
217 256
218 if (state == trace_state) { 257 if (state == trace_state) {
219 rc = -EAGAIN; 258 rc = -EAGAIN;
@@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state)
252 rc = -EINPROGRESS; 291 rc = -EINPROGRESS;
253 292
254out_unlock: 293out_unlock:
255 spin_unlock(&trace_state_lock); 294 mutex_unlock(&trace_state_mutex);
256 295
257 return rc; 296 return rc;
258} 297}
@@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
295 334
296 new_stat->dev = dev; 335 new_stat->dev = dev;
297 new_stat->last_rx = jiffies; 336 new_stat->last_rx = jiffies;
298 spin_lock(&trace_state_lock); 337 mutex_lock(&trace_state_mutex);
299 list_add_rcu(&new_stat->list, &hw_stats_list); 338 list_add_rcu(&new_stat->list, &hw_stats_list);
300 spin_unlock(&trace_state_lock); 339 mutex_unlock(&trace_state_mutex);
301 break; 340 break;
302 case NETDEV_UNREGISTER: 341 case NETDEV_UNREGISTER:
303 spin_lock(&trace_state_lock); 342 mutex_lock(&trace_state_mutex);
304 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { 343 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
305 if (new_stat->dev == dev) { 344 if (new_stat->dev == dev) {
306 new_stat->dev = NULL; 345 new_stat->dev = NULL;
@@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
311 } 350 }
312 } 351 }
313 } 352 }
314 spin_unlock(&trace_state_lock); 353 mutex_unlock(&trace_state_mutex);
315 break; 354 break;
316 } 355 }
317out: 356out:
@@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void)
367 406
368 for_each_present_cpu(cpu) { 407 for_each_present_cpu(cpu) {
369 data = &per_cpu(dm_cpu_data, cpu); 408 data = &per_cpu(dm_cpu_data, cpu);
370 reset_per_cpu_data(data); 409 data->cpu = cpu;
371 INIT_WORK(&data->dm_alert_work, send_dm_alert); 410 INIT_WORK(&data->dm_alert_work, send_dm_alert);
372 init_timer(&data->send_timer); 411 init_timer(&data->send_timer);
373 data->send_timer.data = cpu; 412 data->send_timer.data = cpu;
374 data->send_timer.function = sched_send_work; 413 data->send_timer.function = sched_send_work;
414 reset_per_cpu_data(data);
375 } 415 }
376 416
417
377 goto out; 418 goto out;
378 419
379out_unreg: 420out_unreg:
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..31a5ae51a45c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@ assign:
83 83
84static int ops_init(const struct pernet_operations *ops, struct net *net) 84static int ops_init(const struct pernet_operations *ops, struct net *net)
85{ 85{
86 int err; 86 int err = -ENOMEM;
87 void *data = NULL;
88
87 if (ops->id && ops->size) { 89 if (ops->id && ops->size) {
88 void *data = kzalloc(ops->size, GFP_KERNEL); 90 data = kzalloc(ops->size, GFP_KERNEL);
89 if (!data) 91 if (!data)
90 return -ENOMEM; 92 goto out;
91 93
92 err = net_assign_generic(net, *ops->id, data); 94 err = net_assign_generic(net, *ops->id, data);
93 if (err) { 95 if (err)
94 kfree(data); 96 goto cleanup;
95 return err;
96 }
97 } 97 }
98 err = 0;
98 if (ops->init) 99 if (ops->init)
99 return ops->init(net); 100 err = ops->init(net);
100 return 0; 101 if (!err)
102 return 0;
103
104cleanup:
105 kfree(data);
106
107out:
108 return err;
101} 109}
102 110
103static void ops_free(const struct pernet_operations *ops, struct net *net) 111static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
448static int __register_pernet_operations(struct list_head *list, 456static int __register_pernet_operations(struct list_head *list,
449 struct pernet_operations *ops) 457 struct pernet_operations *ops)
450{ 458{
451 int err = 0; 459 return ops_init(ops, &init_net);
452 err = ops_init(ops, &init_net);
453 if (err)
454 ops_free(ops, &init_net);
455 return err;
456
457} 460}
458 461
459static void __unregister_pernet_operations(struct pernet_operations *ops) 462static void __unregister_pernet_operations(struct pernet_operations *ops)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd503..b81369b6ddc0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused,
1931{ 1931{
1932 struct net_device *dev = ptr; 1932 struct net_device *dev = ptr;
1933 1933
1934 if (!net_eq(dev_net(dev), &init_net)) 1934 if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
1935 return NOTIFY_DONE; 1935 return NOTIFY_DONE;
1936 1936
1937 /* It is OK that we do not hold the group lock right now, 1937 /* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
3755{ 3755{
3756 struct pktgen_thread *t; 3756 struct pktgen_thread *t;
3757 struct list_head *q, *n; 3757 struct list_head *q, *n;
3758 LIST_HEAD(list);
3758 3759
3759 /* Stop all interfaces & threads */ 3760 /* Stop all interfaces & threads */
3760 pktgen_exiting = true; 3761 pktgen_exiting = true;
3761 3762
3762 list_for_each_safe(q, n, &pktgen_threads) { 3763 mutex_lock(&pktgen_thread_lock);
3764 list_splice_init(&pktgen_threads, &list);
3765 mutex_unlock(&pktgen_thread_lock);
3766
3767 list_for_each_safe(q, n, &list) {
3763 t = list_entry(q, struct pktgen_thread, th_list); 3768 t = list_entry(q, struct pktgen_thread, th_list);
3769 list_del(&t->th_list);
3764 kthread_stop(t->tsk); 3770 kthread_stop(t->tsk);
3765 kfree(t); 3771 kfree(t);
3766 } 3772 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d281152c..e59840010d45 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
952 goto adjust_others; 952 goto adjust_others;
953 } 953 }
954 954
955 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 955 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
956 gfp_mask);
956 if (!data) 957 if (!data)
957 goto nodata; 958 goto nodata;
959 size = SKB_WITH_OVERHEAD(ksize(data));
958 960
959 /* Copy only real data... and, alas, header. This should be 961 /* Copy only real data... and, alas, header. This should be
960 * optimized for the cases when header is void. 962 * optimized for the cases when header is void.
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 368515885368..840821b90bcd 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev)
1044 free_netdev(dev); 1044 free_netdev(dev);
1045} 1045}
1046 1046
1047static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
1048{
1049 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1050 return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
1051}
1052
1053static u16 lowpan_get_pan_id(const struct net_device *dev)
1054{
1055 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1056 return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
1057}
1058
1059static u16 lowpan_get_short_addr(const struct net_device *dev)
1060{
1061 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1062 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
1063}
1064
1047static struct header_ops lowpan_header_ops = { 1065static struct header_ops lowpan_header_ops = {
1048 .create = lowpan_header_create, 1066 .create = lowpan_header_create,
1049}; 1067};
@@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
1053 .ndo_set_mac_address = eth_mac_addr, 1071 .ndo_set_mac_address = eth_mac_addr,
1054}; 1072};
1055 1073
1074static struct ieee802154_mlme_ops lowpan_mlme = {
1075 .get_pan_id = lowpan_get_pan_id,
1076 .get_phy = lowpan_get_phy,
1077 .get_short_addr = lowpan_get_short_addr,
1078};
1079
1056static void lowpan_setup(struct net_device *dev) 1080static void lowpan_setup(struct net_device *dev)
1057{ 1081{
1058 pr_debug("(%s)\n", __func__); 1082 pr_debug("(%s)\n", __func__);
@@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev)
1070 1094
1071 dev->netdev_ops = &lowpan_netdev_ops; 1095 dev->netdev_ops = &lowpan_netdev_ops;
1072 dev->header_ops = &lowpan_header_ops; 1096 dev->header_ops = &lowpan_header_ops;
1097 dev->ml_priv = &lowpan_mlme;
1073 dev->destructor = lowpan_dev_free; 1098 dev->destructor = lowpan_dev_free;
1074} 1099}
1075 1100
@@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1143 list_add_tail(&entry->list, &lowpan_devices); 1168 list_add_tail(&entry->list, &lowpan_devices);
1144 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); 1169 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
1145 1170
1171 spin_lock_init(&flist_lock);
1172
1146 register_netdevice(dev); 1173 register_netdevice(dev);
1147 1174
1148 return 0; 1175 return 0;
@@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
1152{ 1179{
1153 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); 1180 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
1154 struct net_device *real_dev = lowpan_dev->real_dev; 1181 struct net_device *real_dev = lowpan_dev->real_dev;
1155 struct lowpan_dev_record *entry; 1182 struct lowpan_dev_record *entry, *tmp;
1156 struct lowpan_dev_record *tmp; 1183 struct lowpan_fragment *frame, *tframe;
1157 1184
1158 ASSERT_RTNL(); 1185 ASSERT_RTNL();
1159 1186
1187 spin_lock(&flist_lock);
1188 list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
1189 del_timer(&frame->timer);
1190 list_del(&frame->list);
1191 dev_kfree_skb(frame->skb);
1192 kfree(frame);
1193 }
1194 spin_unlock(&flist_lock);
1195
1160 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1196 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1161 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { 1197 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1162 if (entry->ldev == dev) { 1198 if (entry->ldev == dev) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1a37b4..30b88d7b4bd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1370 1370
1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1372 continue; 1372 continue;
1373 if (fi->fib_dead)
1374 continue;
1373 if (fa->fa_info->fib_scope < flp->flowi4_scope) 1375 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1374 continue; 1376 continue;
1375 fib_alias_accessed(fa); 1377 fib_alias_accessed(fa);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557eb..8f8db724bfaf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
141 goto rtattr_failure; 141 goto rtattr_failure;
142 142
143 if (icsk == NULL) { 143 if (icsk == NULL) {
144 r->idiag_rqueue = r->idiag_wqueue = 0; 144 handler->idiag_get_info(sk, r, NULL);
145 goto out; 145 goto out;
146 } 146 }
147 147
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21113a1..cf73cc70ed2d 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
74 74
75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); 75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
76 if (iph == NULL) 76 if (iph == NULL)
77 return -NF_DROP; 77 return -NF_ACCEPT;
78 78
79 /* Conntrack defragments packets, we might still see fragments 79 /* Conntrack defragments packets, we might still see fragments
80 * inside ICMP packets though. */ 80 * inside ICMP packets though. */
81 if (iph->frag_off & htons(IP_OFFSET)) 81 if (iph->frag_off & htons(IP_OFFSET))
82 return -NF_DROP; 82 return -NF_ACCEPT;
83 83
84 *dataoff = nhoff + (iph->ihl << 2); 84 *dataoff = nhoff + (iph->ihl << 2);
85 *protonum = iph->protocol; 85 *protonum = iph->protocol;
86 86
87 /* Check bogus IP headers */
88 if (*dataoff > skb->len) {
89 pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
90 "nhoff %u, ihl %u, skblen %u\n",
91 nhoff, iph->ihl << 2, skb->len);
92 return -NF_ACCEPT;
93 }
94
87 return NF_ACCEPT; 95 return NF_ACCEPT;
88} 96}
89 97
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed30e821..6589e11d57b6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
701 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 701 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
702 if (skb) { 702 if (skb) {
703 if (sk_wmem_schedule(sk, skb->truesize)) { 703 if (sk_wmem_schedule(sk, skb->truesize)) {
704 skb_reserve(skb, sk->sk_prot->max_header);
704 /* 705 /*
705 * Make sure that we have exactly size bytes 706 * Make sure that we have exactly size bytes
706 * available to the caller, no more, no less. 707 * available to the caller, no more, no less.
707 */ 708 */
708 skb_reserve(skb, skb_tailroom(skb) - size); 709 skb->avail_size = size;
709 return skb; 710 return skb;
710 } 711 }
711 __kfree_skb(skb); 712 __kfree_skb(skb);
@@ -850,8 +851,7 @@ new_segment:
850wait_for_sndbuf: 851wait_for_sndbuf:
851 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 852 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
852wait_for_memory: 853wait_for_memory:
853 if (copied) 854 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
854 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
855 855
856 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 856 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
857 goto do_error; 857 goto do_error;
@@ -995,10 +995,9 @@ new_segment:
995 copy = seglen; 995 copy = seglen;
996 996
997 /* Where to copy to? */ 997 /* Where to copy to? */
998 if (skb_tailroom(skb) > 0) { 998 if (skb_availroom(skb) > 0) {
999 /* We have some space in skb head. Superb! */ 999 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1000 copy = min_t(int, copy, skb_availroom(skb));
1001 copy = skb_tailroom(skb);
1002 err = skb_add_data_nocache(sk, skb, from, copy); 1001 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err) 1002 if (err)
1004 goto do_fault; 1003 goto do_fault;
@@ -1452,7 +1451,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1452 if ((available < target) && 1451 if ((available < target) &&
1453 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1452 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1454 !sysctl_tcp_low_latency && 1453 !sysctl_tcp_low_latency &&
1455 dma_find_channel(DMA_MEMCPY)) { 1454 net_dma_find_channel()) {
1456 preempt_enable_no_resched(); 1455 preempt_enable_no_resched();
1457 tp->ucopy.pinned_list = 1456 tp->ucopy.pinned_list =
1458 dma_pin_iovec_pages(msg->msg_iov, len); 1457 dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1666,7 @@ do_prequeue:
1667 if (!(flags & MSG_TRUNC)) { 1666 if (!(flags & MSG_TRUNC)) {
1668#ifdef CONFIG_NET_DMA 1667#ifdef CONFIG_NET_DMA
1669 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1668 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1670 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1669 tp->ucopy.dma_chan = net_dma_find_channel();
1671 1670
1672 if (tp->ucopy.dma_chan) { 1671 if (tp->ucopy.dma_chan) {
1673 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1672 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -3243,7 +3242,7 @@ void __init tcp_init(void)
3243{ 3242{
3244 struct sk_buff *skb = NULL; 3243 struct sk_buff *skb = NULL;
3245 unsigned long limit; 3244 unsigned long limit;
3246 int max_share, cnt; 3245 int max_rshare, max_wshare, cnt;
3247 unsigned int i; 3246 unsigned int i;
3248 unsigned long jiffy = jiffies; 3247 unsigned long jiffy = jiffies;
3249 3248
@@ -3302,17 +3301,17 @@ void __init tcp_init(void)
3302 3301
3303 tcp_init_mem(&init_net); 3302 tcp_init_mem(&init_net);
3304 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3303 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3305 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10); 3304 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3306 limit = max(limit, 128UL); 3305 max_wshare = min(4UL*1024*1024, limit);
3307 max_share = min(4UL*1024*1024, limit); 3306 max_rshare = min(6UL*1024*1024, limit);
3308 3307
3309 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3308 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3310 sysctl_tcp_wmem[1] = 16*1024; 3309 sysctl_tcp_wmem[1] = 16*1024;
3311 sysctl_tcp_wmem[2] = max(64*1024, max_share); 3310 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3312 3311
3313 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3312 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3314 sysctl_tcp_rmem[1] = 87380; 3313 sysctl_tcp_rmem[1] = 87380;
3315 sysctl_tcp_rmem[2] = max(87380, max_share); 3314 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3316 3315
3317 pr_info("Hash tables configured (established %u bind %u)\n", 3316 pr_info("Hash tables configured (established %u bind %u)\n",
3318 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3317 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
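The last tcp.c hunk above reworks how tcp_init() sizes the default socket buffers: the per-socket limit becomes 1/128 of the memory-pressure threshold, and the write and read shares are capped separately at 4 MB and 6 MB before feeding tcp_wmem[2] and tcp_rmem[2]. A minimal userspace sketch of that arithmetic follows; nr_free_pages and page_shift are made-up inputs standing in for nr_free_buffer_pages() and PAGE_SHIFT, not kernel symbols.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
    unsigned long nr_free_pages = 1UL << 20;   /* pretend 4 GB of free buffer pages */
    unsigned int page_shift = 12;              /* 4 KB pages */

    /* 1/128 of the pressure threshold, as in the hunk above */
    unsigned long limit = nr_free_pages << (page_shift - 7);
    unsigned long max_wshare = min_ul(4UL * 1024 * 1024, limit);
    unsigned long max_rshare = min_ul(6UL * 1024 * 1024, limit);

    unsigned long tcp_wmem_max = max_ul(64 * 1024, max_wshare);
    unsigned long tcp_rmem_max = max_ul(87380, max_rshare);

    printf("tcp_wmem[2]=%lu tcp_rmem[2]=%lu\n", tcp_wmem_max, tcp_rmem_max);
    return 0;
}

With plenty of free memory the write share hits its 4 MB cap and the read share its 6 MB cap, which appears to be the intended steady state on large machines.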
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f7fa8d..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
85EXPORT_SYMBOL(sysctl_tcp_ecn); 85EXPORT_SYMBOL(sysctl_tcp_ecn);
86int sysctl_tcp_dsack __read_mostly = 1; 86int sysctl_tcp_dsack __read_mostly = 1;
87int sysctl_tcp_app_win __read_mostly = 31; 87int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 2; 88int sysctl_tcp_adv_win_scale __read_mostly = 1;
89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
90 90
91int sysctl_tcp_stdurg __read_mostly; 91int sysctl_tcp_stdurg __read_mostly;
@@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
335 incr = __tcp_grow_window(sk, skb); 335 incr = __tcp_grow_window(sk, skb);
336 336
337 if (incr) { 337 if (incr) {
338 incr = max_t(int, incr, 2 * skb->len);
338 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 339 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
339 tp->window_clamp); 340 tp->window_clamp);
340 inet_csk(sk)->icsk_ack.quick |= 1; 341 inet_csk(sk)->icsk_ack.quick |= 1;
@@ -474,8 +475,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
474 if (!win_dep) { 475 if (!win_dep) {
475 m -= (new_sample >> 3); 476 m -= (new_sample >> 3);
476 new_sample += m; 477 new_sample += m;
477 } else if (m < new_sample) 478 } else {
478 new_sample = m << 3; 479 m <<= 3;
480 if (m < new_sample)
481 new_sample = m;
482 }
479 } else { 483 } else {
480 /* No previous measure. */ 484 /* No previous measure. */
481 new_sample = m << 3; 485 new_sample = m << 3;
@@ -491,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
491 goto new_measure; 495 goto new_measure;
492 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 496 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
493 return; 497 return;
494 tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); 498 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
495 499
496new_measure: 500new_measure:
497 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 501 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2864,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
2864 2868
2865 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2869 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2866 if (tp->undo_marker) { 2870 if (tp->undo_marker) {
2867 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) 2871 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
2868 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2872 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2869 else /* PRR */ 2873 tp->snd_cwnd_stamp = tcp_time_stamp;
2874 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
2875 /* PRR algorithm. */
2870 tp->snd_cwnd = tp->snd_ssthresh; 2876 tp->snd_cwnd = tp->snd_ssthresh;
2871 tp->snd_cwnd_stamp = tcp_time_stamp; 2877 tp->snd_cwnd_stamp = tcp_time_stamp;
2878 }
2872 } 2879 }
2873 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2880 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2874} 2881}
@@ -5225,7 +5232,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5225 return 0; 5232 return 0;
5226 5233
5227 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 5234 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
5228 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 5235 tp->ucopy.dma_chan = net_dma_find_channel();
5229 5236
5230 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 5237 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
5231 5238
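The tcp_rcv_rtt_update() hunk above fixes the window-dependent branch: the stored estimate is kept left-shifted by 3, so a new sample has to be shifted onto the same scale before it can replace the tracked minimum. A small standalone sketch of just that branch, in plain C rather than the kernel types:

#include <stdio.h>

/* Minimum-tracking update on the <<3 scale; not the kernel code. */
static unsigned int rcv_rtt_update_windep(unsigned int stored, unsigned int m)
{
    if (stored == 0)
        return m << 3;          /* no previous measure */
    m <<= 3;                    /* bring the sample onto the stored scale */
    if (m < stored)
        stored = m;             /* keep the minimum on that scale */
    return stored;
}

int main(void)
{
    unsigned int est = 0;
    unsigned int samples[] = { 40, 12, 25 };
    for (unsigned int i = 0; i < 3; i++) {
        est = rcv_rtt_update_windep(est, samples[i]);
        printf("sample=%u est>>3=%u\n", samples[i], est >> 3);
    }
    return 0;
}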
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf743f8b..0cb86ceb652f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,7 +1730,7 @@ process:
1730#ifdef CONFIG_NET_DMA 1730#ifdef CONFIG_NET_DMA
1731 struct tcp_sock *tp = tcp_sk(sk); 1731 struct tcp_sock *tp = tcp_sk(sk);
1732 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1732 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1733 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1733 tp->ucopy.dma_chan = net_dma_find_channel();
1734 if (tp->ucopy.dma_chan) 1734 if (tp->ucopy.dma_chan)
1735 ret = tcp_v4_do_rcv(sk, skb); 1735 ret = tcp_v4_do_rcv(sk, skb);
1736 else 1736 else
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 364784a91939..7ac6423117ad 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1096 eat = min_t(int, len, skb_headlen(skb)); 1096 eat = min_t(int, len, skb_headlen(skb));
1097 if (eat) { 1097 if (eat) {
1098 __skb_pull(skb, eat); 1098 __skb_pull(skb, eat);
1099 skb->avail_size -= eat;
1099 len -= eat; 1100 len -= eat;
1100 if (!len) 1101 if (!len)
1101 return; 1102 return;
@@ -2060,7 +2061,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2060 /* Punt if not enough space exists in the first SKB for 2061 /* Punt if not enough space exists in the first SKB for
2061 * the data in the second 2062 * the data in the second
2062 */ 2063 */
2063 if (skb->len > skb_tailroom(to)) 2064 if (skb->len > skb_availroom(to))
2064 break; 2065 break;
2065 2066
2066 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2067 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
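The tcp.c and tcp_output.c hunks above stop trusting raw skb_tailroom() on the send path and instead clamp the copy length to a tracked available size. A rough userspace illustration of that clamp; struct fake_skb and availroom() are stand-ins and only approximate what skb->avail_size and skb_availroom() track in the real sk_buff.

#include <stdio.h>

struct fake_skb {
    size_t len;         /* bytes already queued in this buffer */
    size_t avail_size;  /* bytes the sender is allowed to hold in it */
};

static size_t availroom(const struct fake_skb *skb)
{
    return skb->avail_size > skb->len ? skb->avail_size - skb->len : 0;
}

int main(void)
{
    struct fake_skb skb = { .len = 1000, .avail_size = 1460 };
    size_t want = 4096;
    size_t copy = want;

    if (copy > availroom(&skb))
        copy = availroom(&skb);   /* same role as min_t(int, copy, skb_availroom(skb)) */

    printf("requested %zu, copied %zu\n", want, copy);
    return 0;
}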
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
146 return udp_dump_one(&udp_table, in_skb, nlh, req); 146 return udp_dump_one(&udp_table, in_skb, nlh, req);
147} 147}
148 148
149static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
150 void *info)
151{
152 r->idiag_rqueue = sk_rmem_alloc_get(sk);
153 r->idiag_wqueue = sk_wmem_alloc_get(sk);
154}
155
149static const struct inet_diag_handler udp_diag_handler = { 156static const struct inet_diag_handler udp_diag_handler = {
150 .dump = udp_diag_dump, 157 .dump = udp_diag_dump,
151 .dump_one = udp_diag_dump_one, 158 .dump_one = udp_diag_dump_one,
159 .idiag_get_info = udp_diag_get_info,
152 .idiag_type = IPPROTO_UDP, 160 .idiag_type = IPPROTO_UDP,
153}; 161};
154 162
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
167static const struct inet_diag_handler udplite_diag_handler = { 175static const struct inet_diag_handler udplite_diag_handler = {
168 .dump = udplite_diag_dump, 176 .dump = udplite_diag_dump,
169 .dump_one = udplite_diag_dump_one, 177 .dump_one = udplite_diag_dump_one,
178 .idiag_get_info = udp_diag_get_info,
170 .idiag_type = IPPROTO_UDPLITE, 179 .idiag_type = IPPROTO_UDPLITE,
171}; 180};
172 181
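The udp_diag.c hunk above adds an idiag_get_info callback so UDP and UDP-Lite sockets report their receive and send queue sizes the way TCP already does. A toy version of the callback wiring, with stand-in types instead of the inet_diag structures:

#include <stdio.h>

struct fake_sock     { unsigned int rmem_alloc, wmem_alloc; };
struct fake_diag_msg { unsigned int idiag_rqueue, idiag_wqueue; };

struct fake_diag_handler {
    void (*get_info)(struct fake_sock *sk, struct fake_diag_msg *r);
};

/* Mirrors the shape of udp_diag_get_info(): copy queue sizes into the reply. */
static void udp_like_get_info(struct fake_sock *sk, struct fake_diag_msg *r)
{
    r->idiag_rqueue = sk->rmem_alloc;
    r->idiag_wqueue = sk->wmem_alloc;
}

static const struct fake_diag_handler handler = { .get_info = udp_like_get_info };

int main(void)
{
    struct fake_sock sk = { .rmem_alloc = 512, .wmem_alloc = 128 };
    struct fake_diag_msg msg = { 0, 0 };

    handler.get_info(&sk, &msg);
    printf("rqueue=%u wqueue=%u\n", msg.idiag_rqueue, msg.idiag_wqueue);
    return 0;
}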
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a3bb6077e19..7d5cb975cc6f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
803 ip6_del_rt(rt); 803 ip6_del_rt(rt);
804 rt = NULL; 804 rt = NULL;
805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { 805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
806 rt->dst.expires = expires; 806 rt6_set_expires(rt, expires);
807 rt->rt6i_flags |= RTF_EXPIRES;
808 } 807 }
809 } 808 }
810 dst_release(&rt->dst); 809 dst_release(&rt->dst);
@@ -1887,11 +1886,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1887 rt = NULL; 1886 rt = NULL;
1888 } else if (addrconf_finite_timeout(rt_expires)) { 1887 } else if (addrconf_finite_timeout(rt_expires)) {
1889 /* not infinity */ 1888 /* not infinity */
1890 rt->dst.expires = jiffies + rt_expires; 1889 rt6_set_expires(rt, jiffies + rt_expires);
1891 rt->rt6i_flags |= RTF_EXPIRES;
1892 } else { 1890 } else {
1893 rt->rt6i_flags &= ~RTF_EXPIRES; 1891 rt6_clean_expires(rt);
1894 rt->dst.expires = 0;
1895 } 1892 }
1896 } else if (valid_lft) { 1893 } else if (valid_lft) {
1897 clock_t expires = 0; 1894 clock_t expires = 0;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5b27fbcae346..93717435013e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
673 &rt->rt6i_gateway)) { 673 &rt->rt6i_gateway)) {
674 if (!(iter->rt6i_flags & RTF_EXPIRES)) 674 if (!(iter->rt6i_flags & RTF_EXPIRES))
675 return -EEXIST; 675 return -EEXIST;
676 iter->dst.expires = rt->dst.expires; 676 if (!(rt->rt6i_flags & RTF_EXPIRES))
677 if (!(rt->rt6i_flags & RTF_EXPIRES)) { 677 rt6_clean_expires(iter);
678 iter->rt6i_flags &= ~RTF_EXPIRES; 678 else
679 iter->dst.expires = 0; 679 rt6_set_expires(iter, rt->dst.expires);
680 }
681 return -EEXIST; 680 return -EEXIST;
682 } 681 }
683 } 682 }
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3dcdb81ec3e8..176b469322ac 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1264,8 +1264,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1264 } 1264 }
1265 1265
1266 if (rt) 1266 if (rt)
1267 rt->dst.expires = jiffies + (HZ * lifetime); 1267 rt6_set_expires(rt, jiffies + (HZ * lifetime));
1268
1269 if (ra_msg->icmph.icmp6_hop_limit) { 1268 if (ra_msg->icmph.icmp6_hop_limit) {
1270 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1269 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1271 if (rt) 1270 if (rt)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcdc..9d4e15559319 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 78
79 Hence the start of any table is given by get_table() below. */ 79 Hence the start of any table is given by get_table() below. */
80 80
81/* Check for an extension */
82int
83ip6t_ext_hdr(u8 nexthdr)
84{
85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS);
92}
93
94/* Returns whether matches rule or not. */ 81/* Returns whether matches rule or not. */
95/* Performance critical - called for every packet */ 82/* Performance critical - called for every packet */
96static inline bool 83static inline bool
@@ -2366,7 +2353,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2366EXPORT_SYMBOL(ip6t_register_table); 2353EXPORT_SYMBOL(ip6t_register_table);
2367EXPORT_SYMBOL(ip6t_unregister_table); 2354EXPORT_SYMBOL(ip6t_unregister_table);
2368EXPORT_SYMBOL(ip6t_do_table); 2355EXPORT_SYMBOL(ip6t_do_table);
2369EXPORT_SYMBOL(ip6t_ext_hdr);
2370EXPORT_SYMBOL(ipv6_find_hdr); 2356EXPORT_SYMBOL(ipv6_find_hdr);
2371 2357
2372module_init(ip6_tables_init); 2358module_init(ip6_tables_init);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3992e26a6039..bc4888d902b2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,7 +62,7 @@
62#include <linux/sysctl.h> 62#include <linux/sysctl.h>
63#endif 63#endif
64 64
65static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 65static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
66 const struct in6_addr *dest); 66 const struct in6_addr *dest);
67static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 67static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
68static unsigned int ip6_default_advmss(const struct dst_entry *dst); 68static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
285 rt->rt6i_idev = NULL; 285 rt->rt6i_idev = NULL;
286 in6_dev_put(idev); 286 in6_dev_put(idev);
287 } 287 }
288
289 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
290 dst_release(dst->from);
291
288 if (peer) { 292 if (peer) {
289 rt->rt6i_peer = NULL; 293 rt->rt6i_peer = NULL;
290 inet_putpeer(peer); 294 inet_putpeer(peer);
@@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
329 333
330static __inline__ int rt6_check_expired(const struct rt6_info *rt) 334static __inline__ int rt6_check_expired(const struct rt6_info *rt)
331{ 335{
332 return (rt->rt6i_flags & RTF_EXPIRES) && 336 struct rt6_info *ort = NULL;
333 time_after(jiffies, rt->dst.expires); 337
338 if (rt->rt6i_flags & RTF_EXPIRES) {
339 if (time_after(jiffies, rt->dst.expires))
340 return 1;
341 } else if (rt->dst.from) {
342 ort = (struct rt6_info *) rt->dst.from;
343 return (ort->rt6i_flags & RTF_EXPIRES) &&
344 time_after(jiffies, ort->dst.expires);
345 }
346 return 0;
334} 347}
335 348
336static inline int rt6_need_strict(const struct in6_addr *daddr) 349static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
620 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 633 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
621 634
622 if (rt) { 635 if (rt) {
623 if (!addrconf_finite_timeout(lifetime)) { 636 if (!addrconf_finite_timeout(lifetime))
624 rt->rt6i_flags &= ~RTF_EXPIRES; 637 rt6_clean_expires(rt);
625 } else { 638 else
626 rt->dst.expires = jiffies + HZ * lifetime; 639 rt6_set_expires(rt, jiffies + HZ * lifetime);
627 rt->rt6i_flags |= RTF_EXPIRES; 640
628 }
629 dst_release(&rt->dst); 641 dst_release(&rt->dst);
630 } 642 }
631 return 0; 643 return 0;
@@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt)
730 return __ip6_ins_rt(rt, &info); 742 return __ip6_ins_rt(rt, &info);
731} 743}
732 744
733static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, 745static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
734 const struct in6_addr *daddr, 746 const struct in6_addr *daddr,
735 const struct in6_addr *saddr) 747 const struct in6_addr *saddr)
736{ 748{
@@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
954 rt->rt6i_idev = ort->rt6i_idev; 966 rt->rt6i_idev = ort->rt6i_idev;
955 if (rt->rt6i_idev) 967 if (rt->rt6i_idev)
956 in6_dev_hold(rt->rt6i_idev); 968 in6_dev_hold(rt->rt6i_idev);
957 rt->dst.expires = 0;
958 969
959 rt->rt6i_gateway = ort->rt6i_gateway; 970 rt->rt6i_gateway = ort->rt6i_gateway;
960 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 971 rt->rt6i_flags = ort->rt6i_flags;
972 rt6_clean_expires(rt);
961 rt->rt6i_metric = 0; 973 rt->rt6i_metric = 0;
962 974
963 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 975 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
1019 1031
1020 rt = (struct rt6_info *) skb_dst(skb); 1032 rt = (struct rt6_info *) skb_dst(skb);
1021 if (rt) { 1033 if (rt) {
1022 if (rt->rt6i_flags & RTF_CACHE) { 1034 if (rt->rt6i_flags & RTF_CACHE)
1023 dst_set_expires(&rt->dst, 0); 1035 rt6_update_expires(rt, 0);
1024 rt->rt6i_flags |= RTF_EXPIRES; 1036 else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1025 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1026 rt->rt6i_node->fn_sernum = -1; 1037 rt->rt6i_node->fn_sernum = -1;
1027 } 1038 }
1028} 1039}
@@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
1289 } 1300 }
1290 1301
1291 rt->dst.obsolete = -1; 1302 rt->dst.obsolete = -1;
1292 rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? 1303
1293 jiffies + clock_t_to_jiffies(cfg->fc_expires) : 1304 if (cfg->fc_flags & RTF_EXPIRES)
1294 0; 1305 rt6_set_expires(rt, jiffies +
1306 clock_t_to_jiffies(cfg->fc_expires));
1307 else
1308 rt6_clean_expires(rt);
1295 1309
1296 if (cfg->fc_protocol == RTPROT_UNSPEC) 1310 if (cfg->fc_protocol == RTPROT_UNSPEC)
1297 cfg->fc_protocol = RTPROT_BOOT; 1311 cfg->fc_protocol = RTPROT_BOOT;
@@ -1736,8 +1750,8 @@ again:
1736 features |= RTAX_FEATURE_ALLFRAG; 1750 features |= RTAX_FEATURE_ALLFRAG;
1737 dst_metric_set(&rt->dst, RTAX_FEATURES, features); 1751 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1738 } 1752 }
1739 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1753 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1740 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1754 rt->rt6i_flags |= RTF_MODIFIED;
1741 goto out; 1755 goto out;
1742 } 1756 }
1743 1757
@@ -1765,9 +1779,8 @@ again:
1765 * which is 10 mins. After 10 mins the decreased pmtu is expired 1779 * which is 10 mins. After 10 mins the decreased pmtu is expired
1766 * and detecting PMTU increase will be automatically happened. 1780 * and detecting PMTU increase will be automatically happened.
1767 */ 1781 */
1768 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1782 rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1769 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1783 nrt->rt6i_flags |= RTF_DYNAMIC;
1770
1771 ip6_ins_rt(nrt); 1784 ip6_ins_rt(nrt);
1772 } 1785 }
1773out: 1786out:
@@ -1799,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
1799 * Misc support functions 1812 * Misc support functions
1800 */ 1813 */
1801 1814
1802static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 1815static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1803 const struct in6_addr *dest) 1816 const struct in6_addr *dest)
1804{ 1817{
1805 struct net *net = dev_net(ort->dst.dev); 1818 struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1819 if (rt->rt6i_idev) 1832 if (rt->rt6i_idev)
1820 in6_dev_hold(rt->rt6i_idev); 1833 in6_dev_hold(rt->rt6i_idev);
1821 rt->dst.lastuse = jiffies; 1834 rt->dst.lastuse = jiffies;
1822 rt->dst.expires = 0;
1823 1835
1824 rt->rt6i_gateway = ort->rt6i_gateway; 1836 rt->rt6i_gateway = ort->rt6i_gateway;
1825 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 1837 rt->rt6i_flags = ort->rt6i_flags;
1838 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1839 (RTF_DEFAULT | RTF_ADDRCONF))
1840 rt6_set_from(rt, ort);
1841 else
1842 rt6_clean_expires(rt);
1826 rt->rt6i_metric = 0; 1843 rt->rt6i_metric = 0;
1827 1844
1828#ifdef CONFIG_IPV6_SUBTREES 1845#ifdef CONFIG_IPV6_SUBTREES
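The route.c changes above make a cloned route that lacks its own RTF_EXPIRES flag defer to the route it was copied from (dst.from) when deciding whether it has expired. A compact sketch of that check, using stand-in types and an explicit "now" instead of rt6_info and jiffies:

#include <stdbool.h>
#include <stdio.h>

#define RTF_EXPIRES 0x1

struct fake_rt {
    unsigned int flags;
    unsigned long expires;
    const struct fake_rt *from;   /* route this one was cloned from, if any */
};

static bool rt_check_expired(const struct fake_rt *rt, unsigned long now)
{
    if (rt->flags & RTF_EXPIRES)
        return now > rt->expires;
    if (rt->from)
        return (rt->from->flags & RTF_EXPIRES) && now > rt->from->expires;
    return false;
}

int main(void)
{
    struct fake_rt parent = { .flags = RTF_EXPIRES, .expires = 100 };
    struct fake_rt clone  = { .flags = 0, .expires = 0, .from = &parent };

    printf("clone expired at t=50:  %d\n", rt_check_expired(&clone, 50));
    printf("clone expired at t=150: %d\n", rt_check_expired(&clone, 150));
    return 0;
}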
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece67f39..98256cf72f9d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1383,6 +1383,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1383 tcp_mtup_init(newsk); 1383 tcp_mtup_init(newsk);
1384 tcp_sync_mss(newsk, dst_mtu(dst)); 1384 tcp_sync_mss(newsk, dst_mtu(dst));
1385 newtp->advmss = dst_metric_advmss(dst); 1385 newtp->advmss = dst_metric_advmss(dst);
1386 if (tcp_sk(sk)->rx_opt.user_mss &&
1387 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1388 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1389
1386 tcp_initialize_rcv_mss(newsk); 1390 tcp_initialize_rcv_mss(newsk);
1387 if (tcp_rsk(req)->snt_synack) 1391 if (tcp_rsk(req)->snt_synack)
1388 tcp_valid_rtt_meas(newsk, 1392 tcp_valid_rtt_meas(newsk,
@@ -1645,7 +1649,7 @@ process:
1645#ifdef CONFIG_NET_DMA 1649#ifdef CONFIG_NET_DMA
1646 struct tcp_sock *tp = tcp_sk(sk); 1650 struct tcp_sock *tp = tcp_sk(sk);
1647 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1651 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1648 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1652 tp->ucopy.dma_chan = net_dma_find_channel();
1649 if (tp->ucopy.dma_chan) 1653 if (tp->ucopy.dma_chan)
1650 ret = tcp_v6_do_rcv(sk, skb); 1654 ret = tcp_v6_do_rcv(sk, skb);
1651 else 1655 else
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11dbb2255ccb..7e5d927b576f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3480 3480
3481 /* Addresses to be used by KM for negotiation, if ext is available */ 3481 /* Addresses to be used by KM for negotiation, if ext is available */
3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) 3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
3483 return -EINVAL; 3483 goto err;
3484 3484
3485 /* selector src */ 3485 /* selector src */
3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); 3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 55670ec3cd0f..6274f0be82b0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -232,7 +232,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
232{ 232{
233 write_lock_bh(&l2tp_ip_lock); 233 write_lock_bh(&l2tp_ip_lock);
234 hlist_del_init(&sk->sk_bind_node); 234 hlist_del_init(&sk->sk_bind_node);
235 hlist_del_init(&sk->sk_node); 235 sk_del_node_init(sk);
236 write_unlock_bh(&l2tp_ip_lock); 236 write_unlock_bh(&l2tp_ip_lock);
237 sk_common_release(sk); 237 sk_common_release(sk);
238} 238}
@@ -271,7 +271,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
272 goto out; 272 goto out;
273 273
274 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 274 if (addr->l2tp_addr.s_addr)
275 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
275 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 276 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
276 inet->inet_saddr = 0; /* Use device */ 277 inet->inet_saddr = 0; /* Use device */
277 sk_dst_reset(sk); 278 sk_dst_reset(sk);
@@ -441,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
441 442
442 daddr = lip->l2tp_addr.s_addr; 443 daddr = lip->l2tp_addr.s_addr;
443 } else { 444 } else {
445 rc = -EDESTADDRREQ;
444 if (sk->sk_state != TCP_ESTABLISHED) 446 if (sk->sk_state != TCP_ESTABLISHED)
445 return -EDESTADDRREQ; 447 goto out;
446 448
447 daddr = inet->inet_daddr; 449 daddr = inet->inet_daddr;
448 connected = 1; 450 connected = 1;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33fd8d9f714e..cef7c29214a8 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -457,8 +457,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
457 * fall back to HT20 if we don't use or use 457 * fall back to HT20 if we don't use or use
458 * the other extension channel 458 * the other extension channel
459 */ 459 */
460 if ((channel_type == NL80211_CHAN_HT40MINUS || 460 if (!(channel_type == NL80211_CHAN_HT40MINUS ||
461 channel_type == NL80211_CHAN_HT40PLUS) && 461 channel_type == NL80211_CHAN_HT40PLUS) ||
462 channel_type != sdata->u.ibss.channel_type) 462 channel_type != sdata->u.ibss.channel_type)
463 sta_ht_cap_new.cap &= 463 sta_ht_cap_new.cap &=
464 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 464 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f20..db8fae51714c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1210 struct sk_buff *skb); 1210 struct sk_buff *skb);
1211void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1211void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1212void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1212void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1213void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata); 1213void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1214 1214
1215/* IBSS code */ 1215/* IBSS code */
1216void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1216void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731e..c20051b7ffcd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
486 /* free all potentially still buffered bcast frames */ 486 /* free all potentially still buffered bcast frames */
487 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); 487 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
488 skb_queue_purge(&sdata->u.ap.ps_bc_buf); 488 skb_queue_purge(&sdata->u.ap.ps_bc_buf);
489 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
490 ieee80211_mgd_stop(sdata);
489 } 491 }
490 492
491 if (going_down) 493 if (going_down)
@@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
644 646
645 if (ieee80211_vif_is_mesh(&sdata->vif)) 647 if (ieee80211_vif_is_mesh(&sdata->vif))
646 mesh_rmc_free(sdata); 648 mesh_rmc_free(sdata);
647 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
648 ieee80211_mgd_teardown(sdata);
649 649
650 flushed = sta_info_flush(local, sdata); 650 flushed = sta_info_flush(local, sdata);
651 WARN_ON(flushed); 651 WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 576fb25456dd..20c680bfc3ae 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3387,8 +3387,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3387 */ 3387 */
3388 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", 3388 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
3389 sdata->name, ifmgd->bssid); 3389 sdata->name, ifmgd->bssid);
3390 assoc_data->timeout = jiffies + 3390 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3391 TU_TO_EXP_TIME(req->bss->beacon_interval);
3392 } else { 3391 } else {
3393 assoc_data->have_beacon = true; 3392 assoc_data->have_beacon = true;
3394 assoc_data->sent_assoc = false; 3393 assoc_data->sent_assoc = false;
@@ -3498,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
3498 return 0; 3497 return 0;
3499} 3498}
3500 3499
3501void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata) 3500void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
3502{ 3501{
3503 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3502 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3504 3503
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bcfe8c77c839..d64e285400aa 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -103,7 +103,7 @@ static void
103ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 103ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
104 struct sk_buff *skb, 104 struct sk_buff *skb,
105 struct ieee80211_rate *rate, 105 struct ieee80211_rate *rate,
106 int rtap_len) 106 int rtap_len, bool has_fcs)
107{ 107{
108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
109 struct ieee80211_radiotap_header *rthdr; 109 struct ieee80211_radiotap_header *rthdr;
@@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
134 } 134 }
135 135
136 /* IEEE80211_RADIOTAP_FLAGS */ 136 /* IEEE80211_RADIOTAP_FLAGS */
137 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 137 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
138 *pos |= IEEE80211_RADIOTAP_F_FCS; 138 *pos |= IEEE80211_RADIOTAP_F_FCS;
139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
140 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
294 } 294 }
295 295
296 /* prepend radiotap information */ 296 /* prepend radiotap information */
297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
298 true);
298 299
299 skb_reset_mac_header(skb); 300 skb_reset_mac_header(skb);
300 skb->ip_summed = CHECKSUM_UNNECESSARY; 301 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2571,7 +2572,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2571 goto out_free_skb; 2572 goto out_free_skb;
2572 2573
2573 /* prepend radiotap information */ 2574 /* prepend radiotap information */
2574 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 2575 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2576 false);
2575 2577
2576 skb_set_mac_header(skb, 0); 2578 skb_set_mac_header(skb, 0);
2577 skb->ip_summed = CHECKSUM_UNNECESSARY; 2579 skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df4..e76facc69e95 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1158 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1158 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1159 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1159 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1160 return TX_DROP; 1160 return TX_DROP;
1161 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { 1161 } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
1162 tx->sdata->control_port_protocol == tx->skb->protocol) {
1162 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1163 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1163 } 1164 }
1164 if (!tx->sta) 1165 if (!tx->sta)
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5139dea6019e..828ce46cb34b 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -364,6 +364,7 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
364{ 364{
365 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 365 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
366 u8 netmask, hbits; 366 u8 netmask, hbits;
367 size_t hsize;
367 struct ip_set_hash *h; 368 struct ip_set_hash *h;
368 369
369 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 370 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
@@ -405,9 +406,12 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
405 h->timeout = IPSET_NO_TIMEOUT; 406 h->timeout = IPSET_NO_TIMEOUT;
406 407
407 hbits = htable_bits(hashsize); 408 hbits = htable_bits(hashsize);
408 h->table = ip_set_alloc( 409 hsize = htable_size(hbits);
409 sizeof(struct htable) 410 if (hsize == 0) {
410 + jhash_size(hbits) * sizeof(struct hbucket)); 411 kfree(h);
412 return -ENOMEM;
413 }
414 h->table = ip_set_alloc(hsize);
411 if (!h->table) { 415 if (!h->table) {
412 kfree(h); 416 kfree(h);
413 return -ENOMEM; 417 return -ENOMEM;
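Each ip_set_hash_* hunk in this series (this file and the six that follow) replaces an open-coded allocation size with htable_size(hbits) and bails out with -ENOMEM when it returns 0. The point is overflow detection before allocating. A sketch of that guard with illustrative header and bucket sizes rather than the real struct htable layout:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Return the byte size of a table of 2^bits buckets, or 0 if the size
 * computation would overflow, so the caller can refuse the allocation. */
static size_t table_size(unsigned int bits, size_t header, size_t bucket)
{
    size_t nbuckets;

    if (bits >= sizeof(size_t) * 8)
        return 0;                        /* shift itself would overflow */
    nbuckets = (size_t)1 << bits;
    if (nbuckets > (SIZE_MAX - header) / bucket)
        return 0;                        /* multiply/add would overflow */
    return header + nbuckets * bucket;
}

int main(void)
{
    printf("10 bits -> %zu bytes\n", table_size(10, 64, 48));
    printf("60 bits -> %zu bytes\n", table_size(60, 64, 48)); /* 0: refused */
    return 0;
}

The same pattern repeats in the other six ipset hash types below.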
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 9c27e249c171..e8dbb498af8f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -449,6 +449,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
449 struct ip_set_hash *h; 449 struct ip_set_hash *h;
450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
451 u8 hbits; 451 u8 hbits;
452 size_t hsize;
452 453
453 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 454 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
454 return -IPSET_ERR_INVALID_FAMILY; 455 return -IPSET_ERR_INVALID_FAMILY;
@@ -476,9 +477,12 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
476 h->timeout = IPSET_NO_TIMEOUT; 477 h->timeout = IPSET_NO_TIMEOUT;
477 478
478 hbits = htable_bits(hashsize); 479 hbits = htable_bits(hashsize);
479 h->table = ip_set_alloc( 480 hsize = htable_size(hbits);
480 sizeof(struct htable) 481 if (hsize == 0) {
481 + jhash_size(hbits) * sizeof(struct hbucket)); 482 kfree(h);
483 return -ENOMEM;
484 }
485 h->table = ip_set_alloc(hsize);
482 if (!h->table) { 486 if (!h->table) {
483 kfree(h); 487 kfree(h);
484 return -ENOMEM; 488 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 9134057c0728..52f79d8ef741 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -467,6 +467,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
467 struct ip_set_hash *h; 467 struct ip_set_hash *h;
468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
469 u8 hbits; 469 u8 hbits;
470 size_t hsize;
470 471
471 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 472 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
472 return -IPSET_ERR_INVALID_FAMILY; 473 return -IPSET_ERR_INVALID_FAMILY;
@@ -494,9 +495,12 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
494 h->timeout = IPSET_NO_TIMEOUT; 495 h->timeout = IPSET_NO_TIMEOUT;
495 496
496 hbits = htable_bits(hashsize); 497 hbits = htable_bits(hashsize);
497 h->table = ip_set_alloc( 498 hsize = htable_size(hbits);
498 sizeof(struct htable) 499 if (hsize == 0) {
499 + jhash_size(hbits) * sizeof(struct hbucket)); 500 kfree(h);
501 return -ENOMEM;
502 }
503 h->table = ip_set_alloc(hsize);
500 if (!h->table) { 504 if (!h->table) {
501 kfree(h); 505 kfree(h);
502 return -ENOMEM; 506 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5d05e6969862..97583f5af745 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -616,6 +616,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
616 struct ip_set_hash *h; 616 struct ip_set_hash *h;
617 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 617 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
618 u8 hbits; 618 u8 hbits;
619 size_t hsize;
619 620
620 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 621 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
621 return -IPSET_ERR_INVALID_FAMILY; 622 return -IPSET_ERR_INVALID_FAMILY;
@@ -645,9 +646,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
645 h->timeout = IPSET_NO_TIMEOUT; 646 h->timeout = IPSET_NO_TIMEOUT;
646 647
647 hbits = htable_bits(hashsize); 648 hbits = htable_bits(hashsize);
648 h->table = ip_set_alloc( 649 hsize = htable_size(hbits);
649 sizeof(struct htable) 650 if (hsize == 0) {
650 + jhash_size(hbits) * sizeof(struct hbucket)); 651 kfree(h);
652 return -ENOMEM;
653 }
654 h->table = ip_set_alloc(hsize);
651 if (!h->table) { 655 if (!h->table) {
652 kfree(h); 656 kfree(h);
653 return -ENOMEM; 657 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 7c3d945517cf..1721cdecc9f9 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -460,6 +460,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
460 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 460 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
461 struct ip_set_hash *h; 461 struct ip_set_hash *h;
462 u8 hbits; 462 u8 hbits;
463 size_t hsize;
463 464
464 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 465 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
465 return -IPSET_ERR_INVALID_FAMILY; 466 return -IPSET_ERR_INVALID_FAMILY;
@@ -489,9 +490,12 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
489 h->timeout = IPSET_NO_TIMEOUT; 490 h->timeout = IPSET_NO_TIMEOUT;
490 491
491 hbits = htable_bits(hashsize); 492 hbits = htable_bits(hashsize);
492 h->table = ip_set_alloc( 493 hsize = htable_size(hbits);
493 sizeof(struct htable) 494 if (hsize == 0) {
494 + jhash_size(hbits) * sizeof(struct hbucket)); 495 kfree(h);
496 return -ENOMEM;
497 }
498 h->table = ip_set_alloc(hsize);
495 if (!h->table) { 499 if (!h->table) {
496 kfree(h); 500 kfree(h);
497 return -ENOMEM; 501 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index f24037ff4322..33bafc97ca6d 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -722,6 +722,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
722 struct ip_set_hash *h; 722 struct ip_set_hash *h;
723 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 723 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
724 u8 hbits; 724 u8 hbits;
725 size_t hsize;
725 726
726 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 727 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
727 return -IPSET_ERR_INVALID_FAMILY; 728 return -IPSET_ERR_INVALID_FAMILY;
@@ -752,9 +753,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
752 h->ahash_max = AHASH_MAX_SIZE; 753 h->ahash_max = AHASH_MAX_SIZE;
753 754
754 hbits = htable_bits(hashsize); 755 hbits = htable_bits(hashsize);
755 h->table = ip_set_alloc( 756 hsize = htable_size(hbits);
756 sizeof(struct htable) 757 if (hsize == 0) {
757 + jhash_size(hbits) * sizeof(struct hbucket)); 758 kfree(h);
759 return -ENOMEM;
760 }
761 h->table = ip_set_alloc(hsize);
758 if (!h->table) { 762 if (!h->table) {
759 kfree(h); 763 kfree(h);
760 return -ENOMEM; 764 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ce2e77100b64..3a5e198641d6 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -572,6 +572,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
572 struct ip_set_hash *h; 572 struct ip_set_hash *h;
573 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 573 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
574 u8 hbits; 574 u8 hbits;
575 size_t hsize;
575 576
576 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 577 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
577 return -IPSET_ERR_INVALID_FAMILY; 578 return -IPSET_ERR_INVALID_FAMILY;
@@ -601,9 +602,12 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
601 h->timeout = IPSET_NO_TIMEOUT; 602 h->timeout = IPSET_NO_TIMEOUT;
602 603
603 hbits = htable_bits(hashsize); 604 hbits = htable_bits(hashsize);
604 h->table = ip_set_alloc( 605 hsize = htable_size(hbits);
605 sizeof(struct htable) 606 if (hsize == 0) {
606 + jhash_size(hbits) * sizeof(struct hbucket)); 607 kfree(h);
608 return -ENOMEM;
609 }
610 h->table = ip_set_alloc(hsize);
607 if (!h->table) { 611 if (!h->table) {
608 kfree(h); 612 kfree(h);
609 return -ENOMEM; 613 return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..00bdb1d9d690 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
1924control_fail: 1924control_fail:
1925 ip_vs_estimator_net_cleanup(net); 1925 ip_vs_estimator_net_cleanup(net);
1926estimator_fail: 1926estimator_fail:
1927 net->ipvs = NULL;
1927 return -ENOMEM; 1928 return -ENOMEM;
1928} 1929}
1929 1930
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
1936 ip_vs_control_net_cleanup(net); 1937 ip_vs_control_net_cleanup(net);
1937 ip_vs_estimator_net_cleanup(net); 1938 ip_vs_estimator_net_cleanup(net);
1938 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); 1939 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1940 net->ipvs = NULL;
1939} 1941}
1940 1942
1941static void __net_exit __ip_vs_dev_cleanup(struct net *net) 1943static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
1993 goto cleanup_dev; 1995 goto cleanup_dev;
1994 } 1996 }
1995 1997
1998 ret = ip_vs_register_nl_ioctl();
1999 if (ret < 0) {
2000 pr_err("can't register netlink/ioctl.\n");
2001 goto cleanup_hooks;
2002 }
2003
1996 pr_info("ipvs loaded.\n"); 2004 pr_info("ipvs loaded.\n");
1997 2005
1998 return ret; 2006 return ret;
1999 2007
2008cleanup_hooks:
2009 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2000cleanup_dev: 2010cleanup_dev:
2001 unregister_pernet_device(&ipvs_core_dev_ops); 2011 unregister_pernet_device(&ipvs_core_dev_ops);
2002cleanup_sub: 2012cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
2012 2022
2013static void __exit ip_vs_cleanup(void) 2023static void __exit ip_vs_cleanup(void)
2014{ 2024{
2025 ip_vs_unregister_nl_ioctl();
2015 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2026 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2016 unregister_pernet_device(&ipvs_core_dev_ops); 2027 unregister_pernet_device(&ipvs_core_dev_ops);
2017 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2028 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
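The ip_vs_core.c hunks above split netlink/ioctl registration out of ip_vs_control_init() and give ip_vs_init() a matching cleanup_hooks label, so a failure late in module init unwinds the earlier registrations in reverse order. A self-contained sketch of that shape; the register and unregister helpers are stand-ins that just print, not the ipvs functions.

#include <stdio.h>

static int  register_pernet(void)    { puts("pernet registered");    return 0; }
static void unregister_pernet(void)  { puts("pernet unregistered");  }
static int  register_hooks(void)     { puts("hooks registered");     return 0; }
static void unregister_hooks(void)   { puts("hooks unregistered");   }
static int  register_nl_ioctl(void)  { puts("netlink/ioctl failed"); return -1; }

static int module_init_sketch(void)
{
    int ret;

    ret = register_pernet();
    if (ret < 0)
        goto out;
    ret = register_hooks();
    if (ret < 0)
        goto cleanup_pernet;
    ret = register_nl_ioctl();
    if (ret < 0)
        goto cleanup_hooks;   /* same shape as the new cleanup_hooks label */
    return 0;

cleanup_hooks:
    unregister_hooks();
cleanup_pernet:
    unregister_pernet();
out:
    return ret;
}

int main(void)
{
    return module_init_sketch() ? 1 : 0;
}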
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..f5589987fc80 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3680 return 0; 3680 return 0;
3681} 3681}
3682 3682
3683void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) 3683void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3684{ 3684{
3685 struct netns_ipvs *ipvs = net_ipvs(net); 3685 struct netns_ipvs *ipvs = net_ipvs(net);
3686 3686
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
3692#else 3692#else
3693 3693
3694int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } 3694int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
3695void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { } 3695void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
3696 3696
3697#endif 3697#endif
3698 3698
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3750 free_percpu(ipvs->tot_stats.cpustats); 3750 free_percpu(ipvs->tot_stats.cpustats);
3751} 3751}
3752 3752
3753int __init ip_vs_control_init(void) 3753int __init ip_vs_register_nl_ioctl(void)
3754{ 3754{
3755 int idx;
3756 int ret; 3755 int ret;
3757 3756
3758 EnterFunction(2);
3759
3760 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3761 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3762 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3763 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3764 }
3765
3766 smp_wmb(); /* Do we really need it now ? */
3767
3768 ret = nf_register_sockopt(&ip_vs_sockopts); 3757 ret = nf_register_sockopt(&ip_vs_sockopts);
3769 if (ret) { 3758 if (ret) {
3770 pr_err("cannot register sockopt.\n"); 3759 pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
3776 pr_err("cannot register Generic Netlink interface.\n"); 3765 pr_err("cannot register Generic Netlink interface.\n");
3777 goto err_genl; 3766 goto err_genl;
3778 } 3767 }
3779
3780 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3781 if (ret < 0)
3782 goto err_notf;
3783
3784 LeaveFunction(2);
3785 return 0; 3768 return 0;
3786 3769
3787err_notf:
3788 ip_vs_genl_unregister();
3789err_genl: 3770err_genl:
3790 nf_unregister_sockopt(&ip_vs_sockopts); 3771 nf_unregister_sockopt(&ip_vs_sockopts);
3791err_sock: 3772err_sock:
3792 return ret; 3773 return ret;
3793} 3774}
3794 3775
3776void ip_vs_unregister_nl_ioctl(void)
3777{
3778 ip_vs_genl_unregister();
3779 nf_unregister_sockopt(&ip_vs_sockopts);
3780}
3781
3782int __init ip_vs_control_init(void)
3783{
3784 int idx;
3785 int ret;
3786
3787 EnterFunction(2);
3788
3789 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3790 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3791 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3792 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3793 }
3794
3795 smp_wmb(); /* Do we really need it now ? */
3796
3797 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3798 if (ret < 0)
3799 return ret;
3800
3801 LeaveFunction(2);
3802 return 0;
3803}
3804
3795 3805
3796void ip_vs_control_cleanup(void) 3806void ip_vs_control_cleanup(void)
3797{ 3807{
3798 EnterFunction(2); 3808 EnterFunction(2);
3799 unregister_netdevice_notifier(&ip_vs_dst_notifier); 3809 unregister_netdevice_notifier(&ip_vs_dst_notifier);
3800 ip_vs_genl_unregister();
3801 nf_unregister_sockopt(&ip_vs_sockopts);
3802 LeaveFunction(2); 3810 LeaveFunction(2);
3803} 3811}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..e39f693dd3e4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
439 struct ip_vs_app *app; 439 struct ip_vs_app *app;
440 struct netns_ipvs *ipvs = net_ipvs(net); 440 struct netns_ipvs *ipvs = net_ipvs(net);
441 441
442 if (!ipvs)
443 return -ENOENT;
442 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); 444 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
443 if (!app) 445 if (!app)
444 return -ENOMEM; 446 return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..caa43704e55e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
551{ 551{
552 struct netns_ipvs *ipvs = net_ipvs(net); 552 struct netns_ipvs *ipvs = net_ipvs(net);
553 553
554 if (!ipvs)
555 return -ENOENT;
556
554 if (!net_eq(net, &init_net)) { 557 if (!net_eq(net, &init_net)) {
555 ipvs->lblc_ctl_table = kmemdup(vs_vars_table, 558 ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
556 sizeof(vs_vars_table), 559 sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..548bf37aa29e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
745{ 745{
746 struct netns_ipvs *ipvs = net_ipvs(net); 746 struct netns_ipvs *ipvs = net_ipvs(net);
747 747
748 if (!ipvs)
749 return -ENOENT;
750
748 if (!net_eq(net, &init_net)) { 751 if (!net_eq(net, &init_net)) {
749 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, 752 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
750 sizeof(vs_vars_table), 753 sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a8833250..ed835e67a07e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
59 return 0; 59 return 0;
60} 60}
61 61
62#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
63 defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
64 defined(CONFIG_IP_VS_PROTO_ESP)
65/* 62/*
66 * register an ipvs protocols netns related data 63 * register an ipvs protocols netns related data
67 */ 64 */
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
81 ipvs->proto_data_table[hash] = pd; 78 ipvs->proto_data_table[hash] = pd;
82 atomic_set(&pd->appcnt, 0); /* Init app counter */ 79 atomic_set(&pd->appcnt, 0); /* Init app counter */
83 80
84 if (pp->init_netns != NULL) 81 if (pp->init_netns != NULL) {
85 pp->init_netns(net, pd); 82 int ret = pp->init_netns(net, pd);
83 if (ret) {
84 /* unlink an free proto data */
85 ipvs->proto_data_table[hash] = pd->next;
86 kfree(pd);
87 return ret;
88 }
89 }
86 90
87 return 0; 91 return 0;
88} 92}
89#endif
90 93
91/* 94/*
92 * unregister an ipvs protocol 95 * unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
316 */ 319 */
317int __net_init ip_vs_protocol_net_init(struct net *net) 320int __net_init ip_vs_protocol_net_init(struct net *net)
318{ 321{
322 int i, ret;
323 static struct ip_vs_protocol *protos[] = {
319#ifdef CONFIG_IP_VS_PROTO_TCP 324#ifdef CONFIG_IP_VS_PROTO_TCP
320 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); 325 &ip_vs_protocol_tcp,
321#endif 326#endif
322#ifdef CONFIG_IP_VS_PROTO_UDP 327#ifdef CONFIG_IP_VS_PROTO_UDP
323 register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); 328 &ip_vs_protocol_udp,
324#endif 329#endif
325#ifdef CONFIG_IP_VS_PROTO_SCTP 330#ifdef CONFIG_IP_VS_PROTO_SCTP
326 register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); 331 &ip_vs_protocol_sctp,
327#endif 332#endif
328#ifdef CONFIG_IP_VS_PROTO_AH 333#ifdef CONFIG_IP_VS_PROTO_AH
329 register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); 334 &ip_vs_protocol_ah,
330#endif 335#endif
331#ifdef CONFIG_IP_VS_PROTO_ESP 336#ifdef CONFIG_IP_VS_PROTO_ESP
332 register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); 337 &ip_vs_protocol_esp,
333#endif 338#endif
339 };
340
341 for (i = 0; i < ARRAY_SIZE(protos); i++) {
342 ret = register_ip_vs_proto_netns(net, protos[i]);
343 if (ret < 0)
344 goto cleanup;
345 }
334 return 0; 346 return 0;
347
348cleanup:
349 ip_vs_protocol_net_cleanup(net);
350 return ret;
335} 351}
336 352
337void __net_exit ip_vs_protocol_net_cleanup(struct net *net) 353void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
1090 * timeouts is netns related now. 1090 * timeouts is netns related now.
1091 * --------------------------------------------- 1091 * ---------------------------------------------
1092 */ 1092 */
1093static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) 1093static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1094{ 1094{
1095 struct netns_ipvs *ipvs = net_ipvs(net); 1095 struct netns_ipvs *ipvs = net_ipvs(net);
1096 1096
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1098 spin_lock_init(&ipvs->sctp_app_lock); 1098 spin_lock_init(&ipvs->sctp_app_lock);
1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, 1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
1100 sizeof(sctp_timeouts)); 1100 sizeof(sctp_timeouts));
1101 if (!pd->timeout_table)
1102 return -ENOMEM;
1103 return 0;
1101} 1104}
1102 1105
1103static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) 1106static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
677 * timeouts is netns related now. 677 * timeouts is netns related now.
678 * --------------------------------------------- 678 * ---------------------------------------------
679 */ 679 */
680static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) 680static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
681{ 681{
682 struct netns_ipvs *ipvs = net_ipvs(net); 682 struct netns_ipvs *ipvs = net_ipvs(net);
683 683
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
685 spin_lock_init(&ipvs->tcp_app_lock); 685 spin_lock_init(&ipvs->tcp_app_lock);
686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, 686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
687 sizeof(tcp_timeouts)); 687 sizeof(tcp_timeouts));
688 if (!pd->timeout_table)
689 return -ENOMEM;
688 pd->tcp_state_table = tcp_states; 690 pd->tcp_state_table = tcp_states;
691 return 0;
689} 692}
690 693
691static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) 694static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; 467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
468} 468}
469 469
470static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) 470static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
471{ 471{
472 struct netns_ipvs *ipvs = net_ipvs(net); 472 struct netns_ipvs *ipvs = net_ipvs(net);
473 473
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
475 spin_lock_init(&ipvs->udp_app_lock); 475 spin_lock_init(&ipvs->udp_app_lock);
476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, 476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
477 sizeof(udp_timeouts)); 477 sizeof(udp_timeouts));
478 if (!pd->timeout_table)
479 return -ENOMEM;
480 return 0;
478} 481}
479 482
480static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) 483static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3cc4487ac349..729f157a0efa 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1592,7 +1592,7 @@ static int nf_conntrack_init_net(struct net *net)
1592 return 0; 1592 return 0;
1593 1593
1594err_timeout: 1594err_timeout:
1595 nf_conntrack_timeout_fini(net); 1595 nf_conntrack_ecache_fini(net);
1596err_ecache: 1596err_ecache:
1597 nf_conntrack_tstamp_fini(net); 1597 nf_conntrack_tstamp_fini(net);
1598err_tstamp: 1598err_tstamp:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 361eade62a09..0d07a1dcf605 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -584,8 +584,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
584 * Let's try to use the data from the packet. 584 * Let's try to use the data from the packet.
585 */ 585 */
586 sender->td_end = end; 586 sender->td_end = end;
587 win <<= sender->td_scale; 587 swin = win << sender->td_scale;
588 sender->td_maxwin = (win == 0 ? 1 : win); 588 sender->td_maxwin = (swin == 0 ? 1 : swin);
589 sender->td_maxend = end + sender->td_maxwin; 589 sender->td_maxend = end + sender->td_maxwin;
590 /* 590 /*
591 * We haven't seen traffic in the other direction yet 591 * We haven't seen traffic in the other direction yet
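The nf_conntrack_proto_tcp.c hunk above shifts the announced window into a temporary (swin) so the zero check is applied to the scaled value without clobbering win itself. The same fix in isolation, with plain integers instead of the conntrack state structures:

#include <stdio.h>

/* Scale the window, keep the original value intact, and never record 0. */
static unsigned int scaled_maxwin(unsigned int win, unsigned int td_scale)
{
    unsigned int swin = win << td_scale;
    return swin == 0 ? 1 : swin;
}

int main(void)
{
    printf("win=512 scale=7 -> maxwin=%u\n", scaled_maxwin(512, 7));
    printf("win=0   scale=7 -> maxwin=%u\n", scaled_maxwin(0, 7));
    return 0;
}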
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..3746d8b9a478 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
227 } 227 }
228 228
229#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 229#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
230 if (info->timeout) { 230 if (info->timeout[0]) {
231 typeof(nf_ct_timeout_find_get_hook) timeout_find_get; 231 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
232 struct nf_conn_timeout *timeout_ext; 232 struct nf_conn_timeout *timeout_ext;
233 233
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 7b76eb7192f3..ef10ffcb4b6f 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -474,7 +474,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 
 	while (remaining_len > 0) {
 
-		frag_len = min_t(u16, local->remote_miu, remaining_len);
+		frag_len = min_t(size_t, local->remote_miu, remaining_len);
 
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
@@ -497,7 +497,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 		release_sock(sk);
 
 		remaining_len -= frag_len;
-		msg_ptr += len;
+		msg_ptr += frag_len;
 	}
 
 	kfree(msg_data);
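The two hunks above fix the fragmentation loop: the fragment size is computed in size_t so it is not truncated through a 16-bit type, and the read pointer advances by the fragment just sent rather than by the total length. A user-space sketch of the corrected loop, with illustrative names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void send_fragments(const char *msg, size_t len, size_t miu)
{
	const char *msg_ptr = msg;
	size_t remaining_len = len;

	while (remaining_len > 0) {
		size_t frag_len = MIN(miu, remaining_len);	/* size_t, not u16 */

		printf("fragment %zu bytes: %.*s\n",
		       frag_len, (int)frag_len, msg_ptr);
		remaining_len -= frag_len;
		msg_ptr += frag_len;	/* was "+= len" before the fix */
	}
}

int main(void)
{
	const char payload[] = "abcdefghij";

	send_fragments(payload, strlen(payload), 4);
	return 0;
}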
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631ea952..e66341ec455c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -321,7 +321,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
 			return -ENOMEM;
 
 		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
-		if (!skb)
+		if (!nskb)
 			return -ENOMEM;
 
 		nskb->vlan_tci = 0;
@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
 	return validate_actions(actions, key, depth + 1);
 }
 
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+	if (flow_key->eth.type == htons(ETH_P_IP)) {
+		if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+			return 0;
+	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+		if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int validate_set(const struct nlattr *a,
 			const struct sw_flow_key *flow_key)
 {
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
 		if (flow_key->ip.proto != IPPROTO_TCP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-
-		break;
+		return validate_tp_port(flow_key);
 
 	case OVS_KEY_ATTR_UDP:
 		if (flow_key->ip.proto != IPPROTO_UDP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-		break;
+		return validate_tp_port(flow_key);
 
 	default:
 		return -EINVAL;
@@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
 		netlink_set_err(init_net.genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, err);
-		return 0;
+				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+		goto exit_unlock;
 	}
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
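Besides factoring the duplicated port checks into validate_tp_port(), the last hunk replaces a bare "return 0" with "goto exit_unlock", presumably so the error path leaves through the function's common cleanup label instead of skipping it. A generic user-space sketch of that exit-through-one-label pattern, with a pthread mutex standing in for whatever lock the kernel code holds (an assumption, not shown in the hunk):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dp_mutex = PTHREAD_MUTEX_INITIALIZER;

static int vport_cmd(int fail)
{
	int err = 0;

	pthread_mutex_lock(&dp_mutex);
	if (fail) {
		err = -EINVAL;
		goto exit_unlock;	/* do not return with the lock still held */
	}
	printf("command ran\n");
exit_unlock:
	pthread_mutex_unlock(&dp_mutex);
	return err;
}

int main(void)
{
	printf("%d %d\n", vport_cmd(0), vport_cmd(1));
	return 0;
}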
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef1..2a11ec2383ee 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 	u8 tcp_flags = 0;
 
 	if (flow->key.eth.type == htons(ETH_P_IP) &&
-	    flow->key.ip.proto == IPPROTO_TCP) {
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		u8 *tcp = (u8 *)tcp_hdr(skb);
 		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
 	}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b9a85ecc4c7..bf5cf69c820a 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net)
 
 static void __net_exit phonet_exit_net(struct net *net)
 {
-	struct phonet_net *pnn = phonet_pernet(net);
-	struct net_device *dev;
-	unsigned i;
-
-	rtnl_lock();
-	for_each_netdev(net, dev)
-		phonet_device_destroy(dev);
-
-	for (i = 0; i < 64; i++) {
-		dev = pnn->routes.table[i];
-		if (dev) {
-			rtm_phonet_notify(RTM_DELROUTE, dev, i);
-			dev_put(dev);
-		}
-	}
-	rtnl_unlock();
-
 	proc_net_remove(net, "phonet");
 }
 
@@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = {
 /* Initialize Phonet devices list */
 int __init phonet_device_init(void)
 {
-	int err = register_pernet_device(&phonet_net_ops);
+	int err = register_pernet_subsys(&phonet_net_ops);
 	if (err)
 		return err;
 
@@ -377,7 +360,7 @@ void phonet_device_exit(void)
 {
 	rtnl_unregister_all(PF_PHONET);
 	unregister_netdevice_notifier(&phonet_device_notifier);
-	unregister_pernet_device(&phonet_net_ops);
+	unregister_pernet_subsys(&phonet_net_ops);
 	proc_net_remove(&init_net, "pnresource");
 }
 
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0b15236be7b6..8179494c269a 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -565,11 +565,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.packets = q->packetsin;
 		opt.bytesin = q->bytesin;
 
-		if (gred_wred_mode(table)) {
-			q->vars.qidlestart =
-				table->tab[table->def]->vars.qidlestart;
-			q->vars.qavg = table->tab[table->def]->vars.qavg;
-		}
+		if (gred_wred_mode(table))
+			gred_load_wred_set(table, q);
 
 		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..ebd22966f748 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb))) {
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
+		     skb_checksum_help(skb)))
+			return qdisc_drop(skb, sch);
 
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f41..8fc4dcd294ab 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 */
 	skb_set_owner_w(nskb, sk);
 
-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-	if (!dst || (dst->obsolete > 1)) {
-		dst_release(dst);
+	if (!sctp_transport_dst_check(tp)) {
 		sctp_transport_route(tp, NULL, sctp_sk(sk));
 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
 			sctp_assoc_sync_pmtu(asoc);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b04..b026ba0c6992 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
-	struct dst_entry *dst = t->dst;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
-		dst_release(t->dst);
-		t->dst = NULL;
-		return NULL;
-	}
-
-	return dst;
-}
-
 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
 	struct dst_entry *dst;
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c7..782bfe1b6465 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
 int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
 {
 	struct gss_api_mech *pos = NULL;
-	int i = 0;
+	int j, i = 0;
 
 	spin_lock(&registered_mechs_lock);
 	list_for_each_entry(pos, &registered_mechs, gm_list) {
-		array_ptr[i] = pos->gm_pfs->pseudoflavor;
-		i++;
+		for (j=0; j < pos->gm_pf_num; j++) {
+			array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
+		}
 	}
 	spin_unlock(&registered_mechs_lock);
 	return i;
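The rewritten loop reports every pseudoflavor of every registered mechanism instead of only the first entry of each gm_pfs array. A user-space sketch of the flattening; the struct layout and values are illustrative:

#include <stdio.h>

struct gss_pf { unsigned int pseudoflavor; };
struct gss_mech { const struct gss_pf *gm_pfs; int gm_pf_num; };

static int list_pseudoflavors(const struct gss_mech *mechs, int n_mechs,
			      unsigned int *array_ptr)
{
	int i = 0, j, m;

	for (m = 0; m < n_mechs; m++)
		for (j = 0; j < mechs[m].gm_pf_num; j++)	/* all flavors, not just [0] */
			array_ptr[i++] = mechs[m].gm_pfs[j].pseudoflavor;
	return i;
}

int main(void)
{
	const struct gss_pf krb5[] = { { 390003 }, { 390004 }, { 390005 } };
	const struct gss_mech mechs[] = { { krb5, 3 } };
	unsigned int flavors[8];
	int n = list_pseudoflavors(mechs, 1, flavors);

	printf("%d flavors, first=%u last=%u\n", n, flavors[0], flavors[n - 1]);
	return 0;
}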
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 67972462a543..adf2990acebf 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -176,16 +176,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
 	return 0;
 }
 
-static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
-			      struct super_block *sb)
+static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
+{
+	if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
+	    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		return 1;
+	return 0;
+}
+
+static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
+				   struct super_block *sb)
 {
 	struct dentry *dentry;
 	int err = 0;
 
 	switch (event) {
 	case RPC_PIPEFS_MOUNT:
-		if (clnt->cl_program->pipe_dir_name == NULL)
-			break;
 		dentry = rpc_setup_pipedir_sb(sb, clnt,
 					      clnt->cl_program->pipe_dir_name);
 		BUG_ON(dentry == NULL);
@@ -208,6 +214,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
 	return err;
 }
 
+static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
+			      struct super_block *sb)
+{
+	int error = 0;
+
+	for (;; clnt = clnt->cl_parent) {
+		if (!rpc_clnt_skip_event(clnt, event))
+			error = __rpc_clnt_handle_event(clnt, event, sb);
+		if (error || clnt == clnt->cl_parent)
+			break;
+	}
+	return error;
+}
+
 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
@@ -215,10 +235,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 
 	spin_lock(&sn->rpc_client_lock);
 	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
-		if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
-		    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		if (clnt->cl_program->pipe_dir_name == NULL)
+			break;
+		if (rpc_clnt_skip_event(clnt, event))
+			continue;
+		if (atomic_inc_not_zero(&clnt->cl_count) == 0)
 			continue;
-		atomic_inc(&clnt->cl_count);
 		spin_unlock(&sn->rpc_client_lock);
 		return clnt;
 	}
@@ -257,6 +279,14 @@ void rpc_clients_notifier_unregister(void)
 	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
 }
 
+static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+{
+	clnt->cl_nodelen = strlen(nodename);
+	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+		clnt->cl_nodelen = UNX_MAXNODENAME;
+	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+}
+
 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
 {
 	const struct rpc_program *program = args->program;
@@ -337,10 +367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
 	}
 
 	/* save the nodename */
-	clnt->cl_nodelen = strlen(init_utsname()->nodename);
-	if (clnt->cl_nodelen > UNX_MAXNODENAME)
-		clnt->cl_nodelen = UNX_MAXNODENAME;
-	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
+	rpc_clnt_set_nodename(clnt, utsname()->nodename);
 	rpc_register_client(clnt);
 	return clnt;
 
@@ -499,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
 	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
 	if (err != 0)
 		goto out_no_path;
+	rpc_clnt_set_nodename(new, utsname()->nodename);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
 	atomic_inc(&clnt->cl_count);
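The clnt.c hunks consolidate the clamp-and-copy of the node name into rpc_clnt_set_nodename() and call it from both the create and the clone path. A user-space sketch of the helper; the UNX_MAXNODENAME value used here is assumed for illustration:

#include <stdio.h>
#include <string.h>

#define UNX_MAXNODENAME 32	/* assumed limit, for illustration only */

struct clnt {
	unsigned int cl_nodelen;
	char cl_nodename[UNX_MAXNODENAME];
};

/* One helper instead of open-coding strlen/clamp/memcpy in each caller. */
static void clnt_set_nodename(struct clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlen(nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
}

int main(void)
{
	struct clnt c;

	clnt_set_nodename(&c, "buildhost.example.net");
	printf("%.*s (%u bytes)\n", (int)c.cl_nodelen, c.cl_nodename, c.cl_nodelen);
	return 0;
}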
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0af37fc46818..3b62cf288031 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1126,19 +1126,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 	dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
 		NET_NAME(net));
+	sn->pipefs_sb = sb;
 	err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_MOUNT,
 					   sb);
 	if (err)
 		goto err_depopulate;
 	sb->s_fs_info = get_net(net);
-	sn->pipefs_sb = sb;
 	return 0;
 
err_depopulate:
 	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 				     RPC_PIPEFS_UMOUNT,
 				     sb);
+	sn->pipefs_sb = NULL;
 	__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
 	return err;
 }
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8adfc88e793a..3d6498af9adc 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -75,20 +75,21 @@ static struct pernet_operations sunrpc_net_ops = {
static int __init
init_sunrpc(void)
{
-	int err = register_rpc_pipefs();
+	int err = rpc_init_mempool();
 	if (err)
 		goto out;
-	err = rpc_init_mempool();
-	if (err)
-		goto out2;
 	err = rpcauth_init_module();
 	if (err)
-		goto out3;
+		goto out2;
 
 	cache_initialize();
 
 	err = register_pernet_subsys(&sunrpc_net_ops);
 	if (err)
+		goto out3;
+
+	err = register_rpc_pipefs();
+	if (err)
 		goto out4;
 #ifdef RPC_DEBUG
 	rpc_register_sysctl();
@@ -98,11 +99,11 @@ init_sunrpc(void)
 	return 0;
 
out4:
-	rpcauth_remove_module();
+	unregister_pernet_subsys(&sunrpc_net_ops);
out3:
-	rpc_destroy_mempool();
+	rpcauth_remove_module();
out2:
-	unregister_rpc_pipefs();
+	rpc_destroy_mempool();
out:
 	return err;
}
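The reordering above registers pipefs last and makes each error label undo only the steps that already succeeded, in reverse order of setup. A compact user-space sketch of that init/unwind discipline; the step names are illustrative:

#include <stdio.h>

static int step_init(const char *name) { printf("init %s\n", name); return 0; }
static void step_undo(const char *name) { printf("undo %s\n", name); }

static int init_all(int fail_at)
{
	int err;

	err = (fail_at == 1) ? -1 : step_init("mempool");
	if (err)
		goto out;
	err = (fail_at == 2) ? -1 : step_init("auth");
	if (err)
		goto out_mempool;
	err = (fail_at == 3) ? -1 : step_init("pernet");
	if (err)
		goto out_auth;
	err = (fail_at == 4) ? -1 : step_init("pipefs");
	if (err)
		goto out_pernet;
	return 0;

out_pernet:			/* labels undo in reverse order of setup */
	step_undo("pernet");
out_auth:
	step_undo("auth");
out_mempool:
	step_undo("mempool");
out:
	return err;
}

int main(void)
{
	int err = init_all(4);	/* force the last step to fail to show the unwinding */

	printf("init_all returned %d\n", err);
	return 0;
}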
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e49da2797022..f432c57af05d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1294,6 +1294,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 			goto bad_res;
 		}
 
+		if (!netif_running(netdev)) {
+			result = -ENETDOWN;
+			goto bad_res;
+		}
+
 		nla_for_each_nested(nl_txq_params,
 				    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
 				    rem_txq_params) {
@@ -6384,7 +6389,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_get_key,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6416,7 +6421,7 @@ static struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_set_beacon,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6424,7 +6429,7 @@ static struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_start_ap,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6432,7 +6437,7 @@ static struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.doit = nl80211_stop_ap,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6448,7 +6453,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_set_station,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6464,7 +6469,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_del_station,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6497,7 +6502,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_del_mpath,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6505,7 +6510,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_set_bss,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6531,7 +6536,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_get_mesh_config,
 		.policy = nl80211_policy,
 		/* can be retrieved by unprivileged users */
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6664,7 +6669,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_setdel_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6672,7 +6677,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_setdel_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6680,7 +6685,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_flush_pmksa,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
@@ -6840,7 +6845,7 @@ static struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_probe_client,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
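The first nl80211 hunk adds an explicit netif_running() check where the command operates on an optional netdev, while the remaining hunks switch commands from NL80211_FLAG_NEED_NETDEV to NL80211_FLAG_NEED_NETDEV_UP so the "interface must be up" test is presumably enforced once by the shared dispatch code rather than in each handler. A user-space sketch of that flag-driven dispatch; the flag names and values are made up, not the nl80211 constants:

#include <errno.h>
#include <stdio.h>

#define FLAG_NEED_NETDEV	0x1
#define FLAG_NEED_NETDEV_UP	0x2	/* device must also be running */

struct op {
	const char *name;
	unsigned int flags;
	int (*doit)(void);
};

static int netdev_running;

static int set_station(void) { printf("set_station ran\n"); return 0; }

/* One central check instead of a netif_running() test in every handler. */
static int dispatch(const struct op *op)
{
	if ((op->flags & FLAG_NEED_NETDEV_UP) && !netdev_running)
		return -ENETDOWN;
	return op->doit();
}

int main(void)
{
	const struct op set_station_op = { "set_station", FLAG_NEED_NETDEV_UP, set_station };

	printf("down: %d\n", dispatch(&set_station_op));
	netdev_running = 1;
	printf("up: %d\n", dispatch(&set_station_op));
	return 0;
}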
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1b7a08df933c..957f25621617 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -989,7 +989,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 		if (rdev->wiphy.software_iftypes & BIT(iftype))
 			continue;
 		for (j = 0; j < c->n_limits; j++) {
-			if (!(limits[j].types & iftype))
+			if (!(limits[j].types & BIT(iftype)))
 				continue;
 			if (limits[j].max < num[iftype])
 				goto cont;
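The one-character fix above matters because limits[j].types is a bitmap of interface types while iftype is an index; masking with the raw index tests unrelated low bits. A standalone sketch of the difference, with made-up values:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int iftype = 3;		/* an interface-type index */
	unsigned int types = BIT(3) | BIT(5);	/* bitmap of supported types */

	/* "types & iftype" checks bits 0 and 1, which say nothing about type 3. */
	printf("old test: %s\n", (types & iftype) ? "match" : "no match");
	printf("new test: %s\n", (types & BIT(iftype)) ? "match" : "no match");
	return 0;
}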
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0af7f54e4f61..af648e08e61b 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -780,8 +780,10 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
 		if (cmd == SIOCSIWENCODEEXT) {
 			struct iw_encode_ext *ee = (void *) extra;
 
-			if (iwp->length < sizeof(*ee) + ee->key_len)
-				return -EFAULT;
+			if (iwp->length < sizeof(*ee) + ee->key_len) {
+				err = -EFAULT;
+				goto out;
+			}
 		}
 	}
 