Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c                       |  10
-rw-r--r--  net/bluetooth/af_bluetooth.c         |  12
-rw-r--r--  net/bluetooth/hci_conn.c             |   4
-rw-r--r--  net/bluetooth/hci_core.c             |   3
-rw-r--r--  net/bluetooth/l2cap_core.c           |  24
-rw-r--r--  net/bluetooth/l2cap_sock.c           |   4
-rw-r--r--  net/bluetooth/rfcomm/core.c          |  18
-rw-r--r--  net/bluetooth/rfcomm/sock.c          |   2
-rw-r--r--  net/caif/caif_socket.c               |  10
-rw-r--r--  net/caif/cfmuxl.c                    |  12
-rw-r--r--  net/ceph/ceph_common.c               |   2
-rw-r--r--  net/ceph/mon_client.c                |  13
-rw-r--r--  net/core/dev.c                       |  10
-rw-r--r--  net/core/ethtool.c                   |   2
-rw-r--r--  net/core/neighbour.c                 |   2
-rw-r--r--  net/core/netpoll.c                   |   2
-rw-r--r--  net/core/netprio_cgroup.c            |  15
-rw-r--r--  net/core/rtnetlink.c                 |  78
-rw-r--r--  net/core/sock.c                      |   7
-rw-r--r--  net/ipv4/Kconfig                     |   2
-rw-r--r--  net/ipv4/arp.c                       |   3
-rw-r--r--  net/ipv4/ip_gre.c                    |  10
-rw-r--r--  net/ipv4/ip_options.c                |   2
-rw-r--r--  net/ipv4/ping.c                      |   1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c           |   6
-rw-r--r--  net/ipv4/tcp.c                       |  28
-rw-r--r--  net/ipv4/tcp_input.c                 |  45
-rw-r--r--  net/ipv4/tcp_ipv4.c                  |   5
-rw-r--r--  net/ipv4/tcp_timer.c                 |   5
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c           |   5
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c         |   6
-rw-r--r--  net/ipv6/ip6mr.c                     |   4
-rw-r--r--  net/ipv6/ndisc.c                     |   5
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c           |   6
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c         |   6
-rw-r--r--  net/mac80211/debugfs_sta.c           |   4
-rw-r--r--  net/mac80211/main.c                  |   4
-rw-r--r--  net/mac80211/rate.c                  |   2
-rw-r--r--  net/mac80211/rate.h                  |   3
-rw-r--r--  net/mac80211/rx.c                    |   2
-rw-r--r--  net/mac80211/sta_info.h              |   2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c      |   2
-rw-r--r--  net/netfilter/nf_conntrack_core.c    |  38
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c |  46
-rw-r--r--  net/netfilter/nf_queue.c             |  40
-rw-r--r--  net/netfilter/xt_TEE.c               |   5
-rw-r--r--  net/rxrpc/ar-key.c                   |   4
-rw-r--r--  net/sched/sch_choke.c                |   3
-rw-r--r--  net/sched/sch_netem.c                |   9
-rw-r--r--  net/sched/sch_sfb.c                  |   3
-rw-r--r--  net/sched/sch_sfq.c                  |   5
51 files changed, 328 insertions, 213 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c
index c12c2582457c..127fe70a1baa 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -46,8 +46,8 @@
 
 static struct net_device *clip_devs;
 static struct atm_vcc *atmarpd;
-static struct neigh_table clip_tbl;
 static struct timer_list idle_timer;
+static const struct neigh_ops clip_neigh_ops;
 
 static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 {
@@ -123,6 +123,8 @@ static int neigh_check_cb(struct neighbour *n)
 	struct atmarp_entry *entry = neighbour_priv(n);
 	struct clip_vcc *cv;
 
+	if (n->ops != &clip_neigh_ops)
+		return 0;
 	for (cv = entry->vccs; cv; cv = cv->next) {
 		unsigned long exp = cv->last_use + cv->idle_timeout;
 
@@ -154,10 +156,10 @@ static int neigh_check_cb(struct neighbour *n)
 
 static void idle_timer_check(unsigned long dummy)
 {
-	write_lock(&clip_tbl.lock);
-	__neigh_for_each_release(&clip_tbl, neigh_check_cb);
+	write_lock(&arp_tbl.lock);
+	__neigh_for_each_release(&arp_tbl, neigh_check_cb);
 	mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-	write_unlock(&clip_tbl.lock);
+	write_unlock(&arp_tbl.lock);
 }
 
 static int clip_arp_rcv(struct sk_buff *skb)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ef92864ac625..72eb187a5f60 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -71,19 +71,16 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
 	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
 };
 
-static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+void bt_sock_reclassify_lock(struct sock *sk, int proto)
 {
-	struct sock *sk = sock->sk;
-
-	if (!sk)
-		return;
-
+	BUG_ON(!sk);
 	BUG_ON(sock_owned_by_user(sk));
 
 	sock_lock_init_class_and_name(sk,
 			bt_slock_key_strings[proto], &bt_slock_key[proto],
 			bt_key_strings[proto], &bt_lock_key[proto]);
 }
+EXPORT_SYMBOL(bt_sock_reclassify_lock);
 
 int bt_sock_register(int proto, const struct net_proto_family *ops)
 {
@@ -145,7 +142,8 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 
 	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
 		err = bt_proto[proto]->create(net, sock, proto, kern);
-		bt_sock_reclassify_lock(sock, proto);
+		if (!err)
+			bt_sock_reclassify_lock(sock->sk, proto);
 		module_put(bt_proto[proto]->owner);
 	}
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3db432473ad5..07bc69ed9498 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -635,6 +635,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
 		struct hci_cp_auth_requested cp;
+
+		/* encrypt must be pending if auth is also pending */
+		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
 			     sizeof(cp), &cp);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9de93714213a..5aeb62491198 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -640,7 +640,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	/* Reset device */
 	skb_queue_purge(&hdev->cmd_q);
 	atomic_set(&hdev->cmd_cnt, 1);
-	if (!test_bit(HCI_RAW, &hdev->flags)) {
+	if (!test_bit(HCI_RAW, &hdev->flags) &&
+	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_request(hdev, hci_reset_req, 0,
 			      msecs_to_jiffies(250));
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index faf0b11ac1d3..32d338c30e65 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1018,10 +1018,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	hci_chan_del(conn->hchan);
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-		__cancel_delayed_work(&conn->info_timer);
+		cancel_delayed_work_sync(&conn->info_timer);
 
 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
-		__cancel_delayed_work(&conn->security_timer);
+		cancel_delayed_work_sync(&conn->security_timer);
 		smp_chan_destroy(conn);
 	}
 
@@ -1120,7 +1120,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
 	return c1;
 }
 
-inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
 {
 	struct sock *sk = chan->sk;
 	bdaddr_t *src = &bt_sk(sk)->src;
@@ -2574,7 +2574,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
 
 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
 	    cmd->ident == conn->info_ident) {
-		__cancel_delayed_work(&conn->info_timer);
+		cancel_delayed_work(&conn->info_timer);
 
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
 		conn->info_ident = 0;
@@ -2970,7 +2970,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	default:
 		sk->sk_err = ECONNRESET;
-		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
+		__set_chan_timer(chan,
+				 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto done;
 	}
@@ -3120,7 +3121,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
 		return 0;
 
-	__cancel_delayed_work(&conn->info_timer);
+	cancel_delayed_work(&conn->info_timer);
 
 	if (result != L2CAP_IR_SUCCESS) {
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -4478,7 +4479,8 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
 	if (encrypt == 0x00) {
 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
 			__clear_chan_timer(chan);
-			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
+			__set_chan_timer(chan,
+					 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
 		} else if (chan->sec_level == BT_SECURITY_HIGH)
 			l2cap_chan_close(chan, ECONNREFUSED);
 	} else {
@@ -4499,7 +4501,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 
 	if (hcon->type == LE_LINK) {
 		smp_distribute_keys(conn, 0);
-		__cancel_delayed_work(&conn->security_timer);
+		cancel_delayed_work(&conn->security_timer);
 	}
 
 	rcu_read_lock();
@@ -4546,7 +4548,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 					       L2CAP_CONN_REQ, sizeof(req), &req);
 		} else {
 			__clear_chan_timer(chan);
-			__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+			__set_chan_timer(chan,
+					 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
 		}
 	} else if (chan->state == BT_CONNECT2) {
 		struct l2cap_conn_rsp rsp;
@@ -4566,7 +4569,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 		}
 	} else {
 		l2cap_state_change(chan, BT_DISCONN);
-		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+		__set_chan_timer(chan,
+				 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
 		res = L2CAP_CR_SEC_BLOCK;
 		stat = L2CAP_CS_NO_INFO;
 	}
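
The l2cap hunks above make two related changes: timers move from the old __cancel_delayed_work() helper onto the regular cancel_delayed_work()/cancel_delayed_work_sync() workqueue API, and the millisecond constants (L2CAP_DISC_TIMEOUT and friends) are now converted with msecs_to_jiffies() at every call site. A minimal sketch of the convention, assuming a simplified __set_chan_timer() that forwards to schedule_delayed_work() (the real macro differs):

	/* Sketch only: delayed-work timeouts are expressed in jiffies, so
	 * any millisecond constant must be converted before it reaches the
	 * helper.  This macro is an assumed, simplified stand-in.
	 */
	#define __set_chan_timer(chan, timeout) \
		schedule_delayed_work(&(chan)->chan_timer, (timeout))

	/* call sites therefore convert explicitly: */
	__set_chan_timer(chan, msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
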
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c61d967012b2..401d9428ae4c 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -849,6 +849,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
 	if (!sk)
 		return NULL;
 
+	bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
 	l2cap_sock_init(sk, parent);
 
 	return l2cap_pi(sk)->chan;
@@ -1002,7 +1004,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
 
 	sk->sk_destruct = l2cap_sock_destruct;
-	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
 
 	sock_reset_flag(sk, SOCK_ZAPPED);
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 501649bf5596..8a602388f1e7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,12 +1164,18 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
 		break;
 
 	case BT_DISCONN:
-		/* When socket is closed and we are not RFCOMM
-		 * initiator rfcomm_process_rx already calls
-		 * rfcomm_session_put() */
-		if (s->sock->sk->sk_state != BT_CLOSED)
-			if (list_empty(&s->dlcs))
-				rfcomm_session_put(s);
+		/* rfcomm_session_put is called later so don't do
+		 * anything here otherwise we will mess up the session
+		 * reference counter:
+		 *
+		 * (a) when we are the initiator dlc_unlink will drive
+		 * the reference counter to 0 (there is no initial put
+		 * after session_add)
+		 *
+		 * (b) when we are not the initiator rfcomm_rx_process
+		 * will explicitly call put to balance the initial hold
+		 * done after session add.
+		 */
 		break;
 	}
 }
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index f066678faeee..22169c3f1482 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -956,6 +956,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
 	if (!sk)
 		goto done;
 
+	bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
+
 	rfcomm_sock_init(sk, parent);
 	bacpy(&bt_sk(sk)->src, &src);
 	bacpy(&bt_sk(sk)->dst, &dst);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a98628086452..a97d97a3a512 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-	if (cf_sk->layer.dn == NULL)
+	if (cf_sk->layer.dn == NULL) {
+		kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		}
 		err = transmit_skb(skb, cf_sk,
 				msg->msg_flags&MSG_DONTWAIT, timeo);
-		if (err < 0) {
-			kfree_skb(skb);
+		if (err < 0)
+			/* skb is already freed */
 			goto pipe_err;
-		}
+
 		sent += size;
 	}
 
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e7..94b08612a4d8 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
-	int idx;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
 		     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
-		    layer->id != 0) {
-
-			idx = layer->id % UP_CACHE_SIZE;
-			spin_lock_bh(&muxl->receive_lock);
-			RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
-			list_del_rcu(&layer->node);
-			spin_unlock_bh(&muxl->receive_lock);
-		}
+		    layer->id != 0)
+			cfmuxl_remove_uplayer(layr, layer->id);
+
 		/* NOTE: ctrlcmd is not allowed to block */
 		layer->ctrlcmd(layer, ctrl, phyid);
 	}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3b..761ad9d6cc3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
 	} else {
 		pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
 		memcpy(&client->fsid, fsid, sizeof(*fsid));
-		ceph_debugfs_client_init(client);
-		client->have_fsid = true;
 	}
 	return 0;
 }
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42bd..1845cde26227 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
 
 #include <linux/ceph/mon_client.h>
 #include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
 #include <linux/ceph/decode.h>
-
 #include <linux/ceph/auth.h>
 
 /*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 	client->monc.monmap = monmap;
 	kfree(old);
 
+	if (!client->have_fsid) {
+		client->have_fsid = true;
+		mutex_unlock(&monc->mutex);
+		/*
+		 * do debugfs initialization without mutex to avoid
+		 * creating a locking dependency
+		 */
+		ceph_debugfs_client_init(client);
+		goto out_unlocked;
+	}
 out:
 	mutex_unlock(&monc->mutex);
+out_unlocked:
 	wake_up_all(&client->auth_wq);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
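
The __napi_gro_receive() change above replaces an unconditional Ethernet header compare with a two-tier check, so GRO can also match flows on devices whose link-layer header is not the standard 14 bytes. A compilable user-space sketch of the same strategy (the function name and the memcmp() stand-in for the kernel's hand-unrolled compare_ether_header() are illustrative):

	#include <string.h>

	#define ETH_HLEN 14

	/* return non-zero when the two link-layer headers differ */
	static int headers_differ(const unsigned char *a,
				  const unsigned char *b,
				  unsigned int maclen)
	{
		if (maclen == ETH_HLEN)
			/* common case: fixed-size Ethernet header */
			return memcmp(a, b, ETH_HLEN) != 0;
		/* longer headers fall back to a length-driven compare */
		return memcmp(a, b, maclen) != 0;
	}
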
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
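
The one-line ethtool fix forces NUL termination on the flash-file name copied in from user space before it reaches the driver, since nothing guarantees the caller supplied a terminated string. A small user-space sketch of the same defensive pattern (the struct here is illustrative; ETHTOOL_FLASH_MAX_FILENAME is 128 in the UAPI header):

	#include <stdio.h>

	#define ETHTOOL_FLASH_MAX_FILENAME 128

	struct flash_request {
		char data[ETHTOOL_FLASH_MAX_FILENAME];	/* untrusted input */
	};

	static void use_filename(struct flash_request *efl)
	{
		/* force termination before treating it as a C string */
		efl->data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
		printf("flashing from %s\n", efl->data);
	}
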
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..2a83914b0277 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
 		write_unlock_bh(&tbl->lock);
 		cond_resched();
 		write_lock_bh(&tbl->lock);
+		nht = rcu_dereference_protected(tbl->nht,
+						lockdep_is_held(&tbl->lock));
 	}
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	if (dev->priv_flags & IFF_SLAVE) {
+	if (dev->flags & IFF_SLAVE) {
 		if (dev->npinfo) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
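
In get_prioidx() the "bitmap full" test moves before set_bit() and inside the locked region: previously a full map made find_first_zero_bit() return the map size, and set_bit() then wrote one bit past the end of prioidx_map before the range check ever ran. A user-space sketch of the corrected ordering (a pthread mutex stands in for the kernel spinlock; the bitmap is open-coded and the size is illustrative):

	#include <pthread.h>

	#define MAP_BITS 128
	static unsigned char map[MAP_BITS / 8];
	static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

	static int alloc_idx(unsigned int *out)
	{
		unsigned int i;

		pthread_mutex_lock(&map_lock);
		for (i = 0; i < MAP_BITS; i++)
			if (!(map[i / 8] & (1u << (i % 8))))
				break;
		if (i == MAP_BITS) {	/* full: bail out while still locked */
			pthread_mutex_unlock(&map_lock);
			return -1;
		}
		map[i / 8] |= 1u << (i % 8);	/* only ever set a valid bit */
		pthread_mutex_unlock(&map_lock);
		*out = i;
		return 0;
	}
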
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 65aebd450027..606a6e8f3671 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 }
 
 /* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+				   u32 ext_filter_mask)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int num_vfs = dev_num_vf(dev->dev.parent);
 		size_t size = nla_total_size(sizeof(struct nlattr));
 		size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
 	return port_self_size;
 }
 
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+				     u32 ext_filter_mask)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
-	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+	       + nla_total_size(ext_filter_mask
+			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
-			    unsigned int flags)
+			    unsigned int flags, u32 ext_filter_mask)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			goto nla_put_failure;
 		copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent)
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
 		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
 
-	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int i;
 
 		struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -1055,6 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
+	nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+		    ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -1064,7 +1075,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
 					     NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq, 0,
-					     NLM_F_MULTI) <= 0)
+					     NLM_F_MULTI,
+					     ext_filter_mask) <= 0)
 				goto out;
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1112,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
+	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1509,8 +1522,6 @@ errout:
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-	min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
-				     min_ifinfo_dump_size);
 
 	return err;
 }
@@ -1842,6 +1853,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	struct net_device *dev = NULL;
 	struct sk_buff *nskb;
 	int err;
+	u32 ext_filter_mask = 0;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1850,6 +1862,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1861,12 +1876,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (dev == NULL)
 		return -ENODEV;
 
-	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
 	if (nskb == NULL)
 		return -ENOBUFS;
 
 	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-			       nlh->nlmsg_seq, 0, 0);
+			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -1877,8 +1892,31 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	return err;
 }
 
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
+	u16 min_ifinfo_dump_size = 0;
+
+	nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
+	if (!ext_filter_mask)
+		return NLMSG_GOODSIZE;
+	/*
+	 * traverse the list of net devices and compute the minimum
+	 * buffer size based upon the filter mask.
+	 */
+	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
							   ext_filter_mask));
+	}
+
 	return min_ifinfo_dump_size;
 }
 
@@ -1913,13 +1951,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
-	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
-	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -1977,7 +2013,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 		calcit = rtnl_get_calcit(family, type);
 		if (calcit)
-			min_dump_alloc = calcit(skb, nlh);
+			min_dump_alloc = calcit(skb, nlh);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
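
The rtnetlink changes introduce IFLA_EXT_MASK so a link dump only pays for the potentially huge VF attributes when the requester opts in via RTEXT_FILTER_VF, and rtnl_calcit() now sizes the dump buffer per request instead of from a global. A user-space sketch of such a request (error handling trimmed; the layout mirrors common iproute2 practice but the struct here is an assumption):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	struct req {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    ext_req;	/* IFLA_EXT_MASK attribute */
		__u32            ext_filter_mask;
	};

	int request_links_with_vfs(int fd)
	{
		struct req r;

		memset(&r, 0, sizeof(r));
		r.nlh.nlmsg_len = sizeof(r);
		r.nlh.nlmsg_type = RTM_GETLINK;
		r.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		r.ifm.ifi_family = AF_UNSPEC;
		r.ext_req.rta_type = IFLA_EXT_MASK;
		r.ext_req.rta_len = RTA_LENGTH(sizeof(__u32));
		r.ext_filter_mask = RTEXT_FILTER_VF;	/* opt in to VF info */

		return send(fd, &r, sizeof(r), 0) < 0 ? -1 : 0;
	}
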
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776f..d183262943d9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
 
 config INET_UDP_DIAG
 	tristate "UDP: socket monitoring interface"
-	depends on INET_DIAG
+	depends on INET_DIAG && (IPV6 || IPV6=n)
 	default n
 	---help---
 	  Support for UDP socket monitoring interface used by the ss tool.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f0..63e49890ad31 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
 		if (addr_type == RTN_UNICAST &&
 		    (arp_fwd_proxy(in_dev, dev, rt) ||
 		     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-		     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+		     (rt->dst.dev != dev &&
+		      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
 			n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 			if (n)
 				neigh_release(n);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6b3ca5ba4450..38673d2860e2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -65,7 +65,7 @@
    it is infeasible task. The most general solutions would be
    to keep skb->encapsulation counter (sort of local ttl),
    and silently drop packet when it expires. It is a good
-   solution, but it supposes maintaing new variable in ALL
+   solution, but it supposes maintaining new variable in ALL
    skb, even if no tunneling is used.
 
    Current solution: xmit_recursion breaks dead loops. This is a percpu
@@ -91,14 +91,14 @@
 
    One of them is to parse packet trying to detect inner encapsulation
    made by our node. It is difficult or even impossible, especially,
-   taking into account fragmentation. TO be short, tt is not solution at all.
+   taking into account fragmentation. TO be short, ttl is not solution at all.
 
    Current solution: The solution was UNEXPECTEDLY SIMPLE.
    We force DF flag on tunnels with preconfigured hop limit,
    that is ALL. :-) Well, it does not remove the problem completely,
    but exponential growth of network traffic is changed to linear
    (branches, that exceed pmtu are pruned) and tunnel mtu
-   fastly degrades to value <68, where looping stops.
+   rapidly degrades to value <68, where looping stops.
    Yes, it is not good if there exists a router in the loop,
    which does not force DF, even when encapsulating packets have DF set.
    But it is not our problem! Nobody could accuse us, we made
@@ -457,8 +457,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
    GRE tunnels with enabled checksum. Tell them "thank you".
 
    Well, I wonder, rfc1812 was written by Cisco employee,
-   what the hell these idiots break standrads established
-   by themself???
+   what the hell these idiots break standards established
+   by themselves???
    */
 
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f7679075..42dd1a90edea 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
-			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			ip_hdr(skb)->daddr = opt->nexthop;
+			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index aea5a199c37a..b072386cee21 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -630,6 +630,7 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
 
+	err = -EOPNOTSUPP;
 	if (flags & MSG_OOB)
 		goto out;
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4cb9cd2f2c39..7a7724da9bff 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
-	unsigned long limit;
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
@@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
 	tcp_init_mem(net);
-	limit = nr_free_buffer_pages() / 8;
-	limit = max(limit, 128UL);
-	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-	net->ipv4.sysctl_tcp_mem[1] = limit;
-	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
 
 	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
 			net_ipv4_ctl_path, table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 06373b4a449a..22ef5f9fd2ff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
 }
 EXPORT_SYMBOL(tcp_shutdown);
 
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+	bool too_many_orphans, out_of_socket_memory;
+
+	too_many_orphans = tcp_too_many_orphans(sk, shift);
+	out_of_socket_memory = tcp_out_of_memory(sk);
+
+	if (too_many_orphans && net_ratelimit())
+		pr_info("TCP: too many orphaned sockets\n");
+	if (out_of_socket_memory && net_ratelimit())
+		pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+	return too_many_orphans || out_of_socket_memory;
+}
+
 void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, 0)) {
-			if (net_ratelimit())
-				printk(KERN_INFO "TCP: too many of orphaned "
-				       "sockets\n");
+		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			NET_INC_STATS_BH(sock_net(sk),
@@ -3218,7 +3229,6 @@ __setup("thash_entries=", set_thash_entries);
 
 void tcp_init_mem(struct net *net)
 {
-	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	unsigned long limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
@@ -3230,7 +3240,8 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int i, max_share, cnt;
+	int max_share, cnt;
+	unsigned int i;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3273,7 +3284,7 @@ void __init tcp_init(void)
 					&tcp_hashinfo.bhash_size,
 					NULL,
 					64 * 1024);
-	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
@@ -3287,7 +3298,8 @@ void __init tcp_init(void)
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	tcp_init_mem(&init_net);
-	limit = nr_free_buffer_pages() / 8;
+	/* Set per-socket limits to no more than 1/128 the pressure threshold */
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
 	limit = max(limit, 128UL);
 	max_share = min(4UL*1024*1024, limit);
 
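
The last tcp.c hunk changes how the per-socket limit is derived from free memory: instead of dividing a page count by 8, the new expression converts pages to kilobytes, since shifting left by PAGE_SHIFT turns pages into bytes and shifting right by 10 divides by 1024. With 4 KiB pages that is a net << 2, i.e. 4 KB per page. A compilable illustration of the arithmetic (PAGE_SHIFT hard-coded here as an assumption):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

	int main(void)
	{
		unsigned long pages = 262144;	/* 1 GiB worth of pages */
		/* pages << (PAGE_SHIFT - 10) yields kilobytes */
		unsigned long kb = pages << (PAGE_SHIFT - 10);

		printf("%lu pages -> %lu KB\n", pages, kb);	/* 1048576 KB */
		return 0;
	}
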
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..53c8ce4046b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }
 
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
-			  struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+			  struct tcp_sacktag_state *state, u8 sacked,
+			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u8 sacked = TCP_SKB_CB(skb)->sacked;
 	int fack_count = state->fack_count;
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
 		if (tp->undo_marker && tp->undo_retrans &&
-		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
 	}
 
 	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
-	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+	if (!after(end_seq, tp->snd_una))
 		return sacked;
 
 	if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			/* New sack for not retransmitted frame,
 			 * which was in hole. It is reordering.
 			 */
-			if (before(TCP_SKB_CB(skb)->seq,
+			if (before(start_seq,
 				   tcp_highest_sack_seq(tp)))
 				state->reord = min(fack_count,
 						   state->reord);
 
 			/* SACK enhanced F-RTO (RFC4138; Appendix B) */
-			if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+			if (!after(end_seq, tp->frto_highmark))
 				state->flag |= FLAG_ONLY_ORIG_SACKED;
 		}
 
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 
 		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-		    before(TCP_SKB_CB(skb)->seq,
-			   TCP_SKB_CB(tp->lost_skb_hint)->seq))
+		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 			tp->lost_cnt_hint += pcount;
 
 		if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 	return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
 			   unsigned int pcount, int shifted, int mss,
@@ -1395,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
+	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
 	BUG_ON(!pcount);
 
-	if (skb == tp->lost_skb_hint)
+	/* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+	if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
 		tp->lost_cnt_hint += pcount;
 
 	TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1424,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		skb_shinfo(skb)->gso_type = 0;
 	}
 
-	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+	/* Adjust counters and hints for the newly sacked sequence range but
+	 * discard the return value since prev is already marked.
+	 */
+	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+			start_seq, end_seq, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 			break;
 
 		if (in_sack) {
-			TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-								  state,
-								  dup_sack,
-								  tcp_skb_pcount(skb));
+			TCP_SKB_CB(skb)->sacked =
+				tcp_sacktag_one(sk,
+						state,
+						TCP_SKB_CB(skb)->sacked,
+						TCP_SKB_CB(skb)->seq,
+						TCP_SKB_CB(skb)->end_seq,
+						dup_sack,
+						tcp_skb_pcount(skb));
 
 			if (!before(TCP_SKB_CB(skb)->seq,
 				    tcp_highest_sack_seq(tp)))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..94d683a61cba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+	/* When socket is gone, all binding information is lost.
+	 * routing might fail in this case. using iif for oif to
+	 * make sure we can deliver it
+	 */
+	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
 	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399df..cd2e0723266d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 	if (sk->sk_err_soft)
 		shift++;
 
-	if (tcp_too_many_orphans(sk, shift)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "Out of socket memory\n");
-
+	if (tcp_check_oom(sk, shift)) {
 		/* Catch exceptional cases, when connection requires reset.
 		 *      1. Last segment was sent recently. */
 		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 63418185f524..e3db3f915114 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_push(skb, sizeof(*iph));
 	skb_reset_network_header(skb);
-
-	memmove(skb->data - skb->mac_len, skb_mac_header(skb),
-		skb->mac_len);
-	skb_set_mac_header(skb, -skb->mac_len);
+	skb_mac_header_rebuild(skb);
 
 	xfrm4_beet_make_header(skb);
 
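
This is the first of four hunks in the set (the xfrm4 tunnel mode and both xfrm6 modes follow below) that collapse the open-coded mac-header copy into a shared skb_mac_header_rebuild() helper. Judging from the code it replaces, the helper presumably looks roughly like this sketch; the real definition lives in skbuff.h and also guards against an unset mac header:

	static inline void skb_mac_header_rebuild(struct sk_buff *skb)
	{
		if (skb_mac_header_was_set(skb)) {
			const unsigned char *old_mac = skb_mac_header(skb);

			/* point the mac header just before the new data
			 * start, then copy the saved header into place */
			skb_set_mac_header(skb, -skb->mac_len);
			memmove(skb_mac_header(skb), old_mac, skb->mac_len);
		}
	}
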
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 534972e114ac..ed4bf11ef9f4 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	const unsigned char *old_mac;
 	int err = -EINVAL;
 
 	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 	if (!(x->props.flags & XFRM_STATE_NOECN))
 		ipip_ecn_decapsulate(skb);
 
-	old_mac = skb_mac_header(skb);
-	skb_set_mac_header(skb, -skb->mac_len);
-	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
 	skb_reset_network_header(skb);
+	skb_mac_header_rebuild(skb);
+
 	err = 0;
 
 out:
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c7e95c8c579f..5aa3981a3922 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1926,8 +1926,10 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1926 }; 1926 };
1927 1927
1928 dst = ip6_route_output(net, NULL, &fl6); 1928 dst = ip6_route_output(net, NULL, &fl6);
1929 if (!dst) 1929 if (dst->error) {
1930 dst_release(dst);
1930 goto out_free; 1931 goto out_free;
1932 }
1931 1933
1932 skb_dst_drop(skb); 1934 skb_dst_drop(skb);
1933 skb_dst_set(skb, dst); 1935 skb_dst_set(skb, dst);
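ip6_route_output() never returns NULL: on failure it hands back a refcounted error route with dst->error set, so the old NULL test could never fire and the reference leaked on the error path. The corrected calling pattern, as used here and again in the ndisc.c and xt_TEE.c hunks below:

    dst = ip6_route_output(net, NULL, &fl6);
    if (dst->error) {          /* never NULL; failure lives in dst->error */
            dst_release(dst);  /* the error route is refcounted too */
            goto out_free;
    }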
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8f02ef88e59..c964958ac470 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1545,9 +1545,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1545 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); 1545 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
1546 1546
1547 dst = ip6_route_output(net, NULL, &fl6); 1547 dst = ip6_route_output(net, NULL, &fl6);
1548 if (dst == NULL) 1548 if (dst->error) {
1549 dst_release(dst);
1549 return; 1550 return;
1550 1551 }
1551 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); 1552 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
1552 if (IS_ERR(dst)) 1553 if (IS_ERR(dst))
1553 return; 1554 return;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index a81ce9450750..9949a356d62c 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) 80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
81{ 81{
82 struct ipv6hdr *ip6h; 82 struct ipv6hdr *ip6h;
83 const unsigned char *old_mac;
84 int size = sizeof(struct ipv6hdr); 83 int size = sizeof(struct ipv6hdr);
85 int err; 84 int err;
86 85
@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
90 89
91 __skb_push(skb, size); 90 __skb_push(skb, size);
92 skb_reset_network_header(skb); 91 skb_reset_network_header(skb);
93 92 skb_mac_header_rebuild(skb);
94 old_mac = skb_mac_header(skb);
95 skb_set_mac_header(skb, -skb->mac_len);
96 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
97 93
98 xfrm6_beet_make_header(skb); 94 xfrm6_beet_make_header(skb);
99 95
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 261e6e6f487e..9f2095b19ad0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
64{ 64{
65 int err = -EINVAL; 65 int err = -EINVAL;
66 const unsigned char *old_mac;
67 66
68 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6) 67 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
69 goto out; 68 goto out;
@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
80 if (!(x->props.flags & XFRM_STATE_NOECN)) 79 if (!(x->props.flags & XFRM_STATE_NOECN))
81 ipip6_ecn_decapsulate(skb); 80 ipip6_ecn_decapsulate(skb);
82 81
83 old_mac = skb_mac_header(skb);
84 skb_set_mac_header(skb, -skb->mac_len);
85 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
86 skb_reset_network_header(skb); 82 skb_reset_network_header(skb);
83 skb_mac_header_rebuild(skb);
84
87 err = 0; 85 err = 0;
88 86
89out: 87out:
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 2406b3e7393f..d86217d56bd7 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,14 +63,14 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), 69 TEST(SHORT_PREAMBLE),
70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH)); 73 TEST(TDLS_PEER_AUTH), TEST(RATE_CONTROL));
74#undef TEST 74#undef TEST
75 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 75 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
76} 76}
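Each TEST() argument expands to either the flag's name or the empty string, so the format string must carry exactly one "%s" per argument; adding TEST(RATE_CONTROL) means growing the string from sixteen to seventeen conversions, which is easy to miss in review:

    #define TEST(flg) \
            test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
    /* 17 TEST() arguments now require 17 "%s" conversions */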
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0a0d94ad9b08..b142bd4c2390 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
911 result); 911 result);
912 912
913 ieee80211_led_init(local);
914
913 rtnl_lock(); 915 rtnl_lock();
914 916
915 result = ieee80211_init_rate_ctrl_alg(local, 917 result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
931 933
932 rtnl_unlock(); 934 rtnl_unlock();
933 935
934 ieee80211_led_init(local);
935
936 local->network_latency_notifier.notifier_call = 936 local->network_latency_notifier.notifier_call =
937 ieee80211_max_network_latency; 937 ieee80211_max_network_latency;
938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
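Moving ieee80211_led_init() ahead of the rate-control setup presumably guarantees the LED triggers exist before the rate-control algorithm, initialized under the RTNL just below, can register against them; the rationale is an inference, but the resulting order is simply:

    ieee80211_led_init(local);   /* now runs first */
    rtnl_lock();
    result = ieee80211_init_rate_ctrl_alg(local,
                                          hw->rate_control_algorithm);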
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 5a5a7767d541..ad64f4d5271a 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -336,7 +336,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
336 int i; 336 int i;
337 u32 mask; 337 u32 mask;
338 338
339 if (sta) { 339 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
340 ista = &sta->sta; 340 ista = &sta->sta;
341 priv_sta = sta->rate_ctrl_priv; 341 priv_sta = sta->rate_ctrl_priv;
342 } 342 }
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 168427b0ffdc..80cfc006dd74 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -41,7 +41,7 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
41 struct ieee80211_sta *ista = &sta->sta; 41 struct ieee80211_sta *ista = &sta->sta;
42 void *priv_sta = sta->rate_ctrl_priv; 42 void *priv_sta = sta->rate_ctrl_priv;
43 43
44 if (!ref) 44 if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
45 return; 45 return;
46 46
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
@@ -62,6 +62,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
62 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 62 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
63 63
64 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 64 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
65 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
65} 66}
66 67
67static inline void rate_control_rate_update(struct ieee80211_local *local, 68static inline void rate_control_rate_update(struct ieee80211_local *local,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 751409120769..5a5e504a8ffb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
612 tid_agg_rx->buf_size; 612 tid_agg_rx->buf_size;
613 if (!tid_agg_rx->reorder_buf[index] && 613 if (!tid_agg_rx->reorder_buf[index] &&
614 tid_agg_rx->stored_mpdu_num > 1) { 614 tid_agg_rx->stored_mpdu_num) {
615 /* 615 /*
616 * No buffers ready to be released, but check whether any 616 * No buffers ready to be released, but check whether any
617 * frames in the reorder buffer have timed out. 617 * frames in the reorder buffer have timed out.
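Dropping the "> 1" lets the timed-out-frame scan run when exactly one MPDU is parked behind a hole. Worked case:

    /* hole at head_seq_num, a single MPDU buffered behind it:
     *   stored_mpdu_num == 1
     *   old test: 1 > 1 -> false -> timeout scan skipped, frame stalls
     *   new test: 1     -> true  -> an expired frame can be released
     */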
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 6f77f12dc3fc..bfed851d0d36 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -52,6 +52,7 @@
52 * @WLAN_STA_SP: Station is in a service period, so don't try to 52 * @WLAN_STA_SP: Station is in a service period, so don't try to
53 * reply to other uAPSD trigger frames or PS-Poll. 53 * reply to other uAPSD trigger frames or PS-Poll.
54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
55 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
55 */ 56 */
56enum ieee80211_sta_info_flags { 57enum ieee80211_sta_info_flags {
57 WLAN_STA_AUTH, 58 WLAN_STA_AUTH,
@@ -71,6 +72,7 @@ enum ieee80211_sta_info_flags {
71 WLAN_STA_UAPSD, 72 WLAN_STA_UAPSD,
72 WLAN_STA_SP, 73 WLAN_STA_SP,
73 WLAN_STA_4ADDR_EVENT, 74 WLAN_STA_4ADDR_EVENT,
75 WLAN_STA_RATE_CONTROL,
74}; 76};
75 77
76enum ieee80211_sta_state { 78enum ieee80211_sta_state {
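The three mac80211 hunks above form one fix: WLAN_STA_RATE_CONTROL is declared and documented in sta_info.h, set only once rate_control_rate_init() has run, and tested before any rate-control path touches sta->rate_ctrl_priv, so the algorithm never sees a station it has not initialized. Condensed lifecycle:

    /* init: hand the station to the algorithm, then mark it usable */
    ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
    set_sta_flag(sta, WLAN_STA_RATE_CONTROL);

    /* fast paths: bail out before dereferencing rate_ctrl_priv */
    if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
            return;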
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 611c3359b94d..2555816e7788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
232 __be16 dport = 0; /* destination port to forward */ 232 __be16 dport = 0; /* destination port to forward */
233 unsigned int flags; 233 unsigned int flags;
234 struct ip_vs_conn_param param; 234 struct ip_vs_conn_param param;
235 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
235 union nf_inet_addr snet; /* source network of the client, 236 union nf_inet_addr snet; /* source network of the client,
236 after masking */ 237 after masking */
237 238
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
267 { 268 {
268 int protocol = iph.protocol; 269 int protocol = iph.protocol;
269 const union nf_inet_addr *vaddr = &iph.daddr; 270 const union nf_inet_addr *vaddr = &iph.daddr;
270 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
271 __be16 vport = 0; 271 __be16 vport = 0;
272 272
273 if (dst_port == svc->port) { 273 if (dst_port == svc->port) {
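Hoisting fwmark to function scope fixes a use-after-scope: the inner block stored a pointer to the block-local fwmark in the connection parameters, which outlive the block. Minimal shape of the bug (field name hypothetical):

    struct ip_vs_conn_param param;
    {
            const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
            param.vaddr = &fwmark;   /* pointer escapes the block */
    }
    /* param.vaddr now points at a dead stack slot */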
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 76613f5a55c0..ed86a3be678e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -404,19 +404,49 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
404 &net->ct.hash[repl_hash]); 404 &net->ct.hash[repl_hash]);
405} 405}
406 406
407void nf_conntrack_hash_insert(struct nf_conn *ct) 407int
408nf_conntrack_hash_check_insert(struct nf_conn *ct)
408{ 409{
409 struct net *net = nf_ct_net(ct); 410 struct net *net = nf_ct_net(ct);
410 unsigned int hash, repl_hash; 411 unsigned int hash, repl_hash;
412 struct nf_conntrack_tuple_hash *h;
413 struct hlist_nulls_node *n;
411 u16 zone; 414 u16 zone;
412 415
413 zone = nf_ct_zone(ct); 416 zone = nf_ct_zone(ct);
414 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 417 hash = hash_conntrack(net, zone,
415 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 418 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
419 repl_hash = hash_conntrack(net, zone,
420 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
421
422 spin_lock_bh(&nf_conntrack_lock);
416 423
424 /* See if there's one in the list already, including reverse */
425 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
426 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
427 &h->tuple) &&
428 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
429 goto out;
430 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
431 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
432 &h->tuple) &&
433 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
434 goto out;
435
436 add_timer(&ct->timeout);
437 nf_conntrack_get(&ct->ct_general);
417 __nf_conntrack_hash_insert(ct, hash, repl_hash); 438 __nf_conntrack_hash_insert(ct, hash, repl_hash);
439 NF_CT_STAT_INC(net, insert);
440 spin_unlock_bh(&nf_conntrack_lock);
441
442 return 0;
443
444out:
445 NF_CT_STAT_INC(net, insert_failed);
446 spin_unlock_bh(&nf_conntrack_lock);
447 return -EEXIST;
418} 448}
419EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); 449EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
420 450
421/* Confirm a connection given skb; places it in hash table */ 451/* Confirm a connection given skb; places it in hash table */
422int 452int
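nf_conntrack_hash_insert() used to insert unconditionally; the renamed helper takes nf_conntrack_lock, refuses a tuple already hashed in either direction within the same zone, and only on success arms the timeout and takes the reference the hash table owns, returning 0 or -EEXIST. Caller sketch (label hypothetical):

    err = nf_conntrack_hash_check_insert(ct);
    if (err < 0)            /* -EEXIST: lost a race with another insert */
            goto err_put;   /* clean up the still-unhashed conntrack */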
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9307b033c0c9..30c9d4ca0218 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1367,15 +1367,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1367 nf_ct_protonum(ct)); 1367 nf_ct_protonum(ct));
1368 if (helper == NULL) { 1368 if (helper == NULL) {
1369 rcu_read_unlock(); 1369 rcu_read_unlock();
1370 spin_unlock_bh(&nf_conntrack_lock);
1371#ifdef CONFIG_MODULES 1370#ifdef CONFIG_MODULES
1372 if (request_module("nfct-helper-%s", helpname) < 0) { 1371 if (request_module("nfct-helper-%s", helpname) < 0) {
1373 spin_lock_bh(&nf_conntrack_lock);
1374 err = -EOPNOTSUPP; 1372 err = -EOPNOTSUPP;
1375 goto err1; 1373 goto err1;
1376 } 1374 }
1377 1375
1378 spin_lock_bh(&nf_conntrack_lock);
1379 rcu_read_lock(); 1376 rcu_read_lock();
1380 helper = __nf_conntrack_helper_find(helpname, 1377 helper = __nf_conntrack_helper_find(helpname,
1381 nf_ct_l3num(ct), 1378 nf_ct_l3num(ct),
@@ -1468,8 +1465,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1468 if (tstamp) 1465 if (tstamp)
1469 tstamp->start = ktime_to_ns(ktime_get_real()); 1466 tstamp->start = ktime_to_ns(ktime_get_real());
1470 1467
1471 add_timer(&ct->timeout); 1468 err = nf_conntrack_hash_check_insert(ct);
1472 nf_conntrack_hash_insert(ct); 1469 if (err < 0)
1470 goto err2;
1471
1473 rcu_read_unlock(); 1472 rcu_read_unlock();
1474 1473
1475 return ct; 1474 return ct;
@@ -1490,6 +1489,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1490 struct nf_conntrack_tuple otuple, rtuple; 1489 struct nf_conntrack_tuple otuple, rtuple;
1491 struct nf_conntrack_tuple_hash *h = NULL; 1490 struct nf_conntrack_tuple_hash *h = NULL;
1492 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1491 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1492 struct nf_conn *ct;
1493 u_int8_t u3 = nfmsg->nfgen_family; 1493 u_int8_t u3 = nfmsg->nfgen_family;
1494 u16 zone; 1494 u16 zone;
1495 int err; 1495 int err;
@@ -1510,27 +1510,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1510 return err; 1510 return err;
1511 } 1511 }
1512 1512
1513 spin_lock_bh(&nf_conntrack_lock);
1514 if (cda[CTA_TUPLE_ORIG]) 1513 if (cda[CTA_TUPLE_ORIG])
1515 h = __nf_conntrack_find(net, zone, &otuple); 1514 h = nf_conntrack_find_get(net, zone, &otuple);
1516 else if (cda[CTA_TUPLE_REPLY]) 1515 else if (cda[CTA_TUPLE_REPLY])
1517 h = __nf_conntrack_find(net, zone, &rtuple); 1516 h = nf_conntrack_find_get(net, zone, &rtuple);
1518 1517
1519 if (h == NULL) { 1518 if (h == NULL) {
1520 err = -ENOENT; 1519 err = -ENOENT;
1521 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1520 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1522 struct nf_conn *ct;
1523 enum ip_conntrack_events events; 1521 enum ip_conntrack_events events;
1524 1522
1525 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, 1523 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1526 &rtuple, u3); 1524 &rtuple, u3);
1527 if (IS_ERR(ct)) { 1525 if (IS_ERR(ct))
1528 err = PTR_ERR(ct); 1526 return PTR_ERR(ct);
1529 goto out_unlock; 1527
1530 }
1531 err = 0; 1528 err = 0;
1532 nf_conntrack_get(&ct->ct_general);
1533 spin_unlock_bh(&nf_conntrack_lock);
1534 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1529 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1535 events = IPCT_RELATED; 1530 events = IPCT_RELATED;
1536 else 1531 else
@@ -1545,23 +1540,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1545 ct, NETLINK_CB(skb).pid, 1540 ct, NETLINK_CB(skb).pid,
1546 nlmsg_report(nlh)); 1541 nlmsg_report(nlh));
1547 nf_ct_put(ct); 1542 nf_ct_put(ct);
1548 } else 1543 }
1549 spin_unlock_bh(&nf_conntrack_lock);
1550 1544
1551 return err; 1545 return err;
1552 } 1546 }
1553 /* implicit 'else' */ 1547 /* implicit 'else' */
1554 1548
1555 /* We manipulate the conntrack inside the global conntrack table lock,
1556 * so there's no need to increase the refcount */
1557 err = -EEXIST; 1549 err = -EEXIST;
1550 ct = nf_ct_tuplehash_to_ctrack(h);
1558 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1551 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1559 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 1552 spin_lock_bh(&nf_conntrack_lock);
1560
1561 err = ctnetlink_change_conntrack(ct, cda); 1553 err = ctnetlink_change_conntrack(ct, cda);
1554 spin_unlock_bh(&nf_conntrack_lock);
1562 if (err == 0) { 1555 if (err == 0) {
1563 nf_conntrack_get(&ct->ct_general);
1564 spin_unlock_bh(&nf_conntrack_lock);
1565 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1556 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1566 (1 << IPCT_ASSURED) | 1557 (1 << IPCT_ASSURED) |
1567 (1 << IPCT_HELPER) | 1558 (1 << IPCT_HELPER) |
@@ -1570,15 +1561,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1570 (1 << IPCT_MARK), 1561 (1 << IPCT_MARK),
1571 ct, NETLINK_CB(skb).pid, 1562 ct, NETLINK_CB(skb).pid,
1572 nlmsg_report(nlh)); 1563 nlmsg_report(nlh));
1573 nf_ct_put(ct); 1564 }
1574 } else
1575 spin_unlock_bh(&nf_conntrack_lock);
1576
1577 return err;
1578 } 1565 }
1579 1566
1580out_unlock: 1567 nf_ct_put(ct);
1581 spin_unlock_bh(&nf_conntrack_lock);
1582 return err; 1568 return err;
1583} 1569}
1584 1570
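The ctnetlink rewrite narrows nf_conntrack_lock to the one operation that still needs it, ctnetlink_change_conntrack(). Creation now races safely through nf_conntrack_hash_check_insert() above, and lookups switch from __nf_conntrack_find(), which was only valid under the global lock, to nf_conntrack_find_get(), which takes its own reference, so every unlocked path must balance with nf_ct_put(). The new reference discipline:

    h = nf_conntrack_find_get(net, zone, &otuple);  /* +1 ref on hit */
    ...
    ct = nf_ct_tuplehash_to_ctrack(h);
    ...                                             /* work unlocked */
    nf_ct_put(ct);                                  /* drop our ref */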
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8d..ce60cf0f6c11 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -203,6 +203,27 @@ err:
203 return status; 203 return status;
204} 204}
205 205
206#ifdef CONFIG_BRIDGE_NETFILTER
207/* When called from bridge netfilter, skb->data must point to MAC header
208 * before calling skb_gso_segment(). Else, original MAC header is lost
209 * and segmented skbs will be sent to wrong destination.
210 */
211static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
212{
213 if (skb->nf_bridge)
214 __skb_push(skb, skb->network_header - skb->mac_header);
215}
216
217static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
218{
219 if (skb->nf_bridge)
220 __skb_pull(skb, skb->network_header - skb->mac_header);
221}
222#else
223#define nf_bridge_adjust_skb_data(s) do {} while (0)
224#define nf_bridge_adjust_segmented_data(s) do {} while (0)
225#endif
226
206int nf_queue(struct sk_buff *skb, 227int nf_queue(struct sk_buff *skb,
207 struct list_head *elem, 228 struct list_head *elem,
208 u_int8_t pf, unsigned int hook, 229 u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
212 unsigned int queuenum) 233 unsigned int queuenum)
213{ 234{
214 struct sk_buff *segs; 235 struct sk_buff *segs;
215 int err; 236 int err = -EINVAL;
216 unsigned int queued; 237 unsigned int queued;
217 238
218 if (!skb_is_gso(skb)) 239 if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
228 break; 249 break;
229 } 250 }
230 251
252 nf_bridge_adjust_skb_data(skb);
231 segs = skb_gso_segment(skb, 0); 253 segs = skb_gso_segment(skb, 0);
232 /* Does not use PTR_ERR to limit the number of error codes that can be 254 /* Does not use PTR_ERR to limit the number of error codes that can be
233 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean 255 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
234 * 'ignore this hook'. 256 * 'ignore this hook'.
235 */ 257 */
236 if (IS_ERR(segs)) 258 if (IS_ERR(segs))
237 return -EINVAL; 259 goto out_err;
238
239 queued = 0; 260 queued = 0;
240 err = 0; 261 err = 0;
241 do { 262 do {
242 struct sk_buff *nskb = segs->next; 263 struct sk_buff *nskb = segs->next;
243 264
244 segs->next = NULL; 265 segs->next = NULL;
245 if (err == 0) 266 if (err == 0) {
267 nf_bridge_adjust_segmented_data(segs);
246 err = __nf_queue(segs, elem, pf, hook, indev, 268 err = __nf_queue(segs, elem, pf, hook, indev,
247 outdev, okfn, queuenum); 269 outdev, okfn, queuenum);
270 }
248 if (err == 0) 271 if (err == 0)
249 queued++; 272 queued++;
250 else 273 else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
252 segs = nskb; 275 segs = nskb;
253 } while (segs); 276 } while (segs);
254 277
255 /* also free orig skb if only some segments were queued */ 278 if (queued) {
256 if (unlikely(err && queued))
257 err = 0;
258 if (err == 0)
259 kfree_skb(skb); 279 kfree_skb(skb);
280 return 0;
281 }
282 out_err:
283 nf_bridge_adjust_segmented_data(skb);
260 return err; 284 return err;
261} 285}
262 286
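Two fixes meet here. For bridged skbs, skb->data must point at the MAC header across skb_gso_segment() or the segments inherit a bogus one, hence the push/pull pair bracketing segmentation; both helpers are no-ops when skb->nf_bridge is unset. And the tail logic now has one rule: if any segment was queued, the original skb is freed and 0 is returned; otherwise the data pointer is restored and the error propagates. The bracket in isolation:

    nf_bridge_adjust_skb_data(skb);          /* data -> MAC header (bridge only) */
    segs = skb_gso_segment(skb, 0);
    ...
    nf_bridge_adjust_segmented_data(segs);   /* data -> network header again */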
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3aae66facf9f..4d5057902839 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,9 +152,10 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
152 fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) | 152 fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
153 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]; 153 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
154 dst = ip6_route_output(net, NULL, &fl6); 154 dst = ip6_route_output(net, NULL, &fl6);
155 if (dst == NULL) 155 if (dst->error) {
156 dst_release(dst);
156 return false; 157 return false;
157 158 }
158 skb_dst_drop(skb); 159 skb_dst_drop(skb);
159 skb_dst_set(skb, dst); 160 skb_dst_set(skb, dst);
160 skb->dev = dst->dev; 161 skb->dev = dst->dev;
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ffd..ae3a035f5390 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
232 if (toklen <= (n_parts + 1) * 4) 232 if (toklen <= (n_parts + 1) * 4)
233 return -EINVAL; 233 return -EINVAL;
234 234
235 princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL); 235 princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
236 if (!princ->name_parts) 236 if (!princ->name_parts)
237 return -ENOMEM; 237 return -ENOMEM;
238 238
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
355 355
356 _debug("n_elem %d", n_elem); 356 _debug("n_elem %d", n_elem);
357 357
358 td = kcalloc(sizeof(struct krb5_tagged_data), n_elem, 358 td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
359 GFP_KERNEL); 359 GFP_KERNEL);
360 if (!td) 360 if (!td)
361 return -ENOMEM; 361 return -ENOMEM;
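Both call sites had kcalloc()'s first two arguments transposed. Because multiplication commutes, the allocation size (and the overflow check) came out the same, so this is a correctness-of-intent fix: the API takes the element count first, the element size second.

    /* void *kcalloc(size_t n, size_t size, gfp_t flags);
     * n = element count, size = element size; zeroed, overflow-checked */
    princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);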
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a3..7e267d7b9c75 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
148 148
149static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) 149static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
150{ 150{
151 BUILD_BUG_ON(sizeof(skb->cb) < 151 qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
152 sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
153 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data; 152 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
154} 153}
155 154
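choke, netem, sfb and sfq each carried this open-coded BUILD_BUG_ON; qdisc_cb_private_validate() centralizes it, and the same conversion appears in the three hunks below. A sketch of the helper consistent with these call sites (the exact body is an assumption):

    static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
    {
            struct qdisc_skb_cb *qcb;

            /* the private area after struct qdisc_skb_cb must fit in skb->cb */
            BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
            BUILD_BUG_ON(sizeof(qcb->data) < sz);
    }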
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2776012132ea..5da548fa7ae9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
130 130
131static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) 131static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
132{ 132{
133 BUILD_BUG_ON(sizeof(skb->cb) < 133 qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
134 sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
135 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; 134 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
136} 135}
137 136
@@ -502,9 +501,8 @@ tfifo_dequeue:
502 501
503 /* if more time remaining? */ 502 /* if more time remaining? */
504 if (cb->time_to_send <= psched_get_time()) { 503 if (cb->time_to_send <= psched_get_time()) {
505 skb = qdisc_dequeue_tail(sch); 504 __skb_unlink(skb, &sch->q);
506 if (unlikely(!skb)) 505 sch->qstats.backlog -= qdisc_pkt_len(skb);
507 goto qdisc_dequeue;
508 506
509#ifdef CONFIG_NET_CLS_ACT 507#ifdef CONFIG_NET_CLS_ACT
510 /* 508 /*
@@ -540,7 +538,6 @@ deliver:
540 qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send); 538 qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
541 } 539 }
542 540
543qdisc_dequeue:
544 if (q->qdisc) { 541 if (q->qdisc) {
545 skb = q->qdisc->ops->dequeue(q->qdisc); 542 skb = q->qdisc->ops->dequeue(q->qdisc);
546 if (skb) 543 if (skb)
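In this dequeue path the skb under test was obtained earlier in the function (a peek, by the look of the cb->time_to_send check), so once it is known to be due the rewrite unlinks that exact skb instead of calling qdisc_dequeue_tail() and re-checking for NULL; the manual backlog decrement replaces the accounting the dequeue helper used to do, and the now-unreachable qdisc_dequeue label goes away.

    __skb_unlink(skb, &sch->q);                 /* remove the peeked skb */
    sch->qstats.backlog -= qdisc_pkt_len(skb);  /* keep stats in step */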
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7a..d7eea99333e9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
94 94
95static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb) 95static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
96{ 96{
97 BUILD_BUG_ON(sizeof(skb->cb) < 97 qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
98 sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
99 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data; 98 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
100} 99}
101 100
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9acf..60d47180f043 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
166 166
167static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb) 167static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
168{ 168{
169 BUILD_BUG_ON(sizeof(skb->cb) < 169 qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
170 sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb)); 170 return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
171 return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
172} 171}
173 172
174static unsigned int sfq_hash(const struct sfq_sched_data *q, 173static unsigned int sfq_hash(const struct sfq_sched_data *q,