author    Ingo Molnar <mingo@elte.hu>  2012-03-14 02:44:11 -0400
committer Ingo Molnar <mingo@elte.hu>  2012-03-14 02:44:11 -0400
commit    cd593accdcc27ccbe6498d9ad1c2b6cc8e1d830d (patch)
tree      9424d3ac86e753706cc6c3d7e6072d2a73711e29 /net
parent    11b91d6fe7272452c999573bab33c15c2f03dc31 (diff)
parent    fde7d9049e55ab85a390be7f415d74c9f62dd0f9 (diff)
Merge tag 'v3.3-rc7' into x86/mce
Merge reason: Update from an ancient -rc1 base to an almost-final stable kernel.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c | 10
-rw-r--r--  net/bluetooth/af_bluetooth.c | 12
-rw-r--r--  net/bluetooth/hci_conn.c | 4
-rw-r--r--  net/bluetooth/hci_core.c | 3
-rw-r--r--  net/bluetooth/l2cap_core.c | 24
-rw-r--r--  net/bluetooth/l2cap_sock.c | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 18
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 7
-rw-r--r--  net/bridge/br_netfilter.c | 32
-rw-r--r--  net/bridge/br_stp.c | 8
-rw-r--r--  net/bridge/br_stp_if.c | 3
-rw-r--r--  net/bridge/netfilter/ebtables.c | 26
-rw-r--r--  net/caif/caif_dev.c | 22
-rw-r--r--  net/caif/caif_socket.c | 10
-rw-r--r--  net/caif/cfcnfg.c | 1
-rw-r--r--  net/caif/cfmuxl.c | 12
-rw-r--r--  net/ceph/ceph_common.c | 2
-rw-r--r--  net/ceph/mon_client.c | 13
-rw-r--r--  net/core/dev.c | 10
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/net_namespace.c | 31
-rw-r--r--  net/core/netpoll.c | 2
-rw-r--r--  net/core/netprio_cgroup.c | 15
-rw-r--r--  net/core/rtnetlink.c | 79
-rw-r--r--  net/core/sock.c | 7
-rw-r--r--  net/ipv4/Kconfig | 2
-rw-r--r--  net/ipv4/arp.c | 3
-rw-r--r--  net/ipv4/inet_connection_sock.c | 7
-rw-r--r--  net/ipv4/inetpeer.c | 81
-rw-r--r--  net/ipv4/ip_gre.c | 14
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv4/ping.c | 1
-rw-r--r--  net/ipv4/route.c | 12
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 7
-rw-r--r--  net/ipv4/tcp.c | 38
-rw-r--r--  net/ipv4/tcp_input.c | 52
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv4/tcp_timer.c | 5
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c | 5
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 5
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c | 6
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 6
-rw-r--r--  net/l2tp/l2tp_ip.c | 5
-rw-r--r--  net/mac80211/debugfs_sta.c | 4
-rw-r--r--  net/mac80211/ibss.c | 1
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/mac80211/main.c | 4
-rw-r--r--  net/mac80211/rate.c | 4
-rw-r--r--  net/mac80211/rate.h | 3
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 46
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 49
-rw-r--r--  net/netfilter/nf_queue.c | 40
-rw-r--r--  net/netfilter/xt_TEE.c | 5
-rw-r--r--  net/openvswitch/actions.c | 44
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/rxrpc/ar-key.c | 4
-rw-r--r--  net/sched/sch_choke.c | 3
-rw-r--r--  net/sched/sch_netem.c | 9
-rw-r--r--  net/sched/sch_sfb.c | 3
-rw-r--r--  net/sched/sch_sfq.c | 5
-rw-r--r--  net/sunrpc/auth_generic.c | 17
-rw-r--r--  net/unix/af_unix.c | 19
71 files changed, 574 insertions, 336 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c
index c12c2582457c..127fe70a1baa 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -46,8 +46,8 @@
 
 static struct net_device *clip_devs;
 static struct atm_vcc *atmarpd;
-static struct neigh_table clip_tbl;
 static struct timer_list idle_timer;
+static const struct neigh_ops clip_neigh_ops;
 
 static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 {
@@ -123,6 +123,8 @@ static int neigh_check_cb(struct neighbour *n)
         struct atmarp_entry *entry = neighbour_priv(n);
         struct clip_vcc *cv;
 
+        if (n->ops != &clip_neigh_ops)
+                return 0;
         for (cv = entry->vccs; cv; cv = cv->next) {
                 unsigned long exp = cv->last_use + cv->idle_timeout;
 
@@ -154,10 +156,10 @@ static int neigh_check_cb(struct neighbour *n)
 
 static void idle_timer_check(unsigned long dummy)
 {
-        write_lock(&clip_tbl.lock);
-        __neigh_for_each_release(&clip_tbl, neigh_check_cb);
+        write_lock(&arp_tbl.lock);
+        __neigh_for_each_release(&arp_tbl, neigh_check_cb);
         mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-        write_unlock(&clip_tbl.lock);
+        write_unlock(&arp_tbl.lock);
 }
 
 static int clip_arp_rcv(struct sk_buff *skb)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ef92864ac625..72eb187a5f60 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -71,19 +71,16 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
         "slock-AF_BLUETOOTH-BTPROTO_AVDTP",
 };
 
-static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+void bt_sock_reclassify_lock(struct sock *sk, int proto)
 {
-        struct sock *sk = sock->sk;
-
-        if (!sk)
-                return;
-
+        BUG_ON(!sk);
         BUG_ON(sock_owned_by_user(sk));
 
         sock_lock_init_class_and_name(sk,
                         bt_slock_key_strings[proto], &bt_slock_key[proto],
                         bt_key_strings[proto], &bt_lock_key[proto]);
 }
+EXPORT_SYMBOL(bt_sock_reclassify_lock);
 
 int bt_sock_register(int proto, const struct net_proto_family *ops)
 {
@@ -145,7 +142,8 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 
         if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
                 err = bt_proto[proto]->create(net, sock, proto, kern);
-                bt_sock_reclassify_lock(sock, proto);
+                if (!err)
+                        bt_sock_reclassify_lock(sock->sk, proto);
                 module_put(bt_proto[proto]->owner);
         }
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3db432473ad5..07bc69ed9498 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -635,6 +635,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 
         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                 struct hci_cp_auth_requested cp;
+
+                /* encrypt must be pending if auth is also pending */
+                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+
                 cp.handle = cpu_to_le16(conn->handle);
                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                                 sizeof(cp), &cp);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9de93714213a..5aeb62491198 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -640,7 +640,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         /* Reset device */
         skb_queue_purge(&hdev->cmd_q);
         atomic_set(&hdev->cmd_cnt, 1);
-        if (!test_bit(HCI_RAW, &hdev->flags)) {
+        if (!test_bit(HCI_RAW, &hdev->flags) &&
+            test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                 set_bit(HCI_INIT, &hdev->flags);
                 __hci_request(hdev, hci_reset_req, 0,
                                 msecs_to_jiffies(250));
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index faf0b11ac1d3..32d338c30e65 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1018,10 +1018,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
         hci_chan_del(conn->hchan);
 
         if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-                __cancel_delayed_work(&conn->info_timer);
+                cancel_delayed_work_sync(&conn->info_timer);
 
         if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
-                __cancel_delayed_work(&conn->security_timer);
+                cancel_delayed_work_sync(&conn->security_timer);
                 smp_chan_destroy(conn);
         }
 
@@ -1120,7 +1120,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
         return c1;
 }
 
-inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
 {
         struct sock *sk = chan->sk;
         bdaddr_t *src = &bt_sk(sk)->src;
@@ -2574,7 +2574,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
 
         if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
             cmd->ident == conn->info_ident) {
-                __cancel_delayed_work(&conn->info_timer);
+                cancel_delayed_work(&conn->info_timer);
 
                 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
                 conn->info_ident = 0;
@@ -2970,7 +2970,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
         default:
                 sk->sk_err = ECONNRESET;
-                __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
+                __set_chan_timer(chan,
+                                 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
                 l2cap_send_disconn_req(conn, chan, ECONNRESET);
                 goto done;
         }
@@ -3120,7 +3121,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
             conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
                 return 0;
 
-        __cancel_delayed_work(&conn->info_timer);
+        cancel_delayed_work(&conn->info_timer);
 
         if (result != L2CAP_IR_SUCCESS) {
                 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -4478,7 +4479,8 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
         if (encrypt == 0x00) {
                 if (chan->sec_level == BT_SECURITY_MEDIUM) {
                         __clear_chan_timer(chan);
-                        __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
+                        __set_chan_timer(chan,
+                                         msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
                 } else if (chan->sec_level == BT_SECURITY_HIGH)
                         l2cap_chan_close(chan, ECONNREFUSED);
         } else {
@@ -4499,7 +4501,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 
         if (hcon->type == LE_LINK) {
                 smp_distribute_keys(conn, 0);
-                __cancel_delayed_work(&conn->security_timer);
+                cancel_delayed_work(&conn->security_timer);
         }
 
         rcu_read_lock();
@@ -4546,7 +4548,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                                 L2CAP_CONN_REQ, sizeof(req), &req);
                         } else {
                                 __clear_chan_timer(chan);
-                                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+                                __set_chan_timer(chan,
+                                                 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
                         }
                 } else if (chan->state == BT_CONNECT2) {
                         struct l2cap_conn_rsp rsp;
@@ -4566,7 +4569,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                 }
                         } else {
                                 l2cap_state_change(chan, BT_DISCONN);
-                                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+                                __set_chan_timer(chan,
+                                                 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
                                 res = L2CAP_CR_SEC_BLOCK;
                                 stat = L2CAP_CS_NO_INFO;
                         }
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c61d967012b2..401d9428ae4c 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -849,6 +849,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
         if (!sk)
                 return NULL;
 
+        bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
         l2cap_sock_init(sk, parent);
 
         return l2cap_pi(sk)->chan;
@@ -1002,7 +1004,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
         INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
 
         sk->sk_destruct = l2cap_sock_destruct;
-        sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+        sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
 
         sock_reset_flag(sk, SOCK_ZAPPED);
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 501649bf5596..8a602388f1e7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,12 +1164,18 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                 break;
 
         case BT_DISCONN:
-                /* When socket is closed and we are not RFCOMM
-                 * initiator rfcomm_process_rx already calls
-                 * rfcomm_session_put() */
-                if (s->sock->sk->sk_state != BT_CLOSED)
-                        if (list_empty(&s->dlcs))
-                                rfcomm_session_put(s);
+                /* rfcomm_session_put is called later so don't do
+                 * anything here otherwise we will mess up the session
+                 * reference counter:
+                 *
+                 * (a) when we are the initiator dlc_unlink will drive
+                 * the reference counter to 0 (there is no initial put
+                 * after session_add)
+                 *
+                 * (b) when we are not the initiator rfcomm_rx_process
+                 * will explicitly call put to balance the initial hold
+                 * done after session add.
+                 */
                 break;
         }
 }
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index f066678faeee..22169c3f1482 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -956,6 +956,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
         if (!sk)
                 goto done;
 
+        bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
+
         rfcomm_sock_init(sk, parent);
         bacpy(&bt_sk(sk)->src, &src);
         bacpy(&bt_sk(sk)->dst, &dst);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 568d5bf17534..702a1ae9220b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -446,8 +446,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
         ip6h->nexthdr = IPPROTO_HOPOPTS;
         ip6h->hop_limit = 1;
         ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
-        ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-                           &ip6h->saddr);
+        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                               &ip6h->saddr)) {
+                kfree_skb(skb);
+                return NULL;
+        }
         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
         hopopt = (u8 *)(ip6h + 1);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 84122472656c..dec4f3817133 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -62,6 +62,15 @@ static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #define brnf_filter_pppoe_tagged 0
 #endif
 
+#define IS_IP(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
 static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
         if (vlan_tx_tag_present(skb))
@@ -639,8 +648,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                 return NF_DROP;
         br = p->br;
 
-        if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-            IS_PPPOE_IPV6(skb)) {
+        if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
                 if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
                         return NF_ACCEPT;
 
@@ -651,8 +659,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
         if (!brnf_call_iptables && !br->nf_call_iptables)
                 return NF_ACCEPT;
 
-        if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
-            !IS_PPPOE_IP(skb))
+        if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
                 return NF_ACCEPT;
 
         nf_bridge_pull_encap_header_rcsum(skb);
@@ -701,7 +708,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
         struct nf_bridge_info *nf_bridge = skb->nf_bridge;
         struct net_device *in;
 
-        if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+        if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
                 in = nf_bridge->physindev;
                 if (nf_bridge->mask & BRNF_PKT_TYPE) {
                         skb->pkt_type = PACKET_OTHERHOST;
@@ -718,6 +725,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
         return 0;
 }
 
+
 /* This is the 'purely bridged' case. For IP, we pass the packet to
  * netfilter with indev and outdev set to the bridge device,
  * but we are still able to filter on the 'real' indev/outdev
@@ -744,11 +752,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
         if (!parent)
                 return NF_DROP;
 
-        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-            IS_PPPOE_IP(skb))
+        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-            IS_PPPOE_IPV6(skb))
+        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
         else
                 return NF_ACCEPT;
@@ -795,7 +801,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
         if (!brnf_call_arptables && !br->nf_call_arptables)
                 return NF_ACCEPT;
 
-        if (skb->protocol != htons(ETH_P_ARP)) {
+        if (!IS_ARP(skb)) {
                 if (!IS_VLAN_ARP(skb))
                         return NF_ACCEPT;
                 nf_bridge_pull_encap_header(skb);
@@ -853,11 +859,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
         if (!realoutdev)
                 return NF_DROP;
 
-        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-            IS_PPPOE_IP(skb))
+        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-            IS_PPPOE_IPV6(skb))
+        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
         else
                 return NF_ACCEPT;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index dd147d78a588..8c836d96ba76 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -17,9 +17,9 @@
 #include "br_private_stp.h"
 
 /* since time values in bpdu are in jiffies and then scaled (1/256)
- * before sending, make sure that is at least one.
+ * before sending, make sure that is at least one STP tick.
  */
-#define MESSAGE_AGE_INCR        ((HZ < 256) ? 1 : (HZ/256))
+#define MESSAGE_AGE_INCR        ((HZ / 256) + 1)
 
 static const char *const br_port_state_names[] = {
         [BR_STATE_DISABLED] = "disabled",
@@ -31,7 +31,7 @@ static const char *const br_port_state_names[] = {
 
 void br_log_state(const struct net_bridge_port *p)
 {
-        br_info(p->br, "port %u(%s) entering %s state\n",
+        br_info(p->br, "port %u(%s) entered %s state\n",
                 (unsigned) p->port_no, p->dev->name,
                 br_port_state_names[p->state]);
 }
@@ -186,7 +186,7 @@ static void br_record_config_information(struct net_bridge_port *p,
         p->designated_cost = bpdu->root_path_cost;
         p->designated_bridge = bpdu->bridge_id;
         p->designated_port = bpdu->port_id;
-        p->designated_age = jiffies + bpdu->message_age;
+        p->designated_age = jiffies - bpdu->message_age;
 
         mod_timer(&p->message_age_timer, jiffies
                   + (p->br->max_age - bpdu->message_age));
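
For a numeric feel for the MESSAGE_AGE_INCR change above: the old macro rounded HZ/256 down and only clamped to 1 when HZ < 256, while the new definition always rounds up, so the advertised message age can never be underestimated. A short userspace sketch written for this note (not code from the tree) comparing the two:

/* Compare the old and new MESSAGE_AGE_INCR definitions for common HZ
 * values. The interesting cases are those where HZ/256 truncates to 0
 * (e.g. HZ=100 or HZ=250), where the +1 keeps the increment at one tick. */
#include <stdio.h>

#define OLD_INCR(hz) (((hz) < 256) ? 1 : ((hz) / 256))
#define NEW_INCR(hz) (((hz) / 256) + 1)

int main(void)
{
        const int hz_values[] = { 100, 250, 256, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
                printf("HZ=%4d  old=%d  new=%d\n", hz_values[i],
                       OLD_INCR(hz_values[i]), NEW_INCR(hz_values[i]));
        return 0;
}

With HZ=1000 this prints old=3, new=4: the new macro deliberately overestimates by up to one unit rather than ever rounding the increment down.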
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 19308e305d85..f494496373d6 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -98,14 +98,13 @@ void br_stp_disable_port(struct net_bridge_port *p)
         struct net_bridge *br = p->br;
         int wasroot;
 
-        br_log_state(p);
-
         wasroot = br_is_root_bridge(br);
         br_become_designated_port(p);
         p->state = BR_STATE_DISABLED;
         p->topology_change_ack = 0;
         p->config_pending = 0;
 
+        br_log_state(p);
         br_ifinfo_notify(RTM_NEWLINK, p);
 
         del_timer(&p->message_age_timer);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5864cc491369..5fe2ff3b01ef 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1335,7 +1335,12 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
     const char *base, char __user *ubase)
 {
         char __user *hlp = ubase + ((char *)m - base);
-        if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+        /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
+           long. Copy 29 bytes and fill remaining bytes with zeroes. */
+        strncpy(name, m->u.match->name, sizeof(name));
+        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
@@ -1344,7 +1349,10 @@ static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
     const char *base, char __user *ubase)
 {
         char __user *hlp = ubase + ((char *)w - base);
-        if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+        strncpy(name, w->u.watcher->name, sizeof(name));
+        if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
@@ -1355,6 +1363,7 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
         int ret;
         char __user *hlp;
         const struct ebt_entry_target *t;
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
         if (e->bitmask == 0)
                 return 0;
@@ -1368,7 +1377,8 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
         ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
         if (ret != 0)
                 return ret;
-        if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+        strncpy(name, t->u.target->name, sizeof(name));
+        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
@@ -1893,10 +1903,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 
         switch (compat_mwt) {
         case EBT_COMPAT_MATCH:
-                match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
-                                                name, 0), "ebt_%s", name);
-                if (match == NULL)
-                        return -ENOENT;
+                match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
                 if (IS_ERR(match))
                         return PTR_ERR(match);
 
@@ -1915,10 +1922,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
                 break;
         case EBT_COMPAT_WATCHER: /* fallthrough */
         case EBT_COMPAT_TARGET:
-                wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
-                                                name, 0), "ebt_%s", name);
-                if (wt == NULL)
-                        return -ENOENT;
+                wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
                 if (IS_ERR(wt))
                         return PTR_ERR(wt);
                 off = xt_compat_target_offset(wt);
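
The padding changes in ebt_make_matchname() and friends follow a general rule for copying fixed-width strings to user space: stage the name in a zeroed buffer first, so the bytes past the terminator are defined rather than leftover kernel stack. A userspace sketch of the same pattern (illustrative only; copy_to_user() is stood in for by memcpy(), and the 32-byte width mirrors EBT_FUNCTION_MAXNAMELEN):

/* Export a short string through a fixed 32-byte window without leaking
 * uninitialized bytes: zero the staging buffer, then bounded-copy into it. */
#include <stdio.h>
#include <string.h>

#define FUNCTION_MAXNAMELEN 32

static void export_name(char *dst, const char *src)
{
        char name[FUNCTION_MAXNAMELEN] = { 0 };  /* all 32 bytes defined */

        strncpy(name, src, sizeof(name) - 1);
        memcpy(dst, name, sizeof(name));         /* stand-in for copy_to_user() */
}

int main(void)
{
        char out[FUNCTION_MAXNAMELEN];

        export_name(out, "ip");
        printf("name=\"%s\", last byte=%d\n", out, out[FUNCTION_MAXNAMELEN - 1]);
        return 0;
}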
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 673728add60b..82c57069415f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -59,8 +59,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
 {
         struct caif_net *caifn;
         caifn = net_generic(net, caif_net_id);
-        if (!caifn)
-                return NULL;
         return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -69,8 +67,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
         struct caif_net *caifn;
         caifn = net_generic(net, caif_net_id);
-        if (!caifn)
-                return NULL;
         return &caifn->caifdevs;
 }
 
@@ -99,8 +95,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
         struct caif_device_entry *caifd;
 
         caifdevs = caif_device_list(dev_net(dev));
-        if (!caifdevs)
-                return NULL;
 
         caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
         if (!caifd)
@@ -120,8 +114,6 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
         struct caif_device_entry_list *caifdevs =
             caif_device_list(dev_net(dev));
         struct caif_device_entry *caifd;
-        if (!caifdevs)
-                return NULL;
 
         list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                 if (caifd->netdev == dev)
@@ -321,8 +313,6 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
         struct caif_device_entry_list *caifdevs;
 
         caifdevs = caif_device_list(dev_net(dev));
-        if (!cfg || !caifdevs)
-                return;
         caifd = caif_device_alloc(dev);
         if (!caifd)
                 return;
@@ -374,8 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
         cfg = get_cfcnfg(dev_net(dev));
         caifdevs = caif_device_list(dev_net(dev));
-        if (!cfg || !caifdevs)
-                return 0;
 
         caifd = caif_get(dev);
         if (caifd == NULL && dev->type != ARPHRD_CAIF)
@@ -507,9 +495,6 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
         struct caif_net *caifn = net_generic(net, caif_net_id);
-        if (WARN_ON(!caifn))
-                return -EINVAL;
-
         INIT_LIST_HEAD(&caifn->caifdevs.list);
         mutex_init(&caifn->caifdevs.lock);
 
@@ -527,9 +512,6 @@ static void caif_exit_net(struct net *net)
                 caif_device_list(net);
         struct cfcnfg *cfg = get_cfcnfg(net);
 
-        if (!cfg || !caifdevs)
-                return;
-
         rtnl_lock();
         mutex_lock(&caifdevs->lock);
 
@@ -569,7 +551,7 @@ static int __init caif_device_init(void)
 {
         int result;
 
-        result = register_pernet_device(&caif_net_ops);
+        result = register_pernet_subsys(&caif_net_ops);
 
         if (result)
                 return result;
@@ -582,7 +564,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-        unregister_pernet_device(&caif_net_ops);
+        unregister_pernet_subsys(&caif_net_ops);
         unregister_netdevice_notifier(&caif_device_notifier);
         dev_remove_pack(&caif_packet_type);
 }
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a98628086452..a97d97a3a512 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
         pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
         memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-        if (cf_sk->layer.dn == NULL)
+        if (cf_sk->layer.dn == NULL) {
+                kfree_skb(skb);
                 return -EINVAL;
+        }
 
         return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                 }
                 err = transmit_skb(skb, cf_sk,
                                 msg->msg_flags&MSG_DONTWAIT, timeo);
-                if (err < 0) {
-                        kfree_skb(skb);
+                if (err < 0)
+                        /* skb is already freed */
                         goto pipe_err;
-                }
+
                 sent += size;
         }
 
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 598aafb4cb51..ba9cfd47778a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
         int err;
         struct cfctrl_link_param param;
         struct cfcnfg *cfg = get_cfcnfg(net);
-        caif_assert(cfg != NULL);
 
         rcu_read_lock();
         err = caif_connect_req_to_link_param(cfg, conn_req, &param);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e7..94b08612a4d8 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
         struct cfmuxl *muxl = container_obj(layr);
         struct cflayer *layer;
-        int idx;
 
         rcu_read_lock();
         list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
                 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
                                 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
-                                layer->id != 0) {
-
-                        idx = layer->id % UP_CACHE_SIZE;
-                        spin_lock_bh(&muxl->receive_lock);
-                        RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
-                        list_del_rcu(&layer->node);
-                        spin_unlock_bh(&muxl->receive_lock);
-                }
+                                layer->id != 0)
+                        cfmuxl_remove_uplayer(layr, layer->id);
+
                 /* NOTE: ctrlcmd is not allowed to block */
                 layer->ctrlcmd(layer, ctrl, phyid);
         }
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3b..761ad9d6cc3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
         } else {
                 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
                 memcpy(&client->fsid, fsid, sizeof(*fsid));
-                ceph_debugfs_client_init(client);
-                client->have_fsid = true;
         }
         return 0;
 }
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42bd..1845cde26227 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
 
 #include <linux/ceph/mon_client.h>
 #include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
 #include <linux/ceph/decode.h>
-
 #include <linux/ceph/auth.h>
 
 /*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
         client->monc.monmap = monmap;
         kfree(old);
 
+        if (!client->have_fsid) {
+                client->have_fsid = true;
+                mutex_unlock(&monc->mutex);
+                /*
+                 * do debugfs initialization without mutex to avoid
+                 * creating a locking dependency
+                 */
+                ceph_debugfs_client_init(client);
+                goto out_unlocked;
+        }
 out:
         mutex_unlock(&monc->mutex);
+out_unlocked:
         wake_up_all(&client->auth_wq);
 }
 
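
The monmap hunk above is a standard deadlock-avoidance shape: finish the state change under the mutex, drop the mutex, and only then call into debugfs, leaving through a label that skips the normal unlock path. A compressed pthread model of that control flow (a sketch of the pattern, not the ceph code):

/* Drop a mutex before calling into another subsystem that may take its
 * own locks; exit through a label that bypasses the usual unlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int have_fsid;

static void debugfs_init(void)
{
        printf("debugfs init, mutex not held\n");
}

static void handle_map(void)
{
        pthread_mutex_lock(&m);
        /* ... update state under the lock ... */
        if (!have_fsid) {
                have_fsid = 1;
                pthread_mutex_unlock(&m);
                debugfs_init();         /* call out with the lock dropped */
                goto out_unlocked;
        }
        pthread_mutex_unlock(&m);
out_unlocked:
        printf("wake waiters\n");
}

int main(void)
{
        handle_map();   /* first map: init path */
        handle_map();   /* later maps: plain unlock path */
        return 0;
}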
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
         struct sk_buff *p;
+        unsigned int maclen = skb->dev->hard_header_len;
 
         for (p = napi->gro_list; p; p = p->next) {
                 unsigned long diffs;
 
                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                 diffs |= p->vlan_tci ^ skb->vlan_tci;
-                diffs |= compare_ether_header(skb_mac_header(p),
-                                              skb_gro_mac_header(skb));
+                if (maclen == ETH_HLEN)
+                        diffs |= compare_ether_header(skb_mac_header(p),
+                                                      skb_gro_mac_header(skb));
+                else if (!diffs)
+                        diffs = memcmp(skb_mac_header(p),
+                                       skb_gro_mac_header(skb),
+                                       maclen);
                 NAPI_GRO_CB(p)->same_flow = !diffs;
                 NAPI_GRO_CB(p)->flush = 0;
         }
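
The GRO hunk stops assuming every MAC header is ETH_HLEN (14) bytes: on devices with a longer hard_header_len, two packets can agree in their first 14 bytes yet belong to different flows, so the full header must be compared. A toy userspace demonstration of that failure mode (the kernel helper compare_ether_header() is modeled here with a 14-byte memcmp()):

/* Two 18-byte MAC headers that match in the first 14 bytes but differ
 * beyond them: an Ethernet-only comparison calls them equal, a
 * full-length comparison does not. */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

int main(void)
{
        unsigned char h1[18], h2[18];
        const unsigned int maclen = sizeof(h1); /* hard_header_len > ETH_HLEN */

        memset(h1, 0xab, sizeof(h1));
        memcpy(h2, h1, sizeof(h1));
        h2[16] ^= 1;    /* differ only past the 14-byte Ethernet header */

        printf("first 14 bytes differ: %d\n", memcmp(h1, h2, ETH_HLEN) != 0);
        printf("full header differs:   %d\n", memcmp(h1, h2, maclen) != 0);
        return 0;
}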
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
         if (!dev->ethtool_ops->flash_device)
                 return -EOPNOTSUPP;
 
+        efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
         return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..2a83914b0277 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
                 write_unlock_bh(&tbl->lock);
                 cond_resched();
                 write_lock_bh(&tbl->lock);
+                nht = rcu_dereference_protected(tbl->nht,
+                                                lockdep_is_held(&tbl->lock));
         }
         /* Cycle through all hash buckets every base_reachable_time/2 ticks.
          * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index aefcd7acbffa..0e950fda9a0a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS    13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+        struct net_generic *ng;
+        size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+        ng = kzalloc(generic_size, GFP_KERNEL);
+        if (ng)
+                ng->len = max_gen_ptrs;
+
+        return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
         struct net_generic *ng, *old_ng;
@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
         if (old_ng->len >= id)
                 goto assign;
 
-        ng = kzalloc(sizeof(struct net_generic) +
-                        id * sizeof(void *), GFP_KERNEL);
+        ng = net_alloc_generic();
         if (ng == NULL)
                 return -ENOMEM;
 
@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
          * the old copy for kfree after a grace period.
          */
 
-        ng->len = id;
         memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
         rcu_assign_pointer(net->gen, ng);
@@ -161,18 +173,6 @@ out_undo:
         goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-        struct net_generic *ng;
-        size_t generic_size = sizeof(struct net_generic) +
-                INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-        ng = kzalloc(generic_size, GFP_KERNEL);
-        if (ng)
-                ng->len = INITIAL_NET_GEN_PTRS;
-
-        return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -483,6 +483,7 @@ again:
                         }
                         return error;
                 }
+                max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
         }
         error = __register_pernet_operations(list, ops);
         if (error) {
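
net_alloc_generic() above sizes its allocation with offsetof(struct net_generic, ptr[max_gen_ptrs]), the usual idiom for a structure ending in a flexible array member: header plus n elements in one expression, with no separate multiply-and-add. A plain C illustration (the struct below is a stand-in invented for this note, not the kernel's net_generic, which also carries an rcu_head):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct generic {
        unsigned int len;
        void *ptr[];                    /* flexible array member */
};

/* offsetof(..., ptr[n]) with a runtime n is the same extension the
 * kernel relies on; GCC and Clang both accept it. */
static struct generic *alloc_generic(unsigned int n)
{
        struct generic *g = calloc(1, offsetof(struct generic, ptr[n]));

        if (g)
                g->len = n;
        return g;
}

int main(void)
{
        struct generic *g = alloc_generic(13);

        if (g)
                printf("len=%u, %zu bytes\n", g->len,
                       offsetof(struct generic, ptr[13]));
        free(g);
        return 0;
}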
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
         poll_napi(dev);
 
-        if (dev->priv_flags & IFF_SLAVE) {
+        if (dev->flags & IFF_SLAVE) {
                 if (dev->npinfo) {
                         struct net_device *bond_dev = dev->master;
                         struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
         spin_lock_irqsave(&prioidx_map_lock, flags);
         prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+        if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+                spin_unlock_irqrestore(&prioidx_map_lock, flags);
+                return -ENOSPC;
+        }
         set_bit(prioidx, prioidx_map);
         spin_unlock_irqrestore(&prioidx_map_lock, flags);
-        if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-                return -ENOSPC;
-
         atomic_set(&max_prioidx, prioidx);
         *prio = prioidx;
         return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
         struct net_device *dev;
-        u32 max_len = atomic_read(&max_prioidx);
+        u32 max_len = atomic_read(&max_prioidx) + 1;
         struct netprio_map *map;
 
         rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
         struct net_device *dev = ptr;
         struct netprio_map *old;
-        u32 max_len = atomic_read(&max_prioidx);
 
         /*
          * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
          */
 
         switch (event) {
-
-        case NETDEV_REGISTER:
-                if (max_len)
-                        extend_netdev_table(dev, max_len);
-                break;
         case NETDEV_UNREGISTER:
                 old = rtnl_dereference(dev->priomap);
                 RCU_INIT_POINTER(dev->priomap, NULL);
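
The get_prioidx() fix moves the full-bitmap test ahead of set_bit() and keeps it under the spinlock, so a failed search can no longer mark a bit one past the end of the map. A simplified single-threaded model of the corrected flow (find_first_zero_bit() and set_bit() are mimicked on one byte here; the kernel versions work on unsigned long bitmaps under prioidx_map_lock):

#include <stdio.h>

#define MAP_BITS 8

static unsigned char map;

static int get_idx(unsigned int *idx)
{
        unsigned int i;

        /* find_first_zero_bit() */
        for (i = 0; i < MAP_BITS && (map & (1u << i)); i++)
                ;
        if (i == MAP_BITS)
                return -1;      /* map full: bail out before set_bit() */
        map |= 1u << i;         /* set_bit() */
        *idx = i;
        return 0;
}

int main(void)
{
        unsigned int idx;

        while (get_idx(&idx) == 0)
                printf("allocated idx %u\n", idx);
        printf("map full, no bit set out of range\n");
        return 0;
}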
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f16444bc6cbb..f965dce6f20f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 }
 
 /* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+                                   u32 ext_filter_mask)
 {
-        if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+        if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+            (ext_filter_mask & RTEXT_FILTER_VF)) {
                 int num_vfs = dev_num_vf(dev->dev.parent);
                 size_t size = nla_total_size(sizeof(struct nlattr));
                 size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
         return port_self_size;
 }
 
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+                                     u32 ext_filter_mask)
 {
         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
                + nla_total_size(4) /* IFLA_MASTER */
                + nla_total_size(1) /* IFLA_OPERSTATE */
                + nla_total_size(1) /* IFLA_LINKMODE */
-               + nla_total_size(4) /* IFLA_NUM_VF */
-               + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+               + nla_total_size(ext_filter_mask
+                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
                + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
                + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
                + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                             int type, u32 pid, u32 seq, u32 change,
-                            unsigned int flags)
+                            unsigned int flags, u32 ext_filter_mask)
 {
         struct ifinfomsg *ifm;
         struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                 goto nla_put_failure;
         copy_rtnl_link_stats64(nla_data(attr), stats);
 
-        if (dev->dev.parent)
+        if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
                 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
 
-        if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+        if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+            && (ext_filter_mask & RTEXT_FILTER_VF)) {
                 int i;
 
                 struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
         struct net_device *dev;
         struct hlist_head *head;
         struct hlist_node *node;
+        struct nlattr *tb[IFLA_MAX+1];
+        u32 ext_filter_mask = 0;
 
         s_h = cb->args[0];
         s_idx = cb->args[1];
@@ -1055,6 +1060,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
         rcu_read_lock();
         cb->seq = net->dev_base_seq;
 
+        if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                        ifla_policy) >= 0) {
+
+                if (tb[IFLA_EXT_MASK])
+                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+        }
+
         for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                 idx = 0;
                 head = &net->dev_index_head[h];
@@ -1064,7 +1076,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                         if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
                                              NETLINK_CB(cb->skb).pid,
                                              cb->nlh->nlmsg_seq, 0,
-                                             NLM_F_MULTI) <= 0)
+                                             NLM_F_MULTI,
+                                             ext_filter_mask) <= 0)
                                 goto out;
 
                         nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1113,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
         [IFLA_VF_PORTS]         = { .type = NLA_NESTED },
         [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
         [IFLA_AF_SPEC]          = { .type = NLA_NESTED },
+        [IFLA_EXT_MASK]         = { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1509,6 +1523,7 @@ errout:
 
         if (send_addr_notify)
                 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+
         return err;
 }
 
@@ -1839,6 +1854,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         struct net_device *dev = NULL;
         struct sk_buff *nskb;
         int err;
+        u32 ext_filter_mask = 0;
 
         err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
         if (err < 0)
@@ -1847,6 +1863,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         if (tb[IFLA_IFNAME])
                 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+        if (tb[IFLA_EXT_MASK])
+                ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
         ifm = nlmsg_data(nlh);
         if (ifm->ifi_index > 0)
                 dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1858,12 +1877,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         if (dev == NULL)
                 return -ENODEV;
 
-        nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+        nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
         if (nskb == NULL)
                 return -ENOBUFS;
 
         err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-                               nlh->nlmsg_seq, 0, 0);
+                               nlh->nlmsg_seq, 0, 0, ext_filter_mask);
         if (err < 0) {
                 /* -EMSGSIZE implies BUG in if_nlmsg_size */
                 WARN_ON(err == -EMSGSIZE);
@@ -1874,8 +1893,32 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         return err;
 }
 
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
+        struct net *net = sock_net(skb->sk);
+        struct net_device *dev;
+        struct nlattr *tb[IFLA_MAX+1];
+        u32 ext_filter_mask = 0;
+        u16 min_ifinfo_dump_size = 0;
+
+        if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                        ifla_policy) >= 0) {
+                if (tb[IFLA_EXT_MASK])
+                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+        }
+
+        if (!ext_filter_mask)
+                return NLMSG_GOODSIZE;
+        /*
+         * traverse the list of net devices and compute the minimum
+         * buffer size based upon the filter mask.
+         */
+        list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+                min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+                                             if_nlmsg_size(dev,
+                                                           ext_filter_mask));
+        }
+
         return min_ifinfo_dump_size;
 }
 
@@ -1910,13 +1953,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
         int err = -ENOBUFS;
         size_t if_info_size;
 
-        skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+        skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
         if (skb == NULL)
                 goto errout;
 
-        min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
-        err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+        err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
         if (err < 0) {
                 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
                 WARN_ON(err == -EMSGSIZE);
@@ -1974,7 +2015,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                         return -EOPNOTSUPP;
                 calcit = rtnl_get_calcit(family, type);
                 if (calcit)
                         min_dump_alloc = calcit(skb, nlh);
 
         __rtnl_unlock();
         rtnl = net->rtnl;
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-        struct cgroup_netprio_state *state;
         if (in_interrupt())
                 return;
-        rcu_read_lock();
-        state = task_netprio_state(current);
-        sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-        rcu_read_unlock();
+
+        sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776f..d183262943d9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
 
 config INET_UDP_DIAG
         tristate "UDP: socket monitoring interface"
-        depends on INET_DIAG
+        depends on INET_DIAG && (IPV6 || IPV6=n)
         default n
         ---help---
           Support for UDP socket monitoring interface used by the ss tool.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f0..63e49890ad31 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
863 if (addr_type == RTN_UNICAST && 863 if (addr_type == RTN_UNICAST &&
864 (arp_fwd_proxy(in_dev, dev, rt) || 864 (arp_fwd_proxy(in_dev, dev, rt) ||
865 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || 865 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
866 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 866 (rt->dst.dev != dev &&
867 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
867 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 868 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
868 if (n) 869 if (n)
869 neigh_release(n); 870 neigh_release(n);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2e4e24476c4c..19d66cefd7d3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -123,11 +123,14 @@ again:
123 smallest_size = tb->num_owners; 123 smallest_size = tb->num_owners;
124 smallest_rover = rover; 124 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
126 spin_unlock(&head->lock);
127 snum = smallest_rover; 126 snum = smallest_rover;
128 goto have_snum; 127 goto tb_found;
129 } 128 }
130 } 129 }
130 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
131 snum = rover;
132 goto tb_found;
133 }
131 goto next; 134 goto next;
132 } 135 }
133 break; 136 break;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bf4a9c4808e1..d4d61b694fab 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/workqueue.h>
20#include <net/ip.h> 21#include <net/ip.h>
21#include <net/inetpeer.h> 22#include <net/inetpeer.h>
22#include <net/secure_seq.h> 23#include <net/secure_seq.h>
@@ -66,6 +67,11 @@
66 67
67static struct kmem_cache *peer_cachep __read_mostly; 68static struct kmem_cache *peer_cachep __read_mostly;
68 69
70static LIST_HEAD(gc_list);
71static const int gc_delay = 60 * HZ;
72static struct delayed_work gc_work;
73static DEFINE_SPINLOCK(gc_lock);
74
69#define node_height(x) x->avl_height 75#define node_height(x) x->avl_height
70 76
71#define peer_avl_empty ((struct inet_peer *)&peer_fake_node) 77#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
102int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ 108int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
103int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ 109int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
104 110
111static void inetpeer_gc_worker(struct work_struct *work)
112{
113 struct inet_peer *p, *n;
114 LIST_HEAD(list);
115
116 spin_lock_bh(&gc_lock);
117 list_replace_init(&gc_list, &list);
118 spin_unlock_bh(&gc_lock);
119
120 if (list_empty(&list))
121 return;
122
123 list_for_each_entry_safe(p, n, &list, gc_list) {
124
125 if(need_resched())
126 cond_resched();
127
128 if (p->avl_left != peer_avl_empty) {
129 list_add_tail(&p->avl_left->gc_list, &list);
130 p->avl_left = peer_avl_empty;
131 }
132
133 if (p->avl_right != peer_avl_empty) {
134 list_add_tail(&p->avl_right->gc_list, &list);
135 p->avl_right = peer_avl_empty;
136 }
137
138 n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
139
140 if (!atomic_read(&p->refcnt)) {
141 list_del(&p->gc_list);
142 kmem_cache_free(peer_cachep, p);
143 }
144 }
145
146 if (list_empty(&list))
147 return;
148
149 spin_lock_bh(&gc_lock);
150 list_splice(&list, &gc_list);
151 spin_unlock_bh(&gc_lock);
152
153 schedule_delayed_work(&gc_work, gc_delay);
154}
105 155
106/* Called from ip_output.c:ip_init */ 156/* Called from ip_output.c:ip_init */
107void __init inet_initpeers(void) 157void __init inet_initpeers(void)
@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
126 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 176 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
127 NULL); 177 NULL);
128 178
179 INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
129} 180}
130 181
131static int addr_compare(const struct inetpeer_addr *a, 182static int addr_compare(const struct inetpeer_addr *a,
@@ -447,9 +498,8 @@ relookup:
447 p->rate_last = 0; 498 p->rate_last = 0;
448 p->pmtu_expires = 0; 499 p->pmtu_expires = 0;
449 p->pmtu_orig = 0; 500 p->pmtu_orig = 0;
450 p->redirect_genid = 0;
451 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); 501 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
452 502 INIT_LIST_HEAD(&p->gc_list);
453 503
454 /* Link the node. */ 504 /* Link the node. */
455 link_to_pool(p, base); 505 link_to_pool(p, base);
@@ -509,3 +559,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
509 return rc; 559 return rc;
510} 560}
511EXPORT_SYMBOL(inet_peer_xrlim_allow); 561EXPORT_SYMBOL(inet_peer_xrlim_allow);
562
563void inetpeer_invalidate_tree(int family)
564{
565 struct inet_peer *old, *new, *prev;
566 struct inet_peer_base *base = family_to_base(family);
567
568 write_seqlock_bh(&base->lock);
569
570 old = base->root;
571 if (old == peer_avl_empty_rcu)
572 goto out;
573
574 new = peer_avl_empty_rcu;
575
576 prev = cmpxchg(&base->root, old, new);
577 if (prev == old) {
578 base->total = 0;
579 spin_lock(&gc_lock);
580 list_add_tail(&prev->gc_list, &gc_list);
581 spin_unlock(&gc_lock);
582 schedule_delayed_work(&gc_work, gc_delay);
583 }
584
585out:
586 write_sequnlock_bh(&base->lock);
587}
588EXPORT_SYMBOL(inetpeer_invalidate_tree);
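
Note the shape of the teardown: inetpeer_invalidate_tree() detaches the whole AVL tree with one cmpxchg() on the root, and the deferrable worker later flattens the tree onto a list as it walks it, freeing only nodes whose refcount has already dropped to zero. A self-contained user-space sketch of that flattening walk (simplified on purpose: no locking, plain int refcounts, illustrative names, and still-referenced nodes are simply skipped instead of requeued):

#include <stdlib.h>

struct node {
	struct node *left, *right;  /* children, or NULL */
	struct node *gc_next;       /* work-list linkage */
	int refcnt;
};

/* Turn the detached tree into a FIFO work list and free it without
 * recursion -- the same walk as inetpeer_gc_worker() above. */
static void gc_tree(struct node *root)
{
	struct node *head, **tail;

	if (!root)
		return;

	head = root;
	root->gc_next = NULL;
	tail = &root->gc_next;

	while (head) {
		struct node *p = head;

		if (p->left) {                  /* queue left child */
			p->left->gc_next = NULL;
			*tail = p->left;
			tail = &p->left->gc_next;
			p->left = NULL;
		}
		if (p->right) {                 /* queue right child */
			p->right->gc_next = NULL;
			*tail = p->right;
			tail = &p->right->gc_next;
			p->right = NULL;
		}

		head = p->gc_next;
		if (p->refcnt == 0)
			free(p);
		/* the kernel worker instead requeues a still-referenced
		 * node and reschedules itself after gc_delay */
	}
}

int main(void)
{
	struct node *r = calloc(1, sizeof(*r));
	r->left  = calloc(1, sizeof(*r));
	r->right = calloc(1, sizeof(*r));
	gc_tree(r);   /* frees all three nodes */
	return 0;
}
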
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2b53a1f7abf6..38673d2860e2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -65,7 +65,7 @@
65 it is infeasible task. The most general solutions would be 65 it is infeasible task. The most general solutions would be
66 to keep skb->encapsulation counter (sort of local ttl), 66 to keep skb->encapsulation counter (sort of local ttl),
67 and silently drop packet when it expires. It is a good 67 and silently drop packet when it expires. It is a good
68 solution, but it supposes maintaing new variable in ALL 68 solution, but it supposes maintaining new variable in ALL
69 skb, even if no tunneling is used. 69 skb, even if no tunneling is used.
70 70
71 Current solution: xmit_recursion breaks dead loops. This is a percpu 71 Current solution: xmit_recursion breaks dead loops. This is a percpu
@@ -91,14 +91,14 @@
91 91
92 One of them is to parse packet trying to detect inner encapsulation 92 One of them is to parse packet trying to detect inner encapsulation
93 made by our node. It is difficult or even impossible, especially, 93 made by our node. It is difficult or even impossible, especially,
94 taking into account fragmentation. TO be short, tt is not solution at all. 94 taking into account fragmentation. TO be short, ttl is not solution at all.
95 95
96 Current solution: The solution was UNEXPECTEDLY SIMPLE. 96 Current solution: The solution was UNEXPECTEDLY SIMPLE.
97 We force DF flag on tunnels with preconfigured hop limit, 97 We force DF flag on tunnels with preconfigured hop limit,
98 that is ALL. :-) Well, it does not remove the problem completely, 98 that is ALL. :-) Well, it does not remove the problem completely,
99 but exponential growth of network traffic is changed to linear 99 but exponential growth of network traffic is changed to linear
100 (branches, that exceed pmtu are pruned) and tunnel mtu 100 (branches, that exceed pmtu are pruned) and tunnel mtu
101 fastly degrades to value <68, where looping stops. 101 rapidly degrades to value <68, where looping stops.
102 Yes, it is not good if there exists a router in the loop, 102 Yes, it is not good if there exists a router in the loop,
103 which does not force DF, even when encapsulating packets have DF set. 103 which does not force DF, even when encapsulating packets have DF set.
104 But it is not our problem! Nobody could accuse us, we made 104 But it is not our problem! Nobody could accuse us, we made
@@ -422,6 +422,10 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
422 if (register_netdevice(dev) < 0) 422 if (register_netdevice(dev) < 0)
423 goto failed_free; 423 goto failed_free;
424 424
425 /* Can use a lockless transmit, unless we generate output sequences */
426 if (!(nt->parms.o_flags & GRE_SEQ))
427 dev->features |= NETIF_F_LLTX;
428
425 dev_hold(dev); 429 dev_hold(dev);
426 ipgre_tunnel_link(ign, nt); 430 ipgre_tunnel_link(ign, nt);
427 return nt; 431 return nt;
@@ -453,8 +457,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
453 GRE tunnels with enabled checksum. Tell them "thank you". 457 GRE tunnels with enabled checksum. Tell them "thank you".
454 458
455 Well, I wonder, rfc1812 was written by Cisco employee, 459 Well, I wonder, rfc1812 was written by Cisco employee,
456 what the hell these idiots break standrads established 460 what the hell these idiots break standards established
457 by themself??? 461 by themselves???
458 */ 462 */
459 463
460 const struct iphdr *iph = (const struct iphdr *)skb->data; 464 const struct iphdr *iph = (const struct iphdr *)skb->data;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f7679075..42dd1a90edea 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
573 } 573 }
574 if (srrptr + 3 <= srrspace) { 574 if (srrptr + 3 <= srrspace) {
575 opt->is_changed = 1; 575 opt->is_changed = 1;
576 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
577 ip_hdr(skb)->daddr = opt->nexthop; 576 ip_hdr(skb)->daddr = opt->nexthop;
577 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
578 optptr[2] = srrptr+4; 578 optptr[2] = srrptr+4;
579 } else if (net_ratelimit()) 579 } else if (net_ratelimit())
580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); 580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index aea5a199c37a..b072386cee21 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -630,6 +630,7 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
630 630
631 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); 631 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
632 632
633 err = -EOPNOTSUPP;
633 if (flags & MSG_OOB) 634 if (flags & MSG_OOB)
634 goto out; 635 goto out;
635 636
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bcacf54e5418..019774796174 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
132static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 132static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
133static int ip_rt_min_advmss __read_mostly = 256; 133static int ip_rt_min_advmss __read_mostly = 256;
134static int rt_chain_length_max __read_mostly = 20; 134static int rt_chain_length_max __read_mostly = 20;
135static int redirect_genid;
136 135
137static struct delayed_work expires_work; 136static struct delayed_work expires_work;
138static unsigned long expires_ljiffies; 137static unsigned long expires_ljiffies;
@@ -937,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
937 936
938 get_random_bytes(&shuffle, sizeof(shuffle)); 937 get_random_bytes(&shuffle, sizeof(shuffle));
939 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 938 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
940 redirect_genid++; 939 inetpeer_invalidate_tree(AF_INET);
941} 940}
942 941
943/* 942/*
@@ -1485,10 +1484,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1485 1484
1486 peer = rt->peer; 1485 peer = rt->peer;
1487 if (peer) { 1486 if (peer) {
1488 if (peer->redirect_learned.a4 != new_gw || 1487 if (peer->redirect_learned.a4 != new_gw) {
1489 peer->redirect_genid != redirect_genid) {
1490 peer->redirect_learned.a4 = new_gw; 1488 peer->redirect_learned.a4 = new_gw;
1491 peer->redirect_genid = redirect_genid;
1492 atomic_inc(&__rt_peer_genid); 1489 atomic_inc(&__rt_peer_genid);
1493 } 1490 }
1494 check_peer_redir(&rt->dst, peer); 1491 check_peer_redir(&rt->dst, peer);
@@ -1793,8 +1790,6 @@ static void ipv4_validate_peer(struct rtable *rt)
1793 if (peer) { 1790 if (peer) {
1794 check_peer_pmtu(&rt->dst, peer); 1791 check_peer_pmtu(&rt->dst, peer);
1795 1792
1796 if (peer->redirect_genid != redirect_genid)
1797 peer->redirect_learned.a4 = 0;
1798 if (peer->redirect_learned.a4 && 1793 if (peer->redirect_learned.a4 &&
1799 peer->redirect_learned.a4 != rt->rt_gateway) 1794 peer->redirect_learned.a4 != rt->rt_gateway)
1800 check_peer_redir(&rt->dst, peer); 1795 check_peer_redir(&rt->dst, peer);
@@ -1958,8 +1953,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1958 dst_init_metrics(&rt->dst, peer->metrics, false); 1953 dst_init_metrics(&rt->dst, peer->metrics, false);
1959 1954
1960 check_peer_pmtu(&rt->dst, peer); 1955 check_peer_pmtu(&rt->dst, peer);
1961 if (peer->redirect_genid != redirect_genid) 1956
1962 peer->redirect_learned.a4 = 0;
1963 if (peer->redirect_learned.a4 && 1957 if (peer->redirect_learned.a4 &&
1964 peer->redirect_learned.a4 != rt->rt_gateway) { 1958 peer->redirect_learned.a4 != rt->rt_gateway) {
1965 rt->rt_gateway = peer->redirect_learned.a4; 1959 rt->rt_gateway = peer->redirect_learned.a4;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4aa7e9dc0cbb..7a7724da9bff 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
778static __net_init int ipv4_sysctl_init_net(struct net *net) 778static __net_init int ipv4_sysctl_init_net(struct net *net)
779{ 779{
780 struct ctl_table *table; 780 struct ctl_table *table;
781 unsigned long limit;
782 781
783 table = ipv4_net_table; 782 table = ipv4_net_table;
784 if (!net_eq(net, &init_net)) { 783 if (!net_eq(net, &init_net)) {
@@ -814,11 +813,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
814 813
815 net->ipv4.sysctl_rt_cache_rebuild_count = 4; 814 net->ipv4.sysctl_rt_cache_rebuild_count = 4;
816 815
817 limit = nr_free_buffer_pages() / 8; 816 tcp_init_mem(net);
818 limit = max(limit, 128UL);
819 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
820 net->ipv4.sysctl_tcp_mem[1] = limit;
821 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
822 817
823 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 818 net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
824 net_ipv4_ctl_path, table); 819 net_ipv4_ctl_path, table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bcdec3ad772..22ef5f9fd2ff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
1876} 1876}
1877EXPORT_SYMBOL(tcp_shutdown); 1877EXPORT_SYMBOL(tcp_shutdown);
1878 1878
1879bool tcp_check_oom(struct sock *sk, int shift)
1880{
1881 bool too_many_orphans, out_of_socket_memory;
1882
1883 too_many_orphans = tcp_too_many_orphans(sk, shift);
1884 out_of_socket_memory = tcp_out_of_memory(sk);
1885
1886 if (too_many_orphans && net_ratelimit())
1887 pr_info("TCP: too many orphaned sockets\n");
1888 if (out_of_socket_memory && net_ratelimit())
1889 pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
1890 return too_many_orphans || out_of_socket_memory;
1891}
1892
1879void tcp_close(struct sock *sk, long timeout) 1893void tcp_close(struct sock *sk, long timeout)
1880{ 1894{
1881 struct sk_buff *skb; 1895 struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
2015 } 2029 }
2016 if (sk->sk_state != TCP_CLOSE) { 2030 if (sk->sk_state != TCP_CLOSE) {
2017 sk_mem_reclaim(sk); 2031 sk_mem_reclaim(sk);
2018 if (tcp_too_many_orphans(sk, 0)) { 2032 if (tcp_check_oom(sk, 0)) {
2019 if (net_ratelimit())
2020 printk(KERN_INFO "TCP: too many of orphaned "
2021 "sockets\n");
2022 tcp_set_state(sk, TCP_CLOSE); 2033 tcp_set_state(sk, TCP_CLOSE);
2023 tcp_send_active_reset(sk, GFP_ATOMIC); 2034 tcp_send_active_reset(sk, GFP_ATOMIC);
2024 NET_INC_STATS_BH(sock_net(sk), 2035 NET_INC_STATS_BH(sock_net(sk),
@@ -3216,11 +3227,21 @@ static int __init set_thash_entries(char *str)
3216} 3227}
3217__setup("thash_entries=", set_thash_entries); 3228__setup("thash_entries=", set_thash_entries);
3218 3229
3230void tcp_init_mem(struct net *net)
3231{
3232 unsigned long limit = nr_free_buffer_pages() / 8;
3233 limit = max(limit, 128UL);
3234 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
3235 net->ipv4.sysctl_tcp_mem[1] = limit;
3236 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
3237}
3238
3219void __init tcp_init(void) 3239void __init tcp_init(void)
3220{ 3240{
3221 struct sk_buff *skb = NULL; 3241 struct sk_buff *skb = NULL;
3222 unsigned long limit; 3242 unsigned long limit;
3223 int i, max_share, cnt; 3243 int max_share, cnt;
3244 unsigned int i;
3224 unsigned long jiffy = jiffies; 3245 unsigned long jiffy = jiffies;
3225 3246
3226 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3247 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3263,7 +3284,7 @@ void __init tcp_init(void)
3263 &tcp_hashinfo.bhash_size, 3284 &tcp_hashinfo.bhash_size,
3264 NULL, 3285 NULL,
3265 64 * 1024); 3286 64 * 1024);
3266 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; 3287 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3267 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3288 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3268 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 3289 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3269 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3290 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
@@ -3276,9 +3297,10 @@ void __init tcp_init(void)
3276 sysctl_tcp_max_orphans = cnt / 2; 3297 sysctl_tcp_max_orphans = cnt / 2;
3277 sysctl_max_syn_backlog = max(128, cnt / 256); 3298 sysctl_max_syn_backlog = max(128, cnt / 256);
3278 3299
3300 tcp_init_mem(&init_net);
3279 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3301 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3280 limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1]) 3302 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
3281 << (PAGE_SHIFT - 7); 3303 limit = max(limit, 128UL);
3282 max_share = min(4UL*1024*1024, limit); 3304 max_share = min(4UL*1024*1024, limit);
3283 3305
3284 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3306 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
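
tcp_init_mem() preserves the old defaults but can now run per namespace, and tcp_init() derives the per-socket buffer cap from free pages directly instead of reading init_net's tcp_mem[1]. The two expressions are intentionally equal: tcp_mem[1] is nr_free/8 pages, so 1/128 of it in bytes is nr_free * PAGE_SIZE / 1024, i.e. nr_free << (PAGE_SHIFT - 10). A worked example of the arithmetic, assuming (purely for illustration) 262144 free buffer pages, 1 GiB at 4 KiB per page:

#include <stdio.h>

#define PAGE_SHIFT 12                     /* assumed: 4 KiB pages */

int main(void)
{
	unsigned long nr_free = 262144UL; /* assumed: 1 GiB of free buffer pages */
	unsigned long limit, tcp_mem[3], max_share;

	/* tcp_init_mem(): pressure thresholds, counted in pages */
	limit = nr_free / 8;              /* 32768 */
	if (limit < 128UL)
		limit = 128UL;
	tcp_mem[0] = limit / 4 * 3;       /* 24576 pages =  96 MiB */
	tcp_mem[1] = limit;               /* 32768 pages = 128 MiB */
	tcp_mem[2] = tcp_mem[0] * 2;      /* 49152 pages = 192 MiB */

	/* tcp_init(): per-socket cap in bytes, 1/128 of the pressure
	 * threshold, computed without touching init_net */
	limit = nr_free << (PAGE_SHIFT - 10);   /* 1048576 bytes = 1 MiB */
	if (limit < 128UL)
		limit = 128UL;
	max_share = limit < 4UL * 1024 * 1024 ? limit : 4UL * 1024 * 1024;

	printf("tcp_mem = { %lu, %lu, %lu } pages, max_share = %lu bytes\n",
	       tcp_mem[0], tcp_mem[1], tcp_mem[2], max_share);
	return 0;
}
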
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..b5e315f13641 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1307 return in_sack; 1307 return in_sack;
1308} 1308}
1309 1309
1310static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, 1310/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1311 struct tcp_sacktag_state *state, 1311static u8 tcp_sacktag_one(struct sock *sk,
1312 struct tcp_sacktag_state *state, u8 sacked,
1313 u32 start_seq, u32 end_seq,
1312 int dup_sack, int pcount) 1314 int dup_sack, int pcount)
1313{ 1315{
1314 struct tcp_sock *tp = tcp_sk(sk); 1316 struct tcp_sock *tp = tcp_sk(sk);
1315 u8 sacked = TCP_SKB_CB(skb)->sacked;
1316 int fack_count = state->fack_count; 1317 int fack_count = state->fack_count;
1317 1318
1318 /* Account D-SACK for retransmitted packet. */ 1319 /* Account D-SACK for retransmitted packet. */
1319 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1320 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1320 if (tp->undo_marker && tp->undo_retrans && 1321 if (tp->undo_marker && tp->undo_retrans &&
1321 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1322 after(end_seq, tp->undo_marker))
1322 tp->undo_retrans--; 1323 tp->undo_retrans--;
1323 if (sacked & TCPCB_SACKED_ACKED) 1324 if (sacked & TCPCB_SACKED_ACKED)
1324 state->reord = min(fack_count, state->reord); 1325 state->reord = min(fack_count, state->reord);
1325 } 1326 }
1326 1327
1327 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1328 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1328 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1329 if (!after(end_seq, tp->snd_una))
1329 return sacked; 1330 return sacked;
1330 1331
1331 if (!(sacked & TCPCB_SACKED_ACKED)) { 1332 if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1344 /* New sack for not retransmitted frame, 1345 /* New sack for not retransmitted frame,
1345 * which was in hole. It is reordering. 1346 * which was in hole. It is reordering.
1346 */ 1347 */
1347 if (before(TCP_SKB_CB(skb)->seq, 1348 if (before(start_seq,
1348 tcp_highest_sack_seq(tp))) 1349 tcp_highest_sack_seq(tp)))
1349 state->reord = min(fack_count, 1350 state->reord = min(fack_count,
1350 state->reord); 1351 state->reord);
1351 1352
1352 /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 1353 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
1353 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) 1354 if (!after(end_seq, tp->frto_highmark))
1354 state->flag |= FLAG_ONLY_ORIG_SACKED; 1355 state->flag |= FLAG_ONLY_ORIG_SACKED;
1355 } 1356 }
1356 1357
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1368 1369
1369 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1370 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1370 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1371 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
1371 before(TCP_SKB_CB(skb)->seq, 1372 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1372 TCP_SKB_CB(tp->lost_skb_hint)->seq))
1373 tp->lost_cnt_hint += pcount; 1373 tp->lost_cnt_hint += pcount;
1374 1374
1375 if (fack_count > tp->fackets_out) 1375 if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
1388 return sacked; 1388 return sacked;
1389} 1389}
1390 1390
1391/* Shift newly-SACKed bytes from this skb to the immediately previous
1392 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1393 */
1391static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1394static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1392 struct tcp_sacktag_state *state, 1395 struct tcp_sacktag_state *state,
1393 unsigned int pcount, int shifted, int mss, 1396 unsigned int pcount, int shifted, int mss,
@@ -1395,9 +1398,20 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1395{ 1398{
1396 struct tcp_sock *tp = tcp_sk(sk); 1399 struct tcp_sock *tp = tcp_sk(sk);
1397 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1400 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
1401 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1402 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1398 1403
1399 BUG_ON(!pcount); 1404 BUG_ON(!pcount);
1400 1405
1406 /* Adjust counters and hints for the newly sacked sequence
1407 * range but discard the return value since prev is already
1408 * marked. We must tag the range first because the seq
1409 * advancement below implicitly advances
1410 * tcp_highest_sack_seq() when skb is highest_sack.
1411 */
1412 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1413 start_seq, end_seq, dup_sack, pcount);
1414
1401 if (skb == tp->lost_skb_hint) 1415 if (skb == tp->lost_skb_hint)
1402 tp->lost_cnt_hint += pcount; 1416 tp->lost_cnt_hint += pcount;
1403 1417
@@ -1424,9 +1438,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1424 skb_shinfo(skb)->gso_type = 0; 1438 skb_shinfo(skb)->gso_type = 0;
1425 } 1439 }
1426 1440
1427 /* We discard results */
1428 tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
1429
1430 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1441 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1431 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1442 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1432 1443
@@ -1574,6 +1585,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1574 } 1585 }
1575 } 1586 }
1576 1587
1588 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1589 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1590 goto fallback;
1591
1577 if (!skb_shift(prev, skb, len)) 1592 if (!skb_shift(prev, skb, len))
1578 goto fallback; 1593 goto fallback;
1579 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) 1594 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -1664,10 +1679,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1664 break; 1679 break;
1665 1680
1666 if (in_sack) { 1681 if (in_sack) {
1667 TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, 1682 TCP_SKB_CB(skb)->sacked =
1668 state, 1683 tcp_sacktag_one(sk,
1669 dup_sack, 1684 state,
1670 tcp_skb_pcount(skb)); 1685 TCP_SKB_CB(skb)->sacked,
1686 TCP_SKB_CB(skb)->seq,
1687 TCP_SKB_CB(skb)->end_seq,
1688 dup_sack,
1689 tcp_skb_pcount(skb));
1671 1690
1672 if (!before(TCP_SKB_CB(skb)->seq, 1691 if (!before(TCP_SKB_CB(skb)->seq,
1673 tcp_highest_sack_seq(tp))) 1692 tcp_highest_sack_seq(tp)))
@@ -2554,6 +2573,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2554 2573
2555 if (cnt > packets) { 2574 if (cnt > packets) {
2556 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || 2575 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2576 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
2557 (oldcnt >= packets)) 2577 (oldcnt >= packets))
2558 break; 2578 break;
2559 2579
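
All the sequence tests in these SACK hunks (before(), after(), !after()) are wrap-safe modulo-2^32 comparisons; handing raw start_seq/end_seq values to tcp_sacktag_one() is only correct because of that. Their well-known definition, with a quick self-check:

#include <assert.h>
#include <stdint.h>

/* Same shape as the kernel's before()/after() on TCP sequence numbers:
 * subtract modulo 2^32 and let the sign bit decide the ordering. */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

int main(void)
{
	assert(before(1, 2));
	assert(after(2, 1));
	/* still holds across the wrap: 0xffffffff precedes 5 */
	assert(before(0xffffffffu, 5));
	assert(!before(5, 0xffffffffu));
	return 0;
}
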
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..94d683a61cba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
651 arg.iov[0].iov_len, IPPROTO_TCP, 0); 651 arg.iov[0].iov_len, IPPROTO_TCP, 0);
652 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 652 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
653 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 653 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
654 /* When socket is gone, all binding information is lost.
655 * routing might fail in this case. using iif for oif to
656 * make sure we can deliver it
657 */
658 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
654 659
655 net = dev_net(skb_dst(skb)->dev); 660 net = dev_net(skb_dst(skb)->dev);
656 arg.tos = ip_hdr(skb)->tos; 661 arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8de2780c7a..4ff3b6dc74fc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1141,11 +1141,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1141 sk_mem_uncharge(sk, len); 1141 sk_mem_uncharge(sk, len);
1142 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1142 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1143 1143
1144 /* Any change of skb->len requires recalculation of tso 1144 /* Any change of skb->len requires recalculation of tso factor. */
1145 * factor and mss.
1146 */
1147 if (tcp_skb_pcount(skb) > 1) 1145 if (tcp_skb_pcount(skb) > 1)
1148 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk)); 1146 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
1149 1147
1150 return 0; 1148 return 0;
1151} 1149}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399df..cd2e0723266d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
77 if (sk->sk_err_soft) 77 if (sk->sk_err_soft)
78 shift++; 78 shift++;
79 79
80 if (tcp_too_many_orphans(sk, shift)) { 80 if (tcp_check_oom(sk, shift)) {
81 if (net_ratelimit())
82 printk(KERN_INFO "Out of socket memory\n");
83
84 /* Catch exceptional cases, when connection requires reset. 81 /* Catch exceptional cases, when connection requires reset.
85 * 1. Last segment was sent recently. */ 82 * 1. Last segment was sent recently. */
86 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || 83 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 63418185f524..e3db3f915114 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
110 110
111 skb_push(skb, sizeof(*iph)); 111 skb_push(skb, sizeof(*iph));
112 skb_reset_network_header(skb); 112 skb_reset_network_header(skb);
113 113 skb_mac_header_rebuild(skb);
114 memmove(skb->data - skb->mac_len, skb_mac_header(skb),
115 skb->mac_len);
116 skb_set_mac_header(skb, -skb->mac_len);
117 114
118 xfrm4_beet_make_header(skb); 115 xfrm4_beet_make_header(skb);
119 116
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 534972e114ac..ed4bf11ef9f4 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
66 66
67static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 67static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
68{ 68{
69 const unsigned char *old_mac;
70 int err = -EINVAL; 69 int err = -EINVAL;
71 70
72 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) 71 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
84 if (!(x->props.flags & XFRM_STATE_NOECN)) 83 if (!(x->props.flags & XFRM_STATE_NOECN))
85 ipip_ecn_decapsulate(skb); 84 ipip_ecn_decapsulate(skb);
86 85
87 old_mac = skb_mac_header(skb);
88 skb_set_mac_header(skb, -skb->mac_len);
89 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
90 skb_reset_network_header(skb); 86 skb_reset_network_header(skb);
87 skb_mac_header_rebuild(skb);
88
91 err = 0; 89 err = 0;
92 90
93out: 91out:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c02280a4d126..6b8ebc5da0e1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -434,6 +434,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
434 /* Join all-node multicast group */ 434 /* Join all-node multicast group */
435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
436 436
437 /* Join all-router multicast group if forwarding is set */
438 if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
439 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
440
437 return ndev; 441 return ndev;
438} 442}
439 443
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c7e95c8c579f..5aa3981a3922 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1926,8 +1926,10 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1926 }; 1926 };
1927 1927
1928 dst = ip6_route_output(net, NULL, &fl6); 1928 dst = ip6_route_output(net, NULL, &fl6);
1929 if (!dst) 1929 if (dst->error) {
1930 dst_release(dst);
1930 goto out_free; 1931 goto out_free;
1932 }
1931 1933
1932 skb_dst_drop(skb); 1934 skb_dst_drop(skb);
1933 skb_dst_set(skb, dst); 1935 skb_dst_set(skb, dst);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8f02ef88e59..c964958ac470 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1545,9 +1545,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1545 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); 1545 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
1546 1546
1547 dst = ip6_route_output(net, NULL, &fl6); 1547 dst = ip6_route_output(net, NULL, &fl6);
1548 if (dst == NULL) 1548 if (dst->error) {
1549 dst_release(dst);
1549 return; 1550 return;
1550 1551 }
1551 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); 1552 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
1552 if (IS_ERR(dst)) 1553 if (IS_ERR(dst))
1553 return; 1554 return;
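
The ip6mr and ndisc hunks above, and the xt_TEE hunk further down, all fix the same misuse: ip6_route_output() never returns NULL. Failure comes back as a valid dst whose ->error field is set, and that dst still carries a reference which must be dropped. The corrected calling pattern, exactly as these hunks apply it:

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);   /* error dst still holds a reference */
		return;             /* or branch to the local error path */
	}
	/* dst is usable from here on */
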
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index a81ce9450750..9949a356d62c 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) 80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
81{ 81{
82 struct ipv6hdr *ip6h; 82 struct ipv6hdr *ip6h;
83 const unsigned char *old_mac;
84 int size = sizeof(struct ipv6hdr); 83 int size = sizeof(struct ipv6hdr);
85 int err; 84 int err;
86 85
@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
90 89
91 __skb_push(skb, size); 90 __skb_push(skb, size);
92 skb_reset_network_header(skb); 91 skb_reset_network_header(skb);
93 92 skb_mac_header_rebuild(skb);
94 old_mac = skb_mac_header(skb);
95 skb_set_mac_header(skb, -skb->mac_len);
96 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
97 93
98 xfrm6_beet_make_header(skb); 94 xfrm6_beet_make_header(skb);
99 95
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 261e6e6f487e..9f2095b19ad0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
64{ 64{
65 int err = -EINVAL; 65 int err = -EINVAL;
66 const unsigned char *old_mac;
67 66
68 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6) 67 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
69 goto out; 68 goto out;
@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
80 if (!(x->props.flags & XFRM_STATE_NOECN)) 79 if (!(x->props.flags & XFRM_STATE_NOECN))
81 ipip6_ecn_decapsulate(skb); 80 ipip6_ecn_decapsulate(skb);
82 81
83 old_mac = skb_mac_header(skb);
84 skb_set_mac_header(skb, -skb->mac_len);
85 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
86 skb_reset_network_header(skb); 82 skb_reset_network_header(skb);
83 skb_mac_header_rebuild(skb);
84
87 err = 0; 85 err = 0;
88 86
89out: 87out:
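
Four xfrm input paths in this merge (beet and tunnel modes, IPv4 and IPv6) collapse the same three-line idiom into skb_mac_header_rebuild(). Judging from the removed lines, the helper is presumably equivalent to the sketch below; the real definition lives in skbuff.h and likely also guards against a MAC header that was never set:

/* Move the saved MAC header so it again sits immediately before
 * skb->data, then repoint the MAC header offset at it -- the exact
 * open-coded sequence the hunks above delete. */
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb->mac_len) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}
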
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d21e7ebd91ca..55670ec3cd0f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
393{ 393{
394 int rc; 394 int rc;
395 395
396 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
397 goto drop;
398
399 nf_reset(skb);
400
401 /* Charge it to the socket, dropping if the queue is full. */ 396 /* Charge it to the socket, dropping if the queue is full. */
402 rc = sock_queue_rcv_skb(sk, skb); 397 rc = sock_queue_rcv_skb(sk, skb);
403 if (rc < 0) 398 if (rc < 0)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 2406b3e7393f..d86217d56bd7 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,14 +63,14 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), 69 TEST(SHORT_PREAMBLE),
70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH)); 73 TEST(TDLS_PEER_AUTH), TEST(RATE_CONTROL));
74#undef TEST 74#undef TEST
75 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 75 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
76} 76}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b3d76b756cd5..a4643969a13b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -106,6 +106,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
106 106
107 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 107 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
108 108
109 local->oper_channel = chan;
109 channel_type = ifibss->channel_type; 110 channel_type = ifibss->channel_type;
110 if (channel_type > NL80211_CHAN_HT20 && 111 if (channel_type > NL80211_CHAN_HT20 &&
111 !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type)) 112 !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e47768cb8cb3..8e2137bd87e2 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1314,6 +1314,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1314 continue; 1314 continue;
1315 } 1315 }
1316 /* count everything else */ 1316 /* count everything else */
1317 sdata->vif.bss_conf.idle = false;
1317 count++; 1318 count++;
1318 } 1319 }
1319 1320
@@ -1331,6 +1332,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1331 hw_roc = true; 1332 hw_roc = true;
1332 1333
1333 list_for_each_entry(sdata, &local->interfaces, list) { 1334 list_for_each_entry(sdata, &local->interfaces, list) {
1335 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1336 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1337 continue;
1334 if (sdata->old_idle == sdata->vif.bss_conf.idle) 1338 if (sdata->old_idle == sdata->vif.bss_conf.idle)
1335 continue; 1339 continue;
1336 if (!ieee80211_sdata_running(sdata)) 1340 if (!ieee80211_sdata_running(sdata))
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0a0d94ad9b08..b142bd4c2390 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
911 result); 911 result);
912 912
913 ieee80211_led_init(local);
914
913 rtnl_lock(); 915 rtnl_lock();
914 916
915 result = ieee80211_init_rate_ctrl_alg(local, 917 result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
931 933
932 rtnl_unlock(); 934 rtnl_unlock();
933 935
934 ieee80211_led_init(local);
935
936 local->network_latency_notifier.notifier_call = 936 local->network_latency_notifier.notifier_call =
937 ieee80211_max_network_latency; 937 ieee80211_max_network_latency;
938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 5a5a7767d541..f9b8e819ca63 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -336,7 +336,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
336 int i; 336 int i;
337 u32 mask; 337 u32 mask;
338 338
339 if (sta) { 339 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
340 ista = &sta->sta; 340 ista = &sta->sta;
341 priv_sta = sta->rate_ctrl_priv; 341 priv_sta = sta->rate_ctrl_priv;
342 } 342 }
@@ -344,7 +344,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
344 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 344 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
345 info->control.rates[i].idx = -1; 345 info->control.rates[i].idx = -1;
346 info->control.rates[i].flags = 0; 346 info->control.rates[i].flags = 0;
347 info->control.rates[i].count = 1; 347 info->control.rates[i].count = 0;
348 } 348 }
349 349
350 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 350 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 168427b0ffdc..80cfc006dd74 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -41,7 +41,7 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
41 struct ieee80211_sta *ista = &sta->sta; 41 struct ieee80211_sta *ista = &sta->sta;
42 void *priv_sta = sta->rate_ctrl_priv; 42 void *priv_sta = sta->rate_ctrl_priv;
43 43
44 if (!ref) 44 if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
45 return; 45 return;
46 46
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
@@ -62,6 +62,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
62 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 62 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
63 63
64 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 64 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
65 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
65} 66}
66 67
67static inline void rate_control_rate_update(struct ieee80211_local *local, 68static inline void rate_control_rate_update(struct ieee80211_local *local,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 751409120769..5a5e504a8ffb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 611 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
612 tid_agg_rx->buf_size; 612 tid_agg_rx->buf_size;
613 if (!tid_agg_rx->reorder_buf[index] && 613 if (!tid_agg_rx->reorder_buf[index] &&
614 tid_agg_rx->stored_mpdu_num > 1) { 614 tid_agg_rx->stored_mpdu_num) {
615 /* 615 /*
616 * No buffers ready to be released, but check whether any 616 * No buffers ready to be released, but check whether any
617 * frames in the reorder buffer have timed out. 617 * frames in the reorder buffer have timed out.
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 6f77f12dc3fc..bfed851d0d36 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -52,6 +52,7 @@
52 * @WLAN_STA_SP: Station is in a service period, so don't try to 52 * @WLAN_STA_SP: Station is in a service period, so don't try to
53 * reply to other uAPSD trigger frames or PS-Poll. 53 * reply to other uAPSD trigger frames or PS-Poll.
54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
55 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
55 */ 56 */
56enum ieee80211_sta_info_flags { 57enum ieee80211_sta_info_flags {
57 WLAN_STA_AUTH, 58 WLAN_STA_AUTH,
@@ -71,6 +72,7 @@ enum ieee80211_sta_info_flags {
71 WLAN_STA_UAPSD, 72 WLAN_STA_UAPSD,
72 WLAN_STA_SP, 73 WLAN_STA_SP,
73 WLAN_STA_4ADDR_EVENT, 74 WLAN_STA_4ADDR_EVENT,
75 WLAN_STA_RATE_CONTROL,
74}; 76};
75 77
76enum ieee80211_sta_state { 78enum ieee80211_sta_state {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 611c3359b94d..2555816e7788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
232 __be16 dport = 0; /* destination port to forward */ 232 __be16 dport = 0; /* destination port to forward */
233 unsigned int flags; 233 unsigned int flags;
234 struct ip_vs_conn_param param; 234 struct ip_vs_conn_param param;
235 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
235 union nf_inet_addr snet; /* source network of the client, 236 union nf_inet_addr snet; /* source network of the client,
236 after masking */ 237 after masking */
237 238
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
267 { 268 {
268 int protocol = iph.protocol; 269 int protocol = iph.protocol;
269 const union nf_inet_addr *vaddr = &iph.daddr; 270 const union nf_inet_addr *vaddr = &iph.daddr;
270 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
271 __be16 vport = 0; 271 __be16 vport = 0;
272 272
273 if (dst_port == svc->port) { 273 if (dst_port == svc->port) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 76613f5a55c0..fa4b82c8ae80 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -404,19 +404,49 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
404 &net->ct.hash[repl_hash]); 404 &net->ct.hash[repl_hash]);
405} 405}
406 406
407void nf_conntrack_hash_insert(struct nf_conn *ct) 407int
408nf_conntrack_hash_check_insert(struct nf_conn *ct)
408{ 409{
409 struct net *net = nf_ct_net(ct); 410 struct net *net = nf_ct_net(ct);
410 unsigned int hash, repl_hash; 411 unsigned int hash, repl_hash;
412 struct nf_conntrack_tuple_hash *h;
413 struct hlist_nulls_node *n;
411 u16 zone; 414 u16 zone;
412 415
413 zone = nf_ct_zone(ct); 416 zone = nf_ct_zone(ct);
414 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 417 hash = hash_conntrack(net, zone,
415 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 418 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
419 repl_hash = hash_conntrack(net, zone,
420 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
421
422 spin_lock_bh(&nf_conntrack_lock);
423
424 /* See if there's one in the list already, including reverse */
425 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
426 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
427 &h->tuple) &&
428 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
429 goto out;
430 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
431 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
432 &h->tuple) &&
433 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
434 goto out;
416 435
436 add_timer(&ct->timeout);
437 nf_conntrack_get(&ct->ct_general);
417 __nf_conntrack_hash_insert(ct, hash, repl_hash); 438 __nf_conntrack_hash_insert(ct, hash, repl_hash);
439 NF_CT_STAT_INC(net, insert);
440 spin_unlock_bh(&nf_conntrack_lock);
441
442 return 0;
443
444out:
445 NF_CT_STAT_INC(net, insert_failed);
446 spin_unlock_bh(&nf_conntrack_lock);
447 return -EEXIST;
418} 448}
419EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); 449EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
420 450
421/* Confirm a connection given skb; places it in hash table */ 451/* Confirm a connection given skb; places it in hash table */
422int 452int
@@ -605,8 +635,12 @@ static noinline int early_drop(struct net *net, unsigned int hash)
605 635
606 if (del_timer(&ct->timeout)) { 636 if (del_timer(&ct->timeout)) {
607 death_by_timeout((unsigned long)ct); 637 death_by_timeout((unsigned long)ct);
608 dropped = 1; 638 /* Check if we indeed killed this entry. Reliable event
609 NF_CT_STAT_INC_ATOMIC(net, early_drop); 639 delivery may have inserted it into the dying list. */
640 if (test_bit(IPS_DYING_BIT, &ct->status)) {
641 dropped = 1;
642 NF_CT_STAT_INC_ATOMIC(net, early_drop);
643 }
610 } 644 }
611 nf_ct_put(ct); 645 nf_ct_put(ct);
612 return dropped; 646 return dropped;
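
nf_conntrack_hash_check_insert() replaces the old fire-and-forget insert with a locked check-then-insert: under nf_conntrack_lock it scans both hash chains for a tuple (and zone) match, returns -EEXIST on a race, and only on success arms the timeout timer and takes the extra reference itself. Callers therefore reduce to the pattern the ctnetlink hunk below adopts:

	err = nf_conntrack_hash_check_insert(ct);
	if (err < 0)        /* -EEXIST: another CPU inserted it first */
		goto err2;  /* unwind; no timer armed, no reference taken */
	/* ct is now hashed, its timer is running, and we hold a reference */
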
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9307b033c0c9..10687692831e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1041,16 +1041,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1041 if (!parse_nat_setup) { 1041 if (!parse_nat_setup) {
1042#ifdef CONFIG_MODULES 1042#ifdef CONFIG_MODULES
1043 rcu_read_unlock(); 1043 rcu_read_unlock();
1044 spin_unlock_bh(&nf_conntrack_lock);
1045 nfnl_unlock(); 1044 nfnl_unlock();
1046 if (request_module("nf-nat-ipv4") < 0) { 1045 if (request_module("nf-nat-ipv4") < 0) {
1047 nfnl_lock(); 1046 nfnl_lock();
1048 spin_lock_bh(&nf_conntrack_lock);
1049 rcu_read_lock(); 1047 rcu_read_lock();
1050 return -EOPNOTSUPP; 1048 return -EOPNOTSUPP;
1051 } 1049 }
1052 nfnl_lock(); 1050 nfnl_lock();
1053 spin_lock_bh(&nf_conntrack_lock);
1054 rcu_read_lock(); 1051 rcu_read_lock();
1055 if (nfnetlink_parse_nat_setup_hook) 1052 if (nfnetlink_parse_nat_setup_hook)
1056 return -EAGAIN; 1053 return -EAGAIN;
@@ -1367,15 +1364,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1367 nf_ct_protonum(ct)); 1364 nf_ct_protonum(ct));
1368 if (helper == NULL) { 1365 if (helper == NULL) {
1369 rcu_read_unlock(); 1366 rcu_read_unlock();
1370 spin_unlock_bh(&nf_conntrack_lock);
1371#ifdef CONFIG_MODULES 1367#ifdef CONFIG_MODULES
1372 if (request_module("nfct-helper-%s", helpname) < 0) { 1368 if (request_module("nfct-helper-%s", helpname) < 0) {
1373 spin_lock_bh(&nf_conntrack_lock);
1374 err = -EOPNOTSUPP; 1369 err = -EOPNOTSUPP;
1375 goto err1; 1370 goto err1;
1376 } 1371 }
1377 1372
1378 spin_lock_bh(&nf_conntrack_lock);
1379 rcu_read_lock(); 1373 rcu_read_lock();
1380 helper = __nf_conntrack_helper_find(helpname, 1374 helper = __nf_conntrack_helper_find(helpname,
1381 nf_ct_l3num(ct), 1375 nf_ct_l3num(ct),
@@ -1468,8 +1462,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1468 if (tstamp) 1462 if (tstamp)
1469 tstamp->start = ktime_to_ns(ktime_get_real()); 1463 tstamp->start = ktime_to_ns(ktime_get_real());
1470 1464
1471 add_timer(&ct->timeout); 1465 err = nf_conntrack_hash_check_insert(ct);
1472 nf_conntrack_hash_insert(ct); 1466 if (err < 0)
1467 goto err2;
1468
1473 rcu_read_unlock(); 1469 rcu_read_unlock();
1474 1470
1475 return ct; 1471 return ct;
@@ -1490,6 +1486,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1490 struct nf_conntrack_tuple otuple, rtuple; 1486 struct nf_conntrack_tuple otuple, rtuple;
1491 struct nf_conntrack_tuple_hash *h = NULL; 1487 struct nf_conntrack_tuple_hash *h = NULL;
1492 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1488 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1489 struct nf_conn *ct;
1493 u_int8_t u3 = nfmsg->nfgen_family; 1490 u_int8_t u3 = nfmsg->nfgen_family;
1494 u16 zone; 1491 u16 zone;
1495 int err; 1492 int err;
@@ -1510,27 +1507,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1510 return err; 1507 return err;
1511 } 1508 }
1512 1509
1513 spin_lock_bh(&nf_conntrack_lock);
1514 if (cda[CTA_TUPLE_ORIG]) 1510 if (cda[CTA_TUPLE_ORIG])
1515 h = __nf_conntrack_find(net, zone, &otuple); 1511 h = nf_conntrack_find_get(net, zone, &otuple);
1516 else if (cda[CTA_TUPLE_REPLY]) 1512 else if (cda[CTA_TUPLE_REPLY])
1517 h = __nf_conntrack_find(net, zone, &rtuple); 1513 h = nf_conntrack_find_get(net, zone, &rtuple);
1518 1514
1519 if (h == NULL) { 1515 if (h == NULL) {
1520 err = -ENOENT; 1516 err = -ENOENT;
1521 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1517 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1522 struct nf_conn *ct;
1523 enum ip_conntrack_events events; 1518 enum ip_conntrack_events events;
1524 1519
1525 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, 1520 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1526 &rtuple, u3); 1521 &rtuple, u3);
1527 if (IS_ERR(ct)) { 1522 if (IS_ERR(ct))
1528 err = PTR_ERR(ct); 1523 return PTR_ERR(ct);
1529 goto out_unlock; 1524
1530 }
1531 err = 0; 1525 err = 0;
1532 nf_conntrack_get(&ct->ct_general);
1533 spin_unlock_bh(&nf_conntrack_lock);
1534 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 1526 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1535 events = IPCT_RELATED; 1527 events = IPCT_RELATED;
1536 else 1528 else
@@ -1545,23 +1537,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1545 ct, NETLINK_CB(skb).pid, 1537 ct, NETLINK_CB(skb).pid,
1546 nlmsg_report(nlh)); 1538 nlmsg_report(nlh));
1547 nf_ct_put(ct); 1539 nf_ct_put(ct);
1548 } else 1540 }
1549 spin_unlock_bh(&nf_conntrack_lock);
1550 1541
1551 return err; 1542 return err;
1552 } 1543 }
1553 /* implicit 'else' */ 1544 /* implicit 'else' */
1554 1545
1555 /* We manipulate the conntrack inside the global conntrack table lock,
1556 * so there's no need to increase the refcount */
1557 err = -EEXIST; 1546 err = -EEXIST;
1547 ct = nf_ct_tuplehash_to_ctrack(h);
1558 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1548 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1559 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 1549 spin_lock_bh(&nf_conntrack_lock);
1560
1561 err = ctnetlink_change_conntrack(ct, cda); 1550 err = ctnetlink_change_conntrack(ct, cda);
1551 spin_unlock_bh(&nf_conntrack_lock);
1562 if (err == 0) { 1552 if (err == 0) {
1563 nf_conntrack_get(&ct->ct_general);
1564 spin_unlock_bh(&nf_conntrack_lock);
1565 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1553 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1566 (1 << IPCT_ASSURED) | 1554 (1 << IPCT_ASSURED) |
1567 (1 << IPCT_HELPER) | 1555 (1 << IPCT_HELPER) |
@@ -1570,15 +1558,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1570 (1 << IPCT_MARK), 1558 (1 << IPCT_MARK),
1571 ct, NETLINK_CB(skb).pid, 1559 ct, NETLINK_CB(skb).pid,
1572 nlmsg_report(nlh)); 1560 nlmsg_report(nlh));
1573 nf_ct_put(ct); 1561 }
1574 } else
1575 spin_unlock_bh(&nf_conntrack_lock);
1576
1577 return err;
1578 } 1562 }
1579 1563
1580out_unlock: 1564 nf_ct_put(ct);
1581 spin_unlock_bh(&nf_conntrack_lock);
1582 return err; 1565 return err;
1583} 1566}
1584 1567
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8d..ce60cf0f6c11 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -203,6 +203,27 @@ err:
203 return status; 203 return status;
204} 204}
205 205
206#ifdef CONFIG_BRIDGE_NETFILTER
207/* When called from bridge netfilter, skb->data must point to MAC header
208 * before calling skb_gso_segment(). Else, original MAC header is lost
209 * and segmented skbs will be sent to wrong destination.
210 */
211static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
212{
213 if (skb->nf_bridge)
214 __skb_push(skb, skb->network_header - skb->mac_header);
215}
216
217static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
218{
219 if (skb->nf_bridge)
220 __skb_pull(skb, skb->network_header - skb->mac_header);
221}
222#else
223#define nf_bridge_adjust_skb_data(s) do {} while (0)
224#define nf_bridge_adjust_segmented_data(s) do {} while (0)
225#endif
226
206int nf_queue(struct sk_buff *skb, 227int nf_queue(struct sk_buff *skb,
207 struct list_head *elem, 228 struct list_head *elem,
208 u_int8_t pf, unsigned int hook, 229 u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
212 unsigned int queuenum) 233 unsigned int queuenum)
213{ 234{
214 struct sk_buff *segs; 235 struct sk_buff *segs;
215 int err; 236 int err = -EINVAL;
216 unsigned int queued; 237 unsigned int queued;
217 238
218 if (!skb_is_gso(skb)) 239 if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
228 break; 249 break;
229 } 250 }
230 251
252 nf_bridge_adjust_skb_data(skb);
231 segs = skb_gso_segment(skb, 0); 253 segs = skb_gso_segment(skb, 0);
232 /* Does not use PTR_ERR to limit the number of error codes that can be 254 /* Does not use PTR_ERR to limit the number of error codes that can be
233 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean 255 * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
234 * 'ignore this hook'. 256 * 'ignore this hook'.
235 */ 257 */
236 if (IS_ERR(segs)) 258 if (IS_ERR(segs))
237 return -EINVAL; 259 goto out_err;
238
239 queued = 0; 260 queued = 0;
240 err = 0; 261 err = 0;
241 do { 262 do {
242 struct sk_buff *nskb = segs->next; 263 struct sk_buff *nskb = segs->next;
243 264
244 segs->next = NULL; 265 segs->next = NULL;
245 if (err == 0) 266 if (err == 0) {
267 nf_bridge_adjust_segmented_data(segs);
246 err = __nf_queue(segs, elem, pf, hook, indev, 268 err = __nf_queue(segs, elem, pf, hook, indev,
247 outdev, okfn, queuenum); 269 outdev, okfn, queuenum);
270 }
248 if (err == 0) 271 if (err == 0)
249 queued++; 272 queued++;
250 else 273 else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
 
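
After the rework the original GSO skb is freed only once at least one segment has been queued; on total failure control reaches out_err, the data pointer is restored, and the caller keeps ownership of the untouched skb. The ownership rule in isolation, with a hypothetical queue_one() standing in for __nf_queue():

static int queue_segments(struct sk_buff *skb, struct sk_buff *segs)
{
	unsigned int queued = 0;
	int err = 0;

	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = queue_one(segs);	/* consumes segs on success */
		if (err)
			kfree_skb(segs);	/* failed segment is ours to free */
		else
			queued++;
		segs = next;
	}

	if (queued) {
		kfree_skb(skb);			/* some data went out: drop the original */
		return 0;
	}
	return err;				/* nothing queued: caller still owns skb */
}
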
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3aae66facf9f..4d5057902839 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,9 +152,10 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL)
+	if (dst->error) {
+		dst_release(dst);
 		return false;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev = dst->dev;
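
The rewritten check matches ip6_route_output()'s contract: it never returns NULL, so failure is visible only through dst->error, and even an error dst holds a reference that must be dropped. The old NULL test could never fire, which both leaked that reference and let a dead route be attached to the skb. The pattern on its own:

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {		/* NULL is never returned here */
		dst_release(dst);	/* error dsts are refcounted too */
		return false;
	}
	skb_dst_set(skb, dst);		/* transfers the reference to the skb */
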
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2725d1bdf291..48badffaafc1 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -145,9 +145,16 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 		inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 					 *addr, new_addr, 1);
 	} else if (nh->protocol == IPPROTO_UDP) {
-		if (likely(transport_len >= sizeof(struct udphdr)))
-			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+		if (likely(transport_len >= sizeof(struct udphdr))) {
+			struct udphdr *uh = udp_hdr(skb);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				inet_proto_csum_replace4(&uh->check, skb,
+							 *addr, new_addr, 1);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
 	}
 
 	csum_replace4(&nh->check, *addr, new_addr);
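
The added UDP branch encodes the RFC 768 rule that a transmitted checksum of zero means "no checksum": a datagram that never carried a checksum is left alone, and a rewritten checksum that happens to fold to zero is sent as CSUM_MANGLED_0 (0xffff) instead. Reduced to the bare rule, with old_addr/new_addr as placeholder values:

	if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&uh->check, skb, old_addr, new_addr, 1);
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;	/* 0 is reserved for "none" */
	}
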
@@ -197,8 +204,22 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
 	skb->rxhash = 0;
 }
 
-static int set_udp_port(struct sk_buff *skb,
-			const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+		set_tp_port(skb, port, new_port, &uh->check);
+
+		if (!uh->check)
+			uh->check = CSUM_MANGLED_0;
+	} else {
+		*port = new_port;
+		skb->rxhash = 0;
+	}
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
 {
 	struct udphdr *uh;
 	int err;
@@ -210,16 +231,15 @@ static int set_udp_port(struct sk_buff *skb,
 
 	uh = udp_hdr(skb);
 	if (udp_port_key->udp_src != uh->source)
-		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
 
 	if (udp_port_key->udp_dst != uh->dest)
-		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
 
 	return 0;
 }
 
-static int set_tcp_port(struct sk_buff *skb,
-			const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 {
 	struct tcphdr *th;
 	int err;
@@ -328,11 +348,11 @@ static int execute_set_action(struct sk_buff *skb,
 		break;
 
 	case OVS_KEY_ATTR_TCP:
-		err = set_tcp_port(skb, nla_data(nested_attr));
+		err = set_tcp(skb, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_UDP:
-		err = set_udp_port(skb, nla_data(nested_attr));
+		err = set_udp(skb, nla_data(nested_attr));
 		break;
 	}
 
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ce64c18b8c79..2c030505b335 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1521,6 +1521,9 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
 		vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
 		if (!vport)
 			return ERR_PTR(-ENODEV);
+		if (ovs_header->dp_ifindex &&
+		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+			return ERR_PTR(-ENODEV);
 		return vport;
 	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
 		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ffd..ae3a035f5390 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	if (toklen <= (n_parts + 1) * 4)
 		return -EINVAL;
 
-	princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
+	princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
 	if (!princ->name_parts)
 		return -ENOMEM;
 
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
 
 	_debug("n_elem %d", n_elem);
 
-	td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
+	td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
 		     GFP_KERNEL);
 	if (!td)
 		return -ENOMEM;
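
Both hunks are argument-order fixes: kcalloc() takes the element count first and the element size second, mirroring userspace calloc(), and static checkers flag a sizeof() in the count slot. Correct usage for reference:

	/* n_elem zeroed elements of sizeof(struct krb5_tagged_data) each */
	td = kcalloc(n_elem, sizeof(struct krb5_tagged_data), GFP_KERNEL);
	if (!td)
		return -ENOMEM;
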
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a3..7e267d7b9c75 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
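
This conversion, repeated below for sch_netem, sch_sfb and sch_sfq, replaces each qdisc's open-coded BUILD_BUG_ON with the qdisc_cb_private_validate() helper from include/net/sch_generic.h. Presumably the helper centralizes the same compile-time size check; a sketch of what such a wrapper could look like (not necessarily the exact upstream definition):

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	/* the qdisc's private cb area must fit behind struct qdisc_skb_cb */
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct qdisc_skb_cb) + sz);
}
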
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2776012132ea..5da548fa7ae9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
@@ -502,9 +501,8 @@ tfifo_dequeue:
 
 		/* if more time remaining? */
 		if (cb->time_to_send <= psched_get_time()) {
-			skb = qdisc_dequeue_tail(sch);
-			if (unlikely(!skb))
-				goto qdisc_dequeue;
+			__skb_unlink(skb, &sch->q);
+			sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 #ifdef CONFIG_NET_CLS_ACT
 			/*
@@ -540,7 +538,6 @@ deliver:
 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
-qdisc_dequeue:
 	if (q->qdisc) {
 		skb = q->qdisc->ops->dequeue(q->qdisc);
 		if (skb)
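
The tfifo change works because the skb whose cb->time_to_send is tested was already peeked from the head of sch->q; re-fetching a packet with qdisc_dequeue_tail() took it from the tail rather than unlinking the one just examined. Removing the peeked skb directly and debiting its length from the backlog is all that is needed. The peek-then-unlink shape, with a hypothetical ready_to_send() predicate:

	struct sk_buff *skb = skb_peek(&sch->q);

	if (skb && ready_to_send(skb)) {
		__skb_unlink(skb, &sch->q);		   /* remove exactly the peeked skb */
		sch->qstats.backlog -= qdisc_pkt_len(skb); /* keep byte accounting honest */
		return skb;
	}
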
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7a..d7eea99333e9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9acf..60d47180f043 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
 
 static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
-	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
+	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 1426ec3d0a53..75762f346975 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -92,6 +92,7 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 	if (gcred->acred.group_info != NULL)
 		get_group_info(gcred->acred.group_info);
 	gcred->acred.machine_cred = acred->machine_cred;
+	gcred->acred.principal = acred->principal;
 
 	dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
 		gcred->acred.machine_cred ? "machine" : "generic",
@@ -123,6 +124,17 @@ generic_destroy_cred(struct rpc_cred *cred)
 	call_rcu(&cred->cr_rcu, generic_free_cred_callback);
 }
 
+static int
+machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags)
+{
+	if (!gcred->acred.machine_cred ||
+	    gcred->acred.principal != acred->principal ||
+	    gcred->acred.uid != acred->uid ||
+	    gcred->acred.gid != acred->gid)
+		return 0;
+	return 1;
+}
+
 /*
  * Match credentials against current process creds.
  */
@@ -132,9 +144,12 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
 	struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
 	int i;
 
+	if (acred->machine_cred)
+		return machine_cred_match(acred, gcred, flags);
+
 	if (gcred->acred.uid != acred->uid ||
 	    gcred->acred.gid != acred->gid ||
-	    gcred->acred.machine_cred != acred->machine_cred)
+	    gcred->acred.machine_cred != 0)
 		goto out_nomatch;
 
 	/* Optimisation in the case where pointers are identical... */
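
Taken together, the two hunks make machine credentials match only other machine credentials (same principal, uid and gid), while the generic path now refuses anything flagged as a machine credential, so the two kinds can no longer alias each other. The combined predicate, condensed for illustration (the real generic path also compares the group list):

static int creds_match(const struct auth_cred *a, const struct generic_cred *g)
{
	if (a->machine_cred)			/* machine matches machine only */
		return g->acred.machine_cred &&
		       g->acred.principal == a->principal &&
		       g->acred.uid == a->uid &&
		       g->acred.gid == a->gid;

	return !g->acred.machine_cred &&	/* generic never matches machine */
	       g->acred.uid == a->uid &&
	       g->acred.gid == a->gid;
}
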
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aad8fb699989..85d3bb7490aa 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1918,7 +1918,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		struct sk_buff *skb;
 
 		unix_state_lock(sk);
-		skb = skb_dequeue(&sk->sk_receive_queue);
+		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb == NULL) {
 			unix_sk(sk)->recursion_level = 0;
 			if (copied >= target)
@@ -1958,11 +1958,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (check_creds) {
 			/* Never glue messages from different writers */
 			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
-			    (UNIXCB(skb).cred != siocb->scm->cred)) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			    (UNIXCB(skb).cred != siocb->scm->cred))
 				break;
-			}
 		} else {
 			/* Copy credentials */
 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
@@ -1977,8 +1974,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		chunk = min_t(unsigned int, skb->len, size);
 		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			if (copied == 0)
 				copied = -EFAULT;
 			break;
@@ -1993,13 +1988,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				unix_detach_fds(siocb->scm, skb);
 
-			/* put the skb back if we didn't use it up.. */
-			if (skb->len) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			if (skb->len)
 				break;
-			}
 
+			skb_unlink(skb, &sk->sk_receive_queue);
 			consume_skb(skb);
 
 			if (siocb->scm->fp)
@@ -2010,9 +2002,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
-			/* put message back and return */
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			break;
 		}
 	} while (size);
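
All five af_unix hunks implement one idea: take the skb with skb_peek() and only skb_unlink() it once it has been fully consumed, so every early exit can simply break without requeueing the skb or re-raising sk_data_ready(). The resulting loop skeleton, with a hypothetical copy_some() in place of the iovec copying and error handling elided:

	for (;;) {
		unix_state_lock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		unix_state_unlock(sk);
		if (!skb)
			break;			/* nothing queued, nothing to restore */

		if (copy_some(skb) < 0)
			break;			/* skb stays at the queue head */

		if (skb->len)
			break;			/* partially read: leave it queued */

		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);		/* fully drained */
	}
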