author     H. Peter Anvin <hpa@linux.intel.com>   2013-02-12 18:47:45 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>   2013-02-12 18:47:45 -0500
commit     8ecba5af948cb58bf6d5eb1537c0df53cbc319c4
tree       83775a334c9e4be97b5dbf3f0a1d86854b10c4c1 /net
parent     2b9b6d8c715b23fa119261c32ad360681f4464a9
parent     836dc9e3fbbab0c30aa6e664417225f5c1fb1c39
Merge tag 'v3.8-rc7' into x86/asm
Merge in the updates to head_32.S from the previous urgent branch, as upcoming patches will make further changes.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 19
-rw-r--r--  net/bluetooth/hci_conn.c | 6
-rw-r--r--  net/bluetooth/hci_core.c | 8
-rw-r--r--  net/bluetooth/hci_event.c | 2
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 11
-rw-r--r--  net/bluetooth/sco.c | 2
-rw-r--r--  net/bluetooth/smp.c | 13
-rw-r--r--  net/core/pktgen.c | 9
-rw-r--r--  net/core/request_sock.c | 2
-rw-r--r--  net/core/scm.c | 5
-rw-r--r--  net/core/skbuff.c | 46
-rw-r--r--  net/ipv4/ah4.c | 18
-rw-r--r--  net/ipv4/datagram.c | 25
-rw-r--r--  net/ipv4/esp4.c | 12
-rw-r--r--  net/ipv4/ip_gre.c | 6
-rw-r--r--  net/ipv4/ipcomp.c | 7
-rw-r--r--  net/ipv4/ping.c | 1
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv4/route.c | 54
-rw-r--r--  net/ipv4/tcp_cong.c | 14
-rw-r--r--  net/ipv4/tcp_input.c | 8
-rw-r--r--  net/ipv4/tcp_ipv4.c | 15
-rw-r--r--  net/ipv4/udp.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 1
-rw-r--r--  net/ipv6/ah6.c | 11
-rw-r--r--  net/ipv6/datagram.c | 16
-rw-r--r--  net/ipv6/esp6.c | 5
-rw-r--r--  net/ipv6/icmp.c | 12
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 4
-rw-r--r--  net/ipv6/ip6_gre.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 3
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 6
-rw-r--r--  net/ipv6/raw.c | 6
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 6
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/l2tp/l2tp_core.c | 82
-rw-r--r--  net/l2tp/l2tp_core.h | 5
-rw-r--r--  net/l2tp/l2tp_ip6.c | 10
-rw-r--r--  net/l2tp/l2tp_ppp.c | 6
-rw-r--r--  net/mac80211/cfg.c | 12
-rw-r--r--  net/mac80211/ieee80211_i.h | 6
-rw-r--r--  net/mac80211/mesh_hwmp.c | 5
-rw-r--r--  net/mac80211/offchannel.c | 19
-rw-r--r--  net/mac80211/scan.c | 15
-rw-r--r--  net/mac80211/tx.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 1
-rw-r--r--  net/netfilter/x_tables.c | 28
-rw-r--r--  net/netfilter/xt_CT.c | 4
-rw-r--r--  net/openvswitch/vport-netdev.c | 16
-rw-r--r--  net/packet/af_packet.c | 10
-rw-r--r--  net/sched/sch_netem.c | 12
-rw-r--r--  net/sctp/auth.c | 2
-rw-r--r--  net/sctp/endpointola.c | 5
-rw-r--r--  net/sctp/outqueue.c | 12
-rw-r--r--  net/sctp/sm_statefuns.c | 4
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/sctp/sysctl.c | 4
-rw-r--r--  net/sunrpc/sched.c | 18
-rw-r--r--  net/sunrpc/svcsock.c | 2
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  net/xfrm/xfrm_replay.c | 4
66 files changed, 471 insertions(+), 206 deletions(-)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e1d89d2b1c1..183f97a86bb2 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
         struct arphdr *arphdr;
         struct ethhdr *ethhdr;
         __be32 ip_src, ip_dst;
+        uint8_t *hw_src, *hw_dst;
         uint16_t type = 0;
 
         /* pull the ethernet header */
@@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
         ip_src = batadv_arp_ip_src(skb, hdr_size);
         ip_dst = batadv_arp_ip_dst(skb, hdr_size);
         if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
-            ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+            ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+            ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+            ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
                 goto out;
 
+        hw_src = batadv_arp_hw_src(skb, hdr_size);
+        if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+                goto out;
+
+        /* we don't care about the destination MAC address in ARP requests */
+        if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+                hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+                if (is_zero_ether_addr(hw_dst) ||
+                    is_multicast_ether_addr(hw_dst))
+                        goto out;
+        }
+
         type = ntohs(arphdr->ar_op);
 out:
         return type;
@@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
          */
         ret = !batadv_is_my_client(bat_priv, hw_dst);
 out:
+        if (ret)
+                kfree_skb(skb);
         /* if ret == false -> packet has to be delivered to the interface */
         return ret;
 }
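For reference, a minimal standalone C sketch of the address sanity checks added above (the helpers below are illustrative re-implementations, not the kernel's inline functions):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* addr is an IPv4 address in host byte order */
static bool ipv4_addr_sane(uint32_t addr)
{
        if ((addr & 0xff000000u) == 0x7f000000u)        /* loopback 127/8 */
                return false;
        if ((addr & 0xf0000000u) == 0xe0000000u)        /* multicast 224/4 */
                return false;
        if ((addr & 0xff000000u) == 0x00000000u)        /* zeronet 0/8 */
                return false;
        if (addr == 0xffffffffu)                        /* limited broadcast */
                return false;
        return true;
}

static bool mac_addr_sane(const uint8_t mac[6])
{
        static const uint8_t zero[6];

        if (memcmp(mac, zero, 6) == 0)  /* all-zero address */
                return false;
        if (mac[0] & 0x01)              /* group (multicast/broadcast) bit */
                return false;
        return true;
}

/* An ARP packet is only worth snooping if both IP addresses and the source
 * MAC (and, for replies, the destination MAC) pass these tests. */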
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0666eb..4925a02ae7e4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
         __u8 reason = hci_proto_disconn_ind(conn);
 
         switch (conn->type) {
-        case ACL_LINK:
-                hci_acl_disconn(conn, reason);
-                break;
         case AMP_LINK:
                 hci_amp_disconn(conn, reason);
                 break;
+        default:
+                hci_acl_disconn(conn, reason);
+                break;
         }
 }
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 596660d37c5e..0f78e34220c9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2810,14 +2810,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
         if (conn) {
                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
-                hci_dev_lock(hdev);
-                if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-                    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-                        mgmt_device_connected(hdev, &conn->dst, conn->type,
-                                              conn->dst_type, 0, NULL, 0,
-                                              conn->dev_class);
-                hci_dev_unlock(hdev);
-
                 /* Send to upper protocol */
                 l2cap_recv_acldata(conn, skb, flags);
                 return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 705078a0cc39..81b44481d0d9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2688,7 +2688,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
         if (ev->opcode != HCI_OP_NOP)
                 del_timer(&hdev->cmd_timer);
 
-        if (ev->ncmd) {
+        if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
                 atomic_set(&hdev->cmd_cnt, 1);
                 if (!skb_queue_empty(&hdev->cmd_q))
                         queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b2bcbe2dc328..a7352ff3fd1e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
         hid->version = req->version;
         hid->country = req->country;
 
-        strncpy(hid->name, req->name, 128);
+        strncpy(hid->name, req->name, sizeof(req->name) - 1);
 
         snprintf(hid->phys, sizeof(hid->phys), "%pMR",
                  &bt_sk(session->ctrl_sock->sk)->src);
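The one-line change above bounds the copy by the source buffer instead of a magic 128. A small userspace sketch of the idea, with illustrative struct names (not the kernel's):

#include <string.h>

struct connadd_req { char name[128]; }; /* userspace-supplied, may lack a NUL */
struct hid_info    { char name[128]; };

static void copy_hid_name(struct hid_info *hid, const struct connadd_req *req)
{
        memset(hid->name, 0, sizeof(hid->name));
        /* copy at most sizeof(req->name) - 1 bytes: the last byte of
         * hid->name stays 0, so the result is always NUL-terminated */
        strncpy(hid->name, req->name, sizeof(req->name) - 1);
}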
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c78208d793e..22e658322845 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3727,6 +3727,17 @@ sendresp:
 static int l2cap_connect_req(struct l2cap_conn *conn,
                              struct l2cap_cmd_hdr *cmd, u8 *data)
 {
+        struct hci_dev *hdev = conn->hcon->hdev;
+        struct hci_conn *hcon = conn->hcon;
+
+        hci_dev_lock(hdev);
+        if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+            !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+                mgmt_device_connected(hdev, &hcon->dst, hcon->type,
+                                      hcon->dst_type, 0, NULL, 0,
+                                      hcon->dev_class);
+        hci_dev_unlock(hdev);
+
         l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
         return 0;
 }
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 531a93d613d4..57f250c20e39 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)
 
         case BT_CONNECTED:
         case BT_CONFIG:
-                if (sco_pi(sk)->conn) {
+                if (sco_pi(sk)->conn->hcon) {
                         sk->sk_state = BT_DISCONN;
                         sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
                         hci_conn_put(sco_pi(sk)->conn->hcon);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 68a9587c9694..5abefb12891d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
         skb_pull(skb, sizeof(code));
 
+        /*
+         * The SMP context must be initialized for all other PDUs except
+         * pairing and security requests. If we get any other PDU when
+         * not initialized simply disconnect (done if this function
+         * returns an error).
+         */
+        if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+            !conn->smp_chan) {
+                BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+                kfree_skb(skb);
+                return -ENOTSUPP;
+        }
+
         switch (code) {
         case SMP_CMD_PAIRING_REQ:
                 reason = smp_cmd_pairing_req(conn, skb);
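A minimal sketch of the guard added above: while no pairing context exists, only the two context-creating commands are accepted and anything else tears the link down. The command codes and context type here are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { CMD_PAIRING_REQ = 0x01, CMD_SECURITY_REQ = 0x0b };

struct smp_ctx;         /* opaque per-connection pairing state */

static int smp_dispatch(struct smp_ctx *ctx, uint8_t code)
{
        if (code != CMD_PAIRING_REQ && code != CMD_SECURITY_REQ && !ctx) {
                fprintf(stderr, "unexpected SMP command 0x%02x, disconnecting\n",
                        code);
                return -ENOTSUP;        /* caller drops the connection */
        }
        /* ... dispatch to the per-command handlers ... */
        return 0;
}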
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b29dacf900f9..e6e1cbe863f5 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                         return -EFAULT;
                 i += len;
                 mutex_lock(&pktgen_thread_lock);
-                pktgen_add_device(t, f);
+                ret = pktgen_add_device(t, f);
                 mutex_unlock(&pktgen_thread_lock);
-                ret = count;
-                sprintf(pg_result, "OK: add_device=%s", f);
+                if (!ret) {
+                        ret = count;
+                        sprintf(pg_result, "OK: add_device=%s", f);
+                } else
+                        sprintf(pg_result, "ERROR: can not add device %s", f);
                 goto out;
         }
 
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c31d9e8668c3..4425148d2b51 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
         struct fastopen_queue *fastopenq =
                         inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
-        BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
-
         tcp_sk(sk)->fastopen_rsk = NULL;
         spin_lock_bh(&fastopenq->lock);
         fastopenq->qlen--;
diff --git a/net/core/scm.c b/net/core/scm.c
index 57fb1ee6649f..905dcc6ad1e3 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -35,6 +35,7 @@
 #include <net/sock.h>
 #include <net/compat.h>
 #include <net/scm.h>
+#include <net/cls_cgroup.h>
 
 
 /*
@@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
                 }
                 /* Bump the usage count and install the file. */
                 sock = sock_from_file(fp[i], &err);
-                if (sock)
+                if (sock) {
                         sock_update_netprioidx(sock->sk, current);
+                        sock_update_classid(sock->sk, current);
+                }
                 fd_install(new_fd, get_file(fp[i]));
         }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..32443ebc3e89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
683 new->network_header = old->network_header; 683 new->network_header = old->network_header;
684 new->mac_header = old->mac_header; 684 new->mac_header = old->mac_header;
685 new->inner_transport_header = old->inner_transport_header; 685 new->inner_transport_header = old->inner_transport_header;
686 new->inner_network_header = old->inner_transport_header; 686 new->inner_network_header = old->inner_network_header;
687 skb_dst_copy(new, old); 687 skb_dst_copy(new, old);
688 new->rxhash = old->rxhash; 688 new->rxhash = old->rxhash;
689 new->ooo_okay = old->ooo_okay; 689 new->ooo_okay = old->ooo_okay;
@@ -1649,7 +1649,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1649 1649
1650static struct page *linear_to_page(struct page *page, unsigned int *len, 1650static struct page *linear_to_page(struct page *page, unsigned int *len,
1651 unsigned int *offset, 1651 unsigned int *offset,
1652 struct sk_buff *skb, struct sock *sk) 1652 struct sock *sk)
1653{ 1653{
1654 struct page_frag *pfrag = sk_page_frag(sk); 1654 struct page_frag *pfrag = sk_page_frag(sk);
1655 1655
@@ -1682,14 +1682,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1682static bool spd_fill_page(struct splice_pipe_desc *spd, 1682static bool spd_fill_page(struct splice_pipe_desc *spd,
1683 struct pipe_inode_info *pipe, struct page *page, 1683 struct pipe_inode_info *pipe, struct page *page,
1684 unsigned int *len, unsigned int offset, 1684 unsigned int *len, unsigned int offset,
1685 struct sk_buff *skb, bool linear, 1685 bool linear,
1686 struct sock *sk) 1686 struct sock *sk)
1687{ 1687{
1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1689 return true; 1689 return true;
1690 1690
1691 if (linear) { 1691 if (linear) {
1692 page = linear_to_page(page, len, &offset, skb, sk); 1692 page = linear_to_page(page, len, &offset, sk);
1693 if (!page) 1693 if (!page)
1694 return true; 1694 return true;
1695 } 1695 }
@@ -1706,23 +1706,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
1706 return false; 1706 return false;
1707} 1707}
1708 1708
1709static inline void __segment_seek(struct page **page, unsigned int *poff,
1710 unsigned int *plen, unsigned int off)
1711{
1712 unsigned long n;
1713
1714 *poff += off;
1715 n = *poff / PAGE_SIZE;
1716 if (n)
1717 *page = nth_page(*page, n);
1718
1719 *poff = *poff % PAGE_SIZE;
1720 *plen -= off;
1721}
1722
1723static bool __splice_segment(struct page *page, unsigned int poff, 1709static bool __splice_segment(struct page *page, unsigned int poff,
1724 unsigned int plen, unsigned int *off, 1710 unsigned int plen, unsigned int *off,
1725 unsigned int *len, struct sk_buff *skb, 1711 unsigned int *len,
1726 struct splice_pipe_desc *spd, bool linear, 1712 struct splice_pipe_desc *spd, bool linear,
1727 struct sock *sk, 1713 struct sock *sk,
1728 struct pipe_inode_info *pipe) 1714 struct pipe_inode_info *pipe)
@@ -1737,23 +1723,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
1737 } 1723 }
1738 1724
1739 /* ignore any bits we already processed */ 1725 /* ignore any bits we already processed */
1740 if (*off) { 1726 poff += *off;
1741 __segment_seek(&page, &poff, &plen, *off); 1727 plen -= *off;
1742 *off = 0; 1728 *off = 0;
1743 }
1744 1729
1745 do { 1730 do {
1746 unsigned int flen = min(*len, plen); 1731 unsigned int flen = min(*len, plen);
1747 1732
1748 /* the linear region may spread across several pages */ 1733 if (spd_fill_page(spd, pipe, page, &flen, poff,
1749 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1734 linear, sk))
1750
1751 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1752 return true; 1735 return true;
1753 1736 poff += flen;
1754 __segment_seek(&page, &poff, &plen, flen); 1737 plen -= flen;
1755 *len -= flen; 1738 *len -= flen;
1756
1757 } while (*len && plen); 1739 } while (*len && plen);
1758 1740
1759 return false; 1741 return false;
@@ -1777,7 +1759,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1777 if (__splice_segment(virt_to_page(skb->data), 1759 if (__splice_segment(virt_to_page(skb->data),
1778 (unsigned long) skb->data & (PAGE_SIZE - 1), 1760 (unsigned long) skb->data & (PAGE_SIZE - 1),
1779 skb_headlen(skb), 1761 skb_headlen(skb),
1780 offset, len, skb, spd, 1762 offset, len, spd,
1781 skb_head_is_locked(skb), 1763 skb_head_is_locked(skb),
1782 sk, pipe)) 1764 sk, pipe))
1783 return true; 1765 return true;
@@ -1790,7 +1772,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1790 1772
1791 if (__splice_segment(skb_frag_page(f), 1773 if (__splice_segment(skb_frag_page(f),
1792 f->page_offset, skb_frag_size(f), 1774 f->page_offset, skb_frag_size(f),
1793 offset, len, skb, spd, false, sk, pipe)) 1775 offset, len, spd, false, sk, pipe))
1794 return true; 1776 return true;
1795 } 1777 }
1796 1778
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a0d8392491c3..a69b4e4a02b5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
269 skb->network_header += ah_hlen; 269 skb->network_header += ah_hlen;
270 memcpy(skb_network_header(skb), work_iph, ihl); 270 memcpy(skb_network_header(skb), work_iph, ihl);
271 __skb_pull(skb, ah_hlen + ihl); 271 __skb_pull(skb, ah_hlen + ihl);
272 skb_set_transport_header(skb, -ihl); 272
273 if (x->props.mode == XFRM_MODE_TUNNEL)
274 skb_reset_transport_header(skb);
275 else
276 skb_set_transport_header(skb, -ihl);
273out: 277out:
274 kfree(AH_SKB_CB(skb)->tmp); 278 kfree(AH_SKB_CB(skb)->tmp);
275 xfrm_input_resume(skb, err); 279 xfrm_input_resume(skb, err);
@@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
381 skb->network_header += ah_hlen; 385 skb->network_header += ah_hlen;
382 memcpy(skb_network_header(skb), work_iph, ihl); 386 memcpy(skb_network_header(skb), work_iph, ihl);
383 __skb_pull(skb, ah_hlen + ihl); 387 __skb_pull(skb, ah_hlen + ihl);
384 skb_set_transport_header(skb, -ihl); 388 if (x->props.mode == XFRM_MODE_TUNNEL)
389 skb_reset_transport_header(skb);
390 else
391 skb_set_transport_header(skb, -ihl);
385 392
386 err = nexthdr; 393 err = nexthdr;
387 394
@@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
413 if (!x) 420 if (!x)
414 return; 421 return;
415 422
416 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 423 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
424 atomic_inc(&flow_cache_genid);
425 rt_genid_bump(net);
426
417 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); 427 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
418 else 428 } else
419 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); 429 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
420 xfrm_state_put(x); 430 xfrm_state_put(x);
421} 431}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 424fafbc8cb0..b28e863fe0a7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -85,3 +85,28 @@ out:
85 return err; 85 return err;
86} 86}
87EXPORT_SYMBOL(ip4_datagram_connect); 87EXPORT_SYMBOL(ip4_datagram_connect);
88
89void ip4_datagram_release_cb(struct sock *sk)
90{
91 const struct inet_sock *inet = inet_sk(sk);
92 const struct ip_options_rcu *inet_opt;
93 __be32 daddr = inet->inet_daddr;
94 struct flowi4 fl4;
95 struct rtable *rt;
96
97 if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
98 return;
99
100 rcu_read_lock();
101 inet_opt = rcu_dereference(inet->inet_opt);
102 if (inet_opt && inet_opt->opt.srr)
103 daddr = inet_opt->opt.faddr;
104 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
105 inet->inet_saddr, inet->inet_dport,
106 inet->inet_sport, sk->sk_protocol,
107 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
108 if (!IS_ERR(rt))
109 __sk_dst_set(sk, &rt->dst);
110 rcu_read_unlock();
111}
112EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b61e9deb7c7e..3b4f0cd2e63e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
346 346
347 pskb_trim(skb, skb->len - alen - padlen - 2); 347 pskb_trim(skb, skb->len - alen - padlen - 2);
348 __skb_pull(skb, hlen); 348 __skb_pull(skb, hlen);
349 skb_set_transport_header(skb, -ihl); 349 if (x->props.mode == XFRM_MODE_TUNNEL)
350 skb_reset_transport_header(skb);
351 else
352 skb_set_transport_header(skb, -ihl);
350 353
351 err = nexthdr[1]; 354 err = nexthdr[1];
352 355
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
499 if (!x) 502 if (!x)
500 return; 503 return;
501 504
502 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 505 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
506 atomic_inc(&flow_cache_genid);
507 rt_genid_bump(net);
508
503 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); 509 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
504 else 510 } else
505 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); 511 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
506 xfrm_state_put(x); 512 xfrm_state_put(x);
507} 513}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 303012adf9e6..e81b1caf2ea2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -963,8 +963,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                         ptr--;
                 }
                 if (tunnel->parms.o_flags&GRE_CSUM) {
+                        int offset = skb_transport_offset(skb);
+
                         *ptr = 0;
-                        *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+                        *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+                                                                 skb->len - offset,
+                                                                 0));
                 }
         }
 
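The GRE change above switches to skb_checksum(), which also walks paged (non-linear) skb data that the old ip_compute_csum() call over the linear header missed. For reference, the value being computed is the standard RFC 1071 Internet checksum; a standalone C version:

#include <stddef.h>
#include <stdint.h>

static uint16_t inet_checksum(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t sum = 0;

        while (len > 1) {               /* sum 16-bit words, big-endian */
                sum += ((uint32_t)p[0] << 8) | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}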
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d3ab47e19a89..9a46daed2f3c 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47 if (!x) 47 if (!x)
48 return; 48 return;
49 49
50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
51 atomic_inc(&flow_cache_genid);
52 rt_genid_bump(net);
53
51 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); 54 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
52 else 55 } else
53 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0); 56 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
54 xfrm_state_put(x); 57 xfrm_state_put(x);
55} 58}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8f3d05424a3e..6f9c07268cf6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -738,6 +738,7 @@ struct proto ping_prot = {
         .recvmsg = ping_recvmsg,
         .bind = ping_bind,
         .backlog_rcv = ping_queue_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = ping_v4_hash,
         .unhash = ping_v4_unhash,
         .get_port = ping_v4_get_port,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 73d1e4df4bf6..6f08991409c3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -894,6 +894,7 @@ struct proto raw_prot = {
         .recvmsg = raw_recvmsg,
         .bind = raw_bind,
         .backlog_rcv = raw_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = raw_hash_sk,
         .unhash = raw_unhash_sk,
         .obj_size = sizeof(struct raw_sock),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 844a9ef60dbd..a0fcc47fee73 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
912 struct dst_entry *dst = &rt->dst; 912 struct dst_entry *dst = &rt->dst;
913 struct fib_result res; 913 struct fib_result res;
914 914
915 if (dst_metric_locked(dst, RTAX_MTU))
916 return;
917
915 if (dst->dev->mtu < mtu) 918 if (dst->dev->mtu < mtu)
916 return; 919 return;
917 920
@@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
962} 965}
963EXPORT_SYMBOL_GPL(ipv4_update_pmtu); 966EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
964 967
965void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 968static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
966{ 969{
967 const struct iphdr *iph = (const struct iphdr *) skb->data; 970 const struct iphdr *iph = (const struct iphdr *) skb->data;
968 struct flowi4 fl4; 971 struct flowi4 fl4;
@@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
975 ip_rt_put(rt); 978 ip_rt_put(rt);
976 } 979 }
977} 980}
981
982void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
983{
984 const struct iphdr *iph = (const struct iphdr *) skb->data;
985 struct flowi4 fl4;
986 struct rtable *rt;
987 struct dst_entry *dst;
988 bool new = false;
989
990 bh_lock_sock(sk);
991 rt = (struct rtable *) __sk_dst_get(sk);
992
993 if (sock_owned_by_user(sk) || !rt) {
994 __ipv4_sk_update_pmtu(skb, sk, mtu);
995 goto out;
996 }
997
998 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
999
1000 if (!__sk_dst_check(sk, 0)) {
1001 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1002 if (IS_ERR(rt))
1003 goto out;
1004
1005 new = true;
1006 }
1007
1008 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1009
1010 dst = dst_check(&rt->dst, 0);
1011 if (!dst) {
1012 if (new)
1013 dst_release(&rt->dst);
1014
1015 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1016 if (IS_ERR(rt))
1017 goto out;
1018
1019 new = true;
1020 }
1021
1022 if (new)
1023 __sk_dst_set(sk, &rt->dst);
1024
1025out:
1026 bh_unlock_sock(sk);
1027}
978EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1028EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
979 1029
980void ipv4_redirect(struct sk_buff *skb, struct net *net, 1030void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1120 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) 1170 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1121 mtu = dst_metric_raw(dst, RTAX_MTU); 1171 mtu = dst_metric_raw(dst, RTAX_MTU);
1122 1172
1123 if (mtu && rt_is_output_route(rt)) 1173 if (mtu)
1124 return mtu; 1174 return mtu;
1125 1175
1126 mtu = dst->dev->mtu; 1176 mtu = dst->dev->mtu;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..cdf2e707bb10 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
         int cnt; /* increase in packets */
         unsigned int delta = 0;
+        u32 snd_cwnd = tp->snd_cwnd;
+
+        if (unlikely(!snd_cwnd)) {
+                pr_err_once("snd_cwnd is nul, please report this bug.\n");
+                snd_cwnd = 1U;
+        }
 
         /* RFC3465: ABC Slow start
          * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
         if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                 cnt = sysctl_tcp_max_ssthresh >> 1;  /* limited slow start */
         else
-                cnt = tp->snd_cwnd;  /* exponential increase */
+                cnt = snd_cwnd;      /* exponential increase */
 
         /* RFC3465: ABC
          * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
                 tp->bytes_acked = 0;
 
         tp->snd_cwnd_cnt += cnt;
-        while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                tp->snd_cwnd_cnt -= tp->snd_cwnd;
+        while (tp->snd_cwnd_cnt >= snd_cwnd) {
+                tp->snd_cwnd_cnt -= snd_cwnd;
                 delta++;
         }
-        tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+        tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
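A compact userspace sketch of the accounting above, showing why the new guard matters: with snd_cwnd == 0 the while loop below would subtract 0 forever. Field names mirror the kernel's, but the struct itself is illustrative:

#include <stdint.h>
#include <stdio.h>

struct tcp_state {
        uint32_t snd_cwnd;       /* congestion window, in packets */
        uint32_t snd_cwnd_cnt;   /* packets acked since the last growth */
        uint32_t snd_cwnd_clamp; /* upper bound on snd_cwnd */
};

static void slow_start(struct tcp_state *tp)
{
        uint32_t snd_cwnd = tp->snd_cwnd;
        uint32_t delta = 0;

        if (snd_cwnd == 0) {    /* the guard added by the patch */
                fprintf(stderr, "snd_cwnd is 0, clamping to 1\n");
                snd_cwnd = 1;
        }

        tp->snd_cwnd_cnt += snd_cwnd;   /* "exponential increase" branch */
        while (tp->snd_cwnd_cnt >= snd_cwnd) {
                tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
        tp->snd_cwnd = snd_cwnd + delta;
        if (tp->snd_cwnd > tp->snd_cwnd_clamp)
                tp->snd_cwnd = tp->snd_cwnd_clamp;
}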
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18f97ca76b00..ad70a962c20e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3504 } 3504 }
3505 } else { 3505 } else {
3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3507 if (!tcp_packets_in_flight(tp)) {
3508 tcp_enter_frto_loss(sk, 2, flag);
3509 return true;
3510 }
3511
3507 /* Prevent sending of new data. */ 3512 /* Prevent sending of new data. */
3508 tp->snd_cwnd = min(tp->snd_cwnd, 3513 tp->snd_cwnd = min(tp->snd_cwnd,
3509 tcp_packets_in_flight(tp)); 3514 tcp_packets_in_flight(tp));
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5649 * the remote receives only the retransmitted (regular) SYNs: either 5654 * the remote receives only the retransmitted (regular) SYNs: either
5650 * the original SYN-data or the corresponding SYN-ACK is lost. 5655 * the original SYN-data or the corresponding SYN-ACK is lost.
5651 */ 5656 */
5652 syn_drop = (cookie->len <= 0 && data && 5657 syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
5653 inet_csk(sk)->icsk_retransmits);
5654 5658
5655 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5659 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5656 5660
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 54139fa514e6..eadb693eef55 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
369 * We do take care of PMTU discovery (RFC1191) special case : 369 * We do take care of PMTU discovery (RFC1191) special case :
370 * we can receive locally generated ICMP messages while socket is held. 370 * we can receive locally generated ICMP messages while socket is held.
371 */ 371 */
372 if (sock_owned_by_user(sk) && 372 if (sock_owned_by_user(sk)) {
373 type != ICMP_DEST_UNREACH && 373 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374 code != ICMP_FRAG_NEEDED) 374 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 375 }
376
377 if (sk->sk_state == TCP_CLOSE) 376 if (sk->sk_state == TCP_CLOSE)
378 goto out; 377 goto out;
379 378
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
497 * errors returned from accept(). 496 * errors returned from accept().
498 */ 497 */
499 inet_csk_reqsk_queue_drop(sk, req, prev); 498 inet_csk_reqsk_queue_drop(sk, req, prev);
499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
500 goto out; 500 goto out;
501 501
502 case TCP_SYN_SENT: 502 case TCP_SYN_SENT:
@@ -1501,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1501 * clogging syn queue with openreqs with exponentially increasing 1501 * clogging syn queue with openreqs with exponentially increasing
1502 * timeout. 1502 * timeout.
1503 */ 1503 */
1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1505 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1505 goto drop; 1506 goto drop;
1507 }
1506 1508
1507 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1509 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1508 if (!req) 1510 if (!req)
@@ -1667,6 +1669,7 @@ drop_and_release:
1667drop_and_free: 1669drop_and_free:
1668 reqsk_free(req); 1670 reqsk_free(req);
1669drop: 1671drop:
1672 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1670 return 0; 1673 return 0;
1671} 1674}
1672EXPORT_SYMBOL(tcp_v4_conn_request); 1675EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 79c8dbe59b54..1f4d405eafba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1952,6 +1952,7 @@ struct proto udp_prot = {
         .recvmsg = udp_recvmsg,
         .sendpage = udp_sendpage,
         .backlog_rcv = __udp_queue_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = udp_lib_hash,
         .unhash = udp_lib_unhash,
         .rehash = udp_v4_rehash,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 420e56326384..1b5d8cb9b123 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
         if (dev->addr_len != IEEE802154_ADDR_LEN)
                 return -1;
         memcpy(eui, dev->dev_addr, 8);
+        eui[0] ^= 2;
         return 0;
 }
 
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ecc35b93314b..384233188ac1 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
472 skb->network_header += ah_hlen; 472 skb->network_header += ah_hlen;
473 memcpy(skb_network_header(skb), work_iph, hdr_len); 473 memcpy(skb_network_header(skb), work_iph, hdr_len);
474 __skb_pull(skb, ah_hlen + hdr_len); 474 __skb_pull(skb, ah_hlen + hdr_len);
475 skb_set_transport_header(skb, -hdr_len); 475 if (x->props.mode == XFRM_MODE_TUNNEL)
476 skb_reset_transport_header(skb);
477 else
478 skb_set_transport_header(skb, -hdr_len);
476out: 479out:
477 kfree(AH_SKB_CB(skb)->tmp); 480 kfree(AH_SKB_CB(skb)->tmp);
478 xfrm_input_resume(skb, err); 481 xfrm_input_resume(skb, err);
@@ -593,9 +596,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
593 596
594 skb->network_header += ah_hlen; 597 skb->network_header += ah_hlen;
595 memcpy(skb_network_header(skb), work_iph, hdr_len); 598 memcpy(skb_network_header(skb), work_iph, hdr_len);
596 skb->transport_header = skb->network_header;
597 __skb_pull(skb, ah_hlen + hdr_len); 599 __skb_pull(skb, ah_hlen + hdr_len);
598 600
601 if (x->props.mode == XFRM_MODE_TUNNEL)
602 skb_reset_transport_header(skb);
603 else
604 skb_set_transport_header(skb, -hdr_len);
605
599 err = nexthdr; 606 err = nexthdr;
600 607
601out_free: 608out_free:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8edf2601065a..7a778b9a7b85 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
380 if (skb->protocol == htons(ETH_P_IPV6)) { 380 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 381 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 382 if (np->rxopt.all)
383 datagram_recv_ctl(sk, msg, skb); 383 ip6_datagram_recv_ctl(sk, msg, skb);
384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
385 sin->sin6_scope_id = IP6CB(skb)->iif; 385 sin->sin6_scope_id = IP6CB(skb)->iif;
386 } else { 386 } else {
@@ -468,7 +468,8 @@ out:
468} 468}
469 469
470 470
471int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
472 struct sk_buff *skb)
472{ 473{
473 struct ipv6_pinfo *np = inet6_sk(sk); 474 struct ipv6_pinfo *np = inet6_sk(sk);
474 struct inet6_skb_parm *opt = IP6CB(skb); 475 struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
597 } 598 }
598 return 0; 599 return 0;
599} 600}
601EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
600 602
601int datagram_send_ctl(struct net *net, struct sock *sk, 603int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
602 struct msghdr *msg, struct flowi6 *fl6, 604 struct msghdr *msg, struct flowi6 *fl6,
603 struct ipv6_txoptions *opt, 605 struct ipv6_txoptions *opt,
604 int *hlimit, int *tclass, int *dontfrag) 606 int *hlimit, int *tclass, int *dontfrag)
605{ 607{
606 struct in6_pktinfo *src_info; 608 struct in6_pktinfo *src_info;
607 struct cmsghdr *cmsg; 609 struct cmsghdr *cmsg;
@@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
871exit_f: 873exit_f:
872 return err; 874 return err;
873} 875}
874EXPORT_SYMBOL_GPL(datagram_send_ctl); 876EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 282f3723ee19..40ffd72243a4 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
300 300
301 pskb_trim(skb, skb->len - alen - padlen - 2); 301 pskb_trim(skb, skb->len - alen - padlen - 2);
302 __skb_pull(skb, hlen); 302 __skb_pull(skb, hlen);
303 skb_set_transport_header(skb, -hdr_len); 303 if (x->props.mode == XFRM_MODE_TUNNEL)
304 skb_reset_transport_header(skb);
305 else
306 skb_set_transport_header(skb, -hdr_len);
304 307
305 err = nexthdr[1]; 308 err = nexthdr[1];
306 309
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b4a9fd51dae7..fff5bdd8b680 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
81 return net->ipv6.icmp_sk[smp_processor_id()]; 81 return net->ipv6.icmp_sk[smp_processor_id()];
82} 82}
83 83
84static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
85 u8 type, u8 code, int offset, __be32 info)
86{
87 struct net *net = dev_net(skb->dev);
88
89 if (type == ICMPV6_PKT_TOOBIG)
90 ip6_update_pmtu(skb, net, info, 0, 0);
91 else if (type == NDISC_REDIRECT)
92 ip6_redirect(skb, net, 0, 0);
93}
94
84static int icmpv6_rcv(struct sk_buff *skb); 95static int icmpv6_rcv(struct sk_buff *skb);
85 96
86static const struct inet6_protocol icmpv6_protocol = { 97static const struct inet6_protocol icmpv6_protocol = {
87 .handler = icmpv6_rcv, 98 .handler = icmpv6_rcv,
99 .err_handler = icmpv6_err,
88 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 100 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
89}; 101};
90 102
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 29124b7a04c8..d6de4b447250 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
365 msg.msg_control = (void*)(fl->opt+1); 365 msg.msg_control = (void*)(fl->opt+1);
366 memset(&flowi6, 0, sizeof(flowi6)); 366 memset(&flowi6, 0, sizeof(flowi6));
367 367
368 err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 368 err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
369 &junk, &junk); 369 &junk, &junk, &junk);
370 if (err) 370 if (err)
371 goto done; 371 goto done;
372 err = -EINVAL; 372 err = -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c727e4712751..131dd097736d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
         int ret;
 
         if (!ip6_tnl_xmit_ctl(t))
-                return -1;
+                goto tx_err;
 
         switch (skb->protocol) {
         case htons(ETH_P_IP):
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5552d13ae92f..0c7c03d50dc0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1213,10 +1213,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1213 if (dst_allfrag(rt->dst.path)) 1213 if (dst_allfrag(rt->dst.path))
1214 cork->flags |= IPCORK_ALLFRAG; 1214 cork->flags |= IPCORK_ALLFRAG;
1215 cork->length = 0; 1215 cork->length = 0;
1216 exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; 1216 exthdrlen = (opt ? opt->opt_flen : 0);
1217 length += exthdrlen; 1217 length += exthdrlen;
1218 transhdrlen += exthdrlen; 1218 transhdrlen += exthdrlen;
1219 dst_exthdrlen = rt->dst.header_len; 1219 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1220 } else { 1220 } else {
1221 rt = (struct rt6_info *)cork->dst; 1221 rt = (struct rt6_info *)cork->dst;
1222 fl6 = &inet->cork.fl.u.ip6; 1222 fl6 = &inet->cork.fl.u.ip6;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 26dcdec9e3a5..8fd154e5f079 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1710,6 +1710,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
                         return -EINVAL;
                 if (get_user(v, (u32 __user *)optval))
                         return -EFAULT;
+                /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
+                if (v != RT_TABLE_DEFAULT && v >= 100000000)
+                        return -EINVAL;
                 if (sk == mrt->mroute6_sk)
                         return -EBUSY;
 
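The new bound above exists because the table id is later embedded in an interface name: "pim6reg" plus up to 8 decimal digits plus a NUL is exactly IFNAMSIZ (16) bytes. A generic way to enforce the same constraint is to check snprintf()'s return value; a small sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IFNAMSIZ 16

static bool pim6reg_name_fits(uint32_t table_id)
{
        char name[IFNAMSIZ];
        int n = snprintf(name, sizeof(name), "pim6reg%u", table_id);

        /* n >= IFNAMSIZ means the name would have been truncated */
        return n > 0 && n < (int)sizeof(name);
}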
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31c9d4d..d1e2e8ef29c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
476 msg.msg_controllen = optlen; 476 msg.msg_controllen = optlen;
477 msg.msg_control = (void*)(opt+1); 477 msg.msg_control = (void*)(opt+1);
478 478
479 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
480 &junk); 480 &junk, &junk);
481 if (retv) 481 if (retv)
482 goto done; 482 goto done;
483update: 483update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1002 release_sock(sk); 1002 release_sock(sk);
1003 1003
1004 if (skb) { 1004 if (skb) {
1005 int err = datagram_recv_ctl(sk, &msg, skb); 1005 int err = ip6_datagram_recv_ctl(sk, &msg, skb);
1006 kfree_skb(skb); 1006 kfree_skb(skb);
1007 if (err) 1007 if (err)
1008 return err; 1008 return err;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1e8b92..70fa81449997 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
507 sock_recv_ts_and_drops(msg, sk, skb); 507 sock_recv_ts_and_drops(msg, sk, skb);
508 508
509 if (np->rxopt.all) 509 if (np->rxopt.all)
510 datagram_recv_ctl(sk, msg, skb); 510 ip6_datagram_recv_ctl(sk, msg, skb);
511 511
512 err = copied; 512 err = copied;
513 if (flags & MSG_TRUNC) 513 if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 822 memset(opt, 0, sizeof(struct ipv6_txoptions));
823 opt->tot_len = sizeof(struct ipv6_txoptions); 823 opt->tot_len = sizeof(struct ipv6_txoptions);
824 824
825 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 825 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
826 &hlimit, &tclass, &dontfrag); 826 &hlimit, &tclass, &dontfrag);
827 if (err < 0) { 827 if (err < 0) {
828 fl6_sock_release(flowlabel); 828 fl6_sock_release(flowlabel);
829 return err; 829 return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e229a3bc345d..363d8b7772e8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -928,7 +928,7 @@ restart:
         dst_hold(&rt->dst);
         read_unlock_bh(&table->tb6_lock);
 
-        if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
+        if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
                 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
         else if (!(rt->dst.flags & DST_HOST))
                 nrt = rt6_alloc_clone(rt, &fl6->daddr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93825dd3a7c0..4f43537197ef 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
423 } 423 }
424 424
425 inet_csk_reqsk_queue_drop(sk, req, prev); 425 inet_csk_reqsk_queue_drop(sk, req, prev);
426 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
426 goto out; 427 goto out;
427 428
428 case TCP_SYN_SENT: 429 case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
958 goto drop; 959 goto drop;
959 } 960 }
960 961
961 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 962 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
963 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
962 goto drop; 964 goto drop;
965 }
963 966
964 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 967 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
965 if (req == NULL) 968 if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
1108drop_and_free: 1111drop_and_free:
1109 reqsk_free(req); 1112 reqsk_free(req);
1110drop: 1113drop:
1114 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1111 return 0; /* don't send reset */ 1115 return 0; /* don't send reset */
1112} 1116}
1113 1117
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dfaa29b8b293..fb083295ff0b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -443,7 +443,7 @@ try_again:
443 ip_cmsg_recv(msg, skb); 443 ip_cmsg_recv(msg, skb);
444 } else { 444 } else {
445 if (np->rxopt.all) 445 if (np->rxopt.all)
446 datagram_recv_ctl(sk, msg, skb); 446 ip6_datagram_recv_ctl(sk, msg, skb);
447 } 447 }
448 448
449 err = copied; 449 err = copied;
@@ -1153,8 +1153,8 @@ do_udp_sendmsg:
1153 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1153 memset(opt, 0, sizeof(struct ipv6_txoptions));
1154 opt->tot_len = sizeof(*opt); 1154 opt->tot_len = sizeof(*opt);
1155 1155
1156 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1156 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1157 &hlimit, &tclass, &dontfrag); 1157 &hlimit, &tclass, &dontfrag);
1158 if (err < 0) { 1158 if (err < 0) {
1159 fl6_sock_release(flowlabel); 1159 fl6_sock_release(flowlabel);
1160 return err; 1160 return err;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f3723c13c..2ac884d0e89b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
168 168
169} 169}
170 170
171/* Lookup the tunnel socket, possibly involving the fs code if the socket is
172 * owned by userspace. A struct sock returned from this function must be
173 * released using l2tp_tunnel_sock_put once you're done with it.
174 */
175struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
176{
177 int err = 0;
178 struct socket *sock = NULL;
179 struct sock *sk = NULL;
180
181 if (!tunnel)
182 goto out;
183
184 if (tunnel->fd >= 0) {
185 /* Socket is owned by userspace, who might be in the process
186 * of closing it. Look the socket up using the fd to ensure
187 * consistency.
188 */
189 sock = sockfd_lookup(tunnel->fd, &err);
190 if (sock)
191 sk = sock->sk;
192 } else {
193 /* Socket is owned by kernelspace */
194 sk = tunnel->sock;
195 }
196
197out:
198 return sk;
199}
200EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
202/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
203void l2tp_tunnel_sock_put(struct sock *sk)
204{
205 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206 if (tunnel) {
207 if (tunnel->fd >= 0) {
208 /* Socket is owned by userspace */
209 sockfd_put(sk->sk_socket);
210 }
211 sock_put(sk);
212 }
213}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215
171/* Lookup a session by id in the global session list 216/* Lookup a session by id in the global session list
172 */ 217 */
173static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) 218static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1123 struct udphdr *uh; 1168 struct udphdr *uh;
1124 struct inet_sock *inet; 1169 struct inet_sock *inet;
1125 __wsum csum; 1170 __wsum csum;
1126 int old_headroom;
1127 int new_headroom;
1128 int headroom; 1171 int headroom;
1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1172 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1130 int udp_len; 1173 int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1136 */ 1179 */
1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1180 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1138 uhlen + hdr_len; 1181 uhlen + hdr_len;
1139 old_headroom = skb_headroom(skb);
1140 if (skb_cow_head(skb, headroom)) { 1182 if (skb_cow_head(skb, headroom)) {
1141 kfree_skb(skb); 1183 kfree_skb(skb);
1142 return NET_XMIT_DROP; 1184 return NET_XMIT_DROP;
1143 } 1185 }
1144 1186
1145 new_headroom = skb_headroom(skb);
1146 skb_orphan(skb); 1187 skb_orphan(skb);
1147 skb->truesize += new_headroom - old_headroom;
1148
1149 /* Setup L2TP header */ 1188 /* Setup L2TP header */
1150 session->build_header(session, __skb_push(skb, hdr_len)); 1189 session->build_header(session, __skb_push(skb, hdr_len));
1151 1190
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1607 tunnel->old_sk_destruct = sk->sk_destruct; 1646 tunnel->old_sk_destruct = sk->sk_destruct;
1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1647 sk->sk_destruct = &l2tp_tunnel_destruct;
1609 tunnel->sock = sk; 1648 tunnel->sock = sk;
1649 tunnel->fd = fd;
1610 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1650 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1611 1651
1612 sk->sk_allocation = GFP_ATOMIC; 1652 sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1642 */ 1682 */
1643int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1683int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1644{ 1684{
1645 int err = 0; 1685 int err = -EBADF;
1646 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; 1686 struct socket *sock = NULL;
1687 struct sock *sk = NULL;
1688
1689 sk = l2tp_tunnel_sock_lookup(tunnel);
1690 if (!sk)
1691 goto out;
1692
1693 sock = sk->sk_socket;
1694 BUG_ON(!sock);
1647 1695
1648 /* Force the tunnel socket to close. This will eventually 1696 /* Force the tunnel socket to close. This will eventually
1649 * cause the tunnel to be deleted via the normal socket close 1697 * cause the tunnel to be deleted via the normal socket close
1650 * mechanisms when userspace closes the tunnel socket. 1698 * mechanisms when userspace closes the tunnel socket.
1651 */ 1699 */
1652 if (sock != NULL) { 1700 err = inet_shutdown(sock, 2);
1653 err = inet_shutdown(sock, 2);
1654 1701
1655 /* If the tunnel's socket was created by the kernel, 1702 /* If the tunnel's socket was created by the kernel,
1656 * close the socket here since the socket was not 1703 * close the socket here since the socket was not
1657 * created by userspace. 1704 * created by userspace.
1658 */ 1705 */
1659 if (sock->file == NULL) 1706 if (sock->file == NULL)
1660 err = inet_release(sock); 1707 err = inet_release(sock);
1661 }
1662 1708
1709 l2tp_tunnel_sock_put(sk);
1710out:
1663 return err; 1711 return err;
1664} 1712}
1665EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1713EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e083a7..e62204cad4fe 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
188 int (*recv_payload_hook)(struct sk_buff *skb); 188 int (*recv_payload_hook)(struct sk_buff *skb);
189 void (*old_sk_destruct)(struct sock *); 189 void (*old_sk_destruct)(struct sock *);
190 struct sock *sock; /* Parent socket */ 190 struct sock *sock; /* Parent socket */
191 int fd; 191 int fd; /* Parent fd, if tunnel socket
192 * was created by userspace */
192 193
193 uint8_t priv[0]; /* private data */ 194 uint8_t priv[0]; /* private data */
194}; 195};
@@ -228,6 +229,8 @@ out:
228 return tunnel; 229 return tunnel;
229} 230}
230 231
232extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
233extern void l2tp_tunnel_sock_put(struct sock *sk);
231extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 234extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
232extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 235extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
233extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 236extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 927547171bc7..8ee4a86ae996 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 554 memset(opt, 0, sizeof(struct ipv6_txoptions));
555 opt->tot_len = sizeof(struct ipv6_txoptions); 555 opt->tot_len = sizeof(struct ipv6_txoptions);
556 556
557 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 557 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
558 &hlimit, &tclass, &dontfrag); 558 &hlimit, &tclass, &dontfrag);
559 if (err < 0) { 559 if (err < 0) {
560 fl6_sock_release(flowlabel); 560 fl6_sock_release(flowlabel);
561 return err; 561 return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
646 struct msghdr *msg, size_t len, int noblock, 646 struct msghdr *msg, size_t len, int noblock,
647 int flags, int *addr_len) 647 int flags, int *addr_len)
648{ 648{
649 struct inet_sock *inet = inet_sk(sk); 649 struct ipv6_pinfo *np = inet6_sk(sk);
650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
651 size_t copied = 0; 651 size_t copied = 0;
652 int err = -EOPNOTSUPP; 652 int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 688 lsa->l2tp_scope_id = IP6CB(skb)->iif;
689 } 689 }
690 690
691 if (inet->cmsg_flags) 691 if (np->rxopt.all)
692 ip_cmsg_recv(msg, skb); 692 ip6_datagram_recv_ctl(sk, msg, skb);
693 693
694 if (flags & MSG_TRUNC) 694 if (flags & MSG_TRUNC)
695 copied = skb->len; 695 copied = skb->len;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 286366ef8930..716605c241f4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
388 struct l2tp_session *session; 388 struct l2tp_session *session;
389 struct l2tp_tunnel *tunnel; 389 struct l2tp_tunnel *tunnel;
390 struct pppol2tp_session *ps; 390 struct pppol2tp_session *ps;
391 int old_headroom;
392 int new_headroom;
393 int uhlen, headroom; 391 int uhlen, headroom;
394 392
395 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 393 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
408 if (tunnel == NULL) 406 if (tunnel == NULL)
409 goto abort_put_sess; 407 goto abort_put_sess;
410 408
411 old_headroom = skb_headroom(skb);
412 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 409 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
413 headroom = NET_SKB_PAD + 410 headroom = NET_SKB_PAD +
414 sizeof(struct iphdr) + /* IP header */ 411 sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
418 if (skb_cow_head(skb, headroom)) 415 if (skb_cow_head(skb, headroom))
419 goto abort_put_sess_tun; 416 goto abort_put_sess_tun;
420 417
421 new_headroom = skb_headroom(skb);
422 skb->truesize += new_headroom - old_headroom;
423
424 /* Setup PPP header */ 418 /* Setup PPP header */
425 __skb_push(skb, sizeof(ppph)); 419 __skb_push(skb, sizeof(ppph));
426 skb->data[0] = ppph[0]; 420 skb->data[0] = ppph[0];
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e0aca614b7..516fbc96feff 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
164 sta = sta_info_get(sdata, mac_addr); 164 sta = sta_info_get(sdata, mac_addr);
165 else 165 else
166 sta = sta_info_get_bss(sdata, mac_addr); 166 sta = sta_info_get_bss(sdata, mac_addr);
167 if (!sta) { 167 /*
168 * The ASSOC test makes sure the driver is ready to
169 * receive the key. When wpa_supplicant has roamed
170 * using FT, it attempts to set the key before
171 * association has completed, this rejects that attempt
172 * so it will set the key again after assocation.
173 *
174 * TODO: accept the key if we have a station entry and
175 * add it to the device after the station.
176 */
177 if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
168 ieee80211_key_free(sdata->local, key); 178 ieee80211_key_free(sdata->local, key);
169 err = -ENOENT; 179 err = -ENOENT;
170 goto out_unlock; 180 goto out_unlock;
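The comment added in the cfg.c hunk carries the reasoning: during fast-transition roaming the supplicant may try to install a key before association completes, so the handler now also requires the WLAN_STA_ASSOC flag and returns -ENOENT, trusting the supplicant to retry after association. Reduced to a pattern it is simply a state guard in front of a mutating call; a tiny sketch with made-up flag and struct names, not mac80211 code:

    #include <errno.h>
    #include <stdio.h>

    #define STA_FLAG_ASSOC  (1u << 0)   /* example flag: association completed */

    struct sta {
        unsigned int flags;
        int key;                        /* -1 = no key installed */
    };

    /* Refuse to install a key until the peer has finished associating;
     * the caller is expected to retry once it has. */
    static int install_key(struct sta *sta, int key)
    {
        if (!sta || !(sta->flags & STA_FLAG_ASSOC))
            return -ENOENT;             /* same error the hunk returns */
        sta->key = key;
        return 0;
    }

    int main(void)
    {
        struct sta s = { .flags = 0, .key = -1 };

        printf("early attempt: %d\n", install_key(&s, 42));   /* fails */
        s.flags |= STA_FLAG_ASSOC;                            /* association done */
        printf("retry: %d\n", install_key(&s, 42));           /* 0 */
        return 0;
    }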
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8563b9a5cac3..2ed065c09562 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1358,10 +1358,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
1358void ieee80211_sched_scan_stopped_work(struct work_struct *work); 1358void ieee80211_sched_scan_stopped_work(struct work_struct *work);
1359 1359
1360/* off-channel helpers */ 1360/* off-channel helpers */
1361void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 1361void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
1362 bool offchannel_ps_enable); 1362void ieee80211_offchannel_return(struct ieee80211_local *local);
1363void ieee80211_offchannel_return(struct ieee80211_local *local,
1364 bool offchannel_ps_disable);
1365void ieee80211_roc_setup(struct ieee80211_local *local); 1363void ieee80211_roc_setup(struct ieee80211_local *local);
1366void ieee80211_start_next_roc(struct ieee80211_local *local); 1364void ieee80211_start_next_roc(struct ieee80211_local *local);
1367void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); 1365void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 47aeee2d8db1..2659e428b80c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -215,6 +215,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
215 skb->priority = 7; 215 skb->priority = 7;
216 216
217 info->control.vif = &sdata->vif; 217 info->control.vif = &sdata->vif;
218 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
218 ieee80211_set_qos_hdr(sdata, skb); 219 ieee80211_set_qos_hdr(sdata, skb);
219} 220}
220 221
@@ -246,11 +247,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
246 return -EAGAIN; 247 return -EAGAIN;
247 248
248 skb = dev_alloc_skb(local->tx_headroom + 249 skb = dev_alloc_skb(local->tx_headroom +
250 IEEE80211_ENCRYPT_HEADROOM +
251 IEEE80211_ENCRYPT_TAILROOM +
249 hdr_len + 252 hdr_len +
250 2 + 15 /* PERR IE */); 253 2 + 15 /* PERR IE */);
251 if (!skb) 254 if (!skb)
252 return -1; 255 return -1;
253 skb_reserve(skb, local->tx_headroom); 256 skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
254 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 257 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
255 memset(mgmt, 0, hdr_len); 258 memset(mgmt, 0, hdr_len);
256 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 259 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
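The mesh_hwmp change enlarges the PERR frame allocation by encryption head- and tailroom and then reserves the headroom, so later encryption can grow the frame in place instead of forcing a reallocation. A sketch of the same reserve-both-ends arithmetic with plain buffers and invented sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Example sizes only; the real values are driver and cipher specific. */
    #define TX_HEADROOM       16
    #define ENCRYPT_HEADROOM   8
    #define ENCRYPT_TAILROOM   8

    struct frame {
        unsigned char *buf;    /* start of the allocation */
        unsigned char *data;   /* start of the payload, after the reserved headroom */
        size_t len;            /* payload length */
    };

    static struct frame *frame_alloc(size_t payload_len)
    {
        size_t total = TX_HEADROOM + ENCRYPT_HEADROOM +
                       payload_len + ENCRYPT_TAILROOM;
        struct frame *f = malloc(sizeof(*f));

        f->buf = calloc(1, total);
        /* Reserve the headroom up front, the way the hunk calls skb_reserve(). */
        f->data = f->buf + TX_HEADROOM + ENCRYPT_HEADROOM;
        f->len = payload_len;
        return f;
    }

    int main(void)
    {
        struct frame *f = frame_alloc(64);

        memset(f->data, 0xab, f->len);              /* build the frame body */
        printf("headroom available: %ld bytes\n", (long)(f->data - f->buf));
        free(f->buf);
        free(f);
        return 0;
    }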
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index a5379aea7d09..a3ad4c3c80a3 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
102 ieee80211_sta_reset_conn_monitor(sdata); 102 ieee80211_sta_reset_conn_monitor(sdata);
103} 103}
104 104
105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
106 bool offchannel_ps_enable)
107{ 106{
108 struct ieee80211_sub_if_data *sdata; 107 struct ieee80211_sub_if_data *sdata;
109 108
@@ -134,8 +133,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
134 133
135 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 134 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
136 netif_tx_stop_all_queues(sdata->dev); 135 netif_tx_stop_all_queues(sdata->dev);
137 if (offchannel_ps_enable && 136 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
138 (sdata->vif.type == NL80211_IFTYPE_STATION) &&
139 sdata->u.mgd.associated) 137 sdata->u.mgd.associated)
140 ieee80211_offchannel_ps_enable(sdata); 138 ieee80211_offchannel_ps_enable(sdata);
141 } 139 }
@@ -143,8 +141,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
143 mutex_unlock(&local->iflist_mtx); 141 mutex_unlock(&local->iflist_mtx);
144} 142}
145 143
146void ieee80211_offchannel_return(struct ieee80211_local *local, 144void ieee80211_offchannel_return(struct ieee80211_local *local)
147 bool offchannel_ps_disable)
148{ 145{
149 struct ieee80211_sub_if_data *sdata; 146 struct ieee80211_sub_if_data *sdata;
150 147
@@ -163,11 +160,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
163 continue; 160 continue;
164 161
165 /* Tell AP we're back */ 162 /* Tell AP we're back */
166 if (offchannel_ps_disable && 163 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
167 sdata->vif.type == NL80211_IFTYPE_STATION) { 164 sdata->u.mgd.associated)
168 if (sdata->u.mgd.associated) 165 ieee80211_offchannel_ps_disable(sdata);
169 ieee80211_offchannel_ps_disable(sdata);
170 }
171 166
172 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 167 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
173 /* 168 /*
@@ -385,7 +380,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
385 local->tmp_channel = NULL; 380 local->tmp_channel = NULL;
386 ieee80211_hw_config(local, 0); 381 ieee80211_hw_config(local, 0);
387 382
388 ieee80211_offchannel_return(local, true); 383 ieee80211_offchannel_return(local);
389 } 384 }
390 385
391 ieee80211_recalc_idle(local); 386 ieee80211_recalc_idle(local);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d59fc6818b1c..bf82e69d0601 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -292,7 +292,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
292 if (!was_hw_scan) { 292 if (!was_hw_scan) {
293 ieee80211_configure_filter(local); 293 ieee80211_configure_filter(local);
294 drv_sw_scan_complete(local); 294 drv_sw_scan_complete(local);
295 ieee80211_offchannel_return(local, true); 295 ieee80211_offchannel_return(local);
296 } 296 }
297 297
298 ieee80211_recalc_idle(local); 298 ieee80211_recalc_idle(local);
@@ -341,7 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
341 local->next_scan_state = SCAN_DECISION; 341 local->next_scan_state = SCAN_DECISION;
342 local->scan_channel_idx = 0; 342 local->scan_channel_idx = 0;
343 343
344 ieee80211_offchannel_stop_vifs(local, true); 344 ieee80211_offchannel_stop_vifs(local);
345 345
346 ieee80211_configure_filter(local); 346 ieee80211_configure_filter(local);
347 347
@@ -678,12 +678,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
678 local->scan_channel = NULL; 678 local->scan_channel = NULL;
679 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 679 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
680 680
681 /* 681 /* disable PS */
682 * Re-enable vifs and beaconing. Leave PS 682 ieee80211_offchannel_return(local);
683 * in off-channel state..will put that back
684 * on-channel at the end of scanning.
685 */
686 ieee80211_offchannel_return(local, false);
687 683
688 *next_delay = HZ / 5; 684 *next_delay = HZ / 5;
689 /* afterwards, resume scan & go to next channel */ 685 /* afterwards, resume scan & go to next channel */
@@ -693,8 +689,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
693static void ieee80211_scan_state_resume(struct ieee80211_local *local, 689static void ieee80211_scan_state_resume(struct ieee80211_local *local,
694 unsigned long *next_delay) 690 unsigned long *next_delay)
695{ 691{
696 /* PS already is in off-channel mode */ 692 ieee80211_offchannel_stop_vifs(local);
697 ieee80211_offchannel_stop_vifs(local, false);
698 693
699 if (local->ops->flush) { 694 if (local->ops->flush) {
700 drv_flush(local, false); 695 drv_flush(local, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e9eadc40c09c..467c1d1b66f2 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1673,10 +1673,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1673 chanctx_conf = 1673 chanctx_conf =
1674 rcu_dereference(tmp_sdata->vif.chanctx_conf); 1674 rcu_dereference(tmp_sdata->vif.chanctx_conf);
1675 } 1675 }
1676 if (!chanctx_conf)
1677 goto fail_rcu;
1678 1676
1679 chan = chanctx_conf->def.chan; 1677 if (chanctx_conf)
1678 chan = chanctx_conf->def.chan;
1679 else if (!local->use_chanctx)
1680 chan = local->_oper_channel;
1681 else
1682 goto fail_rcu;
1680 1683
1681 /* 1684 /*
1682 * Frame injection is not allowed if beaconing is not allowed 1685 * Frame injection is not allowed if beaconing is not allowed
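The tx.c hunk turns a hard failure into a fallback chain: take the channel from the channel context if there is one, otherwise fall back to the single operating channel when the driver does not use channel contexts, and only then drop the frame. The same control flow in isolation, with illustrative types rather than the mac80211 structures:

    #include <stdio.h>
    #include <stddef.h>

    struct channel { int freq; };

    struct context {
        struct channel *chanctx_chan;   /* channel from a channel context, may be NULL */
        struct channel *oper_channel;   /* single operating channel, legacy drivers */
        int use_chanctx;                /* driver manages channels via contexts */
    };

    /* Returns the channel to transmit on, or NULL when the frame must be dropped. */
    static struct channel *pick_channel(const struct context *ctx)
    {
        if (ctx->chanctx_chan)
            return ctx->chanctx_chan;       /* preferred source */
        if (!ctx->use_chanctx)
            return ctx->oper_channel;       /* legacy fallback */
        return NULL;                        /* chanctx driver with no context: fail */
    }

    int main(void)
    {
        struct channel oper = { .freq = 2412 };
        struct context legacy = { .chanctx_chan = NULL, .oper_channel = &oper,
                                  .use_chanctx = 0 };

        printf("%s\n", pick_channel(&legacy) ? "transmit" : "drop");
        return 0;
    }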
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 016d95ead930..e4a0c4fb3a7c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1376,11 +1376,12 @@ void nf_conntrack_cleanup(struct net *net)
1376 synchronize_net(); 1376 synchronize_net();
1377 nf_conntrack_proto_fini(net); 1377 nf_conntrack_proto_fini(net);
1378 nf_conntrack_cleanup_net(net); 1378 nf_conntrack_cleanup_net(net);
1379}
1379 1380
1380 if (net_eq(net, &init_net)) { 1381void nf_conntrack_cleanup_end(void)
1381 RCU_INIT_POINTER(nf_ct_destroy, NULL); 1382{
1382 nf_conntrack_cleanup_init_net(); 1383 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1383 } 1384 nf_conntrack_cleanup_init_net();
1384} 1385}
1385 1386
1386void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) 1387void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 363285d544a1..e7185c684816 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -575,6 +575,7 @@ static int __init nf_conntrack_standalone_init(void)
575static void __exit nf_conntrack_standalone_fini(void) 575static void __exit nf_conntrack_standalone_fini(void)
576{ 576{
577 unregister_pernet_subsys(&nf_conntrack_net_ops); 577 unregister_pernet_subsys(&nf_conntrack_net_ops);
578 nf_conntrack_cleanup_end();
578} 579}
579 580
580module_init(nf_conntrack_standalone_init); 581module_init(nf_conntrack_standalone_init);
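Read together, the two conntrack hunks split teardown into a per-namespace step (nf_conntrack_cleanup) and a one-time final step (nf_conntrack_cleanup_end) that the standalone module now calls after unregistering its pernet ops. A rough sketch of that two-phase shutdown ordering, with stand-in functions only:

    #include <stdio.h>

    /* Per-instance teardown: safe to run once per namespace or object. */
    static void cleanup_net(int net_id)
    {
        printf("cleanup state for net %d\n", net_id);
    }

    /* Final teardown: global pointers and shared caches, run exactly once at
     * module exit, after every per-instance cleanup has finished. */
    static void cleanup_end(void)
    {
        printf("clear global hooks and free shared caches\n");
    }

    int main(void)
    {
        for (int net = 0; net < 3; net++)
            cleanup_net(net);       /* phase 1: per namespace */
        cleanup_end();              /* phase 2: global, once */
        return 0;
    }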
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8d987c3573fd..7b3a9e5999c0 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
345} 345}
346EXPORT_SYMBOL_GPL(xt_find_revision); 346EXPORT_SYMBOL_GPL(xt_find_revision);
347 347
348static char *textify_hooks(char *buf, size_t size, unsigned int mask) 348static char *
349textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
349{ 350{
350 static const char *const names[] = { 351 static const char *const inetbr_names[] = {
351 "PREROUTING", "INPUT", "FORWARD", 352 "PREROUTING", "INPUT", "FORWARD",
352 "OUTPUT", "POSTROUTING", "BROUTING", 353 "OUTPUT", "POSTROUTING", "BROUTING",
353 }; 354 };
354 unsigned int i; 355 static const char *const arp_names[] = {
356 "INPUT", "FORWARD", "OUTPUT",
357 };
358 const char *const *names;
359 unsigned int i, max;
355 char *p = buf; 360 char *p = buf;
356 bool np = false; 361 bool np = false;
357 int res; 362 int res;
358 363
364 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
365 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
366 ARRAY_SIZE(inetbr_names);
359 *p = '\0'; 367 *p = '\0';
360 for (i = 0; i < ARRAY_SIZE(names); ++i) { 368 for (i = 0; i < max; ++i) {
361 if (!(mask & (1 << i))) 369 if (!(mask & (1 << i)))
362 continue; 370 continue;
363 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); 371 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
@@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,
402 pr_err("%s_tables: %s match: used from hooks %s, but only " 410 pr_err("%s_tables: %s match: used from hooks %s, but only "
403 "valid from %s\n", 411 "valid from %s\n",
404 xt_prefix[par->family], par->match->name, 412 xt_prefix[par->family], par->match->name,
405 textify_hooks(used, sizeof(used), par->hook_mask), 413 textify_hooks(used, sizeof(used), par->hook_mask,
406 textify_hooks(allow, sizeof(allow), par->match->hooks)); 414 par->family),
415 textify_hooks(allow, sizeof(allow), par->match->hooks,
416 par->family));
407 return -EINVAL; 417 return -EINVAL;
408 } 418 }
409 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 419 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
@@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,
575 pr_err("%s_tables: %s target: used from hooks %s, but only " 585 pr_err("%s_tables: %s target: used from hooks %s, but only "
576 "usable from %s\n", 586 "usable from %s\n",
577 xt_prefix[par->family], par->target->name, 587 xt_prefix[par->family], par->target->name,
578 textify_hooks(used, sizeof(used), par->hook_mask), 588 textify_hooks(used, sizeof(used), par->hook_mask,
579 textify_hooks(allow, sizeof(allow), par->target->hooks)); 589 par->family),
590 textify_hooks(allow, sizeof(allow), par->target->hooks,
591 par->family));
580 return -EINVAL; 592 return -EINVAL;
581 } 593 }
582 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 594 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
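textify_hooks() now chooses its hook-name table by protocol family, because arptables has a different and shorter hook set than the inet and bridge tables, and then prints the set bits of the mask as a slash-separated list. A self-contained sketch of the same bitmask-to-string logic with generic names, not the xtables API:

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const inet_hooks[] = {
        "PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING",
    };
    static const char *const arp_hooks[] = {
        "INPUT", "FORWARD", "OUTPUT",
    };

    /* Render the set bits of mask as "NAME1/NAME2/...", choosing the name
     * table by is_arp, and never writing more than size bytes. */
    static char *hooks_to_string(char *buf, size_t size, unsigned int mask, int is_arp)
    {
        const char *const *names = is_arp ? arp_hooks : inet_hooks;
        size_t max = is_arp ? ARRAY_SIZE(arp_hooks) : ARRAY_SIZE(inet_hooks);
        char *p = buf;
        int first = 1;

        *p = '\0';
        for (size_t i = 0; i < max && size > 1; i++) {
            if (!(mask & (1u << i)))
                continue;
            int n = snprintf(p, size, "%s%s", first ? "" : "/", names[i]);
            if (n < 0 || (size_t)n >= size)
                break;
            p += n;
            size -= n;
            first = 0;
        }
        return buf;
    }

    int main(void)
    {
        char buf[64];

        printf("%s\n", hooks_to_string(buf, sizeof(buf), 0x5, 0));  /* PREROUTING/FORWARD */
        printf("%s\n", hooks_to_string(buf, sizeof(buf), 0x3, 1));  /* INPUT/FORWARD */
        return 0;
    }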
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2a0843081840..bde009ed8d3b 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -109,7 +109,7 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
109 struct xt_ct_target_info *info = par->targinfo; 109 struct xt_ct_target_info *info = par->targinfo;
110 struct nf_conntrack_tuple t; 110 struct nf_conntrack_tuple t;
111 struct nf_conn *ct; 111 struct nf_conn *ct;
112 int ret; 112 int ret = -EOPNOTSUPP;
113 113
114 if (info->flags & ~XT_CT_NOTRACK) 114 if (info->flags & ~XT_CT_NOTRACK)
115 return -EINVAL; 115 return -EINVAL;
@@ -247,7 +247,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
247 struct xt_ct_target_info_v1 *info = par->targinfo; 247 struct xt_ct_target_info_v1 *info = par->targinfo;
248 struct nf_conntrack_tuple t; 248 struct nf_conntrack_tuple t;
249 struct nf_conn *ct; 249 struct nf_conn *ct;
250 int ret; 250 int ret = -EOPNOTSUPP;
251 251
252 if (info->flags & ~XT_CT_NOTRACK) 252 if (info->flags & ~XT_CT_NOTRACK)
253 return -EINVAL; 253 return -EINVAL;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index a9327e2e48ce..670cbc3518de 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -35,10 +35,11 @@
35/* Must be called with rcu_read_lock. */ 35/* Must be called with rcu_read_lock. */
36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
37{ 37{
38 if (unlikely(!vport)) { 38 if (unlikely(!vport))
39 kfree_skb(skb); 39 goto error;
40 return; 40
41 } 41 if (unlikely(skb_warn_if_lro(skb)))
42 goto error;
42 43
43 /* Make our own copy of the packet. Otherwise we will mangle the 44 /* Make our own copy of the packet. Otherwise we will mangle the
44 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
50 51
51 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
52 ovs_vport_receive(vport, skb); 53 ovs_vport_receive(vport, skb);
54 return;
55
56error:
57 kfree_skb(skb);
53} 58}
54 59
55/* Called with rcu_read_lock and bottom-halves disabled. */ 60/* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
169 goto error; 174 goto error;
170 } 175 }
171 176
172 if (unlikely(skb_warn_if_lro(skb)))
173 goto error;
174
175 skb->dev = netdev_vport->dev; 177 skb->dev = netdev_vport->dev;
176 len = skb->len; 178 len = skb->len;
177 dev_queue_xmit(skb); 179 dev_queue_xmit(skb);
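The vport-netdev hunk moves the LRO check into the receive path and sends both failure cases to one error label that frees the skb, the usual kernel idiom for sharing cleanup code. A minimal illustration of that goto-based error path with generic resource names:

    #include <stdio.h>
    #include <stdlib.h>

    struct packet { unsigned char *data; size_t len; int lro; };

    static int port_receive(struct packet *pkt, int have_port)
    {
        if (!have_port)
            goto error;               /* no port to deliver to */
        if (pkt->lro)
            goto error;               /* aggregated packets are not acceptable here */

        printf("delivering %zu bytes\n", pkt->len);
        return 0;

    error:
        /* Single cleanup path shared by every failure case. */
        free(pkt->data);
        pkt->data = NULL;
        return -1;
    }

    int main(void)
    {
        struct packet p = { .data = malloc(64), .len = 64, .lro = 1 };

        return port_receive(&p, 1) ? 1 : 0;
    }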
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e639645e8fec..c111bd0e083a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
2361 2361
2362 packet_flush_mclist(sk); 2362 packet_flush_mclist(sk);
2363 2363
2364 memset(&req_u, 0, sizeof(req_u)); 2364 if (po->rx_ring.pg_vec) {
2365 2365 memset(&req_u, 0, sizeof(req_u));
2366 if (po->rx_ring.pg_vec)
2367 packet_set_ring(sk, &req_u, 1, 0); 2366 packet_set_ring(sk, &req_u, 1, 0);
2367 }
2368 2368
2369 if (po->tx_ring.pg_vec) 2369 if (po->tx_ring.pg_vec) {
2370 memset(&req_u, 0, sizeof(req_u));
2370 packet_set_ring(sk, &req_u, 1, 1); 2371 packet_set_ring(sk, &req_u, 1, 1);
2372 }
2371 2373
2372 fanout_release(sk); 2374 fanout_release(sk);
2373 2375
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 298c0ddfb57e..3d2acc7a9c80 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
438 if (q->rate) { 438 if (q->rate) {
439 struct sk_buff_head *list = &sch->q; 439 struct sk_buff_head *list = &sch->q;
440 440
441 delay += packet_len_2_sched_time(skb->len, q);
442
443 if (!skb_queue_empty(list)) { 441 if (!skb_queue_empty(list)) {
444 /* 442 /*
445 * Last packet in queue is reference point (now). 443 * Last packet in queue is reference point (now),
446 * First packet in queue is already in flight, 444 * calculate this time bonus and subtract
447 * calculate this time bonus and substract
448 * from delay. 445 * from delay.
449 */ 446 */
450 delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
448 delay = max_t(psched_tdiff_t, 0, delay);
451 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
452 } 450 }
451
452 delay += packet_len_2_sched_time(skb->len, q);
453 } 453 }
454 454
455 cb->time_to_send = now + delay; 455 cb->time_to_send = now + delay;
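The sch_netem hunk reworks the rate-limited delay: the last queued packet's departure time is the reference point, the time bonus (how far in the future that departure already is) is subtracted from the new delay and clamped at zero, and only then is this packet's own serialization time added. The same arithmetic spelled out with plain integers and invented microsecond values:

    #include <stdio.h>

    typedef long long tdiff_t;      /* signed time values, think microseconds */

    static tdiff_t max_tdiff(tdiff_t a, tdiff_t b) { return a > b ? a : b; }

    /* now:            current time
     * tail_departure: when the last queued packet will leave (0 = queue empty)
     * extra_delay:    randomly chosen extra latency for this packet
     * xmit_time:      packet_len / rate, time to serialize the packet on the wire */
    static tdiff_t time_to_send(tdiff_t now, tdiff_t tail_departure,
                                tdiff_t extra_delay, tdiff_t xmit_time)
    {
        tdiff_t delay = extra_delay;

        if (tail_departure) {
            /* The tail packet is the reference point: subtract the time bonus
             * and never let the delay go negative. */
            delay -= tail_departure - now;
            delay = max_tdiff(0, delay);
            now = tail_departure;
        }
        return now + delay + xmit_time;
    }

    int main(void)
    {
        /* Queue tail leaves 700us from now; this packet adds 500us of latency
         * and needs 200us of transmit time, so it departs 200us after the tail. */
        printf("departs at t=%lld\n", time_to_send(1000, 1700, 500, 200));  /* 1900 */
        return 0;
    }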
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 159b9bc5d633..d8420ae614dc 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
71 return; 71 return;
72 72
73 if (atomic_dec_and_test(&key->refcnt)) { 73 if (atomic_dec_and_test(&key->refcnt)) {
74 kfree(key); 74 kzfree(key);
75 SCTP_DBG_OBJCNT_DEC(keys); 75 SCTP_DBG_OBJCNT_DEC(keys);
76 } 76 }
77} 77}
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 17a001bac2cc..1a9c5fb77310 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
249/* Final destructor for endpoint. */ 249/* Final destructor for endpoint. */
250static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 250static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
251{ 251{
252 int i;
253
252 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 254 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
253 255
254 /* Free up the HMAC transform. */ 256 /* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
271 sctp_inq_free(&ep->base.inqueue); 273 sctp_inq_free(&ep->base.inqueue);
272 sctp_bind_addr_free(&ep->base.bind_addr); 274 sctp_bind_addr_free(&ep->base.bind_addr);
273 275
276 for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
277 memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
278
274 /* Remove and free the port */ 279 /* Remove and free the port */
275 if (sctp_sk(ep->base.sk)->bind_hash) 280 if (sctp_sk(ep->base.sk)->bind_hash)
276 sctp_put_port(ep->base.sk); 281 sctp_put_port(ep->base.sk);
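The sctp/auth.c and endpointola.c hunks both scrub key material before the memory is released: kzfree() instead of kfree() for auth keys, and an explicit memset over the endpoint's secret keys in the destructor (the later socket.c hunk does the same for a user-supplied key). A userspace sketch of the habit, using a volatile pointer so the compiler cannot drop the scrub; glibc's explicit_bzero() would do the same where available:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Zero a buffer in a way the optimizer must not discard, then release it. */
    static void secure_free(void *p, size_t len)
    {
        volatile unsigned char *b = p;

        while (len--)
            *b++ = 0;
        free(p);
    }

    int main(void)
    {
        size_t key_len = 32;
        unsigned char *key = malloc(key_len);

        memset(key, 0xa5, key_len);     /* stands in for real key material */

        /* Plain free() would leave the key bytes sitting in freed heap memory. */
        secure_free(key, key_len);
        printf("key material scrubbed before free\n");
        return 0;
    }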
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 379c81dee9d1..9bcdbd02d777 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
224 224
225/* Free the outqueue structure and any related pending chunks. 225/* Free the outqueue structure and any related pending chunks.
226 */ 226 */
227void sctp_outq_teardown(struct sctp_outq *q) 227static void __sctp_outq_teardown(struct sctp_outq *q)
228{ 228{
229 struct sctp_transport *transport; 229 struct sctp_transport *transport;
230 struct list_head *lchunk, *temp; 230 struct list_head *lchunk, *temp;
@@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
277 sctp_chunk_free(chunk); 277 sctp_chunk_free(chunk);
278 } 278 }
279 279
280 q->error = 0;
281
282 /* Throw away any leftover control chunks. */ 280 /* Throw away any leftover control chunks. */
283 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
284 list_del_init(&chunk->list); 282 list_del_init(&chunk->list);
@@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
286 } 284 }
287} 285}
288 286
287void sctp_outq_teardown(struct sctp_outq *q)
288{
289 __sctp_outq_teardown(q);
290 sctp_outq_init(q->asoc, q);
291}
292
289/* Free the outqueue structure and any related pending chunks. */ 293/* Free the outqueue structure and any related pending chunks. */
290void sctp_outq_free(struct sctp_outq *q) 294void sctp_outq_free(struct sctp_outq *q)
291{ 295{
292 /* Throw away leftover chunks. */ 296 /* Throw away leftover chunks. */
293 sctp_outq_teardown(q); 297 __sctp_outq_teardown(q);
294 298
295 /* If we were kmalloc()'d, free the memory. */ 299 /* If we were kmalloc()'d, free the memory. */
296 if (q->malloced) 300 if (q->malloced)
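The outqueue refactor keeps the actual freeing in a private __sctp_outq_teardown() and makes the public sctp_outq_teardown() re-initialize the queue afterwards, while sctp_outq_free() only tears down. A sketch of that reset-versus-destroy split on a toy queue type:

    #include <stdio.h>
    #include <stdlib.h>

    struct queue {
        int *items;
        size_t len, cap;
    };

    static void queue_init(struct queue *q, size_t cap)
    {
        q->items = calloc(cap, sizeof(*q->items));
        q->len = 0;
        q->cap = cap;
    }

    /* Internal helper: release everything the queue owns. */
    static void __queue_teardown(struct queue *q)
    {
        free(q->items);
        q->items = NULL;
        q->len = q->cap = 0;
    }

    /* Public reset: after teardown the queue is immediately usable again,
     * mirroring how sctp_outq_teardown() now re-runs sctp_outq_init(). */
    static void queue_teardown(struct queue *q, size_t cap)
    {
        __queue_teardown(q);
        queue_init(q, cap);
    }

    /* Final destruction: no re-init, the queue is going away. */
    static void queue_free(struct queue *q)
    {
        __queue_teardown(q);
    }

    int main(void)
    {
        struct queue q;

        queue_init(&q, 8);
        q.items[q.len++] = 42;
        queue_teardown(&q, 8);          /* drop pending items, stay usable */
        printf("len after reset: %zu\n", q.len);
        queue_free(&q);                 /* final */
        return 0;
    }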
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 618ec7e216ca..5131fcfedb03 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1779 1779
1780 /* Update the content of current association. */ 1780 /* Update the content of current association. */
1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1782 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1782 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1784 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1785 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1784 return SCTP_DISPOSITION_CONSUME; 1786 return SCTP_DISPOSITION_CONSUME;
1785 1787
1786nomem_ev: 1788nomem_ev:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e65758cb038..cedd9bf67b8c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3390 3390
3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3392out: 3392out:
3393 kfree(authkey); 3393 kzfree(authkey);
3394 return ret; 3394 return ret;
3395} 3395}
3396 3396
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 043889ac86c0..bf3c6e8fc401 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)
366 366
367void sctp_sysctl_net_unregister(struct net *net) 367void sctp_sysctl_net_unregister(struct net *net)
368{ 368{
369 struct ctl_table *table;
370
371 table = net->sctp.sysctl_header->ctl_table_arg;
369 unregister_net_sysctl_table(net->sctp.sysctl_header); 372 unregister_net_sysctl_table(net->sctp.sysctl_header);
373 kfree(table);
370} 374}
371 375
372static struct ctl_table_header * sctp_sysctl_header; 376static struct ctl_table_header * sctp_sysctl_header;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index bfa31714581f..fb20f25ddec9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99} 99}
100 100
101static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
102{
103 struct list_head *q = &queue->tasks[queue->priority];
104 struct rpc_task *task;
105
106 if (!list_empty(q)) {
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
108 if (task->tk_owner == queue->owner)
109 list_move_tail(&task->u.tk_wait.list, q);
110 }
111}
112
101static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) 113static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
102{ 114{
103 queue->priority = priority; 115 if (queue->priority != priority) {
116 /* Fairness: rotate the list when changing priority */
117 rpc_rotate_queue_owner(queue);
118 queue->priority = priority;
119 }
104} 120}
105 121
106static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) 122static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
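rpc_set_waitqueue_priority() now acts only on a real priority change, and when it does it rotates the current owner's task to the back of its list so a single owner cannot keep winning the queue. The rotation is just "move the front entry to the tail when it belongs to the running owner"; a sketch on a flat array of owner ids rather than the kernel's linked lists:

    #include <stdio.h>

    #define NTASKS 4

    /* Rotate the queue so the task at the front moves to the back, but only
     * when it belongs to the owner that has been running; other owners'
     * tasks keep their position.  Mirrors the fairness comment in the hunk. */
    static void rotate_owner(int queue[], int n, int owner)
    {
        if (n == 0 || queue[0] != owner)
            return;

        int first = queue[0];
        for (int i = 1; i < n; i++)
            queue[i - 1] = queue[i];
        queue[n - 1] = first;
    }

    int main(void)
    {
        int queue[NTASKS] = { 7, 7, 3, 5 };   /* owner id of each queued task */

        rotate_owner(queue, NTASKS, 7);       /* priority change, owner 7 at front */
        for (int i = 0; i < NTASKS; i++)
            printf("%d ", queue[i]);          /* 7 3 5 7 */
        printf("\n");
        return 0;
    }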
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0a148c9d2a5c..0f679df7d072 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
465} 465}
466 466
467/* 467/*
468 * See net/ipv6/datagram.c : datagram_recv_ctl 468 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
469 */ 469 */
470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
471 struct cmsghdr *cmh) 471 struct cmsghdr *cmh)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7d4789..45f1618c8e23 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1358 &iwe, IW_EV_UINT_LEN); 1358 &iwe, IW_EV_UINT_LEN);
1359 } 1359 }
1360 1360
1361 buf = kmalloc(30, GFP_ATOMIC); 1361 buf = kmalloc(31, GFP_ATOMIC);
1362 if (buf) { 1362 if (buf) {
1363 memset(&iwe, 0, sizeof(iwe)); 1363 memset(&iwe, 0, sizeof(iwe));
1364 iwe.cmd = IWEVCUSTOM; 1364 iwe.cmd = IWEVCUSTOM;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41eabc46f110..07c585756d2a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2656,7 +2656,7 @@ static void xfrm_policy_fini(struct net *net)
2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2657 2657
2658 htab = &net->xfrm.policy_bydst[dir]; 2658 htab = &net->xfrm.policy_bydst[dir];
2659 sz = (htab->hmask + 1); 2659 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2660 WARN_ON(!hlist_empty(htab->table)); 2660 WARN_ON(!hlist_empty(htab->table));
2661 xfrm_hash_free(htab->table, sz); 2661 xfrm_hash_free(htab->table, sz);
2662 } 2662 }
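The xfrm_policy fix makes the size passed to xfrm_hash_free() count bytes rather than buckets, matching what was allocated. The arithmetic on its own, with a stand-in for the kernel's hlist_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct hlist_head { void *first; };    /* stand-in for the kernel type */

    int main(void)
    {
        unsigned int hmask = 1023;          /* a 1024-bucket table */
        size_t nbuckets = hmask + 1;

        /* Allocation and free must agree on the same size expression:
         * buckets * sizeof(bucket), not just the bucket count. */
        size_t sz = nbuckets * sizeof(struct hlist_head);
        struct hlist_head *table = calloc(nbuckets, sizeof(*table));

        printf("allocated %zu bytes for %zu buckets\n", sz, nbuckets);
        free(table);
        return 0;
    }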
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 765f6fe951eb..35754cc8a9e5 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
242 u32 diff; 242 u32 diff;
243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
244 u32 seq = ntohl(net_seq); 244 u32 seq = ntohl(net_seq);
245 u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 245 u32 pos;
246 246
247 if (!replay_esn->replay_window) 247 if (!replay_esn->replay_window)
248 return; 248 return;
249 249
250 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
251
250 if (seq > replay_esn->seq) { 252 if (seq > replay_esn->seq) {
251 diff = seq - replay_esn->seq; 253 diff = seq - replay_esn->seq;
252 254