Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/fib_frontend.c            3
-rw-r--r--  net/ipv4/inet_connection_sock.c    1
-rw-r--r--  net/ipv4/inet_diag.c              18
-rw-r--r--  net/ipv4/ip_forward.c              1
-rw-r--r--  net/ipv4/ip_fragment.c            11
-rw-r--r--  net/ipv4/ip_output.c               3
-rw-r--r--  net/ipv4/ip_sockglue.c            33
-rw-r--r--  net/ipv4/ipmr.c                    7
-rw-r--r--  net/ipv4/netfilter/ip_tables.c     6
-rw-r--r--  net/ipv4/ping.c                   12
-rw-r--r--  net/ipv4/tcp.c                    10
-rw-r--r--  net/ipv4/tcp_cong.c                6
-rw-r--r--  net/ipv4/tcp_cubic.c               6
-rw-r--r--  net/ipv4/tcp_input.c               9
-rw-r--r--  net/ipv4/tcp_ipv4.c                2
-rw-r--r--  net/ipv4/tcp_output.c              6
-rw-r--r--  net/ipv4/xfrm4_output.c            2
17 files changed, 90 insertions, 46 deletions
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 57be71dd6a9e..23b9b3e86f4c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1111,11 +1111,10 @@ static void ip_fib_net_exit(struct net *net)
 {
 	unsigned int i;
 
+	rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	fib4_rules_exit(net);
 #endif
-
-	rtnl_lock();
 	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
 		struct fib_table *tb;
 		struct hlist_head *head;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 14d02ea905b6..3e44b9b0b78e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 		release_sock(sk);
 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 			timeo = schedule_timeout(timeo);
+		sched_annotate_sleep();
 		lock_sock(sk);
 		err = 0;
 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 81751f12645f..592aff37366b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
 	mutex_unlock(&inet_diag_table_mutex);
 }
 
+static size_t inet_sk_attr_size(void)
+{
+	return	  nla_total_size(sizeof(struct tcp_info))
+		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+		+ nla_total_size(1) /* INET_DIAG_TOS */
+		+ nla_total_size(1) /* INET_DIAG_TCLASS */
+		+ nla_total_size(sizeof(struct inet_diag_meminfo))
+		+ nla_total_size(sizeof(struct inet_diag_msg))
+		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+		+ nla_total_size(TCP_CA_NAME_MAX)
+		+ nla_total_size(sizeof(struct tcpvegas_info))
+		+ 64;
+}
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
 		      struct user_namespace *user_ns,
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
 	if (err)
 		goto out;
 
-	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-			sizeof(struct inet_diag_meminfo) +
-			sizeof(struct tcp_info) + 64, GFP_KERNEL);
+	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
 	if (!rep) {
 		err = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 787b3c294ce6..d9bc28ac5d1b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	skb_sender_cpu_clear(skb);
 	return dst_output(skb);
 }
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e5b6d0ddcb58..145a50c4d566 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
+	int netoff;
 	u32 len;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;
 
-	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+	netoff = skb_network_offset(skb);
+
+	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
 		return skb;
 
 	if (iph.ihl < 5 || iph.version != 4)
 		return skb;
 
 	len = ntohs(iph.tot_len);
-	if (skb->len < len || len < (iph.ihl * 4))
+	if (skb->len < netoff + len || len < (iph.ihl * 4))
 		return skb;
 
 	if (ip_is_fragment(&iph)) {
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (skb) {
-			if (!pskb_may_pull(skb, iph.ihl*4))
+			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
 				return skb;
-			if (pskb_trim_rcsum(skb, len))
+			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 			if (ip_defrag(skb, user))
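The ip_fragment.c hunk above adds a network-offset term to every length check, because callers such as packet sockets and macvlan can hand ip_check_defrag() an skb whose data still starts at the link-layer header. A minimal standalone sketch of that arithmetic (plain userspace C, not kernel code; ip_lengths_ok() and its parameters are made up for illustration):

/* Hypothetical helper, not a kernel API: mirrors the length checks in
 * ip_check_defrag().  buf_len plays the role of skb->len (bytes from the
 * start of the buffer), tot_len is iphdr.tot_len (bytes from the start of
 * the IP header), netoff is skb_network_offset().
 */
#include <stdbool.h>
#include <stdio.h>

static bool ip_lengths_ok(unsigned int buf_len, unsigned int netoff,
			  unsigned int tot_len, unsigned int ihl)
{
	if (tot_len < ihl * 4)	/* total length must at least cover the header */
		return false;
	/* Comparing buf_len against tot_len alone would wrongly reject a
	 * packet whose IP header is preceded by netoff link-layer bytes.
	 */
	return buf_len >= netoff + tot_len;
}

int main(void)
{
	/* 14-byte Ethernet header in front of a 60-byte IP packet,
	 * delivered in a 74-byte buffer. */
	printf("complete packet:  %d\n", ip_lengths_ok(74, 14, 60, 5));
	printf("truncated packet: %d\n", ip_lengths_ok(70, 14, 60, 5));
	return 0;
}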
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d68199d9b2b0..a7aea2048a0d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
 	cork->length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 31d8c71986b4..5cd99271d3a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
 	kfree_skb(skb);
 }
 
-static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
-					   const struct sk_buff *skb,
-					   int ee_origin)
+/* IPv4 supports cmsg on all imcp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+				       struct sk_buff *skb,
+				       int ee_origin)
 {
-	struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+	struct in_pktinfo *info;
+
+	if (ee_origin == SO_EE_ORIGIN_ICMP)
+		return true;
 
-	if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
-	    (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+	if (ee_origin == SO_EE_ORIGIN_LOCAL)
+		return false;
+
+	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
+	 * timestamp with egress dev. Not possible for packets without dev
+	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+	 */
+	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
 	    (!skb->dev))
 		return false;
 
+	info = PKTINFO_SKB_CB(skb);
 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
 	info->ipi_ifindex = skb->dev->ifindex;
 	return true;
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && skb->len) {
+	if (sin && serr->port) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
 						   serr->addr_offset);
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	sin = &errhdr.offender;
 	memset(sin, 0, sizeof(*sin));
 
-	if (skb->len &&
-	    (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-	     ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
+	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 		if (inet_sk(sk)->cmsg_flags)
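The ip_sockglue.c change above lets a timestamp message on the error queue carry an IP_PKTINFO cmsg when SOF_TIMESTAMPING_OPT_CMSG is set. A rough userspace sketch of how that could be consumed (illustrative only, not part of the patch; error handling is omitted, the destination is a placeholder, and in practice the error queue should be polled until the timestamp arrives):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;
	int ts = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE |
		 SOF_TIMESTAMPING_OPT_CMSG;	/* request PKTINFO with tx timestamps */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(12345),		/* placeholder port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));

	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));

	/* The tx timestamp lands on the error queue; real code would poll()
	 * here instead of reading once. */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
		struct cmsghdr *cm;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == IPPROTO_IP &&
			    cm->cmsg_type == IP_PKTINFO) {
				struct in_pktinfo info;

				memcpy(&info, CMSG_DATA(cm), sizeof(info));
				printf("timestamped skb left via ifindex %d\n",
				       info.ipi_ifindex);
			}
		}
	}
	close(fd);
	return 0;
}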
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d78427652d2..fe54eba6d00d 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 	return 0;
 
 err2:
-	kfree(mrt);
+	ipmr_free_table(mrt);
 err1:
 	fib_rules_unregister(ops);
 	return err;
@@ -278,11 +278,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
 	struct mr_table *mrt, *next;
 
+	rtnl_lock();
 	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
 		list_del(&mrt->list);
 		ipmr_free_table(mrt);
 	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
+	rtnl_unlock();
 }
 #else
 #define ipmr_for_each_table(mrt, net) \
@@ -308,7 +310,10 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
+	rtnl_lock();
 	ipmr_free_table(net->ipv4.mrt);
+	net->ipv4.mrt = NULL;
+	rtnl_unlock();
 }
 #endif
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f84671..cf5e82f39d3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb,
 				    &chainname, &comment, &rulenum) != 0)
 			break;
 
-	nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
-		      "TRACE: %s:%s:%s:%u ",
-		      tablename, chainname, comment, rulenum);
+	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
+		     "TRACE: %s:%s:%s:%u ",
+		     tablename, chainname, comment, rulenum);
 }
 #endif
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e9f66e1cda50..208d5439e59b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
 	kgid_t low, high;
 	int ret = 0;
 
+	if (sk->sk_family == AF_INET6)
+		sk->sk_ipv6only = 1;
+
 	inet_get_ping_group_range_net(net, &low, &high);
 	if (gid_lte(low, group) && gid_lte(group, high))
 		return 0;
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 		if (addr_len < sizeof(*addr))
 			return -EINVAL;
 
+		if (addr->sin_family != AF_INET &&
+		    !(addr->sin_family == AF_UNSPEC &&
+		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+			return -EAFNOSUPPORT;
+
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 			return -EINVAL;
 
 		if (addr->sin6_family != AF_INET6)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 		daddr = usin->sin_addr.s_addr;
 		/* no remote port */
 	} else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d72a0fcd928..995a2259bcfc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 					  int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 new_size_goal, size_goal, hlen;
+	u32 new_size_goal, size_goal;
 
 	if (!large_allowed || !sk_can_gso(sk))
 		return mss_now;
 
-	/* Maybe we should/could use sk->sk_prot->max_header here ? */
-	hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-	       inet_csk(sk)->icsk_ext_hdr_len +
-	       tp->tcp_header_len;
-
-	new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+	/* Note : tcp_tso_autosize() will eventually split this later */
+	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 
 	/* We try hard to avoid divides here */
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d694088214cd..62856e185a93 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
  */
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+	/* If credits accumulated at a higher w, apply them gently now. */
+	if (tp->snd_cwnd_cnt >= w) {
+		tp->snd_cwnd_cnt = 0;
+		tp->snd_cwnd++;
+	}
+
 	tp->snd_cwnd_cnt += acked;
 	if (tp->snd_cwnd_cnt >= w) {
 		u32 delta = tp->snd_cwnd_cnt / w;
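The tcp_cong.c hunk flushes any leftover snd_cwnd_cnt credits before adding the newly ACKed packets, so credits accumulated while w was large are worth at most one extra segment once w shrinks, instead of a delta = cnt / w burst. A standalone sketch of that accumulator (userspace C for illustration only; struct ai_state and cong_avoid_ai() are made-up stand-ins, and the kernel's snd_cwnd_clamp handling is omitted):

#include <stdio.h>

struct ai_state {
	unsigned int snd_cwnd;
	unsigned int snd_cwnd_cnt;	/* ACK credits accumulated so far */
};

static void cong_avoid_ai(struct ai_state *tp, unsigned int w, unsigned int acked)
{
	/* Credits left over from a phase with a larger w buy at most one
	 * cwnd increment here, instead of delta = cnt / w at the new,
	 * smaller w, which could otherwise release a burst. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		unsigned int delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
}

int main(void)
{
	/* Credits piled up while w == 100; the next call runs with w == 10.
	 * cwnd grows by just one segment rather than by 9. */
	struct ai_state tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 90 };

	cong_avoid_ai(&tp, 10, 1);
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}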
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4b276d1ed980..06d3d665a9fd 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -306,8 +306,10 @@ tcp_friendliness:
 		}
 	}
 
-	if (ca->cnt == 0)			/* cannot be zero */
-		ca->cnt = 1;
+	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
+	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
+	 */
+	ca->cnt = max(ca->cnt, 2U);
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
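The tcp_cubic.c hunk clamps ca->cnt (the number of ACKs required per one-segment cwnd increase) to at least 2, capping growth at one segment per two ACKs; since roughly cwnd ACKs arrive per RTT, cwnd can grow by at most about 1.5x per RTT, as the new comment states. A small arithmetic sketch of that bound (standalone userspace C, names illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 40;
	unsigned int cnt = 2;		/* ACKs required per +1 to cwnd */
	unsigned int cnt_acc = 0;	/* mirrors snd_cwnd_cnt */
	unsigned int acks = cwnd;	/* roughly one RTT worth of ACKs */
	unsigned int i;

	for (i = 0; i < acks; i++) {
		if (++cnt_acc >= cnt) {
			cnt_acc = 0;
			cwnd++;
		}
	}
	/* 40 ACKs at cnt == 2 yield +20, i.e. cwnd 40 -> 60 (~1.5x). */
	printf("after one RTT: cwnd=%u\n", cwnd);
	return 0;
}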
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fdd27b17306..f501ac048366 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			if (!first_ackt.v64)
 				first_ackt = last_ackt;
 
-			if (!(sacked & TCPCB_SACKED_ACKED))
+			if (!(sacked & TCPCB_SACKED_ACKED)) {
 				reord = min(pkts_acked, reord);
-			if (!after(scb->end_seq, tp->high_seq))
-				flag |= FLAG_ORIG_SACK_ACKED;
+				if (!after(scb->end_seq, tp->high_seq))
+					flag |= FLAG_ORIG_SACK_ACKED;
+			}
 		}
 
 		if (sacked & TCPCB_SACKED_ACKED)
@@ -4770,7 +4771,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we filled the congestion window, do not expand.  */
-	if (tp->packets_out >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
 		return false;
 
 	return true;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a2dfed4783b..f1756ee02207 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 		skb->sk = sk;
 		skb->destructor = sock_edemux;
 		if (sk->sk_state != TCP_TIME_WAIT) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
 			if (dst)
 				dst = dst_check(dst, 0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a2a796c5536b..1db253e36045 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk)
 	} else {
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
-			skb = alloc_skb_fclone(MAX_TCP_HEADER,
-					       sk->sk_allocation);
+			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
 			if (skb)
 				break;
 			yield();
 		}
-
-		/* Reserve space for headers and prepare control bits. */
-		skb_reserve(skb, MAX_TCP_HEADER);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
 				     TCPHDR_ACK | TCPHDR_FIN);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d5f6bd9a210a..dab73813cb92 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 		return err;
 
 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+	skb->protocol = htons(ETH_P_IP);
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 int xfrm4_output_finish(struct sk_buff *skb)
 {
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-	skb->protocol = htons(ETH_P_IP);
 
 #ifdef CONFIG_NETFILTER
 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;