Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig                          3
-rw-r--r--  net/ipv4/igmp.c                          14
-rw-r--r--  net/ipv4/ip_gre.c                         8
-rw-r--r--  net/ipv4/ip_output.c                     19
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c           1
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c       4
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c    6
-rw-r--r--  net/ipv4/route.c                          2
-rw-r--r--  net/ipv4/tcp.c                            9
-rw-r--r--  net/ipv4/tcp_input.c                      5
-rw-r--r--  net/ipv4/tcp_timer.c                     24
-rw-r--r--  net/ipv4/xfrm4_policy.c                   2
-rw-r--r--  net/ipv4/xfrm4_state.c                   33
13 files changed, 85 insertions, 45 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 571f8950ed0..7cd7760144f 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -217,6 +217,7 @@ config NET_IPIP
 
 config NET_IPGRE
 	tristate "IP: GRE tunnels over IP"
+	depends on IPV6 || IPV6=n
 	help
 	  Tunneling means encapsulating data of one protocol type within
 	  another protocol and sending it over a channel that understands the
@@ -412,7 +413,7 @@ config INET_XFRM_MODE_BEET
 	  If unsure, say Y.
 
 config INET_LRO
-	bool "Large Receive Offload (ipv4/tcp)"
+	tristate "Large Receive Offload (ipv4/tcp)"
 	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fdcacd36ce..2a4bb76f213 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 	int mark = 0;
 
 
-	if (len == 8 || IGMP_V2_SEEN(in_dev)) {
+	if (len == 8) {
 		if (ih->code == 0) {
 			/* Alas, old v1 router presents here. */
 
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		igmpv3_clear_delrec(in_dev);
 	} else if (len < 12) {
 		return;	/* ignore bogus packet; freed by caller */
+	} else if (IGMP_V1_SEEN(in_dev)) {
+		/* This is a v3 query with v1 queriers present */
+		max_delay = IGMP_Query_Response_Interval;
+		group = 0;
+	} else if (IGMP_V2_SEEN(in_dev)) {
+		/* this is a v3 query with v2 queriers present;
+		 * Interpretation of the max_delay code is problematic here.
+		 * A real v2 host would use ih_code directly, while v3 has a
+		 * different encoding. We use the v3 encoding as more likely
+		 * to be intended in a v3 query.
+		 */
+		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
 	} else { /* v3 */
 		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
 			return;
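
(Aside, not part of the patch: the IGMP_V2_SEEN() branch above relies on IGMPV3_MRC() to decode the IGMPv3 Max Resp Code. Below is a small standalone C sketch of that mantissa/exponent decoding from RFC 3376, with a made-up helper name, for readers unfamiliar with the encoding.)

#include <stdio.h>
#include <stdint.h>

/* Decode an IGMPv3 Max Resp Code (RFC 3376, 4.1.1) into tenths of a second,
 * following the same mantissa/exponent scheme as the kernel's IGMPV3_MRC()
 * macro: values below 128 are literal, otherwise the byte is
 * 1 | exp(3 bits) | mant(4 bits) and decodes to (mant | 0x10) << (exp + 3). */
static unsigned int decode_mrc(uint8_t code)
{
	if (code < 128)
		return code;
	return ((code & 0x0f) | 0x10) << (((code >> 4) & 0x07) + 3);
}

int main(void)
{
	/* 100 -> 100 tenths (10.0 s, literal); 0x8f -> 248 tenths (24.8 s) */
	printf("code 100  -> %u tenths of a second\n", decode_mrc(100));
	printf("code 0x8f -> %u tenths of a second\n", decode_mrc(0x8f));
	return 0;
}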
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad5..35c93e8b6a4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		if ((dst = rt->rt_gateway) == 0)
 			goto tx_error_icmp;
 	}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
 		struct in6_addr *addr6;
 		int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			goto tx_error;
 		}
 	}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
 		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	if ((iph->ttl = tiph->ttl) == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		else if (skb->protocol == htons(ETH_P_IPV6))
 			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b69896df5..7649d775007 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	 * we can switch to copy when see the first bad fragment.
 	 */
 	if (skb_has_frags(skb)) {
-		struct sk_buff *frag;
+		struct sk_buff *frag, *frag2;
 		int first_len = skb_pagelen(skb);
-		int truesizes = 0;
 
 		if (first_len - hlen > mtu ||
 		    ((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
 			    skb_headroom(frag) < hlen)
-				goto slow_path;
+				goto slow_path_clean;
 
 			/* Partially cloned skb? */
 			if (skb_shared(frag))
-				goto slow_path;
+				goto slow_path_clean;
 
 			BUG_ON(frag->sk);
 			if (skb->sk) {
 				frag->sk = skb->sk;
 				frag->destructor = sock_wfree;
 			}
-			truesizes += frag->truesize;
+			skb->truesize -= frag->truesize;
 		}
 
 		/* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		frag = skb_shinfo(skb)->frag_list;
 		skb_frag_list_init(skb);
 		skb->data_len = first_len - skb_headlen(skb);
-		skb->truesize -= truesizes;
 		skb->len = first_len;
 		iph->tot_len = htons(first_len);
 		iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		}
 		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 		return err;
+
+slow_path_clean:
+		skb_walk_frags(skb, frag2) {
+			if (frag2 == frag)
+				break;
+			frag2->sk = NULL;
+			frag2->destructor = NULL;
+			skb->truesize += frag2->truesize;
+		}
 	}
 
 slow_path:
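
(Aside: the new slow_path_clean label rolls back the per-fragment socket and truesize accounting for exactly the fragments walked before the failing check. Below is a toy user-space sketch of that prefix-rollback pattern, with invented types that do not correspond to kernel structures.)

#include <stdio.h>

/* When setup of a linked list fails partway through, walk the list again
 * and undo only the nodes processed before the failing one. */
struct node {
	struct node *next;
	int claimed;		/* stands in for frag->sk / truesize changes */
};

static int setup_all(struct node *head)
{
	struct node *n, *n2;

	for (n = head; n != NULL; n = n->next) {
		if (n->claimed < 0)		/* simulated failure */
			goto clean;
		n->claimed = 1;
	}
	return 0;

clean:
	/* Roll back exactly the prefix that was already claimed. */
	for (n2 = head; n2 != n; n2 = n2->next)
		n2->claimed = 0;
	return -1;
}

int main(void)
{
	struct node c = { NULL, -1 }, b = { &c, 0 }, a = { &b, 0 };

	printf("setup_all: %d (a=%d b=%d c=%d)\n",
	       setup_all(&a), a.claimed, b.claimed, c.claimed);
	return 0;
}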
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254dafaf42..43eec80c0e7 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+	nskb->protocol = htons(ETH_P_IP);
 	if (ip_route_me_harder(nskb, addr_type))
 		goto free_nskb;
 
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de32f20..f3a9b42b16c 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 					  const struct net_device *out,
 					  int (*okfn)(struct sk_buff *))
 {
+	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(skb->sk);
 
-	if (inet && inet->nodefrag)
+	if (sk && (sk->sk_family == PF_INET) &&
+	    inet->nodefrag)
 		return NF_ACCEPT;
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c0963..ee5f419d0a5 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
 	unsigned char s[4];
 
 	if (offset & 1) {
-		s[0] = s[2] = 0;
+		s[0] = ~0;
 		s[1] = ~*optr;
+		s[2] = 0;
 		s[3] = *nptr;
 	} else {
-		s[1] = s[3] = 0;
 		s[0] = ~*optr;
+		s[1] = ~0;
 		s[2] = *nptr;
+		s[3] = 0;
 	}
 
 	*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
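
(Aside: the fast_csum() change above matters because the scratch bytes feed an RFC 1624-style incremental checksum update, where the untouched half of the 16-bit word has to contribute ~0 rather than 0 when the old byte is subtracted in one's-complement arithmetic. Below is a standalone user-space sketch of that update, with invented packet data, checked against a full recomputation.)

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement Internet checksum over a byte buffer. */
static uint16_t ip_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)(data[len - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	size_t offset = 3;		/* odd offset: low byte of a word */
	uint8_t newval = 0x99;

	uint16_t before = ip_checksum(pkt, sizeof(pkt));

	/* Incremental update (RFC 1624): add ~old and +new, aligned in the
	 * right half of a 16-bit word because the offset is odd.  The 0xff
	 * in the upper half is the same role s[0] = ~0 plays above. */
	uint32_t sum = (uint16_t)~before;
	sum += (uint32_t)((0xff << 8) | (uint8_t)~pkt[offset]);	/* ~old */
	sum += (uint32_t)newval;				/* +new */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	uint16_t incremental = (uint16_t)~sum;

	pkt[offset] = newval;
	uint16_t recomputed = ip_checksum(pkt, sizeof(pkt));

	printf("incremental=0x%04x recomputed=0x%04x\n", incremental, recomputed);
	return 0;
}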
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6298f75d5e9..ac6559cb54f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1231,7 +1231,7 @@ restart:
 		}
 
 		if (net_ratelimit())
-			printk(KERN_WARNING "Neighbour table overflow.\n");
+			printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
 		rt_drop(rt);
 		return -ENOBUFS;
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3fb1428e526..f115ea68a4e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 */
 
 	mask = 0;
-	if (sk->sk_err)
-		mask = POLLERR;
 
 	/*
 	 * POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		if (tp->urg_data & TCP_URG_VALID)
 			mask |= POLLPRI;
 	}
+	/* This barrier is coupled with smp_wmb() in tcp_reset() */
+	smp_rmb();
+	if (sk->sk_err)
+		mask |= POLLERR;
+
 	return mask;
 }
 EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	sg = sk->sk_route_caps & NETIF_F_SG;
 
 	while (--iovlen >= 0) {
-		int seglen = iov->iov_len;
+		size_t seglen = iov->iov_len;
 		unsigned char __user *from = iov->iov_base;
 
 		iov++;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78a2ef..b55f60f6fcb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 		cnt += tcp_skb_pcount(skb);
 
 		if (cnt > packets) {
-			if (tcp_is_sack(tp) || (oldcnt >= packets))
+			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+			    (oldcnt >= packets))
 				break;
 
 			mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk)
 	default:
 		sk->sk_err = ECONNRESET;
 	}
+	/* This barrier is coupled with smp_rmb() in tcp_poll() */
+	smp_wmb();
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_error_report(sk);
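
(Aside: the smp_wmb()/smp_rmb() pair added in tcp_reset() and tcp_poll() guarantees that a poller which observes the wakeup also observes sk->sk_err. Below is a rough user-space analogue of that publish/observe ordering using C11 fences; all names are invented for the sketch.)

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int error_code;		/* plays the role of sk->sk_err      */
static atomic_int ready;	/* plays the role of the poll wakeup */

static void *writer(void *arg)
{
	(void)arg;
	error_code = 104;				/* ECONNRESET-ish */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb()    */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb()    */
	printf("observed error_code=%d\n", error_code);	/* always 104     */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}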
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c35b469e851..74c54b30600 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
-				  unsigned int boundary)
+				  unsigned int boundary,
+				  bool syn_set)
 {
 	unsigned int timeout, linear_backoff_thresh;
 	unsigned int start_ts;
+	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
@@ -151,12 +154,12 @@ static bool retransmits_timed_out(struct sock *sk,
 	else
 		start_ts = tcp_sk(sk)->retrans_stamp;
 
-	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+	linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
 	if (boundary <= linear_backoff_thresh)
-		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+		timeout = ((2 << boundary) - 1) * rto_base;
 	else
-		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 
 	return (tcp_time_stamp - start_ts) >= timeout;
@@ -167,14 +170,15 @@ static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
-	bool do_reset;
+	bool do_reset, syn_set = 0;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+		syn_set = 1;
 	} else {
-		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -187,14 +191,14 @@ static int tcp_write_timeout(struct sock *sk)
 
 			retry_until = tcp_orphan_retries(sk, alive);
 			do_reset = alive ||
-				   !retransmits_timed_out(sk, retry_until);
+				   !retransmits_timed_out(sk, retry_until, 0);
 
 			if (tcp_out_of_resources(sk, do_reset))
 				return 1;
 		}
 	}
 
-	if (retransmits_timed_out(sk, retry_until)) {
+	if (retransmits_timed_out(sk, retry_until, syn_set)) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
@@ -436,7 +440,7 @@ out_reset_timer:
 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
 		__sk_dst_reset(sk);
 
 out:;
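
(Aside: the retransmits_timed_out() arithmetic above can be checked with a small user-space rework. The RTO constants below are assumptions matching kernels of this vintage, 200 ms minimum RTO, 120 s maximum RTO, 3 s initial SYN RTO; treat them as illustrative only.)

#include <stdio.h>

#define RTO_MIN_MS	200U
#define RTO_MAX_MS	120000U
#define RTO_INIT_MS	3000U

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the timeout formula: full exponential backoff from rto_base up to
 * the linear threshold, then one RTO_MAX per additional retransmission. */
static unsigned int backoff_timeout_ms(unsigned int boundary, int syn_set)
{
	unsigned int rto_base = syn_set ? RTO_INIT_MS : RTO_MIN_MS;
	unsigned int thresh = ilog2_u(RTO_MAX_MS / rto_base);

	if (boundary <= thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << thresh) - 1) * rto_base +
	       (boundary - thresh) * RTO_MAX_MS;
}

int main(void)
{
	/* data, boundary=3: 3000 ms; SYN, boundary=5: 189000 ms (3+6+12+24+48+96 s) */
	printf("data, boundary=3: %u ms\n", backoff_timeout_ms(3, 0));
	printf("SYN,  boundary=5: %u ms\n", backoff_timeout_ms(5, 1));
	return 0;
}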
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d4eeb..a580349f0b8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
 
 static int xfrm4_get_tos(struct flowi *fl)
 {
-	return fl->fl4_tos;
+	return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
 }
 
 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
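
(Aside: a tiny standalone check of what masking with IPTOS_RT_MASK does, using the mask definition from include/net/route.h of this era; two TOS bytes that differ only in their ECN codepoint collapse to the same routing TOS.)

#include <stdio.h>
#include <stdint.h>

#define IPTOS_TOS_MASK	0x1e
#define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)	/* clears the two ECN bits */

int main(void)
{
	uint8_t tos_ect = 0x12;		/* TOS 0x10 with ECT(0) set */
	uint8_t tos_ce  = 0x13;		/* TOS 0x10 with CE set     */

	printf("0x%02x -> 0x%02x, 0x%02x -> 0x%02x\n",
	       tos_ect, tos_ect & IPTOS_RT_MASK,
	       tos_ce,  tos_ce & IPTOS_RT_MASK);
	return 0;
}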
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366a0a0..47947624ecc 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
 }
 
 static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-		     struct xfrm_tmpl *tmpl,
-		     xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+{
+	sel->daddr.a4 = fl->fl4_dst;
+	sel->saddr.a4 = fl->fl4_src;
+	sel->dport = xfrm_flowi_dport(fl);
+	sel->dport_mask = htons(0xffff);
+	sel->sport = xfrm_flowi_sport(fl);
+	sel->sport_mask = htons(0xffff);
+	sel->family = AF_INET;
+	sel->prefixlen_d = 32;
+	sel->prefixlen_s = 32;
+	sel->proto = fl->proto;
+	sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+		   xfrm_address_t *daddr, xfrm_address_t *saddr)
 {
-	x->sel.daddr.a4 = fl->fl4_dst;
-	x->sel.saddr.a4 = fl->fl4_src;
-	x->sel.dport = xfrm_flowi_dport(fl);
-	x->sel.dport_mask = htons(0xffff);
-	x->sel.sport = xfrm_flowi_sport(fl);
-	x->sel.sport_mask = htons(0xffff);
-	x->sel.family = AF_INET;
-	x->sel.prefixlen_d = 32;
-	x->sel.prefixlen_s = 32;
-	x->sel.proto = fl->proto;
-	x->sel.ifindex = fl->oif;
 	x->id = tmpl->id;
 	if (x->id.daddr.a4 == 0)
 		x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
 	.owner			= THIS_MODULE,
 	.init_flags		= xfrm4_init_flags,
 	.init_tempsel		= __xfrm4_init_tempsel,
+	.init_temprop		= xfrm4_init_temprop,
 	.output			= xfrm4_output,
 	.extract_input		= xfrm4_extract_input,
 	.extract_output	= xfrm4_extract_output,