path: root/net/ipv4
author     David S. Miller <davem@davemloft.net>   2013-01-29 15:32:13 -0500
committer  David S. Miller <davem@davemloft.net>   2013-01-29 15:32:13 -0500
commit     f1e7b73acc26e8908af783bcd3a9900fd80688f5 (patch)
tree       9a9382fb7f12f1889020efb4bffa3f4a88589fc5 /net/ipv4
parent     218774dc341f219bfcf940304a081b121a0e8099 (diff)
parent     fc16e884a2320198b8cb7bc2fdcf6b4485e79709 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Bring in the 'net' tree so that we can get some ipv4/ipv6 bug fixes
that some net-next work will build upon.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/ah4.c        18
-rw-r--r--  net/ipv4/datagram.c   25
-rw-r--r--  net/ipv4/esp4.c       12
-rw-r--r--  net/ipv4/ip_gre.c      6
-rw-r--r--  net/ipv4/ipcomp.c      7
-rw-r--r--  net/ipv4/ping.c        1
-rw-r--r--  net/ipv4/raw.c         1
-rw-r--r--  net/ipv4/route.c      54
-rw-r--r--  net/ipv4/tcp_ipv4.c    9
-rw-r--r--  net/ipv4/udp.c         1
10 files changed, 117 insertions, 17 deletions
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a0d8392491c3..a69b4e4a02b5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
         skb->network_header += ah_hlen;
         memcpy(skb_network_header(skb), work_iph, ihl);
         __skb_pull(skb, ah_hlen + ihl);
-        skb_set_transport_header(skb, -ihl);
+
+        if (x->props.mode == XFRM_MODE_TUNNEL)
+                skb_reset_transport_header(skb);
+        else
+                skb_set_transport_header(skb, -ihl);
 out:
         kfree(AH_SKB_CB(skb)->tmp);
         xfrm_input_resume(skb, err);
@@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
         skb->network_header += ah_hlen;
         memcpy(skb_network_header(skb), work_iph, ihl);
         __skb_pull(skb, ah_hlen + ihl);
-        skb_set_transport_header(skb, -ihl);
+        if (x->props.mode == XFRM_MODE_TUNNEL)
+                skb_reset_transport_header(skb);
+        else
+                skb_set_transport_header(skb, -ihl);
 
         err = nexthdr;
 
@@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
         if (!x)
                 return;
 
-        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+                atomic_inc(&flow_cache_genid);
+                rt_genid_bump(net);
+
                 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
-        else
+        } else
                 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
         xfrm_state_put(x);
 }
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 424fafbc8cb0..b28e863fe0a7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -85,3 +85,28 @@ out:
         return err;
 }
 EXPORT_SYMBOL(ip4_datagram_connect);
+
+void ip4_datagram_release_cb(struct sock *sk)
+{
+        const struct inet_sock *inet = inet_sk(sk);
+        const struct ip_options_rcu *inet_opt;
+        __be32 daddr = inet->inet_daddr;
+        struct flowi4 fl4;
+        struct rtable *rt;
+
+        if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
+                return;
+
+        rcu_read_lock();
+        inet_opt = rcu_dereference(inet->inet_opt);
+        if (inet_opt && inet_opt->opt.srr)
+                daddr = inet_opt->opt.faddr;
+        rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
+                                   inet->inet_saddr, inet->inet_dport,
+                                   inet->inet_sport, sk->sk_protocol,
+                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+        if (!IS_ERR(rt))
+                __sk_dst_set(sk, &rt->dst);
+        rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
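
The new ip4_datagram_release_cb() is a socket release callback: when the core socket code drops the socket lock, it checks whether a cached route is still valid and, if not, re-resolves it and installs the fresh dst, so connected datagram sockets (ping, raw, UDP, wired up in the hunks below) stop sending over a stale route. A minimal userspace sketch of that check-then-refresh pattern follows; every name in it is invented for illustration and none of it is kernel code.

/*
 * Userspace sketch (not kernel code) of the pattern behind
 * ip4_datagram_release_cb(): a cached route carries a generation number,
 * and a callback run at "unlock" time re-resolves the route when the
 * global generation has moved on. All identifiers are made up.
 */
#include <stdio.h>

struct route { int id; int genid; };

static int global_genid;                         /* bumped when routing state changes */
static struct route route_table[] = { { 1, 0 }, { 2, 0 } };

struct fake_sock {
        struct route *dst_cache;                 /* may be stale */
};

/* Rough analogue of __sk_dst_check(): NULL means the cache cannot be trusted. */
static struct route *dst_check(struct fake_sock *sk)
{
        if (sk->dst_cache && sk->dst_cache->genid == global_genid)
                return sk->dst_cache;
        return NULL;
}

/* Rough analogue of ip4_datagram_release_cb(): refresh the cache if stale. */
static void release_cb(struct fake_sock *sk)
{
        if (!sk->dst_cache || dst_check(sk))
                return;                          /* nothing cached, or still valid */
        /* "Re-route": pick an entry stamped with the current generation. */
        route_table[1].genid = global_genid;
        sk->dst_cache = &route_table[1];
        printf("refreshed cached route to id=%d\n", sk->dst_cache->id);
}

int main(void)
{
        struct fake_sock sk = { .dst_cache = &route_table[0] };

        global_genid++;          /* e.g. a redirect invalidated all cached routes */
        release_cb(&sk);         /* would run when the socket lock is released */
        return 0;
}
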
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b61e9deb7c7e..3b4f0cd2e63e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 
         pskb_trim(skb, skb->len - alen - padlen - 2);
         __skb_pull(skb, hlen);
-        skb_set_transport_header(skb, -ihl);
+        if (x->props.mode == XFRM_MODE_TUNNEL)
+                skb_reset_transport_header(skb);
+        else
+                skb_set_transport_header(skb, -ihl);
 
         err = nexthdr[1];
 
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
         if (!x)
                 return;
 
-        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+                atomic_inc(&flow_cache_genid);
+                rt_genid_bump(net);
+
                 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
-        else
+        } else
                 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
         xfrm_state_put(x);
 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 801e02355ec4..00a14b9864ea 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -965,8 +965,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                         ptr--;
                 }
                 if (tunnel->parms.o_flags&GRE_CSUM) {
+                        int offset = skb_transport_offset(skb);
+
                         *ptr = 0;
-                        *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+                        *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+                                                                 skb->len - offset,
+                                                                 0));
                 }
         }
 
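
The GRE checksum is now computed with skb_checksum(), which walks the whole skb from the transport offset, including paged (non-linear) data, and the resulting 32-bit accumulator is folded to 16 bits with csum_fold(); the old ip_compute_csum() call only summed the flat buffer just past the IP header. Below is a small userspace sketch of the accumulate-then-fold arithmetic (RFC 1071 Internet checksum); the helper names and sample buffers are invented and this is not the kernel implementation.

/*
 * Userspace illustration (not kernel code) of the checksum-and-fold
 * semantics behind skb_checksum() + csum_fold(): accumulate a ones'-
 * complement sum in 32 bits across several buffers, then fold the
 * carries into 16 bits and complement the result.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t csum_accumulate(const uint8_t *buf, size_t len, uint32_t sum)
{
        /* Add 16-bit words; an odd trailing byte is padded with zero. */
        while (len > 1) {
                sum += (uint32_t)buf[0] << 8 | buf[1];
                buf += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)buf[0] << 8;
        return sum;
}

static uint16_t csum_fold16(uint32_t sum)
{
        /* Fold carries back in until the sum fits in 16 bits, then invert. */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* Pretend these are the linear part and a paged fragment of one
         * payload; summing both is what a full-skb walk provides, and what
         * a checksum over only the linear buffer would miss. */
        const uint8_t linear[] = { 0x45, 0x00, 0x00, 0x3c };
        const uint8_t frag[]   = { 0x1c, 0x46, 0x40, 0x00 };

        uint32_t sum = csum_accumulate(linear, sizeof(linear), 0);
        sum = csum_accumulate(frag, sizeof(frag), sum);
        printf("folded checksum: 0x%04x\n", csum_fold16(sum));
        return 0;
}
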
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d3ab47e19a89..9a46daed2f3c 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
         if (!x)
                 return;
 
-        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+                atomic_inc(&flow_cache_genid);
+                rt_genid_bump(net);
+
                 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
-        else
+        } else
                 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
         xfrm_state_put(x);
 }
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8f3d05424a3e..6f9c07268cf6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -738,6 +738,7 @@ struct proto ping_prot = {
         .recvmsg = ping_recvmsg,
         .bind = ping_bind,
         .backlog_rcv = ping_queue_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = ping_v4_hash,
         .unhash = ping_v4_unhash,
         .get_port = ping_v4_get_port,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 73d1e4df4bf6..6f08991409c3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -894,6 +894,7 @@ struct proto raw_prot = {
         .recvmsg = raw_recvmsg,
         .bind = raw_bind,
         .backlog_rcv = raw_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = raw_hash_sk,
         .unhash = raw_unhash_sk,
         .obj_size = sizeof(struct raw_sock),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 844a9ef60dbd..a0fcc47fee73 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
         struct dst_entry *dst = &rt->dst;
         struct fib_result res;
 
+        if (dst_metric_locked(dst, RTAX_MTU))
+                return;
+
         if (dst->dev->mtu < mtu)
                 return;
 
@@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 }
 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
 
-void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 {
         const struct iphdr *iph = (const struct iphdr *) skb->data;
         struct flowi4 fl4;
@@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
                 ip_rt_put(rt);
         }
 }
+
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+{
+        const struct iphdr *iph = (const struct iphdr *) skb->data;
+        struct flowi4 fl4;
+        struct rtable *rt;
+        struct dst_entry *dst;
+        bool new = false;
+
+        bh_lock_sock(sk);
+        rt = (struct rtable *) __sk_dst_get(sk);
+
+        if (sock_owned_by_user(sk) || !rt) {
+                __ipv4_sk_update_pmtu(skb, sk, mtu);
+                goto out;
+        }
+
+        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+        if (!__sk_dst_check(sk, 0)) {
+                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+                if (IS_ERR(rt))
+                        goto out;
+
+                new = true;
+        }
+
+        __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+
+        dst = dst_check(&rt->dst, 0);
+        if (!dst) {
+                if (new)
+                        dst_release(&rt->dst);
+
+                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+                if (IS_ERR(rt))
+                        goto out;
+
+                new = true;
+        }
+
+        if (new)
+                __sk_dst_set(sk, &rt->dst);
+
+out:
+        bh_unlock_sock(sk);
+}
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
 void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
         if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                 mtu = dst_metric_raw(dst, RTAX_MTU);
 
-        if (mtu && rt_is_output_route(rt))
+        if (mtu)
                 return mtu;
 
         mtu = dst->dev->mtu;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bbbdcc5c1973..5a1cfc692df0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
          * We do take care of PMTU discovery (RFC1191) special case :
          * we can receive locally generated ICMP messages while socket is held.
          */
-        if (sock_owned_by_user(sk) &&
-            type != ICMP_DEST_UNREACH &&
-            code != ICMP_FRAG_NEEDED)
-                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
-
+        if (sock_owned_by_user(sk)) {
+                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+                        NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+        }
 
         if (sk->sk_state == TCP_CLOSE)
                 goto out;
 
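
The old compound test only bumped LINUX_MIB_LOCKDROPPEDICMPS when the type differed from ICMP_DEST_UNREACH and the code also differed from ICMP_FRAG_NEEDED, so any message that shared either field with the PMTU case escaped the counter; the rewritten nested test exempts only the exact ICMP_DEST_UNREACH plus ICMP_FRAG_NEEDED combination. The standalone comparison below contrasts the two predicates; it is a userspace illustration, not kernel code, and the sample type/code pairs are chosen only to show the difference.

/*
 * Standalone illustration (not kernel code) of the predicate change in
 * tcp_v4_err(). The constants mirror the standard ICMP values
 * (ICMP_DEST_UNREACH = 3, ICMP_FRAG_NEEDED = 4).
 */
#include <stdio.h>
#include <stdbool.h>

#define ICMP_DEST_UNREACH 3
#define ICMP_FRAG_NEEDED  4

/* Old logic: count only if type != DEST_UNREACH && code != FRAG_NEEDED. */
static bool old_counts(int type, int code)
{
        return type != ICMP_DEST_UNREACH && code != ICMP_FRAG_NEEDED;
}

/* New logic: count unless it is exactly DEST_UNREACH + FRAG_NEEDED. */
static bool new_counts(int type, int code)
{
        return !(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED);
}

int main(void)
{
        const int samples[][2] = {
                { 3, 4 },   /* DEST_UNREACH + FRAG_NEEDED: the only intended exemption */
                { 3, 1 },   /* DEST_UNREACH, host unreachable: old logic missed it     */
                { 11, 4 },  /* unrelated type whose code happens to be 4               */
                { 4, 0 },   /* source quench                                           */
        };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("type=%2d code=%d  old=%d new=%d\n",
                       samples[i][0], samples[i][1],
                       old_counts(samples[i][0], samples[i][1]),
                       new_counts(samples[i][0], samples[i][1]));
        return 0;
}
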
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e0610e4b5158..6791aac06ea9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1977,6 +1977,7 @@ struct proto udp_prot = {
         .recvmsg = udp_recvmsg,
         .sendpage = udp_sendpage,
         .backlog_rcv = __udp_queue_rcv_skb,
+        .release_cb = ip4_datagram_release_cb,
         .hash = udp_lib_hash,
         .unhash = udp_lib_unhash,
         .rehash = udp_v4_rehash,