Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c                  |  2
-rw-r--r--  net/ipv4/inet_connection_sock.c     |  3
-rw-r--r--  net/ipv4/inet_timewait_sock.c       |  1
-rw-r--r--  net/ipv4/ip_output.c                |  4
-rw-r--r--  net/ipv4/ip_sockglue.c              | 15
-rw-r--r--  net/ipv4/netfilter.c                |  3
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c  |  1
-rw-r--r--  net/ipv4/route.c                    | 20
-rw-r--r--  net/ipv4/syncookies.c               |  3
-rw-r--r--  net/ipv4/tcp.c                      | 12
-rw-r--r--  net/ipv4/tcp_input.c                |  6
-rw-r--r--  net/ipv4/tcp_ipv4.c                 | 14
-rw-r--r--  net/ipv4/tcp_output.c               |  4
-rw-r--r--  net/ipv4/udp.c                      | 69
14 files changed, 106 insertions, 51 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8a3ac1fa71a9..1fbff5fa4241 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -469,7 +469,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	 */
 	err = -EADDRNOTAVAIL;
 	if (!sysctl_ip_nonlocal_bind &&
-	    !inet->freebind &&
+	    !(inet->freebind || inet->transparent) &&
 	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
 	    chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST &&
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0c1ae68ee84b..21fcc5a9045f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -335,6 +335,7 @@ struct dst_entry* inet_csk_route_req(struct sock *sk,
 					.saddr = ireq->loc_addr,
 					.tos = RT_CONN_FLAGS(sk) } },
 			    .proto = sk->sk_protocol,
+			    .flags = inet_sk_flowi_flags(sk),
 			    .uli_u = { .ports =
 				       { .sport = inet_sk(sk)->sport,
 					 .dport = ireq->rmt_port } } };
@@ -515,6 +516,8 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
 		newicsk->icsk_bind_hash = NULL;

 		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
+		inet_sk(newsk)->num = ntohs(inet_rsk(req)->loc_port);
+		inet_sk(newsk)->sport = inet_rsk(req)->loc_port;
 		newsk->sk_write_space = sk_stream_write_space;

 		newicsk->icsk_retransmits = 0;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 743f011b9a84..1c5fd38f8824 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -126,6 +126,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 		tw->tw_reuse = sk->sk_reuse;
 		tw->tw_hash = sk->sk_hash;
 		tw->tw_ipv6only = 0;
+		tw->tw_transparent = inet->transparent;
 		tw->tw_prot = sk->sk_prot_creator;
 		twsk_net_set(tw, hold_net(sock_net(sk)));
 		atomic_set(&tw->tw_refcnt, 1);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d533a89e08de..d2a8f8bb78a6 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -340,6 +340,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
 					    .saddr = inet->saddr,
 					    .tos = RT_CONN_FLAGS(sk) } },
 			    .proto = sk->sk_protocol,
+			    .flags = inet_sk_flowi_flags(sk),
 			    .uli_u = { .ports =
 				       { .sport = inet->sport,
 					 .dport = inet->dport } } };
@@ -1371,7 +1372,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 				    .uli_u = { .ports =
 					       { .sport = tcp_hdr(skb)->dest,
 						 .dport = tcp_hdr(skb)->source } },
-				    .proto = sk->sk_protocol };
+				    .proto = sk->sk_protocol,
+				    .flags = ip_reply_arg_flowi_flags(arg) };
 		security_skb_classify_flow(skb, &fl);
 		if (ip_route_output_key(sock_net(sk), &rt, &fl))
 			return;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 105d92a039b9..465abf0a9869 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -419,7 +419,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 			     (1<<IP_TTL) | (1<<IP_HDRINCL) |
 			     (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
 			     (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
-			     (1<<IP_PASSSEC))) ||
+			     (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
 	    optname == IP_MULTICAST_TTL ||
 	    optname == IP_MULTICAST_LOOP) {
 		if (optlen >= sizeof(int)) {
@@ -878,6 +878,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		err = xfrm_user_policy(sk, optname, optval, optlen);
 		break;

+	case IP_TRANSPARENT:
+		if (!capable(CAP_NET_ADMIN)) {
+			err = -EPERM;
+			break;
+		}
+		if (optlen < 1)
+			goto e_inval;
+		inet->transparent = !!val;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -1130,6 +1140,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	case IP_FREEBIND:
 		val = inet->freebind;
 		break;
+	case IP_TRANSPARENT:
+		val = inet->transparent;
+		break;
 	default:
 		release_sock(sk);
 		return -ENOPROTOOPT;
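For reference, a minimal userspace sketch (not part of the patch) of enabling the IP_TRANSPARENT option added above. It assumes the usual Linux socket headers; the fallback define mirrors the value in <linux/in.h>, and CAP_NET_ADMIN is required, as the setsockopt handler above enforces.

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IP_TRANSPARENT
	#define IP_TRANSPARENT 19	/* value from <linux/in.h> */
	#endif

	int main(void)
	{
		int one = 1;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0 ||
		    setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one)) < 0) {
			perror("IP_TRANSPARENT");
			return 1;
		}
		/* The socket may now bind() to a non-local address
		 * (see the inet_bind() change in af_inet.c above). */
		return 0;
	}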
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f8edacdf991d..01671ad51ed3 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -20,6 +20,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	unsigned int type;

 	type = inet_addr_type(&init_net, iph->saddr);
+	if (skb->sk && inet_sk(skb->sk)->transparent)
+		type = RTN_LOCAL;
 	if (addr_type == RTN_UNSPEC)
 		addr_type = type;

@@ -33,6 +35,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
 		fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
 		fl.mark = skb->mark;
+		fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
 		if (ip_route_output_key(&init_net, &rt, &fl) != 0)
 			return -1;

diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 11976ea29884..112dcfa12900 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -16,6 +16,7 @@
 #include <linux/udp.h>
 #include <net/checksum.h>
 #include <net/tcp.h>
+#include <net/route.h>

 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f62187bb6d08..a6d7c584f53b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2361,11 +2361,6 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 		    ipv4_is_zeronet(oldflp->fl4_src))
 			goto out;

-		/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
-		dev_out = ip_dev_find(net, oldflp->fl4_src);
-		if (dev_out == NULL)
-			goto out;
-
 		/* I removed check for oif == dev_out->oif here.
 		   It was wrong for two reasons:
 		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
@@ -2377,6 +2372,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 		if (oldflp->oif == 0
 		    && (ipv4_is_multicast(oldflp->fl4_dst) ||
 			oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
+			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
+			dev_out = ip_dev_find(net, oldflp->fl4_src);
+			if (dev_out == NULL)
+				goto out;
+
 			/* Special hack: user can direct multicasts
 			   and limited broadcast via necessary interface
 			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
@@ -2395,9 +2395,15 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 			fl.oif = dev_out->ifindex;
 			goto make_route;
 		}
-		if (dev_out)
+
+		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
+			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
+			dev_out = ip_dev_find(net, oldflp->fl4_src);
+			if (dev_out == NULL)
+				goto out;
 			dev_put(dev_out);
-		dev_out = NULL;
+			dev_out = NULL;
+		}
 	}
 
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 9d38005abbac..d346c22aa6ae 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -16,6 +16,7 @@
 #include <linux/cryptohash.h>
 #include <linux/kernel.h>
 #include <net/tcp.h>
+#include <net/route.h>

 /* Timestamps: lowest 9 bits store TCP options */
 #define TSBITS 9
@@ -296,6 +297,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	req->mss = mss;
+	ireq->loc_port = th->dest;
 	ireq->rmt_port = th->source;
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
@@ -337,6 +339,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 					    .saddr = ireq->loc_addr,
 					    .tos = RT_CONN_FLAGS(sk) } },
 			    .proto = IPPROTO_TCP,
+			    .flags = inet_sk_flowi_flags(sk),
 			    .uli_u = { .ports =
 				       { .sport = th->dest,
 					 .dport = th->source } } };
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ab341e5d3e0..7d81a1ee5507 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -384,13 +384,17 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	/* Connected? */
 	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+		int target = sock_rcvlowat(sk, 0, INT_MAX);
+
+		if (tp->urg_seq == tp->copied_seq &&
+		    !sock_flag(sk, SOCK_URGINLINE) &&
+		    tp->urg_data)
+			target--;
+
 		/* Potential race condition. If read of tp below will
 		 * escape above sk->sk_state, we can be illegally awaken
 		 * in SYN_* states. */
-		if ((tp->rcv_nxt != tp->copied_seq) &&
-		    (tp->urg_seq != tp->copied_seq ||
-		     tp->rcv_nxt != tp->copied_seq + 1 ||
-		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
+		if (tp->rcv_nxt - tp->copied_seq >= target)
 			mask |= POLLIN | POLLRDNORM;
 
 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
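The tcp_poll() rewrite above makes POLLIN respect the receive low-water mark: readability is reported once at least sock_rcvlowat() bytes are queued, instead of on the first byte. A minimal userspace sketch (not part of the patch), using only standard socket calls:

	#include <sys/socket.h>

	/* 'fd' is a connected TCP socket.  After this call, poll()/select()
	 * report the socket readable only once at least 4096 bytes are
	 * queued for reading. */
	static int set_low_watermark(int fd)
	{
		int lowat = 4096;

		return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
				  &lowat, sizeof(lowat));
	}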
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85627f83665f..3b76bce769dd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1746,6 +1746,8 @@ int tcp_use_frto(struct sock *sk)
 		return 0;

 	skb = tcp_write_queue_head(sk);
+	if (tcp_skb_is_last(sk, skb))
+		return 1;
 	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -4156,7 +4158,7 @@ drop:
 			skb1 = skb1->prev;
 		}
 	}
-	__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
+	__skb_queue_after(&tp->out_of_order_queue, skb1, skb);

 	/* And clean segments covered by new one as whole. */
 	while ((skb1 = skb->next) !=
@@ -4254,7 +4256,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		memcpy(nskb->head, skb->head, header);
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-		__skb_insert(nskb, skb->prev, skb, list);
+		__skb_queue_before(list, skb, nskb);
 		skb_set_owner_r(nskb, sk);

 		/* Copy data, releasing collapsed skbs. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44aef1c1f373..8b24bd833cb4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -591,6 +591,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 				      ip_hdr(skb)->saddr, /* XXX */
 				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

 	net = dev_net(skb->dst->dev);
 	ip_send_reply(net->ipv4.tcp_sock, skb,
@@ -606,7 +607,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts, int oif,
-			    struct tcp_md5sig_key *key)
+			    struct tcp_md5sig_key *key,
+			    int reply_flags)
 {
 	struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -618,7 +620,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 			];
 	} rep;
 	struct ip_reply_arg arg;
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net(skb->dst->dev);

 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof(arg));
@@ -659,6 +661,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 				    ip_hdr(skb)->daddr, &rep.th);
 	}
 #endif
+	arg.flags = reply_flags;
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 				      ip_hdr(skb)->saddr, /* XXX */
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
@@ -681,7 +684,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
-			tcp_twsk_md5_key(tcptw)
+			tcp_twsk_md5_key(tcptw),
+			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
 			);

 	inet_twsk_put(tw);
@@ -694,7 +698,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent,
 			0,
-			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr));
+			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
+			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
 }

 /*
@@ -1244,6 +1249,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	ireq->loc_addr = daddr;
 	ireq->rmt_addr = saddr;
+	ireq->no_srccheck = inet_sk(sk)->transparent;
 	ireq->opt = tcp_v4_save_options(sk, skb);
 	if (!want_cookie)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c3d58ee3e16f..493553c71d32 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1932,8 +1932,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	/* Collapse two adjacent packets if worthwhile and we can. */
 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
 	    (skb->len < (cur_mss >> 1)) &&
-	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
 	    (!tcp_skb_is_last(sk, skb)) &&
+	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
 	    (skb_shinfo(skb)->nr_frags == 0 &&
 	     skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
 	    (tcp_skb_pcount(skb) == 1 &&
@@ -2275,7 +2275,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	th->syn = 1;
 	th->ack = 1;
 	TCP_ECN_make_synack(req, th);
-	th->source = inet_sk(sk)->sport;
+	th->source = ireq->loc_port;
 	th->dest = ireq->rmt_port;
 	/* Setting of flags are superfluous here for callers (and ECE is
 	 * not even correctly set)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8e42fbbd5761..c83d0ef469c9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -302,6 +302,13 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	return result;
 }

+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+			     __be32 daddr, __be16 dport, int dif)
+{
+	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash);
+}
+EXPORT_SYMBOL_GPL(udp4_lib_lookup);
+
 static inline struct sock *udp_v4_mcast_next(struct sock *sk,
 					     __be16 loc_port, __be32 loc_addr,
 					     __be16 rmt_port, __be32 rmt_addr,
@@ -951,6 +958,27 @@ int udp_disconnect(struct sock *sk, int flags)
 	return 0;
 }

+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int is_udplite = IS_UDPLITE(sk);
+	int rc;
+
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 is_udplite);
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	kfree_skb(skb);
+	return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -989,9 +1017,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 	    up->encap_rcv != NULL) {
 		int ret;

-		bh_unlock_sock(sk);
 		ret = (*up->encap_rcv)(sk, skb);
-		bh_lock_sock(sk);
 		if (ret <= 0) {
 			UDP_INC_STATS_BH(sock_net(sk),
 					 UDP_MIB_INDATAGRAMS,
@@ -1044,17 +1070,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 			goto drop;
 	}

-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					 UDP_MIB_RCVBUFERRORS, is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
-	}
+	rc = 0;

-	return 0;
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = __udp_queue_rcv_skb(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+
+	return rc;

 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1092,15 +1117,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		skb1 = skb_clone(skb, GFP_ATOMIC);

 		if (skb1) {
-			int ret = 0;
-
-			bh_lock_sock(sk);
-			if (!sock_owned_by_user(sk))
-				ret = udp_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
-			bh_unlock_sock(sk);
-
+			int ret = udp_queue_rcv_skb(sk, skb1);
 			if (ret > 0)
 				/* we should probably re-process instead
 				 * of dropping packets here. */
@@ -1195,13 +1212,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 					 uh->dest, inet_iif(skb), udptable);

 	if (sk != NULL) {
-		int ret = 0;
-		bh_lock_sock(sk);
-		if (!sock_owned_by_user(sk))
-			ret = udp_queue_rcv_skb(sk, skb);
-		else
-			sk_add_backlog(sk, skb);
-		bh_unlock_sock(sk);
+		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);

 		/* a return value > 0 means to resubmit the input, but
@@ -1494,7 +1505,7 @@ struct proto udp_prot = {
 	.sendmsg = udp_sendmsg,
 	.recvmsg = udp_recvmsg,
 	.sendpage = udp_sendpage,
-	.backlog_rcv = udp_queue_rcv_skb,
+	.backlog_rcv = __udp_queue_rcv_skb,
 	.hash = udp_lib_hash,
 	.unhash = udp_lib_unhash,
 	.get_port = udp_v4_get_port,
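The udp4_lib_lookup() wrapper exported above lets other kernel code resolve a UDP socket from an address/port tuple. A hedged sketch of a caller (not part of the patch), assuming the prototype is made visible through <net/udp.h>; the lookup returns a referenced socket, hence the sock_put(), mirroring the __udp4_lib_rcv() path above.

	#include <net/udp.h>
	#include <net/sock.h>

	/* Hypothetical helper: addresses and ports are in network byte
	 * order, 'dif' is the receiving interface index (0 for any). */
	static bool example_has_udp_socket(struct net *net,
					   __be32 saddr, __be16 sport,
					   __be32 daddr, __be16 dport, int dif)
	{
		struct sock *sk;

		sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, dif);
		if (!sk)
			return false;

		sock_put(sk);	/* the lookup took a reference */
		return true;
	}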