author	Eric Dumazet <edumazet@google.com>	2012-06-20 01:02:19 -0400
committer	David S. Miller <davem@davemloft.net>	2012-06-23 00:47:33 -0400
commit	7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a (patch)
tree	79fc35a3afa23896ab3e6e00b4d9d1178bfee1df /net/ipv4/tcp_ipv4.c
parent	24ea818e305b92ad1fadcca015ae3b0c1222c497 (diff)
ipv4: tcp: dont cache output dst for syncookies
Don't cache the output dst for syncookies, as this adds pressure on the IP route cache and the RCU subsystem for no gain.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
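The hunks below thread a new 'nocache' boolean from the SYN/ACK transmit path down to the route lookup, and tcp_v4_conn_request() passes want_cookie as that value, so routes resolved for syncookie SYN/ACKs are not retained. The callee side lives outside this file's diff; the fragment below is only a simplified, hypothetical sketch of how inet_csk_route_req() might consume the argument (the FLOWI_FLAG_RT_NOCACHE name and the exact body are assumptions for illustration, not taken from this patch):

/* Simplified, hypothetical sketch of the callee in
 * net/ipv4/inet_connection_sock.c: translate 'nocache' into a flow flag
 * so the routing code can skip inserting the resulting route into the
 * IP route cache. Flag name and body are assumed, not from this diff.
 */
struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req,
				     bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int flags = inet_sk_flowi_flags(sk);
	struct rtable *rt;

	if (nocache)
		flags |= FLOWI_FLAG_RT_NOCACHE;	/* assumed: do not cache this route */

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, flags,
			   ireq->rmt_addr, ireq->loc_addr,
			   ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
	if (IS_ERR(rt))
		return NULL;
	return &rt->dst;
}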
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 21e22a00481a..b52934f5334e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
 			      struct request_values *rvp,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      bool nocache)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	struct sk_buff * skb;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
 		return -1;
 
 	skb = tcp_make_synack(sk, dst, req, rvp);
@@ -855,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
 }
 
 /*
@@ -1388,7 +1389,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (tmp_opt.saw_tstamp &&
 	    tcp_death_row.sysctl_tw_recycle &&
-	    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
+	    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
 	    fl4.daddr == saddr &&
 	    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
 		inet_peer_refcheck(peer);
@@ -1424,7 +1425,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
 	if (tcp_v4_send_synack(sk, dst, req,
 			       (struct request_values *)&tmp_ext,
-			       skb_get_queue_mapping(skb)) ||
+			       skb_get_queue_mapping(skb),
+			       want_cookie) ||
 	    want_cookie)
 		goto drop_and_free;
 