author     Eric Dumazet <edumazet@google.com>        2012-06-20 01:02:19 -0400
committer  David S. Miller <davem@davemloft.net>     2012-06-23 00:47:33 -0400
commit     7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a
tree       79fc35a3afa23896ab3e6e00b4d9d1178bfee1df /net/ipv4
parent     24ea818e305b92ad1fadcca015ae3b0c1222c497
ipv4: tcp: dont cache output dst for syncookies
Don't cache output dst for syncookies, as this adds pressure on IP route
cache and rcu subsystem for no gain.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
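
In short, the SYN-cookie decision already made in tcp_v4_conn_request() (want_cookie) is threaded down to the route lookup, so the route built for a cookie SYNACK is never inserted into the IPv4 route cache. A rough sketch of the resulting call chain, pieced together from the hunks below (surrounding code trimmed, so this is approximate):

        /* tcp_v4_conn_request(): want_cookie is true when the SYN is answered
         * with a SYN cookie instead of keeping state for the request.
         */
        tcp_v4_send_synack(sk, dst, req, rvp, queue_mapping, want_cookie);
                /* -> inet_csk_route_req(sk, &fl4, req, nocache)
                 *      nocache sets FLOWI_FLAG_RT_NOCACHE on the flow
                 * -> __mkroute_output() copies that into rt->dst.flags as DST_NOCACHE
                 * -> rt_intern_hash() sees DST_NOCACHE and returns the route
                 *    to the caller without adding it to the route cache
                 */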
Diffstat (limited to 'net/ipv4')
 net/ipv4/inet_connection_sock.c |  8 ++++++--
 net/ipv4/route.c                |  5 ++++-
 net/ipv4/tcp_ipv4.c             | 12 +++++++-----
 3 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f9ee7417f6a0..034ddbe42adf 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -368,17 +368,21 @@ EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 
 struct dst_entry *inet_csk_route_req(struct sock *sk,
                                      struct flowi4 *fl4,
-                                     const struct request_sock *req)
+                                     const struct request_sock *req,
+                                     bool nocache)
 {
         struct rtable *rt;
         const struct inet_request_sock *ireq = inet_rsk(req);
         struct ip_options_rcu *opt = inet_rsk(req)->opt;
         struct net *net = sock_net(sk);
+        int flags = inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS;
 
+        if (nocache)
+                flags |= FLOWI_FLAG_RT_NOCACHE;
         flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                            RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                            sk->sk_protocol,
-                           inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
+                           flags,
                            (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                            ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
         security_req_classify_flow(req, flowi4_to_flowi(fl4));
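
Note that inet_csk_route_req() grows a fourth parameter, so every existing caller has to be updated; this diffstat is limited to net/ipv4, so callers elsewhere in the tree (DCCP also uses this helper) are not shown here. A caller that wants the old behaviour would presumably just pass false, along these lines (hypothetical call site, not part of this patch):

        /* Passing false keeps the pre-patch behaviour: the resulting route
         * is still eligible for insertion into the route cache.
         */
        dst = inet_csk_route_req(sk, &fl4, req, false);
        if (dst == NULL)
                return -1;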
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a91f6d33804c..8d62d85e68dc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1156,7 +1156,7 @@ restart:
         candp = NULL;
         now = jiffies;
 
-        if (!rt_caching(dev_net(rt->dst.dev))) {
+        if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
                 /*
                  * If we're not caching, just tell the caller we
                  * were successful and don't touch the route. The
@@ -2582,6 +2582,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 
         rt_set_nexthop(rth, fl4, res, fi, type, 0);
 
+        if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
+                rth->dst.flags |= DST_NOCACHE;
+
         return rth;
 }
 
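
The two route.c hunks wire the flow flag into the routing cache: __mkroute_output() turns FLOWI_FLAG_RT_NOCACHE on the flow into DST_NOCACHE on the freshly built route, and rt_intern_hash() then takes the same early-return path it already uses when caching is globally disabled, handing the route back without inserting it into the hash table. A condensed view of that check (only the condition is new; the body of the early-return branch is untouched by this patch and trimmed here):

        /* rt_intern_hash(): a DST_NOCACHE route is handed straight back to
         * the caller, which holds the sole reference and releases it when
         * done, so the route never occupies a cache slot and never needs an
         * RCU-deferred removal later.
         */
        if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
                /* ... existing "not caching" fast path, unchanged ... */
        }

The SYNACK is still sent over a fully valid route; the route simply dies with its last reference instead of lingering in the cache for a connection that may never complete.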
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 21e22a00481a..b52934f5334e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                               struct request_sock *req,
                               struct request_values *rvp,
-                              u16 queue_mapping)
+                              u16 queue_mapping,
+                              bool nocache)
 {
         const struct inet_request_sock *ireq = inet_rsk(req);
         struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
         struct sk_buff * skb;
 
         /* First, grab a route. */
-        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
                 return -1;
 
         skb = tcp_make_synack(sk, dst, req, rvp);
@@ -855,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-        return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+        return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
 }
 
 /*
@@ -1388,7 +1389,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
          */
         if (tmp_opt.saw_tstamp &&
             tcp_death_row.sysctl_tw_recycle &&
-            (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
+            (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
             fl4.daddr == saddr &&
             (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
                 inet_peer_refcheck(peer);
@@ -1424,7 +1425,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
         if (tcp_v4_send_synack(sk, dst, req,
                                (struct request_values *)&tmp_ext,
-                               skb_get_queue_mapping(skb)) ||
+                               skb_get_queue_mapping(skb),
+                               want_cookie) ||
             want_cookie)
                 goto drop_and_free;
 
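
On the TCP side the change is pure plumbing: tcp_v4_send_synack() forwards its new nocache argument to inet_csk_route_req(), the retransmit path tcp_v4_rtx_synack() passes false because a retransmitted SYNACK belongs to a request the stack is keeping state for, and tcp_v4_conn_request() reuses want_cookie for both decisions. The final call site, annotated (a trimmed reading of the last hunk above):

        /* want_cookie doubles as the "do not cache the route" hint: when a
         * SYN cookie is sent, req is freed right after the SYNACK goes out
         * (the trailing "|| want_cookie" test below), so a cached route for
         * it would never be reused anyway.
         */
        if (tcp_v4_send_synack(sk, dst, req,
                               (struct request_values *)&tmp_ext,
                               skb_get_queue_mapping(skb),
                               want_cookie) ||
            want_cookie)
                goto drop_and_free;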