diff options
author | Eric Dumazet <edumazet@google.com> | 2015-10-16 16:00:01 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-10-19 01:26:02 -0400 |
commit | dc6ef6be52154490c5c03f742e28bc781cc751b2 (patch) | |
tree | 8e297dd00c9cef73f67424b72a5669d6a97d13f7 | |
parent | 951b5d959f1da4bae8910085a2d8d6a3d374c72d (diff) |
tcp: do not set queue_mapping on SYNACK
At the time of commit fff326990789 ("tcp: reflect SYN queue_mapping into
SYNACK packets") we had few ways to cope with SYN floods.
We no longer need to reflect incoming skb queue mappings, and instead
can pick a TX queue based on the CPU cooking the SYNACK, with normal XPS
affinities.
Note that all SYNACK retransmits were picking TX queue 0; this is no longer
a win given that SYNACK retransmits are now distributed across all CPUs.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/net/tcp.h | 2 | ||||
-rw-r--r-- | net/ipv4/ip_output.c | 1 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 4 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 2 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 2 | ||||
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 2 |
6 files changed, 4 insertions, 9 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h index a6be56d5f0e3..eed94fc355c1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1716,7 +1716,7 @@ struct tcp_request_sock_ops { | |||
1716 | __u32 (*init_seq)(const struct sk_buff *skb); | 1716 | __u32 (*init_seq)(const struct sk_buff *skb); |
1717 | int (*send_synack)(const struct sock *sk, struct dst_entry *dst, | 1717 | int (*send_synack)(const struct sock *sk, struct dst_entry *dst, |
1718 | struct flowi *fl, struct request_sock *req, | 1718 | struct flowi *fl, struct request_sock *req, |
1719 | u16 queue_mapping, struct tcp_fastopen_cookie *foc, | 1719 | struct tcp_fastopen_cookie *foc, |
1720 | bool attach_req); | 1720 | bool attach_req); |
1721 | }; | 1721 | }; |
1722 | 1722 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 67404e1fe7d4..50e29737b584 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -1596,7 +1596,6 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, | |||
1596 | arg->csumoffset) = csum_fold(csum_add(nskb->csum, | 1596 | arg->csumoffset) = csum_fold(csum_add(nskb->csum, |
1597 | arg->csum)); | 1597 | arg->csum)); |
1598 | nskb->ip_summed = CHECKSUM_NONE; | 1598 | nskb->ip_summed = CHECKSUM_NONE; |
1599 | skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); | ||
1600 | ip_push_pending_frames(sk, &fl4); | 1599 | ip_push_pending_frames(sk, &fl4); |
1601 | } | 1600 | } |
1602 | out: | 1601 | out: |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3b35c3f4d268..944eaca69115 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -6236,7 +6236,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, | |||
6236 | } | 6236 | } |
6237 | if (fastopen_sk) { | 6237 | if (fastopen_sk) { |
6238 | af_ops->send_synack(fastopen_sk, dst, &fl, req, | 6238 | af_ops->send_synack(fastopen_sk, dst, &fl, req, |
6239 | skb_get_queue_mapping(skb), &foc, false); | 6239 | &foc, false); |
6240 | /* Add the child socket directly into the accept queue */ | 6240 | /* Add the child socket directly into the accept queue */ |
6241 | inet_csk_reqsk_queue_add(sk, req, fastopen_sk); | 6241 | inet_csk_reqsk_queue_add(sk, req, fastopen_sk); |
6242 | sk->sk_data_ready(sk); | 6242 | sk->sk_data_ready(sk); |
@@ -6247,7 +6247,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, | |||
6247 | if (!want_cookie) | 6247 | if (!want_cookie) |
6248 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | 6248 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
6249 | af_ops->send_synack(sk, dst, &fl, req, | 6249 | af_ops->send_synack(sk, dst, &fl, req, |
6250 | skb_get_queue_mapping(skb), &foc, !want_cookie); | 6250 | &foc, !want_cookie); |
6251 | if (want_cookie) | 6251 | if (want_cookie) |
6252 | goto drop_and_free; | 6252 | goto drop_and_free; |
6253 | } | 6253 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9c68cf3762c4..30dd45c1f568 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -821,7 +821,6 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, | |||
821 | static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, | 821 | static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, |
822 | struct flowi *fl, | 822 | struct flowi *fl, |
823 | struct request_sock *req, | 823 | struct request_sock *req, |
824 | u16 queue_mapping, | ||
825 | struct tcp_fastopen_cookie *foc, | 824 | struct tcp_fastopen_cookie *foc, |
826 | bool attach_req) | 825 | bool attach_req) |
827 | { | 826 | { |
@@ -839,7 +838,6 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
839 | if (skb) { | 838 | if (skb) { |
840 | __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); | 839 | __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); |
841 | 840 | ||
842 | skb_set_queue_mapping(skb, queue_mapping); | ||
843 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, | 841 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, |
844 | ireq->ir_rmt_addr, | 842 | ireq->ir_rmt_addr, |
845 | ireq->opt); | 843 | ireq->opt); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6e79fcb0addb..19adedb8c5cc 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -3518,7 +3518,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) | |||
3518 | int res; | 3518 | int res; |
3519 | 3519 | ||
3520 | tcp_rsk(req)->txhash = net_tx_rndhash(); | 3520 | tcp_rsk(req)->txhash = net_tx_rndhash(); |
3521 | res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true); | 3521 | res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true); |
3522 | if (!res) { | 3522 | if (!res) { |
3523 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | 3523 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); |
3524 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); | 3524 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index acb06f86f372..f495d189f5e0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -437,7 +437,6 @@ out: | |||
437 | static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | 437 | static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, |
438 | struct flowi *fl, | 438 | struct flowi *fl, |
439 | struct request_sock *req, | 439 | struct request_sock *req, |
440 | u16 queue_mapping, | ||
441 | struct tcp_fastopen_cookie *foc, | 440 | struct tcp_fastopen_cookie *foc, |
442 | bool attach_req) | 441 | bool attach_req) |
443 | { | 442 | { |
@@ -462,7 +461,6 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
462 | if (np->repflow && ireq->pktopts) | 461 | if (np->repflow && ireq->pktopts) |
463 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); | 462 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
464 | 463 | ||
465 | skb_set_queue_mapping(skb, queue_mapping); | ||
466 | err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); | 464 | err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); |
467 | err = net_xmit_eval(err); | 465 | err = net_xmit_eval(err); |
468 | } | 466 | } |