diff options
author | Eric Dumazet <edumazet@google.com> | 2012-05-31 21:47:50 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-06-01 14:22:11 -0400 |
commit | fff3269907897ee91406ece125795f53e722677e (patch) | |
tree | 6c22d2afaea6bc6fd2b34311db9c80b5418b94d1 /net/ipv4/tcp_ipv4.c | |
parent | 7433819a1eefd4e74711fffd6d54e30a644ef240 (diff) |
tcp: reflect SYN queue_mapping into SYNACK packets
While testing how Linux behaves under a SYN-flood attack on a multiqueue device
(ixgbe), I found that SYNACK messages were dropped at the Qdisc level
because we send them all on a single queue.
The obvious choice is to reflect the incoming SYN packet's queue_mapping into the
SYNACK packet.
Under stress, my machine could only send 25,000 SYNACKs per second (for
200,000 incoming SYNs per second). NIC: ixgbe with 16 rx/tx queues.
After the patch, not a single SYNACK is dropped.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 9 |
1 files changed, 6 insertions, 3 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87dfe800..c8d28c433b2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 