aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2012-05-31 21:47:50 -0400
committerDavid S. Miller <davem@davemloft.net>2012-06-01 14:22:11 -0400
commitfff3269907897ee91406ece125795f53e722677e (patch)
tree6c22d2afaea6bc6fd2b34311db9c80b5418b94d1
parent7433819a1eefd4e74711fffd6d54e30a644ef240 (diff)
tcp: reflect SYN queue_mapping into SYNACK packets
While testing how linux behaves on SYNFLOOD attack on multiqueue device (ixgbe), I found that SYNACK messages were dropped at Qdisc level because we send them all on a single queue.

Obvious choice is to reflect incoming SYN packet @queue_mapping to SYNACK packet.

Under stress, my machine could only send 25.000 SYNACK per second (for 200.000 incoming SYN per second). NIC : ixgbe with 16 rx/tx queues.

After patch, not a single SYNACK is dropped.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv6/tcp_ipv6.c9
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87dfe80..c8d28c433b2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			      struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 554d5999abc..3a9aec29581 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		fl6.daddr = treq->rmt_addr;
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 			      struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, req, rvp, 0);
+	return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ have_isn:
 	security_inet_conn_request(sk, skb, req);
 
 	if (tcp_v6_send_synack(sk, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 