path: root/net/ipv4/inet_connection_sock.c
author	Eric Dumazet <edumazet@google.com>	2015-03-22 13:22:18 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 16:52:25 -0400
commit	2b41fab70fc001d2acd89c0477d32feb8265bb32 (patch)
tree	1597c834b5bc63933f5d3d1d62612319280e3fd0 /net/ipv4/inet_connection_sock.c
parent	c9231f8247f5676a8dedd66efc6d89af99518530 (diff)
inet: cache listen_sock_qlen() and read rskq_defer_accept once
Cache listen_sock_qlen() to limit false sharing, and read rskq_defer_accept
once as it might change under us.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
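The change follows a common kernel pattern: fields that other CPUs (or a concurrent sysctl write) may be modifying are snapshotted into locals once, and only the snapshots are used afterwards, so every check in the function sees one consistent value and the shared cache line is touched as little as possible. Below is a minimal, self-contained C sketch of that pattern, not the kernel code itself: the struct, field, and function names are hypothetical, and the READ_ONCE() macro here is only a simplified stand-in for the real one in linux/compiler.h.

/* Hypothetical sketch: read shared fields once, then reuse the local copies. */
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE() (linux/compiler.h). */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct fake_queue {
	int qlen;                   /* may be updated by other CPUs */
	unsigned char defer_accept; /* may be changed via sysctl at any time */
};

static void fake_timer_handler(struct fake_queue *q, int thresh, int max_retries)
{
	/* Snapshot each shared field exactly once. */
	int qlen = READ_ONCE(q->qlen);
	unsigned char defer_accept = READ_ONCE(q->defer_accept);

	/* The test and the later use both see the same defer_accept value. */
	if (defer_accept)
		max_retries = defer_accept;

	/* Every comparison below uses the same snapshot of qlen, so the
	 * decisions cannot contradict each other even if the real field
	 * changes while this function runs.
	 */
	if (qlen >= 32) {
		int young = 8;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	printf("thresh=%d max_retries=%d\n", thresh, max_retries);
}

int main(void)
{
	struct fake_queue q = { .qlen = 64, .defer_accept = 3 };

	fake_timer_handler(&q, 8, 5);
	return 0;
}

Without the local copy, the compiler may reload queue->rskq_defer_accept between the if () test and the syn_ack_recalc() call, so the branch taken and the value actually passed could disagree if the sysctl changes concurrently; caching listen_sock_qlen() likewise avoids re-reading a write-hot field on every iteration of the pruning loop.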
Diffstat (limited to 'net/ipv4/inet_connection_sock.c')
-rw-r--r--	net/ipv4/inet_connection_sock.c | 15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 844808d9337b..7d011e825c48 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -571,8 +571,9 @@ static void reqsk_timer_handler(unsigned long data)
 	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 	struct listen_sock *lopt = queue->listen_opt;
-	int expire = 0, resend = 0;
+	int qlen, expire = 0, resend = 0;
 	int max_retries, thresh;
+	u8 defer_accept;
 
 	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
 		reqsk_put(req);
@@ -598,19 +599,21 @@ static void reqsk_timer_handler(unsigned long data)
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
-	if (listen_sock_qlen(lopt) >> (lopt->max_qlen_log - 1)) {
+	qlen = listen_sock_qlen(lopt);
+	if (qlen >> (lopt->max_qlen_log - 1)) {
 		int young = listen_sock_young(lopt) << 1;
 
 		while (thresh > 2) {
-			if (listen_sock_qlen(lopt) < young)
+			if (qlen < young)
 				break;
 			thresh--;
 			young <<= 1;
 		}
 	}
-	if (queue->rskq_defer_accept)
-		max_retries = queue->rskq_defer_accept;
-	syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept,
+	defer_accept = READ_ONCE(queue->rskq_defer_accept);
+	if (defer_accept)
+		max_retries = defer_accept;
+	syn_ack_recalc(req, thresh, max_retries, defer_accept,
 		       &expire, &resend);
 	req->rsk_ops->syn_ack_timeout(sk_listener, req);
 	if (!expire &&