author	Eric Dumazet <edumazet@google.com>	2015-03-22 13:22:21 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 16:52:26 -0400
commit	b282705336e03fc7b9377a278939594870a40f96 (patch)
tree	fbb9b0bf127fb3910e65b6ff6566fc12396385e4 /net/ipv4
parent	8b929ab12fb2ab960adb3c3ec8d107fef5ff3243 (diff)
net: convert syn_wait_lock to a spinlock
This is low-hanging fruit, as we will get rid of syn_wait_lock
eventually. We hold syn_wait_lock for such short sections that it
makes no sense to use a read/write lock; a spinlock is simply faster.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
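(Editorial sketch, not part of the commit.) The effect the message describes
can be reproduced from userspace: the hypothetical benchmark below uses POSIX
pthread_rwlock_t and pthread_spinlock_t as stand-ins for the kernel's
rwlock_t and spinlock_t, and times the same one-statement critical section
under each lock. With sections this short, the rwlock's reader accounting
typically costs more than the spinlock's plain serialization. Build with
"cc -O2 -pthread lock_demo.c" (the file name is illustrative).

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define ITERS 10000000L

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
static pthread_spinlock_t sp;
static long counter;

/* Monotonic wall-clock time in seconds. */
static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
	double t0;
	long i;

	pthread_spin_init(&sp, PTHREAD_PROCESS_PRIVATE);

	/* Tiny critical section under a read/write lock. */
	t0 = now();
	for (i = 0; i < ITERS; i++) {
		pthread_rwlock_rdlock(&rw);
		counter++;
		pthread_rwlock_unlock(&rw);
	}
	printf("rwlock:   %.3fs\n", now() - t0);

	/* Same work under a plain spinlock. */
	t0 = now();
	for (i = 0; i < ITERS; i++) {
		pthread_spin_lock(&sp);
		counter++;
		pthread_spin_unlock(&sp);
	}
	printf("spinlock: %.3fs\n", now() - t0);

	return 0;
}

Note that the patch swaps in the _bh lock variants wherever the old code
used read_lock_bh()/write_lock_bh(), so bottom halves stay disabled across
the same sections as before; only the lock type changes.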
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/inet_connection_sock.c	8
-rw-r--r--	net/ipv4/inet_diag.c	4
-rw-r--r--	net/ipv4/tcp_ipv4.c	12
3 files changed, 12 insertions, 12 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 711ab143d4cb..79c0c9439fdc 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
 	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
 				  lopt->nr_table_entries);
 
-	write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
 	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
 			break;
 		}
 	}
-	write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return req;
 }
@@ -650,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
 	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
 	req->rsk_hash = hash;
 
-	write_lock(&queue->syn_wait_lock);
+	spin_lock(&queue->syn_wait_lock);
 	req->dl_next = lopt->syn_table[hash];
 	lopt->syn_table[hash] = req;
-	write_unlock(&queue->syn_wait_lock);
+	spin_unlock(&queue->syn_wait_lock);
 
 	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f984b2001d0a..76322c9867d5 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
 	entry.family = sk->sk_family;
 
-	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	lopt = icsk->icsk_accept_queue.listen_opt;
 	if (!lopt || !listen_sock_qlen(lopt))
@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 	}
 
 out:
-	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return err;
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5554b8f33d41..8028ad5920a4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1909,13 +1909,13 @@ get_req:
 		}
 		sk = sk_nulls_next(st->syn_wait_sk);
 		st->state = TCP_SEQ_STATE_LISTENING;
-		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	} else {
 		icsk = inet_csk(sk);
-		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
 			goto start_req;
-		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		sk = sk_nulls_next(sk);
 	}
 get_sk:
@@ -1927,7 +1927,7 @@ get_sk:
 			goto out;
 		}
 		icsk = inet_csk(sk);
-		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
 			st->uid = sock_i_uid(sk);
@@ -1936,7 +1936,7 @@ start_req:
 			st->sbucket = 0;
 			goto get_req;
 		}
-		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	}
 	spin_unlock_bh(&ilb->lock);
 	st->offset = 0;
@@ -2155,7 +2155,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_OPENREQ:
 		if (v) {
 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		}
 	case TCP_SEQ_STATE_LISTENING:
 		if (v != SEQ_START_TOKEN)