-rw-r--r--  include/net/request_sock.h        | 11 +++--------
-rw-r--r--  net/core/request_sock.c           | 14 +++++++-------
-rw-r--r--  net/ipv4/inet_connection_sock.c   |  8 ++++----
-rw-r--r--  net/ipv4/inet_diag.c              |  4 ++--
-rw-r--r--  net/ipv4/tcp_ipv4.c               | 12 ++++++------
-rw-r--r--  net/ipv6/inet6_connection_sock.c  |  4 ++--
6 files changed, 24 insertions(+), 29 deletions(-)
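At a glance, the patch converts the listener SYN-queue lock syn_wait_lock from an rwlock_t to a spinlock_t and rewrites every call site to the matching spin_lock API. A minimal sketch of the conversion follows; the struct is abridged to the one field that changes, and everything else in request_sock_queue is elided here.

struct request_sock_queue {
        struct request_sock     *rskq_accept_head;
        /* ... other fields elided in this sketch ... */

        /* was: rwlock_t syn_wait_lock ____cacheline_aligned_in_smp; */
        spinlock_t              syn_wait_lock ____cacheline_aligned_in_smp;
};

/* Call-site mapping, as applied throughout the patch:
 *   rwlock_init()                        -> spin_lock_init()
 *   write_lock() / write_unlock()        -> spin_lock() / spin_unlock()
 *   read_lock_bh() / write_lock_bh()     -> spin_lock_bh()
 *   read_unlock_bh() / write_unlock_bh() -> spin_unlock_bh()
 */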
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 8603c350fad0..fe41f3ceb008 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -173,11 +173,6 @@ struct fastopen_queue {
  * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
  * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
         struct request_sock     *rskq_accept_head;
@@ -192,7 +187,7 @@ struct request_sock_queue {
          */
 
         /* temporary alignment, our goal is to get rid of this lock */
-        rwlock_t                syn_wait_lock ____cacheline_aligned_in_smp;
+        spinlock_t              syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -223,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
         struct listen_sock *lopt = queue->listen_opt;
         struct request_sock **prev;
 
-        write_lock(&queue->syn_wait_lock);
+        spin_lock(&queue->syn_wait_lock);
 
         prev = &lopt->syn_table[req->rsk_hash];
         while (*prev != req)
                 prev = &(*prev)->dl_next;
         *prev = req->dl_next;
 
-        write_unlock(&queue->syn_wait_lock);
+        spin_unlock(&queue->syn_wait_lock);
         if (del_timer(&req->rsk_timer))
                 reqsk_put(req);
 }
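For orientation, here is reqsk_queue_unlink() as it reads once the hunk above is applied, with descriptive comments added. Every path that touches the SYN hash takes the lock exclusively, so converting the former write_lock()/write_unlock() pair to a plain spinlock loses nothing.

static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
                                      struct request_sock *req)
{
        struct listen_sock *lopt = queue->listen_opt;
        struct request_sock **prev;

        spin_lock(&queue->syn_wait_lock);

        /* Walk the hash chain until we find the pointer that links req in,
         * then splice req out of the singly linked list.
         */
        prev = &lopt->syn_table[req->rsk_hash];
        while (*prev != req)
                prev = &(*prev)->dl_next;
        *prev = req->dl_next;

        spin_unlock(&queue->syn_wait_lock);

        /* If the request timer was still pending, drop its reference. */
        if (del_timer(&req->rsk_timer))
                reqsk_put(req);
}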
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index cdc0ddd9ac9f..87b22c0bc08c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                 return -ENOMEM;
 
         get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-        rwlock_init(&queue->syn_wait_lock);
+        spin_lock_init(&queue->syn_wait_lock);
         queue->rskq_accept_head = NULL;
         lopt->nr_table_entries = nr_table_entries;
         lopt->max_qlen_log = ilog2(nr_table_entries);
 
-        write_lock_bh(&queue->syn_wait_lock);
+        spin_lock_bh(&queue->syn_wait_lock);
         queue->listen_opt = lopt;
-        write_unlock_bh(&queue->syn_wait_lock);
+        spin_unlock_bh(&queue->syn_wait_lock);
 
         return 0;
 }
@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
 {
         struct listen_sock *lopt;
 
-        write_lock_bh(&queue->syn_wait_lock);
+        spin_lock_bh(&queue->syn_wait_lock);
         lopt = queue->listen_opt;
         queue->listen_opt = NULL;
-        write_unlock_bh(&queue->syn_wait_lock);
+        spin_unlock_bh(&queue->syn_wait_lock);
 
         return lopt;
 }
@@ -100,7 +100,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                 for (i = 0; i < lopt->nr_table_entries; i++) {
                         struct request_sock *req;
 
-                        write_lock_bh(&queue->syn_wait_lock);
+                        spin_lock_bh(&queue->syn_wait_lock);
                         while ((req = lopt->syn_table[i]) != NULL) {
                                 lopt->syn_table[i] = req->dl_next;
                                 atomic_inc(&lopt->qlen_dec);
@@ -108,7 +108,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                                         reqsk_put(req);
                                 reqsk_put(req);
                         }
-                        write_unlock_bh(&queue->syn_wait_lock);
+                        spin_unlock_bh(&queue->syn_wait_lock);
                 }
         }
 
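A note on the _bh variants used in net/core/request_sock.c: these functions run in process context (listen() setup and socket teardown), while the SYN-processing paths further down take the same lock from softirq context with plain spin_lock(). Taking the lock with bottom halves disabled here is what keeps that softirq path from deadlocking against the holder on the local CPU. As an illustration, reqsk_queue_yank_listen_sk() as it reads after the hunk above, with comments added:

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
                struct request_sock_queue *queue)
{
        struct listen_sock *lopt;

        /* Detach listen_opt under the lock; the _bh variant keeps the
         * softirq SYN-processing path from running on this CPU while we
         * hold syn_wait_lock.
         */
        spin_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
        spin_unlock_bh(&queue->syn_wait_lock);

        return lopt;
}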
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 711ab143d4cb..79c0c9439fdc 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
         u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                   lopt->nr_table_entries);
 
-        write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
         for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                 const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
                         break;
                 }
         }
-        write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
         return req;
 }
@@ -650,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
         setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
         req->rsk_hash = hash;
 
-        write_lock(&queue->syn_wait_lock);
+        spin_lock(&queue->syn_wait_lock);
         req->dl_next = lopt->syn_table[hash];
         lopt->syn_table[hash] = req;
-        write_unlock(&queue->syn_wait_lock);
+        spin_unlock(&queue->syn_wait_lock);
 
         mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
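Likewise, the tail of reqsk_queue_hash_req() after the hunk above, with comments added. This runs from the SYN-processing (softirq) path, which is why the plain spin_lock() variant suffices here:

        /* Per-request timer; its handler retransmits the SYN-ACK and
         * eventually expires the embryonic connection.
         */
        setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
        req->rsk_hash = hash;

        /* Link the request at the head of its SYN-hash bucket. */
        spin_lock(&queue->syn_wait_lock);
        req->dl_next = lopt->syn_table[hash];
        lopt->syn_table[hash] = req;
        spin_unlock(&queue->syn_wait_lock);

        /* Pin the timer to this CPU and arm it 'timeout' jiffies from now. */
        mod_timer_pinned(&req->rsk_timer, jiffies + timeout);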
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f984b2001d0a..76322c9867d5 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
         entry.family = sk->sk_family;
 
-        read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
         lopt = icsk->icsk_accept_queue.listen_opt;
         if (!lopt || !listen_sock_qlen(lopt))
@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
         }
 
 out:
-        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
         return err;
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5554b8f33d41..8028ad5920a4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1909,13 +1909,13 @@ get_req:
                 }
                 sk = sk_nulls_next(st->syn_wait_sk);
                 st->state = TCP_SEQ_STATE_LISTENING;
-                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
         } else {
                 icsk = inet_csk(sk);
-                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 if (reqsk_queue_len(&icsk->icsk_accept_queue))
                         goto start_req;
-                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 sk = sk_nulls_next(sk);
         }
 get_sk:
@@ -1927,7 +1927,7 @@ get_sk:
                         goto out;
                 }
                 icsk = inet_csk(sk);
-                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                         st->uid = sock_i_uid(sk);
@@ -1936,7 +1936,7 @@ start_req:
                         st->sbucket = 0;
                         goto get_req;
                 }
-                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
         }
         spin_unlock_bh(&ilb->lock);
         st->offset = 0;
@@ -2155,7 +2155,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
         case TCP_SEQ_STATE_OPENREQ:
                 if (v) {
                         struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                        spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                 }
         case TCP_SEQ_STATE_LISTENING:
                 if (v != SEQ_START_TOKEN)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 2f3bbe569e8f..6927f3fb5597 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -124,7 +124,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
         u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
                                    lopt->nr_table_entries);
 
-        write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
         for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                 const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -138,7 +138,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
                         break;
                 }
         }
-        write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+        spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
         return req;
 }