path: root/include/net/request_sock.h
author	Eric Dumazet <edumazet@google.com>	2015-03-22 13:22:21 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 16:52:26 -0400
commit	b282705336e03fc7b9377a278939594870a40f96 (patch)
tree	fbb9b0bf127fb3910e65b6ff6566fc12396385e4 /include/net/request_sock.h
parent	8b929ab12fb2ab960adb3c3ec8d107fef5ff3243 (diff)
net: convert syn_wait_lock to a spinlock
This is a low hanging fruit, as we'll get rid of syn_wait_lock eventually.

We hold syn_wait_lock for such small sections that it makes no sense to use a read/write lock. A spin lock is simply faster.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
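The conversion itself is mechanical, as the diff below shows: the field type changes from rwlock_t to spinlock_t and every write_lock()/write_unlock() pair becomes spin_lock()/spin_unlock(). A schematic kernel-style sketch of the pattern (the demo_queue/demo_touch names are hypothetical, for illustration only; not from the patch):

#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;		/* was: rwlock_t lock; */
};

static void demo_touch(struct demo_queue *q)
{
	spin_lock(&q->lock);		/* was: write_lock(&q->lock); */
	/* ...a handful of pointer updates... */
	spin_unlock(&q->lock);		/* was: write_unlock(&q->lock); */
}

An rwlock write acquire must account for potential concurrent readers, while a plain spinlock is a single atomic operation on most architectures; for critical sections only a few instructions long, that overhead is exactly the "simply faster" the changelog refers to.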
Diffstat (limited to 'include/net/request_sock.h')
-rw-r--r--	include/net/request_sock.h	11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 8603c350fad0..fe41f3ceb008 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -173,11 +173,6 @@ struct fastopen_queue {
  * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
  * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
 	struct request_sock	*rskq_accept_head;
@@ -192,7 +187,7 @@ struct request_sock_queue {
 	 */
 
 	/* temporary alignment, our goal is to get rid of this lock */
-	rwlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
+	spinlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -223,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
 	struct listen_sock *lopt = queue->listen_opt;
 	struct request_sock **prev;
 
-	write_lock(&queue->syn_wait_lock);
+	spin_lock(&queue->syn_wait_lock);
 
 	prev = &lopt->syn_table[req->rsk_hash];
 	while (*prev != req)
 		prev = &(*prev)->dl_next;
 	*prev = req->dl_next;
 
-	write_unlock(&queue->syn_wait_lock);
+	spin_unlock(&queue->syn_wait_lock);
 	if (del_timer(&req->rsk_timer))
 		reqsk_put(req);
 }
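A side note on the hash-chain walk in the last hunk: reqsk_queue_unlink() uses the pointer-to-pointer idiom, where prev always addresses the link (bucket head or a dl_next field) that currently points at the node under inspection, so removing the first entry of a chain needs no special case. (The del_timer()/reqsk_put() pair afterwards drops the reference held by the pending timer, but only when the timer was still armed.) A minimal standalone sketch of the same idiom (plain C with a hypothetical node type, not kernel code):

#include <assert.h>
#include <stddef.h>

struct node {
	int key;
	struct node *next;
};

/* Splice out the node with the given key.  "prev" always points at
 * the link that references the node being examined, exactly like the
 * syn_table walk above, so head removal is not a special case. */
static struct node *unlink_key(struct node **head, int key)
{
	struct node **prev = head;

	while (*prev && (*prev)->key != key)
		prev = &(*prev)->next;
	if (!*prev)
		return NULL;			/* key not present */

	struct node *victim = *prev;
	*prev = victim->next;			/* bypass the victim */
	victim->next = NULL;
	return victim;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	assert(unlink_key(&head, 1) == &a);	/* removing the head works too */
	assert(head == &b);
	assert(unlink_key(&head, 3) == &c);
	assert(head == &b && b.next == NULL);
	return 0;
}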