author		Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-06-19 01:47:59 -0400
committer	David S. Miller <davem@davemloft.net>			2005-06-19 01:47:59 -0400
commit		0e87506fcc734647c7b2497eee4eb81e785c857a
tree		bb8863c59fdef2628f17b6773c52801792a57722 /include
parent		60236fdd08b2169045a3bbfc5ffe1576e6c3c17b
[NET] Generalise tcp_listen_opt
This chunks out the accept_queue and tcp_listen_opt code and moves it
to net/core/request_sock.c and include/net/request_sock.h, making it
usable by other transport protocols, DCCP being the first one to use it.

Follow-up patches will rename tcp_listen_opt to accept_sock and remove
the inline tcp functions that just call a reqsk_queue_ function.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
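
For orientation, the sketch below shows roughly how a transport protocol could
drive the generalised queue. It is illustrative only: the proto_* wrappers are
hypothetical placeholders, and only the reqsk_queue_* helpers come from the
header added by this patch.

/*
 * Illustrative only: a hypothetical protocol embedding the new
 * struct request_sock_queue.  The proto_* names are placeholders;
 * the reqsk_queue_* helpers are those added by this patch in
 * include/net/request_sock.h.
 */
#include <net/request_sock.h>

/* listen(): set up the per-listener SYN/request state (the allocation
 * itself lives in net/core/request_sock.c, outside this diffstat). */
static int proto_listen_start(struct request_sock_queue *queue, int nr_entries)
{
	return reqsk_queue_alloc(queue, nr_entries);
}

/* Handshake completed: append the request to the accept FIFO and bump
 * the parent's accept backlog via sk_acceptq_added(). */
static void proto_child_ready(struct request_sock_queue *queue,
			      struct request_sock *req,
			      struct sock *parent, struct sock *child)
{
	reqsk_queue_add(queue, req, parent, child);
}

/* accept(): pop the oldest established child; reqsk_queue_get_child()
 * frees the request_sock and decrements the parent's backlog. */
static struct sock *proto_accept_dequeue(struct request_sock_queue *queue,
					 struct sock *parent)
{
	return reqsk_queue_empty(queue) ? NULL :
	       reqsk_queue_get_child(queue, parent);
}

/* close(): detach and free the listen state under syn_wait_lock; any
 * remaining accept-FIFO entries can then be reaped with
 * reqsk_queue_yank_acceptq() and reqsk_free(). */
static void proto_listen_stop(struct request_sock_queue *queue)
{
	reqsk_queue_destroy(queue);
}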
Diffstat (limited to 'include')
-rw-r--r--	include/linux/tcp.h		|  17
-rw-r--r--	include/net/request_sock.h	| 178
-rw-r--r--	include/net/tcp.h		|  46
3 files changed, 187 insertions, 54 deletions
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fb54292a15aa..97a7c9e03df5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -379,22 +379,7 @@ struct tcp_sock {
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	/* The syn_wait_lock is necessary only to avoid proc interface having
-	 * to grab the main lock sock while browsing the listening hash
-	 * (otherwise it's deadlock prone).
-	 * This lock is acquired in read mode only from listening_get_next()
-	 * and it's acquired in write mode _only_ from code that is actively
-	 * changing the syn_wait_queue. All readers that are holding
-	 * the master sock lock don't need to grab this lock in read mode
-	 * too as the syn_wait_queue writes are always protected from
-	 * the main sock lock.
-	 */
-	rwlock_t		syn_wait_lock;
-	struct tcp_listen_opt	*listen_opt;
-
-	/* FIFO of established children */
-	struct request_sock	*accept_queue;
-	struct request_sock	*accept_queue_tail;
+	struct request_sock_queue accept_queue; /* FIFO of established children */
 
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 08a8fd1d1610..38943ed04e73 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -16,7 +16,9 @@
 #define _REQUEST_SOCK_H
 
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
+
 #include <net/sock.h>
 
 struct request_sock;
@@ -74,4 +76,180 @@ static inline void reqsk_free(struct request_sock *req)
 	__reqsk_free(req);
 }
 
+extern int sysctl_max_syn_backlog;
+
+/** struct tcp_listen_opt - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct tcp_listen_opt {
+	u8			max_qlen_log;
+	/* 3 bytes hole, try to use */
+	int			qlen;
+	int			qlen_young;
+	int			clock_hand;
+	u32			hash_rnd;
+	struct request_sock	*syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
+ * lock sock while browsing the listening hash (otherwise it's deadlock prone).
+ *
+ * This lock is acquired in read mode only from listening_get_next() seq_file
+ * op and it's acquired in write mode _only_ from code that is actively
+ * changing rskq_accept_head. All readers that are holding the master sock lock
+ * don't need to grab this lock in read mode too as rskq_accept_head writes
+ * are always protected from the main sock lock.
+ */
+struct request_sock_queue {
+	struct request_sock	*rskq_accept_head;
+	struct request_sock	*rskq_accept_tail;
+	rwlock_t		syn_wait_lock;
+	struct tcp_listen_opt	*listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+			     const int nr_table_entries);
+
+static inline struct tcp_listen_opt *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+	struct tcp_listen_opt *lopt;
+
+	write_lock_bh(&queue->syn_wait_lock);
+	lopt = queue->listen_opt;
+	queue->listen_opt = NULL;
+	write_unlock_bh(&queue->syn_wait_lock);
+
+	return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+	kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *
+	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	queue->rskq_accept_head = queue->rskq_accept_head = NULL;
+	return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+	return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+				      struct request_sock *req,
+				      struct request_sock **prev_req)
+{
+	write_lock(&queue->syn_wait_lock);
+	*prev_req = req->dl_next;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+				   struct request_sock *req,
+				   struct sock *parent,
+				   struct sock *child)
+{
+	req->sk = child;
+	sk_acceptq_added(parent);
+
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_head = req;
+	else
+		queue->rskq_accept_tail->dl_next = req;
+
+	queue->rskq_accept_tail = req;
+	req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	BUG_TRAP(req != NULL);
+
+	queue->rskq_accept_head = req->dl_next;
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_tail = NULL;
+
+	return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+						 struct sock *parent)
+{
+	struct request_sock *req = reqsk_queue_remove(queue);
+	struct sock *child = req->sk;
+
+	BUG_TRAP(child != NULL);
+
+	sk_acceptq_removed(parent);
+	__reqsk_free(req);
+	return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+				      struct request_sock *req)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+
+	if (req->retrans == 0)
+		--lopt->qlen_young;
+
+	return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+	const int prev_qlen = lopt->qlen;
+
+	lopt->qlen_young++;
+	lopt->qlen++;
+	return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+					u32 hash, struct request_sock *req,
+					unsigned timeout)
+{
+	struct tcp_listen_opt *lopt = queue->listen_opt;
+
+	req->expires = jiffies + timeout;
+	req->retrans = 0;
+	req->sk = NULL;
+	req->dl_next = lopt->syn_table[hash];
+
+	write_lock(&queue->syn_wait_lock);
+	lopt->syn_table[hash] = req;
+	write_unlock(&queue->syn_wait_lock);
+}
+
 #endif /* _REQUEST_SOCK_H */
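
Before the tcp_synq_* wrappers below are rewritten in terms of these helpers,
here is a hedged sketch of how the SYN-queue accounting is meant to compose.
The proto_enqueue_syn() wrapper is a hypothetical name; only the reqsk_queue_*
calls come from the header above.

/*
 * Hypothetical SYN-reception path, shown only to illustrate how the
 * helpers above fit together (compare the tcp_synq_* wrappers in the
 * next hunk).  proto_enqueue_syn() is a placeholder name.
 */
#include <linux/errno.h>
#include <net/request_sock.h>

static int proto_enqueue_syn(struct request_sock_queue *queue,
			     struct request_sock *req,
			     u32 hash, unsigned timeout)
{
	/* Refuse new requests once qlen reaches 2^max_qlen_log. */
	if (reqsk_queue_is_full(queue))
		return -ENOBUFS;

	/* Link the request into the SYN hash under syn_wait_lock and
	 * set its expiry (req->expires = jiffies + timeout). */
	reqsk_queue_hash_req(queue, hash, req, timeout);

	/* reqsk_queue_added() returns the previous qlen: 0 tells the
	 * caller the queue was empty, i.e. its SYN-ACK retransmit timer
	 * should be armed (TCP does this in tcp_synq_added()). */
	return reqsk_queue_added(queue);
}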
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6663086a5e35..a2e323c54457 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1686,71 +1686,41 @@ static inline int tcp_full_space(const struct sock *sk)
 static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
				     struct sock *child)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	req->sk = child;
-	sk_acceptq_added(sk);
-
-	if (!tp->accept_queue_tail) {
-		tp->accept_queue = req;
-	} else {
-		tp->accept_queue_tail->dl_next = req;
-	}
-	tp->accept_queue_tail = req;
-	req->dl_next = NULL;
+	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
 }
 
-struct tcp_listen_opt
-{
-	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	struct request_sock	*syn_table[TCP_SYNQ_HSIZE];
-};
-
 static inline void
 tcp_synq_removed(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (--lopt->qlen == 0)
+	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
 		tcp_delete_keepalive_timer(sk);
-	if (req->retrans == 0)
-		lopt->qlen_young--;
 }
 
 static inline void tcp_synq_added(struct sock *sk)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (lopt->qlen++ == 0)
+	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
 		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
-	lopt->qlen_young++;
 }
 
 static inline int tcp_synq_len(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen;
+	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_young(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen_young;
+	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
-	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
 }
 
 static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
				   struct request_sock **prev)
 {
-	write_lock(&tp->syn_wait_lock);
-	*prev = req->dl_next;
-	write_unlock(&tp->syn_wait_lock);
+	reqsk_queue_unlink(&tp->accept_queue, req, prev);
 }
 
 static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,