path: root/include/net/request_sock.h
Diffstat (limited to 'include/net/request_sock.h')
-rw-r--r--	include/net/request_sock.h	186
1 file changed, 60 insertions(+), 126 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 87935cad2f7b..a0dde04eb178 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -32,17 +32,17 @@ struct request_sock_ops {
 	int		obj_size;
 	struct kmem_cache	*slab;
 	char		*slab_name;
-	int		(*rtx_syn_ack)(struct sock *sk,
+	int		(*rtx_syn_ack)(const struct sock *sk,
 				       struct request_sock *req);
-	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
+	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sock *sk,
+	void		(*send_reset)(const struct sock *sk,
 				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
 	void		(*syn_ack_timeout)(const struct request_sock *req);
 };
 
-int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
@@ -50,16 +50,15 @@ struct request_sock {
 	struct sock_common		__req_common;
 #define rsk_refcnt			__req_common.skc_refcnt
 #define rsk_hash			__req_common.skc_hash
+#define rsk_listener			__req_common.skc_listener
+#define rsk_window_clamp		__req_common.skc_window_clamp
+#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
 
 	struct request_sock		*dl_next;
-	struct sock			*rsk_listener;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
 	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
 	u8				num_timeout:7; /* number of timeouts */
-	/* The following two fields can be easily recomputed I think -AK */
-	u32				window_clamp; /* window clamp at creation time */
-	u32				rcv_wnd; /* rcv_wnd offered first time */
 	u32				ts_recent;
 	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
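The listener pointer and both window fields move into the shared struct sock_common, with #defines aliasing the old names. A minimal sketch of what the aliasing means for existing code (demo_rsk_listener() is an illustrative name, not part of the patch):

static inline struct sock *demo_rsk_listener(struct request_sock *req)
{
	/* rsk_listener now expands to __req_common.skc_listener, so the
	 * storage lives in the header shared with struct sock and callers
	 * compile unchanged.
	 */
	return req->rsk_listener;
}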
@@ -69,15 +68,35 @@ struct request_sock {
 	u32				peer_secid;
 };
 
+static inline struct request_sock *inet_reqsk(struct sock *sk)
+{
+	return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+	return (struct sock *)req;
+}
+
 static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+	    bool attach_listener)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
+	struct request_sock *req;
+
+	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
 
 	if (req) {
 		req->rsk_ops = ops;
-		sock_hold(sk_listener);
-		req->rsk_listener = sk_listener;
+		if (attach_listener) {
+			sock_hold(sk_listener);
+			req->rsk_listener = sk_listener;
+		} else {
+			req->rsk_listener = NULL;
+		}
+		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+		sk_node_init(&req_to_sk(req)->sk_node);
+		sk_tx_queue_clear(req_to_sk(req));
 		req->saved_syn = NULL;
 		/* Following is temporary. It is coupled with debugging
 		 * helpers in reqsk_put() & reqsk_free()
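reqsk_alloc() gains an attach_listener flag so that stateless answers can skip pinning the listener. A hedged caller sketch, assuming a syncookie-style path; demo_conn_request() and want_cookie are illustrative names, not from this patch:

static struct request_sock *
demo_conn_request(const struct request_sock_ops *ops, struct sock *sk,
		  bool want_cookie)
{
	/* A syncookie reply keeps no state, so take no listener reference:
	 * rsk_listener stays NULL when attach_listener is false.
	 */
	struct request_sock *req = reqsk_alloc(ops, sk, !want_cookie);

	/* With __GFP_NOWARN, allocation failure under a SYN flood is
	 * expected and must be handled quietly.
	 */
	if (!req)
		return NULL;
	/* ... fill in mss, ts_recent, etc. ... */
	return req;
}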
@@ -87,16 +106,6 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
 	return req;
 }
 
-static inline struct request_sock *inet_reqsk(struct sock *sk)
-{
-	return (struct request_sock *)sk;
-}
-
-static inline struct sock *req_to_sk(struct request_sock *req)
-{
-	return (struct sock *)req;
-}
-
 static inline void reqsk_free(struct request_sock *req)
 {
 	/* temporary debugging */
@@ -117,26 +126,6 @@ static inline void reqsk_put(struct request_sock *req)
 
 extern int sysctl_max_syn_backlog;
 
-/** struct listen_sock - listen state
- *
- * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
- */
-struct listen_sock {
-	int			qlen_inc; /* protected by listener lock */
-	int			young_inc;/* protected by listener lock */
-
-	/* following fields can be updated by timer */
-	atomic_t		qlen_dec; /* qlen = qlen_inc - qlen_dec */
-	atomic_t		young_dec;
-
-	u8			max_qlen_log ____cacheline_aligned_in_smp;
-	u8			synflood_warned;
-	/* 2 bytes hole, try to use */
-	u32			hash_rnd;
-	u32			nr_table_entries;
-	struct request_sock	*syn_table[0];
-};
-
 /*
  * For a TCP Fast Open listener -
  *  lock - protects the access to all the reqsk, which is co-owned by
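The removed listen_sock tracked each queue length as two halves: plain increments under the listener lock plus atomic decrements from the timer, recombined on every read. The next hunk folds each pair into one atomic_t inside request_sock_queue; a side-by-side sketch of the accounting, using only fields shown in this diff:

/* old: reader recombines the lock-protected and timer-updated halves */
qlen = lopt->qlen_inc - atomic_read(&lopt->qlen_dec);

/* new: a single counter, updated and read atomically */
atomic_inc(&queue->qlen);		/* reqsk_queue_added()   */
atomic_dec(&queue->qlen);		/* reqsk_queue_removed() */
qlen = atomic_read(&queue->qlen);	/* reqsk_queue_len()     */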
@@ -170,127 +159,72 @@ struct fastopen_queue {
  * @rskq_accept_head - FIFO head of established children
  * @rskq_accept_tail - FIFO tail of established children
  * @rskq_defer_accept - User waits for some data after accept()
- * @syn_wait_lock - serializer
- *
- * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
- * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
+	u32			synflood_warned;
+	atomic_t		qlen;
+	atomic_t		young;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	u8			rskq_defer_accept;
-	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
-
-	/* temporary alignment, our goal is to get rid of this lock */
-	spinlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
-int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      unsigned int nr_table_entries);
+void reqsk_queue_alloc(struct request_sock_queue *queue);
 
-void __reqsk_queue_destroy(struct request_sock_queue *queue);
-void reqsk_queue_destroy(struct request_sock_queue *queue);
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
-static inline struct request_sock *
-	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-				   struct request_sock *req,
-				   struct sock *parent,
-				   struct sock *child)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						      struct sock *parent)
 {
-	req->sk = child;
-	sk_acceptq_added(parent);
-
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_head = req;
-	else
-		queue->rskq_accept_tail->dl_next = req;
-
-	queue->rskq_accept_tail = req;
-	req->dl_next = NULL;
-}
-
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
+	struct request_sock *req;
 
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
 
 static inline void reqsk_queue_removed(struct request_sock_queue *queue,
 				       const struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	if (req->num_timeout == 0)
-		atomic_inc(&lopt->young_dec);
-	atomic_inc(&lopt->qlen_dec);
+		atomic_dec(&queue->young);
+	atomic_dec(&queue->qlen);
 }
 
 static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
-	lopt->young_inc++;
-	lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
-	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
-	return lopt->young_inc - atomic_read(&lopt->young_dec);
+	atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
 }
 
 static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	const struct listen_sock *lopt = queue->listen_opt;
-
-	return lopt ? listen_sock_qlen(lopt) : 0;
+	return atomic_read(&queue->qlen);
 }
 
 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	return listen_sock_young(queue->listen_opt);
+	return atomic_read(&queue->young);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
-{
-	return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
-}
-
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
-			  u32 hash, struct request_sock *req,
-			  unsigned long timeout);
-
 #endif /* _REQUEST_SOCK_H */
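With rskq_lock embedded in request_sock_queue, popping an established child no longer needs the listener's main socket lock. A hedged sketch of an accept-style consumer (demo_accept() is an illustrative name; error handling and blocking are elided):

static struct sock *demo_accept(struct sock *listener,
				struct request_sock_queue *queue)
{
	struct request_sock *req = reqsk_queue_remove(queue, listener);
	struct sock *child = NULL;

	if (req) {
		child = req->sk;	/* child socket set when queued */
		reqsk_put(req);		/* drop the queue's reference */
	}
	return child;
}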