Diffstat (limited to 'include/net/request_sock.h')
 include/net/request_sock.h | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 4c0766e201e3..c3cdd6c9f448 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,6 +106,34 @@ struct listen_sock {
 	struct request_sock	*syn_table[0];
 };
 
+/*
+ * For a TCP Fast Open listener -
+ *	lock - protects the access to all the reqsk, which is co-owned by
+ *		the listener and the child socket.
+ *	qlen - pending TFO requests (still in TCP_SYN_RECV).
+ *	max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ * XXX (TFO) - ideally these fields can be made as part of "listen_sock"
+ * structure above. But there is some implementation difficulty due to
+ * listen_sock being part of request_sock_queue hence will be freed when
+ * a listener is stopped. But TFO related fields may continue to be
+ * accessed even after a listener is closed, until its sk_refcnt drops
+ * to 0 implying no more outstanding TFO reqs. One solution is to keep
+ * listen_opt around until sk_refcnt drops to 0. But there is some other
+ * complexity that needs to be resolved. E.g., a listener can be disabled
+ * temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
+	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
+						 * This is part of the defense
+						 * against spoofing attack.
+						 */
+	spinlock_t	lock;
+	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
+	int		max_qlen;	/* != 0 iff TFO is currently enabled */
+};
+
 /** struct request_sock_queue - queue of request_socks
  *
  * @rskq_accept_head - FIFO head of established children
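The fastopen_queue added in the hunk above carries two things: a singly linked list of past TFO requests that were answered with a RST (rskq_rst_head/rskq_rst_tail, part of the spoofing defense) and the qlen/max_qlen counters, all guarded by the spinlock. As a minimal editorial sketch, not part of this patch, appending a request to that rst list could look like the helper below; it assumes the requests are chained through request_sock's existing dl_next pointer once they have left the SYN table.

#include <linux/spinlock.h>
#include <net/request_sock.h>

/* Editorial sketch, not from the patch: park a TFO request that was
 * answered with a RST on the listener's rst list, under the queue lock.
 * Assumes req->dl_next is free to reuse as the link field here.
 */
static void tfo_track_rst(struct fastopen_queue *fq, struct request_sock *req)
{
	spin_lock_bh(&fq->lock);
	req->dl_next = NULL;
	if (fq->rskq_rst_head == NULL)
		fq->rskq_rst_head = req;	/* list was empty */
	else
		fq->rskq_rst_tail->dl_next = req;
	fq->rskq_rst_tail = req;
	spin_unlock_bh(&fq->lock);
}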
@@ -129,6 +157,12 @@ struct request_sock_queue {
 	u8			rskq_defer_accept;
 	/* 3 bytes hole, try to pack */
 	struct listen_sock	*listen_opt;
+	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
+					     * enabled on this listener. Check
+					     * max_qlen != 0 in fastopen_queue
+					     * to determine if TFO is enabled
+					     * right at this moment.
+					     */
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
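Per the comment on the new fastopenq member, "TFO is enabled" really means two checks: the pointer is non-NULL and max_qlen is non-zero. A hypothetical convenience helper capturing that test (not in this patch; the name is made up) might read:

#include <linux/types.h>
#include <net/request_sock.h>

/* Editorial sketch: true only while TFO is actually enabled on the listener. */
static inline bool reqsk_queue_tfo_enabled(const struct request_sock_queue *queue)
{
	return queue->fastopenq != NULL && queue->fastopenq->max_qlen != 0;
}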
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
 extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+extern void reqsk_fastopen_remove(struct sock *sk,
+				  struct request_sock *req, bool reset);
 
 static inline struct request_sock *
 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
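The newly exported reqsk_fastopen_remove() takes the listening socket, the TFO request to drop, and a reset flag. Going only by the declaration, a call site on the child-teardown path might look like the sketch below; the wrapper name and the reading of the flag (the child was torn down by an incoming RST) are assumptions, not taken from this patch.

#include <net/request_sock.h>
#include <net/sock.h>

/* Editorial sketch of a caller: drop the TFO req when the child goes away,
 * passing whether the teardown was caused by a RST from the peer.
 */
static void example_drop_fastopen_req(struct sock *listener,
				      struct request_sock *req, bool got_rst)
{
	reqsk_fastopen_remove(listener, req, got_rst);
}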