Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r--	net/ipv4/tcp_fastopen.c | 78
 1 file changed, 48 insertions(+), 30 deletions(-)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4c65ca1a86d1..cffd8f9ed1a9 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -125,6 +125,49 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
 	return false;
 }
 
+
+/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
+ * queue this additional data / FIN.
+ */
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
+		return;
+
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb_dst_drop(skb);
+	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
+	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
+	 * to avoid double counting. Also, tcp_segs_in() expects
+	 * skb->len to include the tcp_hdrlen. Hence, it should
+	 * be called before __skb_pull().
+	 */
+	tp->segs_in = 0;
+	tcp_segs_in(tp, skb);
+	__skb_pull(skb, tcp_hdrlen(skb));
+	skb_set_owner_r(skb, sk);
+
+	TCP_SKB_CB(skb)->seq++;
+	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+
+	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	tp->syn_data_acked = 1;
+
+	/* u64_stats_update_begin(&tp->syncp) not needed here,
+	 * as we certainly are not changing upper 32bit value (0)
+	 */
+	tp->bytes_received = skb->len;
+
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		tcp_fin(sk);
+}
+
 static struct sock *tcp_fastopen_create_child(struct sock *sk,
 					      struct sk_buff *skb,
 					      struct dst_entry *dst,
@@ -133,7 +176,6 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
-	u32 end_seq;
 	bool own_req;
 
 	req->num_retrans = 0;
@@ -179,35 +221,11 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	tcp_init_metrics(child);
 	tcp_init_buffer_space(child);
 
-	/* Queue the data carried in the SYN packet.
-	 * We used to play tricky games with skb_get().
-	 * With lockless listener, it is a dead end.
-	 * Do not think about it.
-	 *
-	 * XXX (TFO) - we honor a zero-payload TFO request for now,
-	 * (any reason not to?) but no need to queue the skb since
-	 * there is no data. How about SYN+FIN?
-	 */
-	end_seq = TCP_SKB_CB(skb)->end_seq;
-	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
-		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-
-		if (likely(skb2)) {
-			skb_dst_drop(skb2);
-			__skb_pull(skb2, tcp_hdrlen(skb));
-			skb_set_owner_r(skb2, child);
-			__skb_queue_tail(&child->sk_receive_queue, skb2);
-			tp->syn_data_acked = 1;
-
-			/* u64_stats_update_begin(&tp->syncp) not needed here,
-			 * as we certainly are not changing upper 32bit value (0)
-			 */
-			tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
-		} else {
-			end_seq = TCP_SKB_CB(skb)->seq + 1;
-		}
-	}
-	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+
+	tcp_fastopen_add_skb(child, skb);
+
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
 	/* tcp_conn_request() is sending the SYNACK,
 	 * and queues the child into listener accept queue.
 	 */
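
The hunks above are the whole mechanism: the listener pre-sets tp->rcv_nxt to seq + 1, then tcp_fastopen_add_skb() clones the SYN, strips its header and SYN flag, charges the skb to the child socket and tail-queues it, so any payload (and a FIN) carried by the SYN is delivered through the normal receive queue. From userspace, that payload is what a Fast Open client hands to sendto() with MSG_FASTOPEN. A minimal, illustrative client sketch follows (not part of this commit; the 127.0.0.1:8080 destination and "hello" payload are made-up placeholders):

/* tfo_client.c -- illustrative sketch only.
 * Destination 127.0.0.1:8080 and the payload are placeholders.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN	0x20000000	/* in case an older libc lacks it */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),
	};
	const char msg[] = "hello";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	/* sendto() with MSG_FASTOPEN performs the connect() implicitly;
	 * with a cached TFO cookie the payload rides in the SYN itself,
	 * which is exactly the data tcp_fastopen_add_skb() queues on the
	 * passive side. Without a cookie, the kernel falls back to a
	 * normal three-way handshake and sends the data afterwards.
	 */
	if (sendto(fd, msg, sizeof(msg) - 1, MSG_FASTOPEN,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

On the accepting side, Fast Open must be enabled before SYN data can reach this path: the net.ipv4.tcp_fastopen sysctl needs the server bit (0x2) set, and the listener opts in per-socket. A hedged sketch, with an arbitrary queue length:

#include <netinet/tcp.h>
#include <sys/socket.h>

/* Enable TCP Fast Open on an existing listening socket. qlen bounds
 * how many TFO connections may sit in the SYN queue awaiting accept();
 * 16 is an arbitrary illustrative value.
 */
static int enable_tfo(int listen_fd)
{
	int qlen = 16;

	return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
			  &qlen, sizeof(qlen));
}

With both sides set up, the first read() on the accepted socket returns the bytes that travelled in the SYN, without waiting for a separate data segment.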