diff options
-rw-r--r-- | net/ipv4/tcp_fastopen.c | 32 |
1 files changed, 24 insertions, 8 deletions
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 53db2c309572..ea82fd492c1b 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
+	u32 end_seq;
 
 	req->num_retrans = 0;
 	req->num_timeout = 0;
@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 
 	/* Queue the data carried in the SYN packet. We need to first
 	 * bump skb's refcnt because the caller will attempt to free it.
+	 * Note that IPv6 might also have used skb_get() trick
+	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
+	 * So we need to eventually get a clone of the packet,
+	 * before inserting it in sk_receive_queue.
 	 *
 	 * XXX (TFO) - we honor a zero-payload TFO request for now,
 	 * (any reason not to?) but no need to queue the skb since
 	 * there is no data. How about SYN+FIN?
 	 */
-	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
-		skb = skb_get(skb);
-		skb_dst_drop(skb);
-		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
-		skb_set_owner_r(skb, child);
-		__skb_queue_tail(&child->sk_receive_queue, skb);
-		tp->syn_data_acked = 1;
+	end_seq = TCP_SKB_CB(skb)->end_seq;
+	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
+		struct sk_buff *skb2;
+
+		if (unlikely(skb_shared(skb)))
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+		else
+			skb2 = skb_get(skb);
+
+		if (likely(skb2)) {
+			skb_dst_drop(skb2);
+			__skb_pull(skb2, tcp_hdrlen(skb));
+			skb_set_owner_r(skb2, child);
+			__skb_queue_tail(&child->sk_receive_queue, skb2);
+			tp->syn_data_acked = 1;
+		} else {
+			end_seq = TCP_SKB_CB(skb)->seq + 1;
+		}
 	}
-	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
 	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);