about summary refs log tree commit diff stats
path: root/net/ipv4/af_inet.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/af_inet.c')
-rw-r--r--  net/ipv4/af_inet.c | 27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fe4582ca969..766c5965856 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -212,6 +212,26 @@ int inet_listen(struct socket *sock, int backlog)
212 * we can only allow the backlog to be adjusted. 212 * we can only allow the backlog to be adjusted.
213 */ 213 */
214 if (old_state != TCP_LISTEN) { 214 if (old_state != TCP_LISTEN) {
215 /* Check special setups for testing purpose to enable TFO w/o
216 * requiring TCP_FASTOPEN sockopt.
217 * Note that only TCP sockets (SOCK_STREAM) will reach here.
218 * Also fastopenq may already been allocated because this
219 * socket was in TCP_LISTEN state previously but was
220 * shutdown() (rather than close()).
221 */
222 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
223 inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
224 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
225 err = fastopen_init_queue(sk, backlog);
226 else if ((sysctl_tcp_fastopen &
227 TFO_SERVER_WO_SOCKOPT2) != 0)
228 err = fastopen_init_queue(sk,
229 ((uint)sysctl_tcp_fastopen) >> 16);
230 else
231 err = 0;
232 if (err)
233 goto out;
234 }
215 err = inet_csk_listen_start(sk, backlog); 235 err = inet_csk_listen_start(sk, backlog);
216 if (err) 236 if (err)
217 goto out; 237 goto out;
@@ -701,7 +721,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
701 721
702 sock_rps_record_flow(sk2); 722 sock_rps_record_flow(sk2);
703 WARN_ON(!((1 << sk2->sk_state) & 723 WARN_ON(!((1 << sk2->sk_state) &
704 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 724 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
725 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
705 726
706 sock_graft(sk2, newsock); 727 sock_graft(sk2, newsock);
707 728
@@ -1364,7 +1385,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1364 if (*(u8 *)iph != 0x45) 1385 if (*(u8 *)iph != 0x45)
1365 goto out_unlock; 1386 goto out_unlock;
1366 1387
1367 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1388 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1368 goto out_unlock; 1389 goto out_unlock;
1369 1390
1370 id = ntohl(*(__be32 *)&iph->id); 1391 id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1401,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1380 iph2 = ip_hdr(p); 1401 iph2 = ip_hdr(p);
1381 1402
1382 if ((iph->protocol ^ iph2->protocol) | 1403 if ((iph->protocol ^ iph2->protocol) |
1383 (iph->tos ^ iph2->tos) |
1384 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | 1404 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1385 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { 1405 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1386 NAPI_GRO_CB(p)->same_flow = 0; 1406 NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1410,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1390 /* All fields must match except length and checksum. */ 1410 /* All fields must match except length and checksum. */
1391 NAPI_GRO_CB(p)->flush |= 1411 NAPI_GRO_CB(p)->flush |=
1392 (iph->ttl ^ iph2->ttl) | 1412 (iph->ttl ^ iph2->ttl) |
1413 (iph->tos ^ iph2->tos) |
1393 ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id); 1414 ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
1394 1415
1395 NAPI_GRO_CB(p)->flush |= flush; 1416 NAPI_GRO_CB(p)->flush |= flush;