Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/syncookies.c     39
-rw-r--r--   net/ipv4/tcp.c             9
-rw-r--r--   net/ipv4/tcp_diag.c       25
-rw-r--r--   net/ipv4/tcp_ipv4.c       72
-rw-r--r--   net/ipv4/tcp_minisocks.c  48
-rw-r--r--   net/ipv4/tcp_output.c     25
-rw-r--r--   net/ipv4/tcp_timer.c       2
7 files changed, 114 insertions, 106 deletions
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index e923d2f021aa..dd47e6da6fb3 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -190,6 +190,8 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                             struct ip_options *opt)
 {
+        struct inet_request_sock *ireq;
+        struct tcp_request_sock *treq;
         struct tcp_sock *tp = tcp_sk(sk);
         __u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
         struct sock *ret = sk;
@@ -209,19 +211,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
         NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
 
-        req = tcp_openreq_alloc();
         ret = NULL;
+        req = tcp_openreq_alloc(&or_ipv4); /* for safety */
         if (!req)
                 goto out;
 
-        req->rcv_isn = htonl(skb->h.th->seq) - 1;
-        req->snt_isn = cookie;
+        ireq = inet_rsk(req);
+        treq = tcp_rsk(req);
+        treq->rcv_isn = htonl(skb->h.th->seq) - 1;
+        treq->snt_isn = cookie;
         req->mss = mss;
-        req->rmt_port = skb->h.th->source;
-        req->af.v4_req.loc_addr = skb->nh.iph->daddr;
-        req->af.v4_req.rmt_addr = skb->nh.iph->saddr;
-        req->class = &or_ipv4; /* for savety */
-        req->af.v4_req.opt = NULL;
+        ireq->rmt_port = skb->h.th->source;
+        ireq->loc_addr = skb->nh.iph->daddr;
+        ireq->rmt_addr = skb->nh.iph->saddr;
+        ireq->opt = NULL;
 
         /* We throwed the options of the initial SYN away, so we hope
          * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -229,17 +232,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         if (opt && opt->optlen) {
                 int opt_size = sizeof(struct ip_options) + opt->optlen;
 
-                req->af.v4_req.opt = kmalloc(opt_size, GFP_ATOMIC);
-                if (req->af.v4_req.opt) {
-                        if (ip_options_echo(req->af.v4_req.opt, skb)) {
-                                kfree(req->af.v4_req.opt);
-                                req->af.v4_req.opt = NULL;
-                        }
+                ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
+                if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) {
+                        kfree(ireq->opt);
+                        ireq->opt = NULL;
                 }
         }
 
-        req->snd_wscale = req->rcv_wscale = req->tstamp_ok = 0;
-        req->wscale_ok = req->sack_ok = 0;
+        ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0;
+        ireq->wscale_ok = ireq->sack_ok = 0;
         req->expires = 0UL;
         req->retrans = 0;
 
@@ -253,8 +254,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                 struct flowi fl = { .nl_u = { .ip4_u =
                                     { .daddr = ((opt && opt->srr) ?
                                                 opt->faddr :
-                                                req->af.v4_req.rmt_addr),
-                                      .saddr = req->af.v4_req.loc_addr,
+                                                ireq->rmt_addr),
+                                      .saddr = ireq->loc_addr,
                                       .tos = RT_CONN_FLAGS(sk) } },
                                     .proto = IPPROTO_TCP,
                                     .uli_u = { .ports =
@@ -272,7 +273,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                   &req->rcv_wnd, &req->window_clamp,
                                   0, &rcv_wscale);
         /* BTW win scale with syncookies is 0 by definition */
-        req->rcv_wscale = rcv_wscale;
+        ireq->rcv_wscale = rcv_wscale;
 
         ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
 out:    return ret;
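
The inet_rsk() and tcp_rsk() accessors used throughout this patch are defined in headers outside net/ipv4 and do not appear in this diff. The sketch below is only a guess at the layering they rely on, reconstructed from the fields the hunks above touch; the exact struct definitions, member ordering and bit-field widths are assumptions, with the generic open_request assumed to sit first so the accessors can be plain pointer casts.

        /* Sketch only -- not part of this diff.  Assumes the generic
         * open_request is embedded first, so the accessors are casts. */
        struct inet_request_sock {
                struct open_request req;        /* protocol-independent part */
                u32                 loc_addr;
                u32                 rmt_addr;
                u16                 rmt_port;
                u16                 snd_wscale : 4,
                                    rcv_wscale : 4,
                                    tstamp_ok  : 1,
                                    sack_ok    : 1,
                                    wscale_ok  : 1,
                                    ecn_ok     : 1,
                                    acked      : 1;
                struct ip_options   *opt;
        };

        struct tcp_request_sock {
                struct inet_request_sock req;   /* inet part stays first */
                u32                      rcv_isn;
                u32                      snt_isn;
        };

        static inline struct inet_request_sock *inet_rsk(const struct open_request *req)
        {
                return (struct inet_request_sock *)req;
        }

        static inline struct tcp_request_sock *tcp_rsk(const struct open_request *req)
        {
                return (struct tcp_request_sock *)req;
        }
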
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0d9a4fd5f1a4..a3cabfa2022a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -271,7 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
 
 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 
-kmem_cache_t *tcp_openreq_cachep;
 kmem_cache_t *tcp_bucket_cachep;
 kmem_cache_t *tcp_timewait_cachep;
 
@@ -2271,13 +2270,6 @@ void __init tcp_init(void)
         __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
                                    sizeof(skb->cb));
 
-        tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
-                                               sizeof(struct open_request),
-                                               0, SLAB_HWCACHE_ALIGN,
-                                               NULL, NULL);
-        if (!tcp_openreq_cachep)
-                panic("tcp_init: Cannot alloc open_request cache.");
-
         tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
                                               sizeof(struct tcp_bind_bucket),
                                               0, SLAB_HWCACHE_ALIGN,
@@ -2374,7 +2366,6 @@ EXPORT_SYMBOL(tcp_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 EXPORT_SYMBOL(tcp_getsockopt);
 EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_openreq_cachep);
 EXPORT_SYMBOL(tcp_poll);
 EXPORT_SYMBOL(tcp_read_sock);
 EXPORT_SYMBOL(tcp_recvmsg);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 8faa8948f75c..700ff2413588 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -458,6 +458,7 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
                             struct open_request *req,
                             u32 pid, u32 seq)
 {
+        const struct inet_request_sock *ireq = inet_rsk(req);
         struct inet_sock *inet = inet_sk(sk);
         unsigned char *b = skb->tail;
         struct tcpdiagmsg *r;
@@ -482,9 +483,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
                 tmo = 0;
 
         r->id.tcpdiag_sport = inet->sport;
-        r->id.tcpdiag_dport = req->rmt_port;
-        r->id.tcpdiag_src[0] = req->af.v4_req.loc_addr;
-        r->id.tcpdiag_dst[0] = req->af.v4_req.rmt_addr;
+        r->id.tcpdiag_dport = ireq->rmt_port;
+        r->id.tcpdiag_src[0] = ireq->loc_addr;
+        r->id.tcpdiag_dst[0] = ireq->rmt_addr;
         r->tcpdiag_expires = jiffies_to_msecs(tmo),
         r->tcpdiag_rqueue = 0;
         r->tcpdiag_wqueue = 0;
@@ -493,9 +494,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
 #ifdef CONFIG_IP_TCPDIAG_IPV6
         if (r->tcpdiag_family == AF_INET6) {
                 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
-                               &req->af.v6_req.loc_addr);
+                               &tcp6_rsk(req)->loc_addr);
                 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
-                               &req->af.v6_req.rmt_addr);
+                               &tcp6_rsk(req)->rmt_addr);
         }
 #endif
         nlh->nlmsg_len = skb->tail - b;
@@ -545,9 +546,11 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                 reqnum = 0;
                 for (req = head; req; reqnum++, req = req->dl_next) {
+                        struct inet_request_sock *ireq = inet_rsk(req);
+
                         if (reqnum < s_reqnum)
                                 continue;
-                        if (r->id.tcpdiag_dport != req->rmt_port &&
+                        if (r->id.tcpdiag_dport != ireq->rmt_port &&
                             r->id.tcpdiag_dport)
                                 continue;
 
@@ -555,16 +558,16 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                         entry.saddr =
 #ifdef CONFIG_IP_TCPDIAG_IPV6
                                 (entry.family == AF_INET6) ?
-                                req->af.v6_req.loc_addr.s6_addr32 :
+                                tcp6_rsk(req)->loc_addr.s6_addr32 :
 #endif
-                                &req->af.v4_req.loc_addr;
+                                &ireq->loc_addr;
                         entry.daddr =
 #ifdef CONFIG_IP_TCPDIAG_IPV6
                                 (entry.family == AF_INET6) ?
-                                req->af.v6_req.rmt_addr.s6_addr32 :
+                                tcp6_rsk(req)->rmt_addr.s6_addr32 :
 #endif
-                                &req->af.v4_req.rmt_addr;
-                        entry.dport = ntohs(req->rmt_port);
+                                &ireq->rmt_addr;
+                        entry.dport = ntohs(ireq->rmt_port);
 
                         if (!tcpdiag_bc_run(RTA_DATA(bc),
                                             RTA_PAYLOAD(bc), &entry))
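
tcp6_rsk(), used above for the IPv6 side of the request dump, is likewise not defined in this diff. Presumably it follows the same cast pattern one layer further out; the layout below is illustrative only, with the loc_addr/rmt_addr names taken from the hunks above and everything else an assumption.

        /* Illustrative sketch, not from this patch. */
        struct tcp6_request_sock {
                struct tcp_request_sock req;    /* assumed: TCP/inet parts stay first */
                struct in6_addr         loc_addr;
                struct in6_addr         rmt_addr;
                /* ... remaining IPv6-specific request state ... */
        };

        static inline struct tcp6_request_sock *tcp6_rsk(const struct open_request *req)
        {
                return (struct tcp6_request_sock *)req;
        }
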
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dad98e4a5043..e156be90df14 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -880,9 +880,11 @@ static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
              (req = *prev) != NULL;
              prev = &req->dl_next) {
-                if (req->rmt_port == rport &&
-                    req->af.v4_req.rmt_addr == raddr &&
-                    req->af.v4_req.loc_addr == laddr &&
+                const struct inet_request_sock *ireq = inet_rsk(req);
+
+                if (ireq->rmt_port == rport &&
+                    ireq->rmt_addr == raddr &&
+                    ireq->loc_addr == laddr &&
                     TCP_INET_FAMILY(req->class->family)) {
                         BUG_TRAP(!req->sk);
                         *prevp = prev;
@@ -897,7 +899,7 @@ static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_listen_opt *lopt = tp->listen_opt;
-        u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
+        u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
         req->expires = jiffies + TCP_TIMEOUT_INIT;
         req->retrans = 0;
@@ -1065,7 +1067,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                  */
                 BUG_TRAP(!req->sk);
 
-                if (seq != req->snt_isn) {
+                if (seq != tcp_rsk(req)->snt_isn) {
                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                         goto out;
                 }
@@ -1256,7 +1258,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
 {
-        tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
+        tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                         req->ts_recent);
 }
 
@@ -1264,18 +1266,19 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
                                           struct open_request *req)
 {
         struct rtable *rt;
-        struct ip_options *opt = req->af.v4_req.opt;
+        const struct inet_request_sock *ireq = inet_rsk(req);
+        struct ip_options *opt = inet_rsk(req)->opt;
         struct flowi fl = { .oif = sk->sk_bound_dev_if,
                             .nl_u = { .ip4_u =
                                       { .daddr = ((opt && opt->srr) ?
                                                   opt->faddr :
-                                                  req->af.v4_req.rmt_addr),
-                                        .saddr = req->af.v4_req.loc_addr,
+                                                  ireq->rmt_addr),
+                                        .saddr = ireq->loc_addr,
                                         .tos = RT_CONN_FLAGS(sk) } },
                             .proto = IPPROTO_TCP,
                             .uli_u = { .ports =
                                        { .sport = inet_sk(sk)->sport,
-                                         .dport = req->rmt_port } } };
+                                         .dport = ireq->rmt_port } } };
 
         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1297,6 +1300,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
                               struct dst_entry *dst)
 {
+        const struct inet_request_sock *ireq = inet_rsk(req);
         int err = -1;
         struct sk_buff * skb;
 
@@ -1310,14 +1314,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
                 struct tcphdr *th = skb->h.th;
 
                 th->check = tcp_v4_check(th, skb->len,
-                                         req->af.v4_req.loc_addr,
-                                         req->af.v4_req.rmt_addr,
+                                         ireq->loc_addr,
+                                         ireq->rmt_addr,
                                          csum_partial((char *)th, skb->len,
                                                       skb->csum));
 
-                err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
-                                            req->af.v4_req.rmt_addr,
-                                            req->af.v4_req.opt);
+                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+                                            ireq->rmt_addr,
+                                            ireq->opt);
                 if (err == NET_XMIT_CN)
                         err = 0;
         }
@@ -1332,8 +1336,8 @@ out:
  */
 static void tcp_v4_or_free(struct open_request *req)
 {
-        if (req->af.v4_req.opt)
-                kfree(req->af.v4_req.opt);
+        if (inet_rsk(req)->opt)
+                kfree(inet_rsk(req)->opt);
 }
 
 static inline void syn_flood_warning(struct sk_buff *skb)
@@ -1387,6 +1391,7 @@ int sysctl_max_syn_backlog = 256;
 
 struct or_calltable or_ipv4 = {
         .family = PF_INET,
+        .obj_size = sizeof(struct tcp_request_sock),
         .rtx_syn_ack = tcp_v4_send_synack,
         .send_ack = tcp_v4_or_send_ack,
         .destructor = tcp_v4_or_free,
@@ -1395,6 +1400,7 @@ struct or_calltable or_ipv4 = {
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
+        struct inet_request_sock *ireq;
         struct tcp_options_received tmp_opt;
         struct open_request *req;
         __u32 saddr = skb->nh.iph->saddr;
@@ -1433,7 +1439,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                 goto drop;
 
-        req = tcp_openreq_alloc();
+        req = tcp_openreq_alloc(&or_ipv4);
         if (!req)
                 goto drop;
 
@@ -1461,10 +1467,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
         tcp_openreq_init(req, &tmp_opt, skb);
 
-        req->af.v4_req.loc_addr = daddr;
-        req->af.v4_req.rmt_addr = saddr;
-        req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
-        req->class = &or_ipv4;
+        ireq = inet_rsk(req);
+        ireq->loc_addr = daddr;
+        ireq->rmt_addr = saddr;
+        ireq->opt = tcp_v4_save_options(sk, skb);
         if (!want_cookie)
                 TCP_ECN_create_request(req, skb->h.th);
 
@@ -1523,7 +1529,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
                 isn = tcp_v4_init_sequence(sk, skb);
         }
-        req->snt_isn = isn;
+        tcp_rsk(req)->snt_isn = isn;
 
         if (tcp_v4_send_synack(sk, req, dst))
                 goto drop_and_free;
@@ -1551,6 +1557,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                   struct open_request *req,
                                   struct dst_entry *dst)
 {
+        struct inet_request_sock *ireq;
         struct inet_sock *newinet;
         struct tcp_sock *newtp;
         struct sock *newsk;
@@ -1570,11 +1577,12 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
         newtp = tcp_sk(newsk);
         newinet = inet_sk(newsk);
-        newinet->daddr = req->af.v4_req.rmt_addr;
-        newinet->rcv_saddr = req->af.v4_req.loc_addr;
-        newinet->saddr = req->af.v4_req.loc_addr;
-        newinet->opt = req->af.v4_req.opt;
-        req->af.v4_req.opt = NULL;
+        ireq = inet_rsk(req);
+        newinet->daddr = ireq->rmt_addr;
+        newinet->rcv_saddr = ireq->loc_addr;
+        newinet->saddr = ireq->loc_addr;
+        newinet->opt = ireq->opt;
+        ireq->opt = NULL;
         newinet->mc_index = tcp_v4_iif(skb);
         newinet->mc_ttl = skb->nh.iph->ttl;
         newtp->ext_header_len = 0;
@@ -2454,15 +2462,16 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
 static void get_openreq4(struct sock *sk, struct open_request *req,
                          char *tmpbuf, int i, int uid)
 {
+        const struct inet_request_sock *ireq = inet_rsk(req);
         int ttd = req->expires - jiffies;
 
         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
                 i,
-                req->af.v4_req.loc_addr,
+                ireq->loc_addr,
                 ntohs(inet_sk(sk)->sport),
-                req->af.v4_req.rmt_addr,
-                ntohs(req->rmt_port),
+                ireq->rmt_addr,
+                ntohs(ireq->rmt_port),
                 TCP_SYN_RECV,
                 0, 0, /* could print option size, but that is af dependent. */
                 1,    /* timers active (only the expire timer) */
@@ -2618,6 +2627,7 @@ struct proto tcp_prot = {
         .sysctl_rmem = sysctl_tcp_rmem,
         .max_header = MAX_TCP_HEADER,
         .obj_size = sizeof(struct tcp_sock),
+        .rsk_prot = &or_ipv4,
 };
 
 
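
Two of the additions above only make sense together: or_ipv4 now carries .obj_size = sizeof(struct tcp_request_sock) and tcp_prot gains .rsk_prot = &or_ipv4, while tcp.c (earlier in this patch) drops its private tcp_openreq_cachep slab. The sketch below is a guess at how tcp_openreq_alloc() is presumably reworked to use that information; the class->slab field and the place where that cache gets created are assumptions, not shown in this diff.

        /* Hypothetical sketch of the per-class allocator implied by the diff. */
        static inline struct open_request *tcp_openreq_alloc(struct or_calltable *class)
        {
                /* class->slab is assumed to be a kmem_cache created elsewhere
                 * (e.g. by the core socket code) from class->obj_size. */
                struct open_request *req = kmem_cache_alloc(class->slab, SLAB_ATOMIC);

                if (req != NULL)
                        req->class = class;     /* replaces the open-coded req->class = &or_ipv4 */
                return req;
        }
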
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index eea1a17a9ac2..1037401c7cc8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -692,6 +692,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
         struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
 
         if(newsk != NULL) {
+                struct inet_request_sock *ireq = inet_rsk(req);
+                struct tcp_request_sock *treq = tcp_rsk(req);
                 struct tcp_sock *newtp;
                 struct sk_filter *filter;
 
@@ -703,7 +705,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 tcp_sk(newsk)->bind_hash = NULL;
 
                 /* Clone the TCP header template */
-                inet_sk(newsk)->dport = req->rmt_port;
+                inet_sk(newsk)->dport = ireq->rmt_port;
 
                 sock_lock_init(newsk);
                 bh_lock_sock(newsk);
@@ -739,14 +741,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 /* Now setup tcp_sock */
                 newtp = tcp_sk(newsk);
                 newtp->pred_flags = 0;
-                newtp->rcv_nxt = req->rcv_isn + 1;
-                newtp->snd_nxt = req->snt_isn + 1;
-                newtp->snd_una = req->snt_isn + 1;
-                newtp->snd_sml = req->snt_isn + 1;
+                newtp->rcv_nxt = treq->rcv_isn + 1;
+                newtp->snd_nxt = treq->snt_isn + 1;
+                newtp->snd_una = treq->snt_isn + 1;
+                newtp->snd_sml = treq->snt_isn + 1;
 
                 tcp_prequeue_init(newtp);
 
-                tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
+                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
 
                 newtp->retransmits = 0;
                 newtp->backoff = 0;
@@ -775,10 +777,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 tcp_set_ca_state(newtp, TCP_CA_Open);
                 tcp_init_xmit_timers(newsk);
                 skb_queue_head_init(&newtp->out_of_order_queue);
-                newtp->rcv_wup = req->rcv_isn + 1;
-                newtp->write_seq = req->snt_isn + 1;
+                newtp->rcv_wup = treq->rcv_isn + 1;
+                newtp->write_seq = treq->snt_isn + 1;
                 newtp->pushed_seq = newtp->write_seq;
-                newtp->copied_seq = req->rcv_isn + 1;
+                newtp->copied_seq = treq->rcv_isn + 1;
 
                 newtp->rx_opt.saw_tstamp = 0;
 
@@ -808,18 +810,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 newsk->sk_socket = NULL;
                 newsk->sk_sleep = NULL;
 
-                newtp->rx_opt.tstamp_ok = req->tstamp_ok;
-                if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
+                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+                if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                         if (sysctl_tcp_fack)
                                 newtp->rx_opt.sack_ok |= 2;
                 }
                 newtp->window_clamp = req->window_clamp;
                 newtp->rcv_ssthresh = req->rcv_wnd;
                 newtp->rcv_wnd = req->rcv_wnd;
-                newtp->rx_opt.wscale_ok = req->wscale_ok;
+                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                 if (newtp->rx_opt.wscale_ok) {
-                        newtp->rx_opt.snd_wscale = req->snd_wscale;
-                        newtp->rx_opt.rcv_wscale = req->rcv_wscale;
+                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                 } else {
                         newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                         newtp->window_clamp = min(newtp->window_clamp, 65535U);
@@ -881,7 +883,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
         }
 
         /* Check for pure retransmitted SYN. */
-        if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
+        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
             flg == TCP_FLAG_SYN &&
             !paws_reject) {
                 /*
@@ -959,7 +961,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
          * Invalid ACK: reset will be sent by listening socket
          */
         if ((flg & TCP_FLAG_ACK) &&
-            (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
+            (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                 return sk;
 
         /* Also, it would be not so bad idea to check rcv_tsecr, which
@@ -970,7 +972,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
         /* RFC793: "first check sequence number". */
 
         if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                          req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
+                                          tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
                 /* Out of window: send ACK and drop. */
                 if (!(flg & TCP_FLAG_RST))
                         req->class->send_ack(skb, req);
@@ -981,12 +983,12 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
         /* In sequence, PAWS is OK. */
 
-        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
+        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
                 req->ts_recent = tmp_opt.rcv_tsval;
 
-        if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
+        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                 /* Truncate SYN, it is out of window starting
-                   at req->rcv_isn+1. */
+                   at tcp_rsk(req)->rcv_isn + 1. */
                 flg &= ~TCP_FLAG_SYN;
         }
 
@@ -1003,8 +1005,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                 return NULL;
 
         /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
-                req->acked = 1;
+        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+                inet_rsk(req)->acked = 1;
                 return NULL;
         }
 
@@ -1026,7 +1028,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
 listen_overflow:
         if (!sysctl_tcp_abort_on_overflow) {
-                req->acked = 1;
+                inet_rsk(req)->acked = 1;
                 return NULL;
         }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fa24e7ae1f40..f3c8747caf91 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1358,6 +1358,7 @@ int tcp_send_synack(struct sock *sk)
 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                  struct open_request *req)
 {
+        struct inet_request_sock *ireq = inet_rsk(req);
         struct tcp_sock *tp = tcp_sk(sk);
         struct tcphdr *th;
         int tcp_header_size;
@@ -1373,47 +1374,47 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         skb->dst = dst_clone(dst);
 
         tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
-                           (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
-                           (req->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
+                           (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
+                           (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
                            /* SACK_PERM is in the place of NOP NOP of TS */
-                           ((req->sack_ok && !req->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+                           ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
         skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 
         memset(th, 0, sizeof(struct tcphdr));
         th->syn = 1;
         th->ack = 1;
         if (dst->dev->features&NETIF_F_TSO)
-                req->ecn_ok = 0;
+                ireq->ecn_ok = 0;
         TCP_ECN_make_synack(req, th);
         th->source = inet_sk(sk)->sport;
-        th->dest = req->rmt_port;
-        TCP_SKB_CB(skb)->seq = req->snt_isn;
+        th->dest = ireq->rmt_port;
+        TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
         TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
         TCP_SKB_CB(skb)->sacked = 0;
         skb_shinfo(skb)->tso_segs = 1;
         skb_shinfo(skb)->tso_size = 0;
         th->seq = htonl(TCP_SKB_CB(skb)->seq);
-        th->ack_seq = htonl(req->rcv_isn + 1);
+        th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
         if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
                 __u8 rcv_wscale;
                 /* Set this up on the first call only */
                 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
                 /* tcp_full_space because it is guaranteed to be the first packet */
                 tcp_select_initial_window(tcp_full_space(sk),
-                        dst_metric(dst, RTAX_ADVMSS) - (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+                        dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                         &req->rcv_wnd,
                         &req->window_clamp,
-                        req->wscale_ok,
+                        ireq->wscale_ok,
                         &rcv_wscale);
-                req->rcv_wscale = rcv_wscale;
+                ireq->rcv_wscale = rcv_wscale;
         }
 
         /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
         th->window = htons(req->rcv_wnd);
 
         TCP_SKB_CB(skb)->when = tcp_time_stamp;
-        tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), req->tstamp_ok,
-                              req->sack_ok, req->wscale_ok, req->rcv_wscale,
+        tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
+                              ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
                               TCP_SKB_CB(skb)->when,
                               req->ts_recent);
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 799ebe061e2c..ba30ca0aa6a3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -513,7 +513,7 @@ static void tcp_synack_timer(struct sock *sk)
                 while ((req = *reqp) != NULL) {
                         if (time_after_eq(now, req->expires)) {
                                 if ((req->retrans < thresh ||
-                                     (req->acked && req->retrans < max_retries))
+                                     (inet_rsk(req)->acked && req->retrans < max_retries))
                                     && !req->class->rtx_syn_ack(sk, req, NULL)) {
                                         unsigned long timeo;
 