author	Eric Dumazet <edumazet@google.com>	2013-10-09 18:21:29 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-10 00:08:07 -0400
commit	634fb979e8f3a70f04c1f2f519d0cd1142eb5c1a (patch)
tree	ffd7f7ef4a313c94859180d1bc20f2713a11f987 /net/ipv4
parent	8a29111c7ca68d928dfab58636f3f6acf0ac04f7 (diff)
inet: includes a sock_common in request_sock

TCP listener refactoring, part 5 :

We want to be able to insert request sockets (SYN_RECV) into main
ehash table instead of the per listener hash table to allow RCU
lookups and remove listener lock contention.

This patch includes the needed struct sock_common in front
of struct request_sock

This means there is no more inet6_request_sock IPv6 specific
structure.

Following inet_request_sock fields were renamed as they became
macros to reference fields from struct sock_common.
Prefix ir_ was chosen to avoid name collisions.

loc_port -> ir_loc_port
loc_addr -> ir_loc_addr
rmt_addr -> ir_rmt_addr
rmt_port -> ir_rmt_port
iif      -> ir_iif

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
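The rename works because the fields no longer have storage of their own: they become macros that resolve into the struct sock_common now embedded (through struct request_sock) at the front of struct inet_request_sock, so callers keep the old field syntax while the data lands in the common layout that generic socket lookup code can be written against. Below is a minimal stand-alone C sketch of that aliasing pattern; the simplified struct layouts and the skc_* member mappings are illustrative assumptions, not the definitions added by this patch (those live in include/net/inet_sock.h and include/net/sock.h).

/*
 * Sketch of the aliasing trick: old field names become macros into an
 * embedded common struct.  Layouts and skc_* mappings are assumptions
 * made for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct sock_common_sketch {		/* stand-in for struct sock_common */
	uint32_t skc_daddr;		/* remote IPv4 address */
	uint32_t skc_rcv_saddr;		/* local IPv4 address */
	uint16_t skc_dport;		/* remote port */
};

struct request_sock_sketch {		/* stand-in for struct request_sock */
	struct sock_common_sketch __req_common;	/* must stay the first member */
};

struct inet_request_sock_sketch {	/* stand-in for struct inet_request_sock */
	struct request_sock_sketch req;
/* The renamed fields are macros reaching into the embedded common part,
 * so ireq.ir_rmt_addr expands to ireq.req.__req_common.skc_daddr: */
#define ir_rmt_addr req.__req_common.skc_daddr
#define ir_loc_addr req.__req_common.skc_rcv_saddr
#define ir_rmt_port req.__req_common.skc_dport
};

int main(void)
{
	struct inet_request_sock_sketch ireq = { { { 0, 0, 0 } } };

	ireq.ir_rmt_addr = 0x0a000001;	/* callers keep using the ir_* names... */
	ireq.ir_rmt_port = 80;

	/* ...but the values live in the shared sock_common layout. */
	printf("%x %u\n", (unsigned)ireq.req.__req_common.skc_daddr,
	       (unsigned)ireq.req.__req_common.skc_dport);
	return 0;
}

Because a request sock now starts with the same sock_common bytes as a full socket, later parts of the series can hash SYN_RECV entries into the main ehash table, as the changelog above describes.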
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/inet_connection_sock.c	23
-rw-r--r--	net/ipv4/inet_diag.c	22
-rw-r--r--	net/ipv4/syncookies.c	12
-rw-r--r--	net/ipv4/tcp_ipv4.c	38
-rw-r--r--	net/ipv4/tcp_metrics.c	8
-rw-r--r--	net/ipv4/tcp_output.c	4
6 files changed, 55 insertions, 52 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 56e82a4027b4..2ffd931d652f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -412,8 +412,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol,
 			   flags,
-			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -448,8 +448,8 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
-			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -495,9 +495,9 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
 	     prev = &req->dl_next) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
-		if (ireq->rmt_port == rport &&
-		    ireq->rmt_addr == raddr &&
-		    ireq->loc_addr == laddr &&
+		if (ireq->ir_rmt_port == rport &&
+		    ireq->ir_rmt_addr == raddr &&
+		    ireq->ir_loc_addr == laddr &&
 		    AF_INET_FAMILY(req->rsk_ops->family)) {
 			WARN_ON(req->sk);
 			*prevp = prev;
@@ -514,7 +514,8 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
+				     inet_rsk(req)->ir_rmt_port,
 				     lopt->hash_rnd, lopt->nr_table_entries);
 
 	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -674,9 +675,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 		newsk->sk_state = TCP_SYN_RECV;
 		newicsk->icsk_bind_hash = NULL;
 
-		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
-		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
-		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
+		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
+		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->ir_loc_port);
+		inet_sk(newsk)->inet_sport = inet_rsk(req)->ir_loc_port;
 		newsk->sk_write_space = sk_stream_write_space;
 
 		newicsk->icsk_retransmits = 0;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ecc179d676e4..41e1c3ea8b51 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -679,12 +679,12 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
 #if IS_ENABLED(CONFIG_IPV6)
 	if (sk->sk_family == AF_INET6) {
 		if (req->rsk_ops->family == AF_INET6) {
-			entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
-			entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+			entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
+			entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
 		} else if (req->rsk_ops->family == AF_INET) {
-			ipv6_addr_set_v4mapped(ireq->loc_addr,
+			ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
 					       &entry->saddr_storage);
-			ipv6_addr_set_v4mapped(ireq->rmt_addr,
+			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
 					       &entry->daddr_storage);
 			entry->saddr = entry->saddr_storage.s6_addr32;
 			entry->daddr = entry->daddr_storage.s6_addr32;
@@ -692,8 +692,8 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
 	} else
 #endif
 	{
-		entry->saddr = &ireq->loc_addr;
-		entry->daddr = &ireq->rmt_addr;
+		entry->saddr = &ireq->ir_loc_addr;
+		entry->daddr = &ireq->ir_rmt_addr;
 	}
 }
 
@@ -728,9 +728,9 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 		tmo = 0;
 
 	r->id.idiag_sport = inet->inet_sport;
-	r->id.idiag_dport = ireq->rmt_port;
-	r->id.idiag_src[0] = ireq->loc_addr;
-	r->id.idiag_dst[0] = ireq->rmt_addr;
+	r->id.idiag_dport = ireq->ir_rmt_port;
+	r->id.idiag_src[0] = ireq->ir_loc_addr;
+	r->id.idiag_dst[0] = ireq->ir_rmt_addr;
 	r->idiag_expires = jiffies_to_msecs(tmo);
 	r->idiag_rqueue = 0;
 	r->idiag_wqueue = 0;
@@ -789,13 +789,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
 			if (reqnum < s_reqnum)
 				continue;
-			if (r->id.idiag_dport != ireq->rmt_port &&
+			if (r->id.idiag_dport != ireq->ir_rmt_port &&
 			    r->id.idiag_dport)
 				continue;
 
 			if (bc) {
 				inet_diag_req_addrs(sk, req, &entry);
-				entry.dport = ntohs(ireq->rmt_port);
+				entry.dport = ntohs(ireq->ir_rmt_port);
 
 				if (!inet_diag_bc_run(bc, &entry))
 					continue;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 15e024105f91..984e21cf3c50 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -304,10 +304,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	req->mss = mss;
-	ireq->loc_port = th->dest;
-	ireq->rmt_port = th->source;
-	ireq->loc_addr = ip_hdr(skb)->daddr;
-	ireq->rmt_addr = ip_hdr(skb)->saddr;
+	ireq->ir_loc_port = th->dest;
+	ireq->ir_rmt_port = th->source;
+	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
 	ireq->ecn_ok = ecn_ok;
 	ireq->snd_wscale = tcp_opt.snd_wscale;
 	ireq->sack_ok = tcp_opt.sack_ok;
@@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
-			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-			   ireq->loc_addr, th->source, th->dest);
+			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, th->source, th->dest);
 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(sock_net(sk), &fl4);
 	if (IS_ERR(rt)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e4695dde1af6..114d1b748cbb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -835,11 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	skb = tcp_make_synack(sk, dst, req, NULL);
 
 	if (skb) {
-		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
 		skb_set_queue_mapping(skb, queue_mapping);
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
+		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+					    ireq->ir_rmt_addr,
 					    ireq->opt);
 		err = net_xmit_eval(err);
 		if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +972,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 {
 	union tcp_md5_addr *addr;
 
-	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
 	return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 
@@ -1149,8 +1149,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 		saddr = inet_sk(sk)->inet_saddr;
 		daddr = inet_sk(sk)->inet_daddr;
 	} else if (req) {
-		saddr = inet_rsk(req)->loc_addr;
-		daddr = inet_rsk(req)->rmt_addr;
+		saddr = inet_rsk(req)->ir_loc_addr;
+		daddr = inet_rsk(req)->ir_rmt_addr;
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
 		saddr = iph->saddr;
@@ -1366,8 +1366,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
 		kfree_skb(skb_synack);
 		return -1;
 	}
-	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-				    ireq->rmt_addr, ireq->opt);
+	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+				    ireq->ir_rmt_addr, ireq->opt);
 	err = net_xmit_eval(err);
 	if (!err)
 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1502,8 +1502,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_openreq_init(req, &tmp_opt, skb);
 
 	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
+	ireq->ir_loc_addr = daddr;
+	ireq->ir_rmt_addr = saddr;
 	ireq->no_srccheck = inet_sk(sk)->transparent;
 	ireq->opt = tcp_v4_save_options(skb);
 
@@ -1578,15 +1578,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
 
 	if (skb_synack) {
-		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
 	} else
 		goto drop_and_free;
 
 	if (likely(!do_fastopen)) {
 		int err;
-		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-					    ireq->rmt_addr, ireq->opt);
+		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+					    ireq->ir_rmt_addr, ireq->opt);
 		err = net_xmit_eval(err);
 		if (err || want_cookie)
 			goto drop_and_free;
@@ -1644,9 +1644,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newtp = tcp_sk(newsk);
 	newinet = inet_sk(newsk);
 	ireq = inet_rsk(req);
-	newinet->inet_daddr = ireq->rmt_addr;
-	newinet->inet_rcv_saddr = ireq->loc_addr;
-	newinet->inet_saddr = ireq->loc_addr;
+	newinet->inet_daddr = ireq->ir_rmt_addr;
+	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+	newinet->inet_saddr = ireq->ir_loc_addr;
 	inet_opt = ireq->opt;
 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
 	ireq->opt = NULL;
@@ -2548,10 +2548,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
 		i,
-		ireq->loc_addr,
+		ireq->ir_loc_addr,
 		ntohs(inet_sk(sk)->inet_sport),
-		ireq->rmt_addr,
-		ntohs(ireq->rmt_port),
+		ireq->ir_rmt_addr,
+		ntohs(ireq->ir_rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 8fcc2cb9dba4..4a2a84110dfb 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -215,13 +215,15 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 	addr.family = req->rsk_ops->family;
 	switch (addr.family) {
 	case AF_INET:
-		addr.addr.a4 = inet_rsk(req)->rmt_addr;
+		addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
 		hash = (__force unsigned int) addr.addr.a4;
 		break;
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
-		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+		*(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 		break;
+#endif
 	default:
 		return NULL;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c6f01f2cdb32..faec81353522 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2734,8 +2734,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	th->syn = 1;
 	th->ack = 1;
 	TCP_ECN_make_synack(req, th);
-	th->source = ireq->loc_port;
-	th->dest = ireq->rmt_port;
+	th->source = ireq->ir_loc_port;
+	th->dest = ireq->ir_rmt_port;
 	/* Setting of flags are superfluous here for callers (and ECE is
 	 * not even correctly set)
 	 */