path: root/net/ipv6/tcp_ipv6.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 03:40:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 03:40:34 -0500
commit		42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd
tree		2b2b0c03b5389c1301800119333967efafd994ca /net/ipv6/tcp_ipv6.c
parent		5cbb3d216e2041700231bcfc383ee5f8b7fc8b74
parent		75ecab1df14d90e86cebef9ec5c76befde46e65f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) The addition of nftables. No longer will we need protocol aware firewall filtering modules, it can all live in userspace. At the core of nftables is a, for lack of a better term, virtual machine that executes byte codes to inspect packets or metadata (arriving interface index, etc.) and make verdict decisions. Besides support for loading packet contents and comparing them, the interpreter supports lookups in various data structures as fundamental operations. For example, sets are supported, so one could create a set of whitelisted IP address entries with ACCEPT verdicts attached to them and use the appropriate byte codes to do such lookups. Since the interpreted code is composed in userspace, userspace can optimize it before handing it to the kernel. Another major improvement is the capability of atomically updating portions of the ruleset; in the existing netfilter implementation, one has to update the entire rule set in order to make a change, which is very expensive. Userspace tools exist to create nftables rules from existing netfilter rule sets, but both kernel implementations will need to co-exist for quite some time as we transition from the old to the new stuff. Kudos to Patrick McHardy, Pablo Neira Ayuso, and others who have worked so hard on this.

 2) Daniel Borkmann and Hannes Frederic Sowa made several improvements to our pseudo-random number generator, mostly used for things like UDP port randomization and netfilter, amongst other things. In particular the taus88 generator is updated to taus113, and test cases are added.

 3) Support 64-bit rates in the HTB and TBF schedulers, from Eric Dumazet and Yang Yingliang.

 4) Add support for new 577xx tigon3 chips to the tg3 driver, from Nithin Sujir.

 5) Fix two fatal flaws in TCP dynamic right sizing, from Eric Dumazet, Neal Cardwell, and Yuchung Cheng.

 6) Allow IP_TOS and IP_TTL to be specified in sendmsg() ancillary control message data, much like other socket option attributes. From Francesco Fusco. (A hedged usage sketch appears after the shortlog below.)

 7) Allow applications to specify a cap on the rate computed automatically by the kernel for pacing flows, via a new SO_MAX_PACING_RATE socket option. From Eric Dumazet. (Sketch below.)

 8) Make the initial autotuned send buffer sizing in TCP more closely reflect actual needs, from Eric Dumazet.

 9) Currently early socket demux only happens for TCP sockets, but we can do it for connected UDP sockets too. Implementation from Shawn Bohrer.

10) Refactor inet socket demux with the goal of improving hash demux performance for listening sockets, the main goals being the ability to use RCU lookups even on request sockets and the elimination of listening-lock contention. From Eric Dumazet.

11) The bonding layer has many demuxes in its fast path, and an RCU conversion was started back in 3.11; several changes here extend the RCU usage to even more locations. From Ding Tianhong and Wang Yufen, based upon suggestions by Nikolay Aleksandrov and Veaceslav Falico.

12) Allow stacking of segmentation offloads so that, in particular, segmentation offloading works over tunnels. From Eric Dumazet.

13) Significantly improve the handling of the secret keys we feed into the various hash functions in the inet hashtables, TCP fast open, as well as syncookies. From Hannes Frederic Sowa. The key fundamental operation is "net_get_random_once()", which uses static keys. Hannes even extended this to ipv4/ipv6 fragmentation handling and our generic flow dissector. (Sketch of the pattern below.)

14) The generic driver layer now takes care to set the driver data to NULL on device removal, so it's no longer necessary for drivers to explicitly set it to NULL any more. Many drivers have been cleaned up in this way, from Jingoo Han.

15) Add a BPF based packet scheduler classifier, from Daniel Borkmann.

16) Improve CRC32 interfaces and generic SKB checksum iterators so that SCTP's checksumming can be handled more cleanly. Also from Daniel Borkmann.

17) Add a new PMTU discovery mode, IP_PMTUDISC_INTERFACE, which forces using the interface MTU value. This helps avoid PMTU attacks, particularly on DNS servers. From Hannes Frederic Sowa. (Sketch below.)

18) Use generic XPS for transmit queue steering rather than an internal (re-)implementation in virtio-net. From Jason Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1622 commits)
  random32: add test cases for taus113 implementation
  random32: upgrade taus88 generator to taus113 from errata paper
  random32: move rnd_state to linux/random.h
  random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized
  random32: add periodic reseeding
  random32: fix off-by-one in seeding requirement
  PHY: Add RTL8201CP phy_driver to realtek
  xtsonic: add missing platform_set_drvdata() in xtsonic_probe()
  macmace: add missing platform_set_drvdata() in mace_probe()
  ethernet/arc/arc_emac: add missing platform_set_drvdata() in arc_emac_probe()
  ipv6: protect for_each_sk_fl_rcu in mem_check with rcu_read_lock_bh
  vlan: Implement vlan_dev_get_egress_qos_mask as an inline.
  ixgbe: add warning when max_vfs is out of range.
  igb: Update link modes display in ethtool
  netfilter: push reasm skb through instead of original frag skbs
  ip6_output: fragment outgoing reassembled skb properly
  MAINTAINERS: mv643xx_eth: take over maintainership from Lennart
  net_sched: tbf: support of 64bit rates
  ixgbe: deleting dfwd stations out of order can cause null ptr deref
  ixgbe: fix build err, num_rx_queues is only available with CONFIG_RPS
  ...
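Item 6 above (IP_TOS/IP_TTL as ancillary data) is plain sendmsg() cmsg plumbing. A minimal userspace sketch, assuming a UDP socket fd and a destination already resolved into dst/dstlen; the helper name and the example TOS/TTL values are illustrative, not from this tree:

/* Sketch: send one UDP datagram with per-message TOS and TTL via cmsg
 * ancillary data (item 6). Helper name and the 0x10/32 values are examples. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static ssize_t send_with_tos_ttl(int fd, const void *buf, size_t len,
				 const struct sockaddr *dst, socklen_t dstlen)
{
	union {		/* keep the cmsg buffer properly aligned */
		char buf[2 * CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= dstlen,
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= u.buf,
		.msg_controllen	= sizeof(u.buf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
	int tos = 0x10, ttl = 32;

	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type  = IP_TOS;
	cm->cmsg_len   = CMSG_LEN(sizeof(tos));
	memcpy(CMSG_DATA(cm), &tos, sizeof(tos));

	cm = CMSG_NXTHDR(&msg, cm);
	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type  = IP_TTL;
	cm->cmsg_len   = CMSG_LEN(sizeof(ttl));
	memcpy(CMSG_DATA(cm), &ttl, sizeof(ttl));

	return sendmsg(fd, &msg, 0);
}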
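Item 7's SO_MAX_PACING_RATE is an ordinary SOL_SOCKET option taking a bytes-per-second value. A minimal sketch; the fallback constant is an assumption for libc headers that predate the option (47 matches asm-generic/socket.h):

/* Sketch: cap the kernel's pacing rate for this socket (bytes per second). */
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* assumed value, from asm-generic/socket.h */
#endif

static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &bytes_per_sec, sizeof(bytes_per_sec));
}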
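Item 13's net_get_random_once() is a kernel-internal helper; the usual pattern is to fill a hash secret from the RNG exactly once, lazily, on the first packet that needs it, with a static key keeping the fast path cheap. A rough kernel-side sketch of that pattern; example_hash_secret and example_hashfn() are illustrative names, not anything in this tree:

/* Rough sketch of the net_get_random_once() usage pattern from item 13. */
#include <linux/net.h>		/* net_get_random_once() (its v3.13 home) */
#include <linux/jhash.h>
#include <linux/cache.h>

static u32 example_hash_secret __read_mostly;

static u32 example_hashfn(__be32 saddr, __be32 daddr)
{
	net_get_random_once(&example_hash_secret, sizeof(example_hash_secret));
	return jhash_2words((__force u32)saddr, (__force u32)daddr,
			    example_hash_secret);
}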
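Item 17's IP_PMTUDISC_INTERFACE is selected through the existing IP_MTU_DISCOVER socket option. A minimal sketch, again guarding against userspace headers that predate the new constant (the value 4 is assumed from linux/in.h of this cycle):

/* Sketch: make a (typically UDP/DNS) socket ignore learned PMTU values and
 * always use the outgoing interface MTU, per item 17. */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_PMTUDISC_INTERFACE
#define IP_PMTUDISC_INTERFACE 4	/* assumed value, from linux/in.h */
#endif

static int use_interface_mtu(int fd)
{
	int val = IP_PMTUDISC_INTERFACE;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}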
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--	net/ipv6/tcp_ipv6.c	113
1 file changed, 57 insertions(+), 56 deletions(-)
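The hunks below are mechanical fallout of this merge's inet/inet6 consolidation: the IPv6 destination and bound addresses move out of ipv6_pinfo and inet6_request_sock into the common socket structures (sk->sk_v6_daddr, sk->sk_v6_rcv_saddr, inet_rsk(req)->ir_v6_rmt_addr/ir_v6_loc_addr, ir_iif, ir_num, ir_rmt_port). A rough sketch of where those names point, reconstructed from memory of the v3.13 headers rather than from this page, so treat member placement as an assumption:

/* Approximate shape after the consolidation (assumed, from memory of
 * include/net/sock.h and include/net/inet_sock.h around v3.13). */
struct sock_common {
	/* ... IPv4 addresses, ports, family, skc_bound_dev_if ... */
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif
	/* ... */
};

/* struct sock, struct request_sock and the timewait sock all begin with a
 * sock_common, so full, request and timewait sockets can be looked up through
 * the same fields; the names used in this diff are defines onto it, roughly: */
#define sk_v6_daddr		__sk_common.skc_v6_daddr		/* struct sock */
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define ir_v6_rmt_addr		req.__req_common.skc_v6_daddr		/* inet_request_sock */
#define ir_v6_loc_addr		req.__req_common.skc_v6_rcv_saddr
#define ir_iif			req.__req_common.skc_bound_dev_if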
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5c71501fc917..0740f93a114a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -192,13 +192,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	}
 
 	if (tp->rx_opt.ts_recent_stamp &&
-	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
+	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
 		tp->rx_opt.ts_recent = 0;
 		tp->rx_opt.ts_recent_stamp = 0;
 		tp->write_seq = 0;
 	}
 
-	np->daddr = usin->sin6_addr;
+	sk->sk_v6_daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
 	/*
@@ -237,17 +237,17 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		} else {
 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-					       &np->rcv_saddr);
+					       &sk->sk_v6_rcv_saddr);
 		}
 
 		return err;
 	}
 
-	if (!ipv6_addr_any(&np->rcv_saddr))
-		saddr = &np->rcv_saddr;
+	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+		saddr = &sk->sk_v6_rcv_saddr;
 
 	fl6.flowi6_proto = IPPROTO_TCP;
-	fl6.daddr = np->daddr;
+	fl6.daddr = sk->sk_v6_daddr;
 	fl6.saddr = saddr ? *saddr : np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
@@ -266,7 +266,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 	if (saddr == NULL) {
 		saddr = &fl6.saddr;
-		np->rcv_saddr = *saddr;
+		sk->sk_v6_rcv_saddr = *saddr;
 	}
 
 	/* set the source address */
@@ -279,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	rt = (struct rt6_info *) dst;
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
-	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
 		tcp_fetch_timewait_stamp(sk, dst);
 
 	icsk->icsk_ext_hdr_len = 0;
@@ -298,7 +298,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
-							     np->daddr.s6_addr32,
+							     sk->sk_v6_daddr.s6_addr32,
 							     inet->inet_sport,
 							     inet->inet_dport);
 
@@ -465,7 +465,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
 			      u16 queue_mapping)
 {
-	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff * skb;
 	int err = -ENOMEM;
@@ -477,9 +477,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 	skb = tcp_make_synack(sk, dst, req, NULL);
 
 	if (skb) {
-		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
+		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
+				    &ireq->ir_v6_rmt_addr);
 
-		fl6->daddr = treq->rmt_addr;
+		fl6->daddr = ireq->ir_v6_rmt_addr;
 		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
@@ -502,7 +503,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
-	kfree_skb(inet6_rsk(req)->pktopts);
+	kfree_skb(inet_rsk(req)->pktopts);
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -515,13 +516,13 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 						struct sock *addr_sk)
 {
-	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 						      struct request_sock *req)
 {
-	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 }
 
 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
@@ -621,10 +622,10 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 
 	if (sk) {
 		saddr = &inet6_sk(sk)->saddr;
-		daddr = &inet6_sk(sk)->daddr;
+		daddr = &sk->sk_v6_daddr;
 	} else if (req) {
-		saddr = &inet6_rsk(req)->loc_addr;
-		daddr = &inet6_rsk(req)->rmt_addr;
+		saddr = &inet_rsk(req)->ir_v6_loc_addr;
+		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
 	} else {
 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 		saddr = &ip6h->saddr;
@@ -949,7 +950,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_options_received tmp_opt;
 	struct request_sock *req;
-	struct inet6_request_sock *treq;
+	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
@@ -994,25 +995,25 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 	tcp_openreq_init(req, &tmp_opt, skb);
 
-	treq = inet6_rsk(req);
-	treq->rmt_addr = ipv6_hdr(skb)->saddr;
-	treq->loc_addr = ipv6_hdr(skb)->daddr;
+	ireq = inet_rsk(req);
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	if (!want_cookie || tmp_opt.tstamp_ok)
 		TCP_ECN_create_request(req, skb, sock_net(sk));
 
-	treq->iif = sk->sk_bound_dev_if;
+	ireq->ir_iif = sk->sk_bound_dev_if;
 
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		treq->iif = inet6_iif(skb);
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
 
 	if (!isn) {
 		if (ipv6_opt_accepted(sk, skb) ||
 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
 			atomic_inc(&skb->users);
-			treq->pktopts = skb;
+			ireq->pktopts = skb;
 		}
 
 		if (want_cookie) {
@@ -1051,7 +1052,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		 * to the moment of synflood.
 		 */
 		LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-			       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+			       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
 		goto drop_and_release;
 	}
 
@@ -1086,7 +1087,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
 					 struct dst_entry *dst)
 {
-	struct inet6_request_sock *treq;
+	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
@@ -1116,11 +1117,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-		newnp->rcv_saddr = newnp->saddr;
+		newsk->sk_v6_rcv_saddr = newnp->saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1151,7 +1152,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		return newsk;
 	}
 
-	treq = inet6_rsk(req);
+	ireq = inet_rsk(req);
 
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
@@ -1185,10 +1186,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	newnp->daddr = treq->rmt_addr;
-	newnp->saddr = treq->loc_addr;
-	newnp->rcv_saddr = treq->loc_addr;
-	newsk->sk_bound_dev_if = treq->iif;
+	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+	newnp->saddr = ireq->ir_v6_loc_addr;
+	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+	newsk->sk_bound_dev_if = ireq->ir_iif;
 
 	/* Now IPv6 options...
 
@@ -1203,11 +1204,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	/* Clone pktoptions received with SYN */
 	newnp->pktoptions = NULL;
-	if (treq->pktopts != NULL) {
-		newnp->pktoptions = skb_clone(treq->pktopts,
+	if (ireq->pktopts != NULL) {
+		newnp->pktoptions = skb_clone(ireq->pktopts,
 					      sk_gfp_atomic(sk, GFP_ATOMIC));
-		consume_skb(treq->pktopts);
-		treq->pktopts = NULL;
+		consume_skb(ireq->pktopts);
+		ireq->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
@@ -1244,13 +1245,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
-	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+	if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
 		/* We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
 		 * memory, then we end up not copying the key
 		 * across. Shucks.
 		 */
-		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
 			       AF_INET6, key->key, key->keylen,
 			       sk_gfp_atomic(sk, GFP_ATOMIC));
 	}
@@ -1722,8 +1723,8 @@ static void get_openreq6(struct seq_file *seq,
 			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
 	int ttd = req->expires - jiffies;
-	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
+	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
 	if (ttd < 0)
 		ttd = 0;
@@ -1734,10 +1735,10 @@ static void get_openreq6(struct seq_file *seq,
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3],
-		   ntohs(inet_rsk(req)->loc_port),
+		   inet_rsk(req)->ir_num,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3],
-		   ntohs(inet_rsk(req)->rmt_port),
+		   ntohs(inet_rsk(req)->ir_rmt_port),
 		   TCP_SYN_RECV,
 		   0,0, /* could print option size, but that is af dependent. */
 		   1,   /* timers active (only the expire timer) */
@@ -1758,10 +1759,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	const struct ipv6_pinfo *np = inet6_sk(sp);
 
-	dest = &np->daddr;
-	src = &np->rcv_saddr;
+	dest = &sp->sk_v6_daddr;
+	src = &sp->sk_v6_rcv_saddr;
 	destp = ntohs(inet->inet_dport);
 	srcp = ntohs(inet->inet_sport);
 
@@ -1810,11 +1810,10 @@ static void get_timewait6_sock(struct seq_file *seq,
 {
 	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
-	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-	long delta = tw->tw_ttd - jiffies;
+	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
-	dest = &tw6->tw_v6_daddr;
-	src = &tw6->tw_v6_rcv_saddr;
+	dest = &tw->tw_v6_daddr;
+	src = &tw->tw_v6_rcv_saddr;
 	destp = ntohs(tw->tw_dport);
 	srcp = ntohs(tw->tw_sport);
 
@@ -1834,6 +1833,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 {
 	struct tcp_iter_state *st;
+	struct sock *sk = v;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1849,14 +1849,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
 	switch (st->state) {
 	case TCP_SEQ_STATE_LISTENING:
 	case TCP_SEQ_STATE_ESTABLISHED:
-		get_tcp6_sock(seq, v, st->num);
+		if (sk->sk_state == TCP_TIME_WAIT)
+			get_timewait6_sock(seq, v, st->num);
+		else
+			get_tcp6_sock(seq, v, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
 		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
-		get_timewait6_sock(seq, v, st->num);
-		break;
 	}
 out:
 	return 0;
@@ -1929,6 +1929,7 @@ struct proto tcpv6_prot = {
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
 	.orphan_count		= &tcp_orphan_count,
+	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,