Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
 net/ipv6/tcp_ipv6.c | 437 ++++++++++++++--------------------------
 1 file changed, 153 insertions(+), 284 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ef29cfd936d3..794734f1d230 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -47,6 +47,7 @@
 
 #include <net/tcp.h>
 #include <net/ndisc.h>
+#include <net/inet6_hashtables.h>
 #include <net/ipv6.h>
 #include <net/transp_v6.h>
 #include <net/addrconf.h>
@@ -75,34 +76,11 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
 static struct tcp_func ipv6_mapped;
 static struct tcp_func ipv6_specific;
 
-/* I have no idea if this is a good hash for v6 or not. -DaveM */
-static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
-                                    struct in6_addr *faddr, u16 fport)
-{
-        int hashent = (lport ^ fport);
-
-        hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
-        hashent ^= hashent>>16;
-        hashent ^= hashent>>8;
-        return (hashent & (tcp_ehash_size - 1));
-}
-
-static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
-{
-        struct inet_sock *inet = inet_sk(sk);
-        struct ipv6_pinfo *np = inet6_sk(sk);
-        struct in6_addr *laddr = &np->rcv_saddr;
-        struct in6_addr *faddr = &np->daddr;
-        __u16 lport = inet->num;
-        __u16 fport = inet->dport;
-        return tcp_v6_hashfn(laddr, lport, faddr, fport);
-}
-
-static inline int tcp_v6_bind_conflict(struct sock *sk,
-                                       struct tcp_bind_bucket *tb)
-{
-        struct sock *sk2;
-        struct hlist_node *node;
+static inline int tcp_v6_bind_conflict(const struct sock *sk,
+                                       const struct inet_bind_bucket *tb)
+{
+        const struct sock *sk2;
+        const struct hlist_node *node;
 
         /* We must walk the whole port owner list in this case. -DaveM */
         sk_for_each_bound(sk2, node, &tb->owners) {
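
[Editor's note: the established-hash function removed above does not disappear from the kernel; later hunks in this patch call inet6_ehashfn() with an explicit ehash_size argument. A plausible reading, not verified against include/net/inet6_hashtables.h of this tree, is that the helper moved there essentially verbatim, with the table size turned into a parameter:

    /* Sketch only: assumes a near-verbatim move of the code removed above. */
    static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
                                    const struct in6_addr *faddr, const u16 fport,
                                    const int ehash_size)
    {
            int hashent = (lport ^ fport);

            hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
            hashent ^= hashent >> 16;
            hashent ^= hashent >> 8;
            return hashent & (ehash_size - 1);
    }

DaveM's "no idea if this is a good hash" caveat presumably travelled with it.]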
@@ -126,8 +104,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk,
  */
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
-        struct tcp_bind_hashbucket *head;
-        struct tcp_bind_bucket *tb;
+        struct inet_bind_hashbucket *head;
+        struct inet_bind_bucket *tb;
         struct hlist_node *node;
         int ret;
 
@@ -138,25 +116,25 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
                 int remaining = (high - low) + 1;
                 int rover;
 
-                spin_lock(&tcp_portalloc_lock);
-                if (tcp_port_rover < low)
+                spin_lock(&tcp_hashinfo.portalloc_lock);
+                if (tcp_hashinfo.port_rover < low)
                         rover = low;
                 else
-                        rover = tcp_port_rover;
+                        rover = tcp_hashinfo.port_rover;
                 do { rover++;
                         if (rover > high)
                                 rover = low;
-                        head = &tcp_bhash[tcp_bhashfn(rover)];
+                        head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
                         spin_lock(&head->lock);
-                        tb_for_each(tb, node, &head->chain)
+                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                 if (tb->port == rover)
                                         goto next;
                         break;
                 next:
                         spin_unlock(&head->lock);
                 } while (--remaining > 0);
-                tcp_port_rover = rover;
-                spin_unlock(&tcp_portalloc_lock);
+                tcp_hashinfo.port_rover = rover;
+                spin_unlock(&tcp_hashinfo.portalloc_lock);
 
                 /* Exhausted local port range during search?  It is not
                  * possible for us to be holding one of the bind hash
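
[Editor's note: inet_bhashfn() now receives the table size from tcp_hashinfo instead of reading a TCP-private global. Judging by the call sites, it can only be the usual power-of-two mask over the local port; a sketch, assuming the old tcp_bhashfn() behaviour was kept:

    static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
    {
            return lport & (bhash_size - 1);
    }
]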
@@ -171,9 +149,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
                 /* OK, here is the one we will use. */
                 snum = rover;
         } else {
-                head = &tcp_bhash[tcp_bhashfn(snum)];
+                head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
                 spin_lock(&head->lock);
-                tb_for_each(tb, node, &head->chain)
+                inet_bind_bucket_for_each(tb, node, &head->chain)
                         if (tb->port == snum)
                                 goto tb_found;
         }
@@ -192,8 +170,11 @@ tb_found:
         }
 tb_not_found:
         ret = 1;
-        if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
-                goto fail_unlock;
+        if (tb == NULL) {
+                tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+                if (tb == NULL)
+                        goto fail_unlock;
+        }
         if (hlist_empty(&tb->owners)) {
                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                         tb->fastreuse = 1;
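
[Editor's note: unlike the old tcp_bucket_create(head, snum), the generalised allocator is handed its kmem_cache explicitly, so one implementation can serve every protocol that keeps an inet_hashinfo. Assumed shape, abridged, inferred from the call site above plus the fields this file touches (tb->port, tb->fastreuse, tb->owners):

    struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
                                                     struct inet_bind_hashbucket *head,
                                                     const unsigned short snum)
    {
            /* GFP-atomic: callers hold the per-bucket spinlock */
            struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);

            if (tb != NULL) {
                    tb->port = snum;
                    tb->fastreuse = 0;
                    INIT_HLIST_HEAD(&tb->owners);
                    hlist_add_head(&tb->node, &head->chain);
            }
            return tb;
    }
]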
@@ -204,9 +185,9 @@ tb_not_found:
                         tb->fastreuse = 0;
 
 success:
-        if (!tcp_sk(sk)->bind_hash)
-                tcp_bind_hash(sk, tb, snum);
-        BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+        if (!inet_csk(sk)->icsk_bind_hash)
+                inet_bind_hash(sk, tb, snum);
+        BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
         ret = 0;
 
 fail_unlock:
@@ -224,13 +205,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
         BUG_TRAP(sk_unhashed(sk));
 
         if (sk->sk_state == TCP_LISTEN) {
-                list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
-                lock = &tcp_lhash_lock;
-                tcp_listen_wlock();
+                list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+                lock = &tcp_hashinfo.lhash_lock;
+                inet_listen_wlock(&tcp_hashinfo);
         } else {
-                sk->sk_hashent = tcp_v6_sk_hashfn(sk);
-                list = &tcp_ehash[sk->sk_hashent].chain;
-                lock = &tcp_ehash[sk->sk_hashent].lock;
+                sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+                list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+                lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
                 write_lock(lock);
         }
 
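
[Editor's note: inet6_sk_ehashfn() replaces the removed tcp_v6_sk_hashfn() one-for-one at this call site, so it presumably keeps the same pull-fields-then-hash structure, now delegating to inet6_ehashfn():

    /* Assumed wrapper, mirroring the removed tcp_v6_sk_hashfn(). */
    static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
    {
            const struct inet_sock *inet = inet_sk(sk);
            const struct ipv6_pinfo *np = inet6_sk(sk);

            return inet6_ehashfn(&np->rcv_saddr, inet->num,
                                 &np->daddr, inet->dport, ehash_size);
    }
]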
@@ -255,131 +236,11 @@ static void tcp_v6_hash(struct sock *sk)
         }
 }
 
-static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
-{
-        struct sock *sk;
-        struct hlist_node *node;
-        struct sock *result = NULL;
-        int score, hiscore;
-
-        hiscore=0;
-        read_lock(&tcp_lhash_lock);
-        sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
-                if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
-                        struct ipv6_pinfo *np = inet6_sk(sk);
-
-                        score = 1;
-                        if (!ipv6_addr_any(&np->rcv_saddr)) {
-                                if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
-                                        continue;
-                                score++;
-                        }
-                        if (sk->sk_bound_dev_if) {
-                                if (sk->sk_bound_dev_if != dif)
-                                        continue;
-                                score++;
-                        }
-                        if (score == 3) {
-                                result = sk;
-                                break;
-                        }
-                        if (score > hiscore) {
-                                hiscore = score;
-                                result = sk;
-                        }
-                }
-        }
-        if (result)
-                sock_hold(result);
-        read_unlock(&tcp_lhash_lock);
-        return result;
-}
-
-/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
- * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- *
- * The sockhash lock must be held as a reader here.
- */
-
-static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
-                                                       struct in6_addr *daddr, u16 hnum,
-                                                       int dif)
-{
-        struct tcp_ehash_bucket *head;
-        struct sock *sk;
-        struct hlist_node *node;
-        __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
-        int hash;
-
-        /* Optimize here for direct hit, only listening connections can
-         * have wildcards anyways.
-         */
-        hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
-        head = &tcp_ehash[hash];
-        read_lock(&head->lock);
-        sk_for_each(sk, node, &head->chain) {
-                /* For IPV6 do the cheaper port and family tests first. */
-                if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
-                        goto hit; /* You sunk my battleship! */
-        }
-        /* Must check for a TIME_WAIT'er before going to listener hash. */
-        sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
-                /* FIXME: acme: check this... */
-                struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
-
-                if(*((__u32 *)&(tw->tw_dport)) == ports &&
-                   sk->sk_family == PF_INET6) {
-                        if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-                           ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
-                           (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
-                                goto hit;
-                }
-        }
-        read_unlock(&head->lock);
-        return NULL;
-
-hit:
-        sock_hold(sk);
-        read_unlock(&head->lock);
-        return sk;
-}
-
-
-static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-                                           struct in6_addr *daddr, u16 hnum,
-                                           int dif)
-{
-        struct sock *sk;
-
-        sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);
-
-        if (sk)
-                return sk;
-
-        return tcp_v6_lookup_listener(daddr, hnum, dif);
-}
-
-inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-                                  struct in6_addr *daddr, u16 dport,
-                                  int dif)
-{
-        struct sock *sk;
-
-        local_bh_disable();
-        sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
-        local_bh_enable();
-
-        return sk;
-}
-
-EXPORT_SYMBOL_GPL(tcp_v6_lookup);
-
-
 /*
  * Open request hash tables.
  */
 
-static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
+static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
 {
         u32 a, b, c;
 
@@ -399,14 +260,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
         return c & (TCP_SYNQ_HSIZE - 1);
 }
 
-static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
+static struct request_sock *tcp_v6_search_req(const struct sock *sk,
                                               struct request_sock ***prevp,
                                               __u16 rport,
                                               struct in6_addr *raddr,
                                               struct in6_addr *laddr,
                                               int iif)
 {
-        struct listen_sock *lopt = tp->accept_queue.listen_opt;
+        const struct inet_connection_sock *icsk = inet_csk(sk);
+        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
         struct request_sock *req, **prev;
 
         for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
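
[Editor's note: the ~120 lines deleted above (the listener scoring walk, the established/TIME-WAIT walk, and the BH-disabling wrapper) move out of TCP into the shared inet6 hash code; the tcp_v6_err() and tcp_v6_rcv() hunks below call inet6_lookup()/__inet6_lookup() instead. The old exported entry point maps onto the new API roughly as follows, assuming inet6_lookup keeps tcp_v6_lookup's convention of taking dport in network byte order, which the tcp_v6_err() call site suggests:

    /* Equivalent of the removed tcp_v6_lookup(), sketched in terms of the
     * generalised API; the hash table is now an explicit argument. */
    static inline struct sock *tcp_v6_lookup_equiv(struct in6_addr *saddr, u16 sport,
                                                   struct in6_addr *daddr, u16 dport,
                                                   int dif)
    {
            return inet6_lookup(&tcp_hashinfo, saddr, sport, daddr, dport, dif);
    }
]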
@@ -451,44 +313,48 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
         }
 }
 
-static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
-                                      struct tcp_tw_bucket **twp)
+static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
+                                      struct inet_timewait_sock **twp)
 {
         struct inet_sock *inet = inet_sk(sk);
-        struct ipv6_pinfo *np = inet6_sk(sk);
-        struct in6_addr *daddr = &np->rcv_saddr;
-        struct in6_addr *saddr = &np->daddr;
-        int dif = sk->sk_bound_dev_if;
-        u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-        int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
-        struct tcp_ehash_bucket *head = &tcp_ehash[hash];
+        const struct ipv6_pinfo *np = inet6_sk(sk);
+        const struct in6_addr *daddr = &np->rcv_saddr;
+        const struct in6_addr *saddr = &np->daddr;
+        const int dif = sk->sk_bound_dev_if;
+        const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
+        const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport,
+                                       tcp_hashinfo.ehash_size);
+        struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
         struct sock *sk2;
-        struct hlist_node *node;
-        struct tcp_tw_bucket *tw;
+        const struct hlist_node *node;
+        struct inet_timewait_sock *tw;
 
         write_lock(&head->lock);
 
         /* Check TIME-WAIT sockets first. */
-        sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
-                tw = (struct tcp_tw_bucket*)sk2;
+        sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
+                const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);
+
+                tw = inet_twsk(sk2);
 
                 if(*((__u32 *)&(tw->tw_dport)) == ports &&
                    sk2->sk_family == PF_INET6 &&
-                   ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-                   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
+                   ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
+                   ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
                    sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
+                        const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
                         struct tcp_sock *tp = tcp_sk(sk);
 
-                        if (tw->tw_ts_recent_stamp &&
-                            (!twp || (sysctl_tcp_tw_reuse &&
-                                      xtime.tv_sec -
-                                      tw->tw_ts_recent_stamp > 1))) {
+                        if (tcptw->tw_ts_recent_stamp &&
+                            (!twp ||
+                             (sysctl_tcp_tw_reuse &&
+                              xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
                                 /* See comment in tcp_ipv4.c */
-                                tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
+                                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                                 if (!tp->write_seq)
                                         tp->write_seq = 1;
-                                tp->rx_opt.ts_recent = tw->tw_ts_recent;
-                                tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+                                tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+                                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                                 sock_hold(sk2);
                                 goto unique;
                         } else
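
[Editor's note: the tcp_tw_bucket casts give way to three accessors -- inet_twsk(), tcp_twsk() and tcp6_twsk() -- which implies a layered timewait socket: generic fields (tw_dport, tw_ttd, ...) in inet_timewait_sock, TCP sequence/timestamp state in tcp_timewait_sock, and the IPv6 addresses in tcp6_timewait_sock. A sketch of the assumed layout, field lists abridged to what this patch touches:

    struct tcp_timewait_sock {
            struct inet_timewait_sock tw_sk;        /* must come first */
            u32  tw_rcv_nxt;
            u32  tw_snd_nxt;
            u32  tw_rcv_wnd;
            u32  tw_ts_recent;
            long tw_ts_recent_stamp;
    };

    struct tcp6_timewait_sock {
            struct tcp_timewait_sock tcp6tw_tcp;    /* must come first */
            struct in6_addr tw_v6_daddr;
            struct in6_addr tw_v6_rcv_saddr;
    };
]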
@@ -499,7 +365,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 
         /* And established part... */
         sk_for_each(sk2, node, &head->chain) {
-                if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif))
+                if (INET6_MATCH(sk2, saddr, daddr, ports, dif))
                         goto not_unique;
         }
 
@@ -515,10 +381,10 @@ unique:
                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
         } else if (tw) {
                 /* Silly. Should hash-dance instead... */
-                tcp_tw_deschedule(tw);
+                inet_twsk_deschedule(tw, &tcp_death_row);
                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 
-                tcp_tw_put(tw);
+                inet_twsk_put(tw);
         }
         return 0;
 
@@ -540,8 +406,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk)
 static int tcp_v6_hash_connect(struct sock *sk)
 {
         unsigned short snum = inet_sk(sk)->num;
-        struct tcp_bind_hashbucket *head;
-        struct tcp_bind_bucket *tb;
+        struct inet_bind_hashbucket *head;
+        struct inet_bind_bucket *tb;
         int ret;
 
         if (!snum) {
@@ -553,19 +419,19 @@ static int tcp_v6_hash_connect(struct sock *sk)
                 static u32 hint;
                 u32 offset = hint + tcpv6_port_offset(sk);
                 struct hlist_node *node;
-                struct tcp_tw_bucket *tw = NULL;
+                struct inet_timewait_sock *tw = NULL;
 
                 local_bh_disable();
                 for (i = 1; i <= range; i++) {
                         port = low + (i + offset) % range;
-                        head = &tcp_bhash[tcp_bhashfn(port)];
+                        head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
                         spin_lock(&head->lock);
 
                         /* Does not bother with rcv_saddr checks,
                          * because the established check is already
                          * unique enough.
                          */
-                        tb_for_each(tb, node, &head->chain) {
+                        inet_bind_bucket_for_each(tb, node, &head->chain) {
                                 if (tb->port == port) {
                                         BUG_TRAP(!hlist_empty(&tb->owners));
                                         if (tb->fastreuse >= 0)
@@ -578,7 +444,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
                         }
                 }
 
-                tb = tcp_bucket_create(head, port);
+                tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
                 if (!tb) {
                         spin_unlock(&head->lock);
                         break;
@@ -597,7 +463,7 @@ ok:
                 hint += i;
 
                 /* Head lock still held and bh's disabled */
-                tcp_bind_hash(sk, tb, port);
+                inet_bind_hash(sk, tb, port);
                 if (sk_unhashed(sk)) {
                         inet_sk(sk)->sport = htons(port);
                         __tcp_v6_hash(sk);
@@ -605,16 +471,16 @@ ok:
                 spin_unlock(&head->lock);
 
                 if (tw) {
-                        tcp_tw_deschedule(tw);
-                        tcp_tw_put(tw);
+                        inet_twsk_deschedule(tw, &tcp_death_row);
+                        inet_twsk_put(tw);
                 }
 
                 ret = 0;
                 goto out;
         }
 
-        head = &tcp_bhash[tcp_bhashfn(snum)];
-        tb = tcp_sk(sk)->bind_hash;
+        head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+        tb = inet_csk(sk)->icsk_bind_hash;
         spin_lock_bh(&head->lock);
 
         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -631,11 +497,6 @@ out:
         }
 }
 
-static __inline__ int tcp_v6_iif(struct sk_buff *skb)
-{
-        return IP6CB(skb)->iif;
-}
-
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
 {
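
[Editor's note: tcp_v6_iif() is deleted here and every caller below switches to inet6_iif(). Given the one-line body being removed, the generic replacement can only be the same accessor under a protocol-neutral name, presumably in a shared IPv6 header:

    static inline int inet6_iif(const struct sk_buff *skb)
    {
            return IP6CB(skb)->iif;
    }
]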
@@ -827,14 +688,15 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    int type, int code, int offset, __u32 info)
 {
         struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
-        struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
         struct ipv6_pinfo *np;
         struct sock *sk;
         int err;
         struct tcp_sock *tp;
         __u32 seq;
 
-        sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+        sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
+                          th->source, skb->dev->ifindex);
 
         if (sk == NULL) {
                 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
@@ -842,7 +704,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         }
 
         if (sk->sk_state == TCP_TIME_WAIT) {
-                tcp_tw_put((struct tcp_tw_bucket*)sk);
+                inet_twsk_put((struct inet_timewait_sock *)sk);
                 return;
         }
 
@@ -920,8 +782,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         if (sock_owned_by_user(sk))
                 goto out;
 
-        req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
-                                &hdr->saddr, tcp_v6_iif(skb));
+        req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
+                                &hdr->saddr, inet6_iif(skb));
         if (!req)
                 goto out;
 
@@ -935,7 +797,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 goto out;
         }
 
-        tcp_synq_drop(sk, req, prev);
+        inet_csk_reqsk_queue_drop(sk, req, prev);
         goto out;
 
         case TCP_SYN_SENT:
@@ -1132,7 +994,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
                                     buff->csum);
 
         fl.proto = IPPROTO_TCP;
-        fl.oif = tcp_v6_iif(skb);
+        fl.oif = inet6_iif(skb);
         fl.fl_ip_dport = t1->dest;
         fl.fl_ip_sport = t1->source;
 
@@ -1201,7 +1063,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
                                     buff->csum);
 
         fl.proto = IPPROTO_TCP;
-        fl.oif = tcp_v6_iif(skb);
+        fl.oif = inet6_iif(skb);
         fl.fl_ip_dport = t1->dest;
         fl.fl_ip_sport = t1->source;
 
@@ -1220,12 +1082,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
-        struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+        struct inet_timewait_sock *tw = inet_twsk(sk);
+        const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-        tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
-                        tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
+        tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+                        tcptw->tw_ts_recent);
 
-        tcp_tw_put(tw);
+        inet_twsk_put(tw);
 }
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1237,28 +1101,25 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
         struct request_sock *req, **prev;
-        struct tcphdr *th = skb->h.th;
-        struct tcp_sock *tp = tcp_sk(sk);
+        const struct tcphdr *th = skb->h.th;
         struct sock *nsk;
 
         /* Find possible connection requests. */
-        req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
-                                &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
+        req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
+                                &skb->nh.ipv6h->daddr, inet6_iif(skb));
         if (req)
                 return tcp_check_req(sk, skb, req, prev);
 
-        nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
-                                          th->source,
-                                          &skb->nh.ipv6h->daddr,
-                                          ntohs(th->dest),
-                                          tcp_v6_iif(skb));
+        nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
+                                         th->source, &skb->nh.ipv6h->daddr,
+                                         ntohs(th->dest), inet6_iif(skb));
 
         if (nsk) {
                 if (nsk->sk_state != TCP_TIME_WAIT) {
                         bh_lock_sock(nsk);
                         return nsk;
                 }
-                tcp_tw_put((struct tcp_tw_bucket*)nsk);
+                inet_twsk_put((struct inet_timewait_sock *)nsk);
                 return NULL;
         }
 
@@ -1271,12 +1132,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 
 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct listen_sock *lopt = tp->accept_queue.listen_opt;
-        u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+        struct inet_connection_sock *icsk = inet_csk(sk);
+        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+        const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
-        reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-        tcp_synq_added(sk);
+        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+        inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
 
 
@@ -1301,13 +1162,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         /*
          *      There are no SYN attacks on IPv6, yet...
          */
-        if (tcp_synq_is_full(sk) && !isn) {
+        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                 if (net_ratelimit())
                         printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
                 goto drop;
         }
 
-        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                 goto drop;
 
         req = reqsk_alloc(&tcp6_request_sock_ops);
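
[Editor's note: the SYN-flood and accept-backlog checks swap tcp_synq_is_full()/tcp_synq_young() for inet_connection_sock equivalents. Matching the helpers they replace, both are presumably thin wrappers over the icsk accept queue (names of the underlying request_sock_queue helpers assumed):

    static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
    {
            return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
    }

    static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
    {
            return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
    }
]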
@@ -1339,7 +1200,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         /* So that link locals have meaning */
         if (!sk->sk_bound_dev_if &&
             ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                treq->iif = tcp_v6_iif(skb);
+                treq->iif = inet6_iif(skb);
 
         if (isn == 0)
                 isn = tcp_v6_init_sequence(sk,skb);
@@ -1404,15 +1265,14 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
                 newnp->pktoptions = NULL;
                 newnp->opt = NULL;
-                newnp->mcast_oif = tcp_v6_iif(skb);
+                newnp->mcast_oif = inet6_iif(skb);
                 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
 
-                /* Charge newly allocated IPv6 socket. Though it is mapped,
-                 * it is IPv6 yet.
+                /*
+                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
+                 * here, tcp_create_openreq_child now does this for us, see the comment in
+                 * that function for the gory details. -acme
                  */
-#ifdef INET_REFCNT_DEBUG
-                atomic_inc(&inet6_sock_nr);
-#endif
 
                 /* It is tricky place. Until this moment IPv4 tcp
                    worked with IPv6 af_tcp.af_specific.
@@ -1467,10 +1327,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         if (newsk == NULL)
                 goto out;
 
-        /* Charge newly allocated IPv6 socket */
-#ifdef INET_REFCNT_DEBUG
-        atomic_inc(&inet6_sock_nr);
-#endif
+        /*
+         * No need to charge this sock to the relevant IPv6 refcnt debug socks
+         * count here, tcp_create_openreq_child now does this for us, see the
+         * comment in that function for the gory details. -acme
+         */
 
         ip6_dst_store(newsk, dst, NULL);
         newsk->sk_route_caps = dst->dev->features &
@@ -1509,7 +1370,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 skb_set_owner_r(newnp->pktoptions, newsk);
         }
         newnp->opt = NULL;
-        newnp->mcast_oif = tcp_v6_iif(skb);
+        newnp->mcast_oif = inet6_iif(skb);
         newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
 
         /* Clone native IPv6 options from listening socket (if any)
@@ -1536,7 +1397,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
         __tcp_v6_hash(newsk);
-        tcp_inherit_port(sk, newsk);
+        inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
         return newsk;
 
@@ -1557,7 +1418,7 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
                 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
                                   &skb->nh.ipv6h->daddr,skb->csum))
                         return 0;
-                LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
+                LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
         }
         if (skb->len <= 76) {
                 if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
@@ -1684,7 +1545,7 @@ ipv6_pktoptions:
         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                 if (np->rxopt.bits.rxinfo)
-                        np->mcast_oif = tcp_v6_iif(opt_skb);
+                        np->mcast_oif = inet6_iif(opt_skb);
                 if (np->rxopt.bits.rxhlim)
                         np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
                 if (ipv6_opt_accepted(sk, opt_skb)) {
@@ -1739,8 +1600,9 @@ static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
         TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
         TCP_SKB_CB(skb)->sacked = 0;
 
-        sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
-                             &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+        sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
+                            &skb->nh.ipv6h->daddr, ntohs(th->dest),
+                            inet6_iif(skb));
 
         if (!sk)
                 goto no_tcp_socket;
@@ -1795,26 +1657,29 @@ discard_and_relse:
 
 do_time_wait:
         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                tcp_tw_put((struct tcp_tw_bucket *) sk);
+                inet_twsk_put((struct inet_timewait_sock *)sk);
                 goto discard_it;
         }
 
         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
-                tcp_tw_put((struct tcp_tw_bucket *) sk);
+                inet_twsk_put((struct inet_timewait_sock *)sk);
                 goto discard_it;
         }
 
-        switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
-                                          skb, th, skb->len)) {
+        switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
+                                           skb, th)) {
         case TCP_TW_SYN:
         {
                 struct sock *sk2;
 
-                sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+                sk2 = inet6_lookup_listener(&tcp_hashinfo,
+                                            &skb->nh.ipv6h->daddr,
+                                            ntohs(th->dest), inet6_iif(skb));
                 if (sk2 != NULL) {
-                        tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
-                        tcp_tw_put((struct tcp_tw_bucket *)sk);
+                        struct inet_timewait_sock *tw = inet_twsk(sk);
+                        inet_twsk_deschedule(tw, &tcp_death_row);
+                        inet_twsk_put(tw);
                         sk = sk2;
                         goto process;
                 }
@@ -1983,7 +1848,7 @@ static struct tcp_func ipv6_specific = {
 static struct tcp_func ipv6_mapped = {
         .queue_xmit     = ip_queue_xmit,
         .send_check     = tcp_v4_send_check,
-        .rebuild_header = tcp_v4_rebuild_header,
+        .rebuild_header = inet_sk_rebuild_header,
         .conn_request   = tcp_v6_conn_request,
         .syn_recv_sock  = tcp_v6_syn_recv_sock,
         .remember_stamp = tcp_v4_remember_stamp,
@@ -2002,13 +1867,14 @@ static struct tcp_func ipv6_mapped = {
  */
 static int tcp_v6_init_sock(struct sock *sk)
 {
+        struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
 
         skb_queue_head_init(&tp->out_of_order_queue);
         tcp_init_xmit_timers(sk);
         tcp_prequeue_init(tp);
 
-        tp->rto = TCP_TIMEOUT_INIT;
+        icsk->icsk_rto = TCP_TIMEOUT_INIT;
         tp->mdev = TCP_TIMEOUT_INIT;
 
         /* So many TCP implementations out there (incorrectly) count the
@@ -2030,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
         sk->sk_state = TCP_CLOSE;
 
         tp->af_specific = &ipv6_specific;
-        tp->ca_ops = &tcp_init_congestion_ops;
+        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
         sk->sk_write_space = sk_stream_write_space;
         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
@@ -2044,8 +1910,6 @@ static int tcp_v6_init_sock(struct sock *sk)
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
-        extern int tcp_v4_destroy_sock(struct sock *sk);
-
         tcp_v4_destroy_sock(sk);
         return inet6_destroy_sock(sk);
 }
@@ -2091,18 +1955,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
         unsigned long timer_expires;
         struct inet_sock *inet = inet_sk(sp);
         struct tcp_sock *tp = tcp_sk(sp);
+        const struct inet_connection_sock *icsk = inet_csk(sp);
         struct ipv6_pinfo *np = inet6_sk(sp);
 
         dest  = &np->daddr;
         src   = &np->rcv_saddr;
         destp = ntohs(inet->dport);
         srcp  = ntohs(inet->sport);
-        if (tp->pending == TCP_TIME_RETRANS) {
+
+        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                 timer_active    = 1;
-                timer_expires   = tp->timeout;
-        } else if (tp->pending == TCP_TIME_PROBE0) {
+                timer_expires   = icsk->icsk_timeout;
+        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                 timer_active    = 4;
-                timer_expires   = tp->timeout;
+                timer_expires   = icsk->icsk_timeout;
         } else if (timer_pending(&sp->sk_timer)) {
                 timer_active    = 2;
                 timer_expires   = sp->sk_timer.expires;
@@ -2123,28 +1989,31 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                    tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
                    timer_active,
                    jiffies_to_clock_t(timer_expires - jiffies),
-                   tp->retransmits,
+                   icsk->icsk_retransmits,
                    sock_i_uid(sp),
-                   tp->probes_out,
+                   icsk->icsk_probes_out,
                    sock_i_ino(sp),
                    atomic_read(&sp->sk_refcnt), sp,
-                   tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
+                   icsk->icsk_rto,
+                   icsk->icsk_ack.ato,
+                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                    tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
                    );
 }
 
 static void get_timewait6_sock(struct seq_file *seq,
-                               struct tcp_tw_bucket *tw, int i)
+                               struct inet_timewait_sock *tw, int i)
 {
         struct in6_addr *dest, *src;
         __u16 destp, srcp;
+        struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
         int ttd = tw->tw_ttd - jiffies;
 
         if (ttd < 0)
                 ttd = 0;
 
-        dest = &tw->tw_v6_daddr;
-        src  = &tw->tw_v6_rcv_saddr;
+        dest = &tcp6tw->tw_v6_daddr;
+        src  = &tcp6tw->tw_v6_rcv_saddr;
         destp = ntohs(tw->tw_dport);
         srcp  = ntohs(tw->tw_sport);
 
@@ -2219,7 +2088,7 @@ struct proto tcpv6_prot = {
         .close          = tcp_close,
         .connect        = tcp_v6_connect,
         .disconnect     = tcp_disconnect,
-        .accept         = tcp_accept,
+        .accept         = inet_csk_accept,
         .ioctl          = tcp_ioctl,
         .init           = tcp_v6_init_sock,
         .destroy        = tcp_v6_destroy_sock,
@@ -2236,11 +2105,13 @@ struct proto tcpv6_prot = {
         .sockets_allocated = &tcp_sockets_allocated,
         .memory_allocated  = &tcp_memory_allocated,
         .memory_pressure   = &tcp_memory_pressure,
+        .orphan_count      = &tcp_orphan_count,
         .sysctl_mem        = sysctl_tcp_mem,
         .sysctl_wmem       = sysctl_tcp_wmem,
         .sysctl_rmem       = sysctl_tcp_rmem,
         .max_header        = MAX_TCP_HEADER,
         .obj_size          = sizeof(struct tcp6_sock),
+        .twsk_obj_size     = sizeof(struct tcp6_timewait_sock),
         .rsk_prot          = &tcp6_request_sock_ops,
 };
 
@@ -2250,8 +2121,6 @@ static struct inet6_protocol tcpv6_protocol = {
         .flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-extern struct proto_ops inet6_stream_ops;
-
 static struct inet_protosw tcpv6_protosw = {
         .type           = SOCK_STREAM,
         .protocol       = IPPROTO_TCP,