Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
 net/ipv6/tcp_ipv6.c | 448 +++++++++++-------------------------
 1 file changed, 161 insertions(+), 287 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f6e288dc116e..794734f1d230 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -47,6 +47,7 @@
 
 #include <net/tcp.h>
 #include <net/ndisc.h>
+#include <net/inet6_hashtables.h>
 #include <net/ipv6.h>
 #include <net/transp_v6.h>
 #include <net/addrconf.h>
@@ -75,34 +76,11 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
 static struct tcp_func ipv6_mapped;
 static struct tcp_func ipv6_specific;
 
-/* I have no idea if this is a good hash for v6 or not. -DaveM */
-static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
-				    struct in6_addr *faddr, u16 fport)
+static inline int tcp_v6_bind_conflict(const struct sock *sk,
+				       const struct inet_bind_bucket *tb)
 {
-	int hashent = (lport ^ fport);
-
-	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
-	hashent ^= hashent>>16;
-	hashent ^= hashent>>8;
-	return (hashent & (tcp_ehash_size - 1));
-}
-
-static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *laddr = &np->rcv_saddr;
-	struct in6_addr *faddr = &np->daddr;
-	__u16 lport = inet->num;
-	__u16 fport = inet->dport;
-	return tcp_v6_hashfn(laddr, lport, faddr, fport);
-}
-
-static inline int tcp_v6_bind_conflict(struct sock *sk,
-				       struct tcp_bind_bucket *tb)
-{
-	struct sock *sk2;
-	struct hlist_node *node;
+	const struct sock *sk2;
+	const struct hlist_node *node;
 
 	/* We must walk the whole port owner list in this case. -DaveM */
 	sk_for_each_bound(sk2, node, &tb->owners) {
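
For reference, the per-protocol ehash function dropped in the hunk above was a plain xor fold of the two ports and the low 32 bits of each address. A minimal standalone sketch of that scheme (simplified types; the table size, as in the kernel, is assumed to be a power of two):

	#include <stdint.h>

	/* Stand-in for the low word of an in6_addr (assumption: only
	 * s6_addr32[3] feeds the hash, as in the removed function). */
	typedef uint32_t addr_lo_t;

	enum { EHASH_SIZE = 512 };	/* power of two, like tcp_ehash_size */

	static unsigned int ehashfn_sketch(addr_lo_t laddr, uint16_t lport,
					   addr_lo_t faddr, uint16_t fport)
	{
		uint32_t hashent = lport ^ fport;

		hashent ^= laddr ^ faddr;	/* mix in both endpoints */
		hashent ^= hashent >> 16;	/* fold high bits down... */
		hashent ^= hashent >> 8;	/* ...so the mask sees them */
		return hashent & (EHASH_SIZE - 1);
	}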
@@ -126,8 +104,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk,
 	 */
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
-	struct tcp_bind_hashbucket *head;
-	struct tcp_bind_bucket *tb;
+	struct inet_bind_hashbucket *head;
+	struct inet_bind_bucket *tb;
 	struct hlist_node *node;
 	int ret;
 
@@ -138,37 +116,42 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do {	rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[tcp_bhashfn(rover)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
-			tb_for_each(tb, node, &head->chain)
+			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
 					goto next;
 			break;
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
-		/* Exhausted local port range during search? */
+		/* Exhausted local port range during search?  It is not
+		 * possible for us to be holding one of the bind hash
+		 * locks if this test triggers, because if 'remaining'
+		 * drops to zero, we broke out of the do/while loop at
+		 * the top level, not from the 'break;' statement.
+		 */
 		ret = 1;
-		if (remaining <= 0)
+		if (unlikely(remaining <= 0))
 			goto fail;
 
 		/* OK, here is the one we will use. */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[tcp_bhashfn(snum)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
-		tb_for_each(tb, node, &head->chain)
+		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
 				goto tb_found;
 	}
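
The rover logic above reads more easily without the locking interleaved. A rough model of the search, where bucket_in_use() is a hypothetical stand-in for the inet_bind_bucket_for_each() scan:

	/* Start just past the last port handed out, wrap at the top of the
	 * range, and take the first port whose bind bucket is free.  Returns
	 * -1 when every port in [low, high] is occupied. */
	static int pick_local_port(int low, int high, int *rover,
				   int (*bucket_in_use)(int port))
	{
		int remaining = (high - low) + 1;
		int port = (*rover < low) ? low : *rover;

		do {
			if (++port > high)
				port = low;
			if (!bucket_in_use(port))
				break;		/* free port found */
		} while (--remaining > 0);

		*rover = port;			/* remember where we stopped */
		return remaining > 0 ? port : -1;
	}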
@@ -187,8 +170,11 @@ tb_found:
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
-		goto fail_unlock;
+	if (tb == NULL) {
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+		if (tb == NULL)
+			goto fail_unlock;
+	}
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 			tb->fastreuse = 1;
@@ -199,9 +185,9 @@ tb_not_found:
 		tb->fastreuse = 0;
 
 success:
-	if (!tcp_sk(sk)->bind_hash)
-		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	if (!inet_csk(sk)->icsk_bind_hash)
+		inet_bind_hash(sk, tb, snum);
+	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
 	ret = 0;
 
 fail_unlock:
@@ -219,13 +205,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	if (sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
-		tcp_listen_wlock();
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
+		inet_listen_wlock(&tcp_hashinfo);
 	} else {
-		sk->sk_hashent = tcp_v6_sk_hashfn(sk);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 
@@ -250,131 +236,11 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
-{
-	struct sock *sk;
-	struct hlist_node *node;
-	struct sock *result = NULL;
-	int score, hiscore;
-
-	hiscore=0;
-	read_lock(&tcp_lhash_lock);
-	sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
-		if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
-
-			score = 1;
-			if (!ipv6_addr_any(&np->rcv_saddr)) {
-				if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
-					continue;
-				score++;
-			}
-			if (sk->sk_bound_dev_if) {
-				if (sk->sk_bound_dev_if != dif)
-					continue;
-				score++;
-			}
-			if (score == 3) {
-				result = sk;
-				break;
-			}
-			if (score > hiscore) {
-				hiscore = score;
-				result = sk;
-			}
-		}
-	}
-	if (result)
-		sock_hold(result);
-	read_unlock(&tcp_lhash_lock);
-	return result;
-}
-
-/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
- * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- *
- * The sockhash lock must be held as a reader here.
- */
-
-static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
-						       struct in6_addr *daddr, u16 hnum,
-						       int dif)
-{
-	struct tcp_ehash_bucket *head;
-	struct sock *sk;
-	struct hlist_node *node;
-	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
-	int hash;
-
-	/* Optimize here for direct hit, only listening connections can
-	 * have wildcards anyways.
-	 */
-	hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
-	head = &tcp_ehash[hash];
-	read_lock(&head->lock);
-	sk_for_each(sk, node, &head->chain) {
-		/* For IPV6 do the cheaper port and family tests first. */
-		if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
-			goto hit; /* You sunk my battleship! */
-	}
-	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
-		/* FIXME: acme: check this... */
-		struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
-
-		if(*((__u32 *)&(tw->tw_dport)) == ports &&
-		   sk->sk_family == PF_INET6) {
-			if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-			   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
-			   (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
-				goto hit;
-		}
-	}
-	read_unlock(&head->lock);
-	return NULL;
-
-hit:
-	sock_hold(sk);
-	read_unlock(&head->lock);
-	return sk;
-}
-
-
-static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-					   struct in6_addr *daddr, u16 hnum,
-					   int dif)
-{
-	struct sock *sk;
-
-	sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);
-
-	if (sk)
-		return sk;
-
-	return tcp_v6_lookup_listener(daddr, hnum, dif);
-}
-
-inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-				  struct in6_addr *daddr, u16 dport,
-				  int dif)
-{
-	struct sock *sk;
-
-	local_bh_disable();
-	sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
-	local_bh_enable();
-
-	return sk;
-}
-
-EXPORT_SYMBOL_GPL(tcp_v6_lookup);
-
-
 /*
  * Open request hash tables.
  */
 
-static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
+static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
 {
 	u32 a, b, c;
 
@@ -394,14 +260,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
 	return c & (TCP_SYNQ_HSIZE - 1);
 }
 
-static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
+static struct request_sock *tcp_v6_search_req(const struct sock *sk,
 					      struct request_sock ***prevp,
 					      __u16 rport,
 					      struct in6_addr *raddr,
 					      struct in6_addr *laddr,
 					      int iif)
 {
-	struct listen_sock *lopt = tp->accept_queue.listen_opt;
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
 	struct request_sock *req, **prev;
 
 	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
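
Worth noting in passing: tcp_v6_search_req() hands back not only the match but, through prevp, the address of the link that points at it, so the caller can unlink the request without re-walking the chain. The pattern in isolation (hypothetical simplified node type):

	/* Walk a singly linked chain keeping the address of the pointer that
	 * leads to the current node; on a hit the caller can unlink in O(1)
	 * with *(*prevp) = req->next, no second scan needed. */
	struct req_sketch {
		struct req_sketch *next;
		unsigned short	   rport;	/* stand-in for the real keys */
	};

	static struct req_sketch *search_req_sketch(struct req_sketch **chain,
						    unsigned short rport,
						    struct req_sketch ***prevp)
	{
		struct req_sketch *req, **prev;

		for (prev = chain; (req = *prev) != NULL; prev = &req->next) {
			if (req->rport == rport) {
				*prevp = prev;
				return req;
			}
		}
		return NULL;
	}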
@@ -446,44 +313,48 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
-static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
-				      struct tcp_tw_bucket **twp)
+static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
+				      struct inet_timewait_sock **twp)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr = &np->rcv_saddr;
-	struct in6_addr *saddr = &np->daddr;
-	int dif = sk->sk_bound_dev_if;
-	u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-	int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
-	struct tcp_ehash_bucket *head = &tcp_ehash[hash];
+	const struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct in6_addr *daddr = &np->rcv_saddr;
+	const struct in6_addr *saddr = &np->daddr;
+	const int dif = sk->sk_bound_dev_if;
+	const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
+	const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport,
+				       tcp_hashinfo.ehash_size);
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
-	struct hlist_node *node;
-	struct tcp_tw_bucket *tw;
+	const struct hlist_node *node;
+	struct inet_timewait_sock *tw;
 
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
-		tw = (struct tcp_tw_bucket*)sk2;
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
+		const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);
+
+		tw = inet_twsk(sk2);
 
 		if(*((__u32 *)&(tw->tw_dport)) == ports &&
 		   sk2->sk_family == PF_INET6 &&
-		   ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-		   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
+		   ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
+		   ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
 		   sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
+			const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
 			struct tcp_sock *tp = tcp_sk(sk);
 
-			if (tw->tw_ts_recent_stamp &&
-			    (!twp || (sysctl_tcp_tw_reuse &&
-				      xtime.tv_sec -
-				      tw->tw_ts_recent_stamp > 1))) {
+			if (tcptw->tw_ts_recent_stamp &&
+			    (!twp ||
+			     (sysctl_tcp_tw_reuse &&
+			      xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
 				/* See comment in tcp_ipv4.c */
-				tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
+				tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 				if (!tp->write_seq)
 					tp->write_seq = 1;
-				tp->rx_opt.ts_recent = tw->tw_ts_recent;
-				tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+				tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+				tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 				sock_hold(sk2);
 				goto unique;
 			} else
@@ -494,7 +365,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 
 	/* And established part... */
 	sk_for_each(sk2, node, &head->chain) {
-		if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif))
+		if (INET6_MATCH(sk2, saddr, daddr, ports, dif))
 			goto not_unique;
 	}
 
@@ -510,10 +381,10 @@ unique:
 		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		tcp_tw_deschedule(tw);
+		inet_twsk_deschedule(tw, &tcp_death_row);
 		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 	}
 	return 0;
 
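
The write_seq adjustment in the TIME-WAIT recycle path above, taken on its own: the reused connection starts its sequence space more than one maximum window past the old connection's snd_nxt, so stray segments from the previous incarnation cannot be mistaken for new data. A sketch of just that arithmetic:

	#include <stdint.h>

	/* Sketch of the sequence-space jump (see the comment in tcp_ipv4.c):
	 * 65535 is the largest unscaled receive window, so +65535+2 puts the
	 * new connection safely beyond anything the old one had in flight. */
	static uint32_t recycled_write_seq(uint32_t tw_snd_nxt)
	{
		uint32_t write_seq = tw_snd_nxt + 65535 + 2;

		return write_seq ? write_seq : 1;	/* 0 is reserved for "unset" */
	}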
@@ -535,8 +406,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk)
 static int tcp_v6_hash_connect(struct sock *sk)
 {
 	unsigned short snum = inet_sk(sk)->num;
-	struct tcp_bind_hashbucket *head;
-	struct tcp_bind_bucket *tb;
+	struct inet_bind_hashbucket *head;
+	struct inet_bind_bucket *tb;
 	int ret;
 
 	if (!snum) {
@@ -548,19 +419,19 @@ static int tcp_v6_hash_connect(struct sock *sk)
 		static u32 hint;
 		u32 offset = hint + tcpv6_port_offset(sk);
 		struct hlist_node *node;
-		struct tcp_tw_bucket *tw = NULL;
+		struct inet_timewait_sock *tw = NULL;
 
 		local_bh_disable();
 		for (i = 1; i <= range; i++) {
 			port = low + (i + offset) % range;
-			head = &tcp_bhash[tcp_bhashfn(port)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 
 			/* Does not bother with rcv_saddr checks,
 			 * because the established check is already
 			 * unique enough.
 			 */
-			tb_for_each(tb, node, &head->chain) {
+			inet_bind_bucket_for_each(tb, node, &head->chain) {
 				if (tb->port == port) {
 					BUG_TRAP(!hlist_empty(&tb->owners));
 					if (tb->fastreuse >= 0)
@@ -573,7 +444,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 				}
 			}
 
-			tb = tcp_bucket_create(head, port);
+			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 			if (!tb) {
 				spin_unlock(&head->lock);
 				break;
@@ -592,7 +463,7 @@ ok:
 		hint += i;
 
 		/* Head lock still held and bh's disabled */
-		tcp_bind_hash(sk, tb, port);
+		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
 			__tcp_v6_hash(sk);
@@ -600,16 +471,16 @@ ok:
 		spin_unlock(&head->lock);
 
 		if (tw) {
-			tcp_tw_deschedule(tw);
-			tcp_tw_put(tw);
+			inet_twsk_deschedule(tw, &tcp_death_row);
+			inet_twsk_put(tw);
 		}
 
 		ret = 0;
 		goto out;
 	}
 
-	head = &tcp_bhash[tcp_bhashfn(snum)];
-	tb = tcp_sk(sk)->bind_hash;
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -626,11 +497,6 @@ out:
 	}
 }
 
-static __inline__ int tcp_v6_iif(struct sk_buff *skb)
-{
-	return IP6CB(skb)->iif;
-}
-
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			  int addr_len)
 {
@@ -822,14 +688,15 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		int type, int code, int offset, __u32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
-	struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 	struct ipv6_pinfo *np;
 	struct sock *sk;
 	int err;
 	struct tcp_sock *tp;
 	__u32 seq;
 
-	sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
+			  th->source, skb->dev->ifindex);
 
 	if (sk == NULL) {
 		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
@@ -837,7 +704,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 	if (sk->sk_state == TCP_TIME_WAIT) {
-		tcp_tw_put((struct tcp_tw_bucket*)sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		return;
 	}
 
@@ -915,8 +782,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (sock_owned_by_user(sk))
 			goto out;
 
-		req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
-					&hdr->saddr, tcp_v6_iif(skb));
+		req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
+					&hdr->saddr, inet6_iif(skb));
 		if (!req)
 			goto out;
 
@@ -930,7 +797,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			goto out;
 		}
 
-		tcp_synq_drop(sk, req, prev);
+		inet_csk_reqsk_queue_drop(sk, req, prev);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -1127,7 +994,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 				    buff->csum);
 
 	fl.proto = IPPROTO_TCP;
-	fl.oif = tcp_v6_iif(skb);
+	fl.oif = inet6_iif(skb);
 	fl.fl_ip_dport = t1->dest;
 	fl.fl_ip_sport = t1->source;
 
@@ -1196,7 +1063,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 				    buff->csum);
 
 	fl.proto = IPPROTO_TCP;
-	fl.oif = tcp_v6_iif(skb);
+	fl.oif = inet6_iif(skb);
 	fl.fl_ip_dport = t1->dest;
 	fl.fl_ip_sport = t1->source;
 
@@ -1215,12 +1082,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
-			tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
+	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+			tcptw->tw_ts_recent);
 
-	tcp_tw_put(tw);
+	inet_twsk_put(tw);
 }
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
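
The accessors used above (inet_twsk(), tcp_twsk(), tcp6_twsk()) all cast the same struct sock pointer; that only works because each wider timewait structure begins with the narrower one. An abridged sketch of the layering this relies on (field lists trimmed to the ones used here):

	/* Layering assumed by the casts: inet_timewait_sock is the common
	 * head, tcp_timewait_sock extends it, tcp6_timewait_sock extends
	 * that, so one pointer yields all three views. */
	struct inet_timewait_sock_sketch {
		unsigned short	tw_sport, tw_dport;	/* ports, family, ... */
		unsigned char	tw_rcv_wscale;
	};

	struct tcp_timewait_sock_sketch {
		struct inet_timewait_sock_sketch tw_sk;	/* must come first */
		unsigned int	tw_snd_nxt, tw_rcv_nxt, tw_rcv_wnd;
		long		tw_ts_recent_stamp;
	};

	struct tcp6_timewait_sock_sketch {
		struct tcp_timewait_sock_sketch tw_v6_sk; /* must come first */
		/* tw_v6_daddr and tw_v6_rcv_saddr live here */
	};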
@@ -1232,28 +1101,25 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
 	struct request_sock *req, **prev;
-	struct tcphdr *th = skb->h.th;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcphdr *th = skb->h.th;
 	struct sock *nsk;
 
 	/* Find possible connection requests. */
-	req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
-				&skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
+	req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
+				&skb->nh.ipv6h->daddr, inet6_iif(skb));
 	if (req)
 		return tcp_check_req(sk, skb, req, prev);
 
-	nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
-					  th->source,
-					  &skb->nh.ipv6h->daddr,
-					  ntohs(th->dest),
-					  tcp_v6_iif(skb));
+	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
+					 th->source, &skb->nh.ipv6h->daddr,
+					 ntohs(th->dest), inet6_iif(skb));
 
 	if (nsk) {
 		if (nsk->sk_state != TCP_TIME_WAIT) {
 			bh_lock_sock(nsk);
 			return nsk;
 		}
-		tcp_tw_put((struct tcp_tw_bucket*)nsk);
+		inet_twsk_put((struct inet_timewait_sock *)nsk);
 		return NULL;
 	}
 
@@ -1266,12 +1132,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 
 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct listen_sock *lopt = tp->accept_queue.listen_opt;
-	u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+	const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
-	reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-	tcp_synq_added(sk);
+	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
 
 
@@ -1296,13 +1162,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	/*
 	 *	There are no SYN attacks on IPv6, yet...
 	 */
-	if (tcp_synq_is_full(sk) && !isn) {
+	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 		if (net_ratelimit())
 			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
 		goto drop;
 	}
 
-	if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
 	req = reqsk_alloc(&tcp6_request_sock_ops);
@@ -1334,7 +1200,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		treq->iif = tcp_v6_iif(skb);
+		treq->iif = inet6_iif(skb);
 
 	if (isn == 0)
 		isn = tcp_v6_init_sequence(sk,skb);
@@ -1399,15 +1265,14 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
-		newnp->mcast_oif   = tcp_v6_iif(skb);
+		newnp->mcast_oif   = inet6_iif(skb);
 		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;
 
-		/* Charge newly allocated IPv6 socket. Though it is mapped,
-		 * it is IPv6 yet.
+		/*
+		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
+		 * here, tcp_create_openreq_child now does this for us, see the comment in
+		 * that function for the gory details. -acme
 		 */
-#ifdef INET_REFCNT_DEBUG
-		atomic_inc(&inet6_sock_nr);
-#endif
 
 		/* It is tricky place. Until this moment IPv4 tcp
 		   worked with IPv6 af_tcp.af_specific.
@@ -1462,10 +1327,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (newsk == NULL)
 		goto out;
 
-	/* Charge newly allocated IPv6 socket */
-#ifdef INET_REFCNT_DEBUG
-	atomic_inc(&inet6_sock_nr);
-#endif
+	/*
+	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
+	 * count here, tcp_create_openreq_child now does this for us, see the
+	 * comment in that function for the gory details. -acme
+	 */
 
 	ip6_dst_store(newsk, dst, NULL);
 	newsk->sk_route_caps = dst->dev->features &
@@ -1504,7 +1370,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
 	newnp->opt	  = NULL;
-	newnp->mcast_oif  = tcp_v6_iif(skb);
+	newnp->mcast_oif  = inet6_iif(skb);
 	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
 
 	/* Clone native IPv6 options from listening socket (if any)
@@ -1531,7 +1397,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
 	__tcp_v6_hash(newsk);
-	tcp_inherit_port(sk, newsk);
+	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
 
@@ -1552,7 +1418,7 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
 		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
 				  &skb->nh.ipv6h->daddr,skb->csum))
 			return 0;
-		LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
+		LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
 	}
 	if (skb->len <= 76) {
 		if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
@@ -1679,7 +1545,7 @@ ipv6_pktoptions:
 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
 		if (np->rxopt.bits.rxinfo)
-			np->mcast_oif = tcp_v6_iif(opt_skb);
+			np->mcast_oif = inet6_iif(opt_skb);
 		if (np->rxopt.bits.rxhlim)
 			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
 		if (ipv6_opt_accepted(sk, opt_skb)) {
@@ -1734,8 +1600,9 @@ static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
 	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
-			     &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
+			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
+			    inet6_iif(skb));
 
 	if (!sk)
 		goto no_tcp_socket;
@@ -1790,26 +1657,29 @@ discard_and_relse:
 
 do_time_wait:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-		tcp_tw_put((struct tcp_tw_bucket *) sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		goto discard_it;
 	}
 
 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
-		tcp_tw_put((struct tcp_tw_bucket *) sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		goto discard_it;
 	}
 
-	switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
-					  skb, th, skb->len)) {
+	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
+					   skb, th)) {
 	case TCP_TW_SYN:
 	{
 		struct sock *sk2;
 
-		sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+		sk2 = inet6_lookup_listener(&tcp_hashinfo,
+					    &skb->nh.ipv6h->daddr,
+					    ntohs(th->dest), inet6_iif(skb));
 		if (sk2 != NULL) {
-			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
-			tcp_tw_put((struct tcp_tw_bucket *)sk);
+			struct inet_timewait_sock *tw = inet_twsk(sk);
+			inet_twsk_deschedule(tw, &tcp_death_row);
+			inet_twsk_put(tw);
 			sk = sk2;
 			goto process;
 		}
@@ -1978,7 +1848,7 @@ static struct tcp_func ipv6_specific = {
 static struct tcp_func ipv6_mapped = {
 	.queue_xmit	=	ip_queue_xmit,
 	.send_check	=	tcp_v4_send_check,
-	.rebuild_header	=	tcp_v4_rebuild_header,
+	.rebuild_header	=	inet_sk_rebuild_header,
 	.conn_request	=	tcp_v6_conn_request,
 	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
 	.remember_stamp	=	tcp_v4_remember_stamp,
@@ -1997,13 +1867,14 @@ static struct tcp_func ipv6_mapped = {
  */
 static int tcp_v6_init_sock(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	skb_queue_head_init(&tp->out_of_order_queue);
 	tcp_init_xmit_timers(sk);
 	tcp_prequeue_init(tp);
 
-	tp->rto = TCP_TIMEOUT_INIT;
+	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	tp->mdev = TCP_TIMEOUT_INIT;
 
 	/* So many TCP implementations out there (incorrectly) count the
@@ -2025,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_state = TCP_CLOSE;
 
 	tp->af_specific = &ipv6_specific;
-	tp->ca_ops = &tcp_init_congestion_ops;
+	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
@@ -2039,8 +1910,6 @@ static int tcp_v6_init_sock(struct sock *sk)
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
-	extern int tcp_v4_destroy_sock(struct sock *sk);
-
 	tcp_v4_destroy_sock(sk);
 	return inet6_destroy_sock(sk);
 }
@@ -2086,18 +1955,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	unsigned long timer_expires;
 	struct inet_sock *inet = inet_sk(sp);
 	struct tcp_sock *tp = tcp_sk(sp);
+	const struct inet_connection_sock *icsk = inet_csk(sp);
 	struct ipv6_pinfo *np = inet6_sk(sp);
 
 	dest  = &np->daddr;
 	src   = &np->rcv_saddr;
 	destp = ntohs(inet->dport);
 	srcp  = ntohs(inet->sport);
-	if (tp->pending == TCP_TIME_RETRANS) {
+
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 		timer_active	= 1;
-		timer_expires	= tp->timeout;
-	} else if (tp->pending == TCP_TIME_PROBE0) {
+		timer_expires	= icsk->icsk_timeout;
+	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
 		timer_active	= 4;
-		timer_expires	= tp->timeout;
+		timer_expires	= icsk->icsk_timeout;
 	} else if (timer_pending(&sp->sk_timer)) {
 		timer_active	= 2;
 		timer_expires	= sp->sk_timer.expires;
@@ -2118,28 +1989,31 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
 		   timer_active,
 		   jiffies_to_clock_t(timer_expires - jiffies),
-		   tp->retransmits,
+		   icsk->icsk_retransmits,
 		   sock_i_uid(sp),
-		   tp->probes_out,
+		   icsk->icsk_probes_out,
 		   sock_i_ino(sp),
 		   atomic_read(&sp->sk_refcnt), sp,
-		   tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
+		   icsk->icsk_rto,
+		   icsk->icsk_ack.ato,
+		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
 		   );
 }
 
 static void get_timewait6_sock(struct seq_file *seq,
-			       struct tcp_tw_bucket *tw, int i)
+			       struct inet_timewait_sock *tw, int i)
 {
 	struct in6_addr *dest, *src;
 	__u16 destp, srcp;
+	struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
 	int ttd = tw->tw_ttd - jiffies;
 
 	if (ttd < 0)
 		ttd = 0;
 
-	dest = &tw->tw_v6_daddr;
-	src  = &tw->tw_v6_rcv_saddr;
+	dest = &tcp6tw->tw_v6_daddr;
+	src  = &tcp6tw->tw_v6_rcv_saddr;
 	destp = ntohs(tw->tw_dport);
 	srcp  = ntohs(tw->tw_sport);
 
@@ -2214,7 +2088,7 @@ struct proto tcpv6_prot = {
 	.close			= tcp_close,
 	.connect		= tcp_v6_connect,
 	.disconnect		= tcp_disconnect,
-	.accept			= tcp_accept,
+	.accept			= inet_csk_accept,
 	.ioctl			= tcp_ioctl,
 	.init			= tcp_v6_init_sock,
 	.destroy		= tcp_v6_destroy_sock,
@@ -2231,11 +2105,13 @@ struct proto tcpv6_prot = {
 	.sockets_allocated	= &tcp_sockets_allocated,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
+	.orphan_count		= &tcp_orphan_count,
 	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp6_sock),
+	.twsk_obj_size		= sizeof(struct tcp6_timewait_sock),
 	.rsk_prot		= &tcp6_request_sock_ops,
 };
 
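
The new .twsk_obj_size entry is what lets generic timewait code allocate a protocol's full timewait structure (here tcp6_timewait_sock) without knowing its concrete type. A sketch of how an allocator can rely on nothing but that registered size (hypothetical names throughout):

	#include <stdlib.h>

	/* Minimal model: the protocol registers how big its timewait sock
	 * is; generic code allocates that many zeroed bytes and treats the
	 * start of the block as the common inet_timewait_sock header. */
	struct proto_sketch {
		size_t twsk_obj_size;
	};

	static void *twsk_alloc_sketch(const struct proto_sketch *prot)
	{
		void *tw = calloc(1, prot->twsk_obj_size);

		/* caller initializes the common header; protocol-specific
		 * fields (e.g. the IPv6 addresses) follow in the same block */
		return tw;
	}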
@@ -2245,8 +2121,6 @@ static struct inet6_protocol tcpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-extern struct proto_ops inet6_stream_ops;
-
 static struct inet_protosw tcpv6_protosw = {
 	.type		=	SOCK_STREAM,
 	.protocol	=	IPPROTO_TCP,