aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@ghostprotocols.net>2005-08-09 23:09:30 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-08-29 18:42:13 -0400
commit8feaf0c0a5488b3d898a9c207eb6678f44ba3f26 (patch)
treeddd004afe2f7c8295f6fdb94d34f78a42b5961cb /net
parent33b62231908c58ae04185e4f1063d1e35a7c8576 (diff)
[INET]: Generalise tcp_tw_bucket, aka TIME_WAIT sockets
This paves the way to generalise the rest of the sock ID lookup routines and saves some bytes in TCPv4 TIME_WAIT sockets on distro kernels (where IPv6 is always built as a module): [root@qemu ~]# grep tw_sock /proc/slabinfo tw_sock_TCPv6 0 0 128 31 1 tw_sock_TCP 0 0 96 41 1 [root@qemu ~]# Now if a protocol wants to use the TIME_WAIT generic infrastructure it only has to set the sk_prot->twsk_obj_size field with the size of its inet_timewait_sock derived sock and proto_register will create sk_prot->twsk_slab, for now it's only for INET sockets, but we can introduce timewait_sock later if some non INET transport protocol wants to use this stuff. Next changesets will take advantage of this new infrastructure to generalise even more TCP code. [acme@toy net-2.6.14]$ grep built-in /tmp/before.size /tmp/after.size /tmp/before.size: 188646 11764 5068 205478 322a6 net/ipv4/built-in.o /tmp/after.size: 188144 11764 5068 204976 320b0 net/ipv4/built-in.o [acme@toy net-2.6.14]$ Tested with both IPv4 & IPv6 (::1 (localhost) & ::ffff:172.20.0.1 (qemu host)). Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/sock.c35
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_diag.c10
-rw-r--r--net/ipv4/tcp_ipv4.c107
-rw-r--r--net/ipv4/tcp_minisocks.c142
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/tcp_ipv6.c100
7 files changed, 222 insertions, 184 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index a1a23be10aa3..aba31fedf2ac 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1378,7 +1378,8 @@ static LIST_HEAD(proto_list);
1378 1378
1379int proto_register(struct proto *prot, int alloc_slab) 1379int proto_register(struct proto *prot, int alloc_slab)
1380{ 1380{
1381 char *request_sock_slab_name; 1381 char *request_sock_slab_name = NULL;
1382 char *timewait_sock_slab_name;
1382 int rc = -ENOBUFS; 1383 int rc = -ENOBUFS;
1383 1384
1384 if (alloc_slab) { 1385 if (alloc_slab) {
@@ -1409,6 +1410,23 @@ int proto_register(struct proto *prot, int alloc_slab)
1409 goto out_free_request_sock_slab_name; 1410 goto out_free_request_sock_slab_name;
1410 } 1411 }
1411 } 1412 }
1413
1414 if (prot->twsk_obj_size) {
1415 static const char mask[] = "tw_sock_%s";
1416
1417 timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1418
1419 if (timewait_sock_slab_name == NULL)
1420 goto out_free_request_sock_slab;
1421
1422 sprintf(timewait_sock_slab_name, mask, prot->name);
1423 prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
1424 prot->twsk_obj_size,
1425 0, SLAB_HWCACHE_ALIGN,
1426 NULL, NULL);
1427 if (prot->twsk_slab == NULL)
1428 goto out_free_timewait_sock_slab_name;
1429 }
1412 } 1430 }
1413 1431
1414 write_lock(&proto_list_lock); 1432 write_lock(&proto_list_lock);
@@ -1417,6 +1435,13 @@ int proto_register(struct proto *prot, int alloc_slab)
1417 rc = 0; 1435 rc = 0;
1418out: 1436out:
1419 return rc; 1437 return rc;
1438out_free_timewait_sock_slab_name:
1439 kfree(timewait_sock_slab_name);
1440out_free_request_sock_slab:
1441 if (prot->rsk_prot && prot->rsk_prot->slab) {
1442 kmem_cache_destroy(prot->rsk_prot->slab);
1443 prot->rsk_prot->slab = NULL;
1444 }
1420out_free_request_sock_slab_name: 1445out_free_request_sock_slab_name:
1421 kfree(request_sock_slab_name); 1446 kfree(request_sock_slab_name);
1422out_free_sock_slab: 1447out_free_sock_slab:
@@ -1444,6 +1469,14 @@ void proto_unregister(struct proto *prot)
1444 prot->rsk_prot->slab = NULL; 1469 prot->rsk_prot->slab = NULL;
1445 } 1470 }
1446 1471
1472 if (prot->twsk_slab != NULL) {
1473 const char *name = kmem_cache_name(prot->twsk_slab);
1474
1475 kmem_cache_destroy(prot->twsk_slab);
1476 kfree(name);
1477 prot->twsk_slab = NULL;
1478 }
1479
1447 list_del(&prot->node); 1480 list_del(&prot->node);
1448 write_unlock(&proto_list_lock); 1481 write_unlock(&proto_list_lock);
1449} 1482}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2f4b1a374bb7..f1a708bf7a97 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -271,8 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
271 271
272DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); 272DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
273 273
274kmem_cache_t *tcp_timewait_cachep;
275
276atomic_t tcp_orphan_count = ATOMIC_INIT(0); 274atomic_t tcp_orphan_count = ATOMIC_INIT(0);
277 275
278int sysctl_tcp_mem[3]; 276int sysctl_tcp_mem[3];
@@ -2264,13 +2262,6 @@ void __init tcp_init(void)
2264 if (!tcp_hashinfo.bind_bucket_cachep) 2262 if (!tcp_hashinfo.bind_bucket_cachep)
2265 panic("tcp_init: Cannot alloc tcp_bind_bucket cache."); 2263 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2266 2264
2267 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
2268 sizeof(struct tcp_tw_bucket),
2269 0, SLAB_HWCACHE_ALIGN,
2270 NULL, NULL);
2271 if (!tcp_timewait_cachep)
2272 panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
2273
2274 /* Size and allocate the main established and bind bucket 2265 /* Size and allocate the main established and bind bucket
2275 * hash tables. 2266 * hash tables.
2276 * 2267 *
@@ -2363,4 +2354,3 @@ EXPORT_SYMBOL(tcp_sendpage);
2363EXPORT_SYMBOL(tcp_setsockopt); 2354EXPORT_SYMBOL(tcp_setsockopt);
2364EXPORT_SYMBOL(tcp_shutdown); 2355EXPORT_SYMBOL(tcp_shutdown);
2365EXPORT_SYMBOL(tcp_statistics); 2356EXPORT_SYMBOL(tcp_statistics);
2366EXPORT_SYMBOL(tcp_timewait_cachep);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 1a89a03c449b..6f2d6f2276b9 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -81,7 +81,7 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
81 r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); 81 r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
82 82
83 if (r->tcpdiag_state == TCP_TIME_WAIT) { 83 if (r->tcpdiag_state == TCP_TIME_WAIT) {
84 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket*)sk; 84 const struct inet_timewait_sock *tw = inet_twsk(sk);
85 long tmo = tw->tw_ttd - jiffies; 85 long tmo = tw->tw_ttd - jiffies;
86 if (tmo < 0) 86 if (tmo < 0)
87 tmo = 0; 87 tmo = 0;
@@ -99,10 +99,12 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
99 r->tcpdiag_inode = 0; 99 r->tcpdiag_inode = 0;
100#ifdef CONFIG_IP_TCPDIAG_IPV6 100#ifdef CONFIG_IP_TCPDIAG_IPV6
101 if (r->tcpdiag_family == AF_INET6) { 101 if (r->tcpdiag_family == AF_INET6) {
102 const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
103
102 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src, 104 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
103 &tw->tw_v6_rcv_saddr); 105 &tcp6tw->tw_v6_rcv_saddr);
104 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst, 106 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
105 &tw->tw_v6_daddr); 107 &tcp6tw->tw_v6_daddr);
106 } 108 }
107#endif 109#endif
108 nlh->nlmsg_len = skb->tail - b; 110 nlh->nlmsg_len = skb->tail - b;
@@ -239,7 +241,7 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
239out: 241out:
240 if (sk) { 242 if (sk) {
241 if (sk->sk_state == TCP_TIME_WAIT) 243 if (sk->sk_state == TCP_TIME_WAIT)
242 tcp_tw_put((struct tcp_tw_bucket*)sk); 244 inet_twsk_put((struct inet_timewait_sock *)sk);
243 else 245 else
244 sock_put(sk); 246 sock_put(sk);
245 } 247 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a678709b36f6..ce423e48ebe0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -106,7 +106,7 @@ int sysctl_local_port_range[2] = { 1024, 4999 };
106 106
107static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) 107static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
108{ 108{
109 const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); 109 const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
110 struct sock *sk2; 110 struct sock *sk2;
111 struct hlist_node *node; 111 struct hlist_node *node;
112 int reuse = sk->sk_reuse; 112 int reuse = sk->sk_reuse;
@@ -119,7 +119,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
119 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 119 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
120 if (!reuse || !sk2->sk_reuse || 120 if (!reuse || !sk2->sk_reuse ||
121 sk2->sk_state == TCP_LISTEN) { 121 sk2->sk_state == TCP_LISTEN) {
122 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); 122 const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
123 if (!sk2_rcv_saddr || !sk_rcv_saddr || 123 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
124 sk2_rcv_saddr == sk_rcv_saddr) 124 sk2_rcv_saddr == sk_rcv_saddr)
125 break; 125 break;
@@ -251,10 +251,10 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
251 const int dif) 251 const int dif)
252{ 252{
253 struct inet_ehash_bucket *head; 253 struct inet_ehash_bucket *head;
254 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) 254 INET_ADDR_COOKIE(acookie, saddr, daddr)
255 __u32 ports = TCP_COMBINED_PORTS(sport, hnum); 255 const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
256 struct sock *sk; 256 struct sock *sk;
257 struct hlist_node *node; 257 const struct hlist_node *node;
258 /* Optimize here for direct hit, only listening connections can 258 /* Optimize here for direct hit, only listening connections can
259 * have wildcards anyways. 259 * have wildcards anyways.
260 */ 260 */
@@ -262,13 +262,13 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
262 head = &tcp_hashinfo.ehash[hash]; 262 head = &tcp_hashinfo.ehash[hash];
263 read_lock(&head->lock); 263 read_lock(&head->lock);
264 sk_for_each(sk, node, &head->chain) { 264 sk_for_each(sk, node, &head->chain) {
265 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) 265 if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
266 goto hit; /* You sunk my battleship! */ 266 goto hit; /* You sunk my battleship! */
267 } 267 }
268 268
269 /* Must check for a TIME_WAIT'er before going to listener hash. */ 269 /* Must check for a TIME_WAIT'er before going to listener hash. */
270 sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) { 270 sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
271 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) 271 if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
272 goto hit; 272 goto hit;
273 } 273 }
274 sk = NULL; 274 sk = NULL;
@@ -313,27 +313,28 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
313 313
314/* called with local bh disabled */ 314/* called with local bh disabled */
315static int __tcp_v4_check_established(struct sock *sk, __u16 lport, 315static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
316 struct tcp_tw_bucket **twp) 316 struct inet_timewait_sock **twp)
317{ 317{
318 struct inet_sock *inet = inet_sk(sk); 318 struct inet_sock *inet = inet_sk(sk);
319 u32 daddr = inet->rcv_saddr; 319 u32 daddr = inet->rcv_saddr;
320 u32 saddr = inet->daddr; 320 u32 saddr = inet->daddr;
321 int dif = sk->sk_bound_dev_if; 321 int dif = sk->sk_bound_dev_if;
322 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) 322 INET_ADDR_COOKIE(acookie, saddr, daddr)
323 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); 323 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
324 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); 324 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
325 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; 325 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
326 struct sock *sk2; 326 struct sock *sk2;
327 struct hlist_node *node; 327 const struct hlist_node *node;
328 struct tcp_tw_bucket *tw; 328 struct inet_timewait_sock *tw;
329 329
330 write_lock(&head->lock); 330 write_lock(&head->lock);
331 331
332 /* Check TIME-WAIT sockets first. */ 332 /* Check TIME-WAIT sockets first. */
333 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { 333 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
334 tw = (struct tcp_tw_bucket *)sk2; 334 tw = inet_twsk(sk2);
335 335
336 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { 336 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
337 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
337 struct tcp_sock *tp = tcp_sk(sk); 338 struct tcp_sock *tp = tcp_sk(sk);
338 339
339 /* With PAWS, it is safe from the viewpoint 340 /* With PAWS, it is safe from the viewpoint
@@ -350,15 +351,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
350 fall back to VJ's scheme and use initial 351 fall back to VJ's scheme and use initial
351 timestamp retrieved from peer table. 352 timestamp retrieved from peer table.
352 */ 353 */
353 if (tw->tw_ts_recent_stamp && 354 if (tcptw->tw_ts_recent_stamp &&
354 (!twp || (sysctl_tcp_tw_reuse && 355 (!twp || (sysctl_tcp_tw_reuse &&
355 xtime.tv_sec - 356 xtime.tv_sec -
356 tw->tw_ts_recent_stamp > 1))) { 357 tcptw->tw_ts_recent_stamp > 1))) {
357 if ((tp->write_seq = 358 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
358 tw->tw_snd_nxt + 65535 + 2) == 0) 359 if (tp->write_seq == 0)
359 tp->write_seq = 1; 360 tp->write_seq = 1;
360 tp->rx_opt.ts_recent = tw->tw_ts_recent; 361 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
361 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 362 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
362 sock_hold(sk2); 363 sock_hold(sk2);
363 goto unique; 364 goto unique;
364 } else 365 } else
@@ -369,7 +370,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
369 370
370 /* And established part... */ 371 /* And established part... */
371 sk_for_each(sk2, node, &head->chain) { 372 sk_for_each(sk2, node, &head->chain) {
372 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 373 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
373 goto not_unique; 374 goto not_unique;
374 } 375 }
375 376
@@ -392,7 +393,7 @@ unique:
392 tcp_tw_deschedule(tw); 393 tcp_tw_deschedule(tw);
393 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 394 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
394 395
395 tcp_tw_put(tw); 396 inet_twsk_put(tw);
396 } 397 }
397 398
398 return 0; 399 return 0;
@@ -429,7 +430,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
429 static u32 hint; 430 static u32 hint;
430 u32 offset = hint + connect_port_offset(sk); 431 u32 offset = hint + connect_port_offset(sk);
431 struct hlist_node *node; 432 struct hlist_node *node;
432 struct tcp_tw_bucket *tw = NULL; 433 struct inet_timewait_sock *tw = NULL;
433 434
434 local_bh_disable(); 435 local_bh_disable();
435 for (i = 1; i <= range; i++) { 436 for (i = 1; i <= range; i++) {
@@ -482,7 +483,7 @@ ok:
482 483
483 if (tw) { 484 if (tw) {
484 tcp_tw_deschedule(tw); 485 tcp_tw_deschedule(tw);
485 tcp_tw_put(tw); 486 inet_twsk_put(tw);
486 } 487 }
487 488
488 ret = 0; 489 ret = 0;
@@ -757,7 +758,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
757 return; 758 return;
758 } 759 }
759 if (sk->sk_state == TCP_TIME_WAIT) { 760 if (sk->sk_state == TCP_TIME_WAIT) {
760 tcp_tw_put((struct tcp_tw_bucket *)sk); 761 inet_twsk_put((struct inet_timewait_sock *)sk);
761 return; 762 return;
762 } 763 }
763 764
@@ -1002,12 +1003,13 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1002 1003
1003static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) 1004static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1004{ 1005{
1005 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; 1006 struct inet_timewait_sock *tw = inet_twsk(sk);
1007 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1006 1008
1007 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt, 1009 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1008 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent); 1010 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
1009 1011
1010 tcp_tw_put(tw); 1012 inet_twsk_put(tw);
1011} 1013}
1012 1014
1013static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1015static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1368,7 +1370,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1368 bh_lock_sock(nsk); 1370 bh_lock_sock(nsk);
1369 return nsk; 1371 return nsk;
1370 } 1372 }
1371 tcp_tw_put((struct tcp_tw_bucket *)nsk); 1373 inet_twsk_put((struct inet_timewait_sock *)nsk);
1372 return NULL; 1374 return NULL;
1373 } 1375 }
1374 1376
@@ -1557,25 +1559,25 @@ discard_and_relse:
1557 1559
1558do_time_wait: 1560do_time_wait:
1559 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1561 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1560 tcp_tw_put((struct tcp_tw_bucket *) sk); 1562 inet_twsk_put((struct inet_timewait_sock *) sk);
1561 goto discard_it; 1563 goto discard_it;
1562 } 1564 }
1563 1565
1564 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1566 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1565 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1567 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1566 tcp_tw_put((struct tcp_tw_bucket *) sk); 1568 inet_twsk_put((struct inet_timewait_sock *) sk);
1567 goto discard_it; 1569 goto discard_it;
1568 } 1570 }
1569 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk, 1571 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1570 skb, th, skb->len)) { 1572 skb, th)) {
1571 case TCP_TW_SYN: { 1573 case TCP_TW_SYN: {
1572 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1574 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1573 skb->nh.iph->daddr, 1575 skb->nh.iph->daddr,
1574 ntohs(th->dest), 1576 ntohs(th->dest),
1575 tcp_v4_iif(skb)); 1577 tcp_v4_iif(skb));
1576 if (sk2) { 1578 if (sk2) {
1577 tcp_tw_deschedule((struct tcp_tw_bucket *)sk); 1579 tcp_tw_deschedule((struct inet_timewait_sock *)sk);
1578 tcp_tw_put((struct tcp_tw_bucket *)sk); 1580 inet_twsk_put((struct inet_timewait_sock *)sk);
1579 sk = sk2; 1581 sk = sk2;
1580 goto process; 1582 goto process;
1581 } 1583 }
@@ -1639,18 +1641,18 @@ int tcp_v4_remember_stamp(struct sock *sk)
1639 return 0; 1641 return 0;
1640} 1642}
1641 1643
1642int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw) 1644int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1643{ 1645{
1644 struct inet_peer *peer = NULL; 1646 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1645
1646 peer = inet_getpeer(tw->tw_daddr, 1);
1647 1647
1648 if (peer) { 1648 if (peer) {
1649 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 || 1649 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1650
1651 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1650 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && 1652 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
1651 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) { 1653 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1652 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp; 1654 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1653 peer->tcp_ts = tw->tw_ts_recent; 1655 peer->tcp_ts = tcptw->tw_ts_recent;
1654 } 1656 }
1655 inet_putpeer(peer); 1657 inet_putpeer(peer);
1656 return 1; 1658 return 1;
@@ -1758,13 +1760,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
1758#ifdef CONFIG_PROC_FS 1760#ifdef CONFIG_PROC_FS
1759/* Proc filesystem TCP sock list dumping. */ 1761/* Proc filesystem TCP sock list dumping. */
1760 1762
1761static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head) 1763static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1762{ 1764{
1763 return hlist_empty(head) ? NULL : 1765 return hlist_empty(head) ? NULL :
1764 list_entry(head->first, struct tcp_tw_bucket, tw_node); 1766 list_entry(head->first, struct inet_timewait_sock, tw_node);
1765} 1767}
1766 1768
1767static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw) 1769static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1768{ 1770{
1769 return tw->tw_node.next ? 1771 return tw->tw_node.next ?
1770 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; 1772 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
@@ -1860,7 +1862,7 @@ static void *established_get_first(struct seq_file *seq)
1860 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { 1862 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1861 struct sock *sk; 1863 struct sock *sk;
1862 struct hlist_node *node; 1864 struct hlist_node *node;
1863 struct tcp_tw_bucket *tw; 1865 struct inet_timewait_sock *tw;
1864 1866
1865 /* We can reschedule _before_ having picked the target: */ 1867 /* We can reschedule _before_ having picked the target: */
1866 cond_resched_softirq(); 1868 cond_resched_softirq();
@@ -1874,8 +1876,8 @@ static void *established_get_first(struct seq_file *seq)
1874 goto out; 1876 goto out;
1875 } 1877 }
1876 st->state = TCP_SEQ_STATE_TIME_WAIT; 1878 st->state = TCP_SEQ_STATE_TIME_WAIT;
1877 tw_for_each(tw, node, 1879 inet_twsk_for_each(tw, node,
1878 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) { 1880 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
1879 if (tw->tw_family != st->family) { 1881 if (tw->tw_family != st->family) {
1880 continue; 1882 continue;
1881 } 1883 }
@@ -1892,7 +1894,7 @@ out:
1892static void *established_get_next(struct seq_file *seq, void *cur) 1894static void *established_get_next(struct seq_file *seq, void *cur)
1893{ 1895{
1894 struct sock *sk = cur; 1896 struct sock *sk = cur;
1895 struct tcp_tw_bucket *tw; 1897 struct inet_timewait_sock *tw;
1896 struct hlist_node *node; 1898 struct hlist_node *node;
1897 struct tcp_iter_state* st = seq->private; 1899 struct tcp_iter_state* st = seq->private;
1898 1900
@@ -2159,7 +2161,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2159 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); 2161 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2160} 2162}
2161 2163
2162static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i) 2164static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
2163{ 2165{
2164 unsigned int dest, src; 2166 unsigned int dest, src;
2165 __u16 destp, srcp; 2167 __u16 destp, srcp;
@@ -2261,6 +2263,7 @@ struct proto tcp_prot = {
2261 .sysctl_rmem = sysctl_tcp_rmem, 2263 .sysctl_rmem = sysctl_tcp_rmem,
2262 .max_header = MAX_TCP_HEADER, 2264 .max_header = MAX_TCP_HEADER,
2263 .obj_size = sizeof(struct tcp_sock), 2265 .obj_size = sizeof(struct tcp_sock),
2266 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2264 .rsk_prot = &tcp_request_sock_ops, 2267 .rsk_prot = &tcp_request_sock_ops,
2265}; 2268};
2266 2269
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f29e2f6ebe1b..5b5a49335fbb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -41,7 +41,7 @@ int sysctl_tcp_max_tw_buckets = NR_FILE*2;
41int sysctl_tcp_syncookies = SYNC_INIT; 41int sysctl_tcp_syncookies = SYNC_INIT;
42int sysctl_tcp_abort_on_overflow; 42int sysctl_tcp_abort_on_overflow;
43 43
44static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo); 44static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
45 45
46static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 46static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
47{ 47{
@@ -58,7 +58,7 @@ int tcp_tw_count;
58 58
59 59
60/* Must be called with locally disabled BHs. */ 60/* Must be called with locally disabled BHs. */
61static void tcp_timewait_kill(struct tcp_tw_bucket *tw) 61static void tcp_timewait_kill(struct inet_timewait_sock *tw)
62{ 62{
63 struct inet_bind_hashbucket *bhead; 63 struct inet_bind_hashbucket *bhead;
64 struct inet_bind_bucket *tb; 64 struct inet_bind_bucket *tb;
@@ -85,11 +85,11 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
85 85
86#ifdef SOCK_REFCNT_DEBUG 86#ifdef SOCK_REFCNT_DEBUG
87 if (atomic_read(&tw->tw_refcnt) != 1) { 87 if (atomic_read(&tw->tw_refcnt) != 1) {
88 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, 88 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
89 atomic_read(&tw->tw_refcnt)); 89 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
90 } 90 }
91#endif 91#endif
92 tcp_tw_put(tw); 92 inet_twsk_put(tw);
93} 93}
94 94
95/* 95/*
@@ -121,19 +121,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
121 * to avoid misread sequence numbers, states etc. --ANK 121 * to avoid misread sequence numbers, states etc. --ANK
122 */ 122 */
123enum tcp_tw_status 123enum tcp_tw_status
124tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, 124tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
125 struct tcphdr *th, unsigned len) 125 const struct tcphdr *th)
126{ 126{
127 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
127 struct tcp_options_received tmp_opt; 128 struct tcp_options_received tmp_opt;
128 int paws_reject = 0; 129 int paws_reject = 0;
129 130
130 tmp_opt.saw_tstamp = 0; 131 tmp_opt.saw_tstamp = 0;
131 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { 132 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
132 tcp_parse_options(skb, &tmp_opt, 0); 133 tcp_parse_options(skb, &tmp_opt, 0);
133 134
134 if (tmp_opt.saw_tstamp) { 135 if (tmp_opt.saw_tstamp) {
135 tmp_opt.ts_recent = tw->tw_ts_recent; 136 tmp_opt.ts_recent = tcptw->tw_ts_recent;
136 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 137 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
137 paws_reject = tcp_paws_check(&tmp_opt, th->rst); 138 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
138 } 139 }
139 } 140 }
@@ -144,20 +145,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
144 /* Out of window, send ACK */ 145 /* Out of window, send ACK */
145 if (paws_reject || 146 if (paws_reject ||
146 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 147 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
147 tw->tw_rcv_nxt, 148 tcptw->tw_rcv_nxt,
148 tw->tw_rcv_nxt + tw->tw_rcv_wnd)) 149 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
149 return TCP_TW_ACK; 150 return TCP_TW_ACK;
150 151
151 if (th->rst) 152 if (th->rst)
152 goto kill; 153 goto kill;
153 154
154 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt)) 155 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
155 goto kill_with_rst; 156 goto kill_with_rst;
156 157
157 /* Dup ACK? */ 158 /* Dup ACK? */
158 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) || 159 if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
159 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { 160 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
160 tcp_tw_put(tw); 161 inet_twsk_put(tw);
161 return TCP_TW_SUCCESS; 162 return TCP_TW_SUCCESS;
162 } 163 }
163 164
@@ -165,19 +166,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
165 * reset. 166 * reset.
166 */ 167 */
167 if (!th->fin || 168 if (!th->fin ||
168 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) { 169 TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
169kill_with_rst: 170kill_with_rst:
170 tcp_tw_deschedule(tw); 171 tcp_tw_deschedule(tw);
171 tcp_tw_put(tw); 172 inet_twsk_put(tw);
172 return TCP_TW_RST; 173 return TCP_TW_RST;
173 } 174 }
174 175
175 /* FIN arrived, enter true time-wait state. */ 176 /* FIN arrived, enter true time-wait state. */
176 tw->tw_substate = TCP_TIME_WAIT; 177 tw->tw_substate = TCP_TIME_WAIT;
177 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; 178 tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
178 if (tmp_opt.saw_tstamp) { 179 if (tmp_opt.saw_tstamp) {
179 tw->tw_ts_recent_stamp = xtime.tv_sec; 180 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
180 tw->tw_ts_recent = tmp_opt.rcv_tsval; 181 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
181 } 182 }
182 183
183 /* I am shamed, but failed to make it more elegant. 184 /* I am shamed, but failed to make it more elegant.
@@ -186,7 +187,7 @@ kill_with_rst:
186 * do not undertsnad recycling in any case, it not 187 * do not undertsnad recycling in any case, it not
187 * a big problem in practice. --ANK */ 188 * a big problem in practice. --ANK */
188 if (tw->tw_family == AF_INET && 189 if (tw->tw_family == AF_INET &&
189 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp && 190 sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
190 tcp_v4_tw_remember_stamp(tw)) 191 tcp_v4_tw_remember_stamp(tw))
191 tcp_tw_schedule(tw, tw->tw_timeout); 192 tcp_tw_schedule(tw, tw->tw_timeout);
192 else 193 else
@@ -212,7 +213,7 @@ kill_with_rst:
212 */ 213 */
213 214
214 if (!paws_reject && 215 if (!paws_reject &&
215 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt && 216 (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
216 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { 217 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
217 /* In window segment, it may be only reset or bare ack. */ 218 /* In window segment, it may be only reset or bare ack. */
218 219
@@ -224,18 +225,18 @@ kill_with_rst:
224 if (sysctl_tcp_rfc1337 == 0) { 225 if (sysctl_tcp_rfc1337 == 0) {
225kill: 226kill:
226 tcp_tw_deschedule(tw); 227 tcp_tw_deschedule(tw);
227 tcp_tw_put(tw); 228 inet_twsk_put(tw);
228 return TCP_TW_SUCCESS; 229 return TCP_TW_SUCCESS;
229 } 230 }
230 } 231 }
231 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 232 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
232 233
233 if (tmp_opt.saw_tstamp) { 234 if (tmp_opt.saw_tstamp) {
234 tw->tw_ts_recent = tmp_opt.rcv_tsval; 235 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
235 tw->tw_ts_recent_stamp = xtime.tv_sec; 236 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
236 } 237 }
237 238
238 tcp_tw_put(tw); 239 inet_twsk_put(tw);
239 return TCP_TW_SUCCESS; 240 return TCP_TW_SUCCESS;
240 } 241 }
241 242
@@ -257,9 +258,10 @@ kill:
257 */ 258 */
258 259
259 if (th->syn && !th->rst && !th->ack && !paws_reject && 260 if (th->syn && !th->rst && !th->ack && !paws_reject &&
260 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || 261 (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
261 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) { 262 (tmp_opt.saw_tstamp &&
262 u32 isn = tw->tw_snd_nxt + 65535 + 2; 263 (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
264 u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
263 if (isn == 0) 265 if (isn == 0)
264 isn++; 266 isn++;
265 TCP_SKB_CB(skb)->when = isn; 267 TCP_SKB_CB(skb)->when = isn;
@@ -284,7 +286,7 @@ kill:
284 */ 286 */
285 return TCP_TW_ACK; 287 return TCP_TW_ACK;
286 } 288 }
287 tcp_tw_put(tw); 289 inet_twsk_put(tw);
288 return TCP_TW_SUCCESS; 290 return TCP_TW_SUCCESS;
289} 291}
290 292
@@ -293,7 +295,7 @@ kill:
293 * relevant info into it from the SK, and mess with hash chains 295 * relevant info into it from the SK, and mess with hash chains
294 * and list linkage. 296 * and list linkage.
295 */ 297 */
296static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) 298static void __tcp_tw_hashdance(struct sock *sk, struct inet_timewait_sock *tw)
297{ 299{
298 const struct inet_sock *inet = inet_sk(sk); 300 const struct inet_sock *inet = inet_sk(sk);
299 struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent]; 301 struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
@@ -306,7 +308,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
306 spin_lock(&bhead->lock); 308 spin_lock(&bhead->lock);
307 tw->tw_tb = inet->bind_hash; 309 tw->tw_tb = inet->bind_hash;
308 BUG_TRAP(inet->bind_hash); 310 BUG_TRAP(inet->bind_hash);
309 tw_add_bind_node(tw, &tw->tw_tb->owners); 311 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
310 spin_unlock(&bhead->lock); 312 spin_unlock(&bhead->lock);
311 313
312 write_lock(&ehead->lock); 314 write_lock(&ehead->lock);
@@ -316,7 +318,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
316 sock_prot_dec_use(sk->sk_prot); 318 sock_prot_dec_use(sk->sk_prot);
317 319
318 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */ 320 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
319 tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain); 321 inet_twsk_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
320 atomic_inc(&tw->tw_refcnt); 322 atomic_inc(&tw->tw_refcnt);
321 323
322 write_unlock(&ehead->lock); 324 write_unlock(&ehead->lock);
@@ -327,19 +329,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
327 */ 329 */
328void tcp_time_wait(struct sock *sk, int state, int timeo) 330void tcp_time_wait(struct sock *sk, int state, int timeo)
329{ 331{
330 struct tcp_tw_bucket *tw = NULL; 332 struct inet_timewait_sock *tw = NULL;
331 struct tcp_sock *tp = tcp_sk(sk); 333 const struct tcp_sock *tp = tcp_sk(sk);
332 int recycle_ok = 0; 334 int recycle_ok = 0;
333 335
334 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp) 336 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
335 recycle_ok = tp->af_specific->remember_stamp(sk); 337 recycle_ok = tp->af_specific->remember_stamp(sk);
336 338
337 if (tcp_tw_count < sysctl_tcp_max_tw_buckets) 339 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
338 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC); 340 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab, SLAB_ATOMIC);
341
342 if (tw != NULL) {
343 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
344 const struct inet_sock *inet = inet_sk(sk);
345 const int rto = (tp->rto << 2) - (tp->rto >> 1);
339 346
340 if(tw != NULL) { 347 /* Remember our protocol */
341 struct inet_sock *inet = inet_sk(sk); 348 tw->tw_prot = sk->sk_prot_creator;
342 int rto = (tp->rto<<2) - (tp->rto>>1);
343 349
344 /* Give us an identity. */ 350 /* Give us an identity. */
345 tw->tw_daddr = inet->daddr; 351 tw->tw_daddr = inet->daddr;
@@ -356,25 +362,23 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
356 atomic_set(&tw->tw_refcnt, 1); 362 atomic_set(&tw->tw_refcnt, 1);
357 363
358 tw->tw_hashent = sk->sk_hashent; 364 tw->tw_hashent = sk->sk_hashent;
359 tw->tw_rcv_nxt = tp->rcv_nxt; 365 tcptw->tw_rcv_nxt = tp->rcv_nxt;
360 tw->tw_snd_nxt = tp->snd_nxt; 366 tcptw->tw_snd_nxt = tp->snd_nxt;
361 tw->tw_rcv_wnd = tcp_receive_window(tp); 367 tcptw->tw_rcv_wnd = tcp_receive_window(tp);
362 tw->tw_ts_recent = tp->rx_opt.ts_recent; 368 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
363 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 369 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
364 tw_dead_node_init(tw); 370 inet_twsk_dead_node_init(tw);
365 371
366#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 372#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
367 if (tw->tw_family == PF_INET6) { 373 if (tw->tw_family == PF_INET6) {
368 struct ipv6_pinfo *np = inet6_sk(sk); 374 struct ipv6_pinfo *np = inet6_sk(sk);
375 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
369 376
370 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr); 377 ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
371 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr); 378 ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
372 tw->tw_v6_ipv6only = np->ipv6only; 379 tw->tw_ipv6only = np->ipv6only;
373 } else { 380 } else
374 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr)); 381 tw->tw_ipv6only = 0;
375 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
376 tw->tw_v6_ipv6only = 0;
377 }
378#endif 382#endif
379 /* Linkage updates. */ 383 /* Linkage updates. */
380 __tcp_tw_hashdance(sk, tw); 384 __tcp_tw_hashdance(sk, tw);
@@ -392,7 +396,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
392 } 396 }
393 397
394 tcp_tw_schedule(tw, timeo); 398 tcp_tw_schedule(tw, timeo);
395 tcp_tw_put(tw); 399 inet_twsk_put(tw);
396 } else { 400 } else {
397 /* Sorry, if we're out of memory, just CLOSE this 401 /* Sorry, if we're out of memory, just CLOSE this
398 * socket up. We've got bigger problems than 402 * socket up. We've got bigger problems than
@@ -427,7 +431,7 @@ static u32 twkill_thread_slots;
427/* Returns non-zero if quota exceeded. */ 431/* Returns non-zero if quota exceeded. */
428static int tcp_do_twkill_work(int slot, unsigned int quota) 432static int tcp_do_twkill_work(int slot, unsigned int quota)
429{ 433{
430 struct tcp_tw_bucket *tw; 434 struct inet_timewait_sock *tw;
431 struct hlist_node *node; 435 struct hlist_node *node;
432 unsigned int killed; 436 unsigned int killed;
433 int ret; 437 int ret;
@@ -441,11 +445,11 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
441 killed = 0; 445 killed = 0;
442 ret = 0; 446 ret = 0;
443rescan: 447rescan:
444 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) { 448 inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
445 __tw_del_dead_node(tw); 449 __inet_twsk_del_dead_node(tw);
446 spin_unlock(&tw_death_lock); 450 spin_unlock(&tw_death_lock);
447 tcp_timewait_kill(tw); 451 tcp_timewait_kill(tw);
448 tcp_tw_put(tw); 452 inet_twsk_put(tw);
449 killed++; 453 killed++;
450 spin_lock(&tw_death_lock); 454 spin_lock(&tw_death_lock);
451 if (killed > quota) { 455 if (killed > quota) {
@@ -531,11 +535,11 @@ static void twkill_work(void *dummy)
531 */ 535 */
532 536
533/* This is for handling early-kills of TIME_WAIT sockets. */ 537/* This is for handling early-kills of TIME_WAIT sockets. */
534void tcp_tw_deschedule(struct tcp_tw_bucket *tw) 538void tcp_tw_deschedule(struct inet_timewait_sock *tw)
535{ 539{
536 spin_lock(&tw_death_lock); 540 spin_lock(&tw_death_lock);
537 if (tw_del_dead_node(tw)) { 541 if (inet_twsk_del_dead_node(tw)) {
538 tcp_tw_put(tw); 542 inet_twsk_put(tw);
539 if (--tcp_tw_count == 0) 543 if (--tcp_tw_count == 0)
540 del_timer(&tcp_tw_timer); 544 del_timer(&tcp_tw_timer);
541 } 545 }
@@ -552,7 +556,7 @@ static struct timer_list tcp_twcal_timer =
552 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0); 556 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
553static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS]; 557static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
554 558
555static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) 559static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
556{ 560{
557 struct hlist_head *list; 561 struct hlist_head *list;
558 int slot; 562 int slot;
@@ -586,7 +590,7 @@ static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
586 spin_lock(&tw_death_lock); 590 spin_lock(&tw_death_lock);
587 591
588 /* Unlink it, if it was scheduled */ 592 /* Unlink it, if it was scheduled */
589 if (tw_del_dead_node(tw)) 593 if (inet_twsk_del_dead_node(tw))
590 tcp_tw_count--; 594 tcp_tw_count--;
591 else 595 else
592 atomic_inc(&tw->tw_refcnt); 596 atomic_inc(&tw->tw_refcnt);
@@ -644,13 +648,13 @@ void tcp_twcal_tick(unsigned long dummy)
644 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) { 648 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
645 if (time_before_eq(j, now)) { 649 if (time_before_eq(j, now)) {
646 struct hlist_node *node, *safe; 650 struct hlist_node *node, *safe;
647 struct tcp_tw_bucket *tw; 651 struct inet_timewait_sock *tw;
648 652
649 tw_for_each_inmate_safe(tw, node, safe, 653 inet_twsk_for_each_inmate_safe(tw, node, safe,
650 &tcp_twcal_row[slot]) { 654 &tcp_twcal_row[slot]) {
651 __tw_del_dead_node(tw); 655 __inet_twsk_del_dead_node(tw);
652 tcp_timewait_kill(tw); 656 tcp_timewait_kill(tw);
653 tcp_tw_put(tw); 657 inet_twsk_put(tw);
654 killed++; 658 killed++;
655 } 659 }
656 } else { 660 } else {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 77004b9456c0..4582d9cf4bbe 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1041,7 +1041,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
1041 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 1041 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
1042 const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2); 1042 const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2);
1043 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; 1043 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
1044 u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); 1044 u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
1045 int sk_ipv6only = ipv6_only_sock(sk); 1045 int sk_ipv6only = ipv6_only_sock(sk);
1046 int sk2_ipv6only = tcp_v6_ipv6only(sk2); 1046 int sk2_ipv6only = tcp_v6_ipv6only(sk2);
1047 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 1047 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93a66b9a76e1..af8ad5bb273b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -308,33 +308,32 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
308 struct in6_addr *daddr, u16 hnum, 308 struct in6_addr *daddr, u16 hnum,
309 int dif) 309 int dif)
310{ 310{
311 struct inet_ehash_bucket *head;
312 struct sock *sk; 311 struct sock *sk;
313 struct hlist_node *node; 312 const struct hlist_node *node;
314 __u32 ports = TCP_COMBINED_PORTS(sport, hnum); 313 const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
315 int hash;
316
317 /* Optimize here for direct hit, only listening connections can 314 /* Optimize here for direct hit, only listening connections can
318 * have wildcards anyways. 315 * have wildcards anyways.
319 */ 316 */
320 hash = tcp_v6_hashfn(daddr, hnum, saddr, sport); 317 const int hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
321 head = &tcp_hashinfo.ehash[hash]; 318 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
319
322 read_lock(&head->lock); 320 read_lock(&head->lock);
323 sk_for_each(sk, node, &head->chain) { 321 sk_for_each(sk, node, &head->chain) {
324 /* For IPV6 do the cheaper port and family tests first. */ 322 /* For IPV6 do the cheaper port and family tests first. */
325 if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif)) 323 if (INET6_MATCH(sk, saddr, daddr, ports, dif))
326 goto hit; /* You sunk my battleship! */ 324 goto hit; /* You sunk my battleship! */
327 } 325 }
328 /* Must check for a TIME_WAIT'er before going to listener hash. */ 326 /* Must check for a TIME_WAIT'er before going to listener hash. */
329 sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) { 327 sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
330 /* FIXME: acme: check this... */ 328 const struct inet_timewait_sock *tw = inet_twsk(sk);
331 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
332 329
333 if(*((__u32 *)&(tw->tw_dport)) == ports && 330 if(*((__u32 *)&(tw->tw_dport)) == ports &&
334 sk->sk_family == PF_INET6) { 331 sk->sk_family == PF_INET6) {
335 if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) && 332 const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
336 ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) && 333
337 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)) 334 if (ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
335 ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
336 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
338 goto hit; 337 goto hit;
339 } 338 }
340 } 339 }
@@ -455,43 +454,46 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
455} 454}
456 455
457static int __tcp_v6_check_established(struct sock *sk, __u16 lport, 456static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
458 struct tcp_tw_bucket **twp) 457 struct inet_timewait_sock **twp)
459{ 458{
460 struct inet_sock *inet = inet_sk(sk); 459 struct inet_sock *inet = inet_sk(sk);
461 struct ipv6_pinfo *np = inet6_sk(sk); 460 struct ipv6_pinfo *np = inet6_sk(sk);
462 struct in6_addr *daddr = &np->rcv_saddr; 461 struct in6_addr *daddr = &np->rcv_saddr;
463 struct in6_addr *saddr = &np->daddr; 462 struct in6_addr *saddr = &np->daddr;
464 int dif = sk->sk_bound_dev_if; 463 int dif = sk->sk_bound_dev_if;
465 u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); 464 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
466 int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); 465 const int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
467 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; 466 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
468 struct sock *sk2; 467 struct sock *sk2;
469 struct hlist_node *node; 468 const struct hlist_node *node;
470 struct tcp_tw_bucket *tw; 469 struct inet_timewait_sock *tw;
471 470
472 write_lock(&head->lock); 471 write_lock(&head->lock);
473 472
474 /* Check TIME-WAIT sockets first. */ 473 /* Check TIME-WAIT sockets first. */
475 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { 474 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
476 tw = (struct tcp_tw_bucket*)sk2; 475 const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);
476
477 tw = inet_twsk(sk2);
477 478
478 if(*((__u32 *)&(tw->tw_dport)) == ports && 479 if(*((__u32 *)&(tw->tw_dport)) == ports &&
479 sk2->sk_family == PF_INET6 && 480 sk2->sk_family == PF_INET6 &&
480 ipv6_addr_equal(&tw->tw_v6_daddr, saddr) && 481 ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
481 ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) && 482 ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
482 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) { 483 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
484 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
483 struct tcp_sock *tp = tcp_sk(sk); 485 struct tcp_sock *tp = tcp_sk(sk);
484 486
485 if (tw->tw_ts_recent_stamp && 487 if (tcptw->tw_ts_recent_stamp &&
486 (!twp || (sysctl_tcp_tw_reuse && 488 (!twp ||
487 xtime.tv_sec - 489 (sysctl_tcp_tw_reuse &&
488 tw->tw_ts_recent_stamp > 1))) { 490 xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
489 /* See comment in tcp_ipv4.c */ 491 /* See comment in tcp_ipv4.c */
490 tp->write_seq = tw->tw_snd_nxt + 65535 + 2; 492 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
491 if (!tp->write_seq) 493 if (!tp->write_seq)
492 tp->write_seq = 1; 494 tp->write_seq = 1;
493 tp->rx_opt.ts_recent = tw->tw_ts_recent; 495 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
494 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 496 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
495 sock_hold(sk2); 497 sock_hold(sk2);
496 goto unique; 498 goto unique;
497 } else 499 } else
@@ -502,7 +504,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
502 504
503 /* And established part... */ 505 /* And established part... */
504 sk_for_each(sk2, node, &head->chain) { 506 sk_for_each(sk2, node, &head->chain) {
505 if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif)) 507 if (INET6_MATCH(sk2, saddr, daddr, ports, dif))
506 goto not_unique; 508 goto not_unique;
507 } 509 }
508 510
@@ -521,7 +523,7 @@ unique:
521 tcp_tw_deschedule(tw); 523 tcp_tw_deschedule(tw);
522 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 524 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
523 525
524 tcp_tw_put(tw); 526 inet_twsk_put(tw);
525 } 527 }
526 return 0; 528 return 0;
527 529
@@ -556,7 +558,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
556 static u32 hint; 558 static u32 hint;
557 u32 offset = hint + tcpv6_port_offset(sk); 559 u32 offset = hint + tcpv6_port_offset(sk);
558 struct hlist_node *node; 560 struct hlist_node *node;
559 struct tcp_tw_bucket *tw = NULL; 561 struct inet_timewait_sock *tw = NULL;
560 562
561 local_bh_disable(); 563 local_bh_disable();
562 for (i = 1; i <= range; i++) { 564 for (i = 1; i <= range; i++) {
@@ -609,7 +611,7 @@ ok:
609 611
610 if (tw) { 612 if (tw) {
611 tcp_tw_deschedule(tw); 613 tcp_tw_deschedule(tw);
612 tcp_tw_put(tw); 614 inet_twsk_put(tw);
613 } 615 }
614 616
615 ret = 0; 617 ret = 0;
@@ -845,7 +847,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
845 } 847 }
846 848
847 if (sk->sk_state == TCP_TIME_WAIT) { 849 if (sk->sk_state == TCP_TIME_WAIT) {
848 tcp_tw_put((struct tcp_tw_bucket*)sk); 850 inet_twsk_put((struct inet_timewait_sock *)sk);
849 return; 851 return;
850 } 852 }
851 853
@@ -1223,12 +1225,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
1223 1225
1224static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 1226static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1225{ 1227{
1226 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; 1228 struct inet_timewait_sock *tw = inet_twsk(sk);
1229 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1227 1230
1228 tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt, 1231 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1229 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent); 1232 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1233 tcptw->tw_ts_recent);
1230 1234
1231 tcp_tw_put(tw); 1235 inet_twsk_put(tw);
1232} 1236}
1233 1237
1234static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1238static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1261,7 +1265,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1261 bh_lock_sock(nsk); 1265 bh_lock_sock(nsk);
1262 return nsk; 1266 return nsk;
1263 } 1267 }
1264 tcp_tw_put((struct tcp_tw_bucket*)nsk); 1268 inet_twsk_put((struct inet_timewait_sock *)nsk);
1265 return NULL; 1269 return NULL;
1266 } 1270 }
1267 1271
@@ -1798,26 +1802,26 @@ discard_and_relse:
1798 1802
1799do_time_wait: 1803do_time_wait:
1800 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1804 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1801 tcp_tw_put((struct tcp_tw_bucket *) sk); 1805 inet_twsk_put((struct inet_timewait_sock *)sk);
1802 goto discard_it; 1806 goto discard_it;
1803 } 1807 }
1804 1808
1805 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1809 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1806 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1810 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1807 tcp_tw_put((struct tcp_tw_bucket *) sk); 1811 inet_twsk_put((struct inet_timewait_sock *)sk);
1808 goto discard_it; 1812 goto discard_it;
1809 } 1813 }
1810 1814
1811 switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk, 1815 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1812 skb, th, skb->len)) { 1816 skb, th)) {
1813 case TCP_TW_SYN: 1817 case TCP_TW_SYN:
1814 { 1818 {
1815 struct sock *sk2; 1819 struct sock *sk2;
1816 1820
1817 sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb)); 1821 sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1818 if (sk2 != NULL) { 1822 if (sk2 != NULL) {
1819 tcp_tw_deschedule((struct tcp_tw_bucket *)sk); 1823 tcp_tw_deschedule((struct inet_timewait_sock *)sk);
1820 tcp_tw_put((struct tcp_tw_bucket *)sk); 1824 inet_twsk_put((struct inet_timewait_sock *)sk);
1821 sk = sk2; 1825 sk = sk2;
1822 goto process; 1826 goto process;
1823 } 1827 }
@@ -2137,17 +2141,18 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2137} 2141}
2138 2142
2139static void get_timewait6_sock(struct seq_file *seq, 2143static void get_timewait6_sock(struct seq_file *seq,
2140 struct tcp_tw_bucket *tw, int i) 2144 struct inet_timewait_sock *tw, int i)
2141{ 2145{
2142 struct in6_addr *dest, *src; 2146 struct in6_addr *dest, *src;
2143 __u16 destp, srcp; 2147 __u16 destp, srcp;
2148 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
2144 int ttd = tw->tw_ttd - jiffies; 2149 int ttd = tw->tw_ttd - jiffies;
2145 2150
2146 if (ttd < 0) 2151 if (ttd < 0)
2147 ttd = 0; 2152 ttd = 0;
2148 2153
2149 dest = &tw->tw_v6_daddr; 2154 dest = &tcp6tw->tw_v6_daddr;
2150 src = &tw->tw_v6_rcv_saddr; 2155 src = &tcp6tw->tw_v6_rcv_saddr;
2151 destp = ntohs(tw->tw_dport); 2156 destp = ntohs(tw->tw_dport);
2152 srcp = ntohs(tw->tw_sport); 2157 srcp = ntohs(tw->tw_sport);
2153 2158
@@ -2244,6 +2249,7 @@ struct proto tcpv6_prot = {
2244 .sysctl_rmem = sysctl_tcp_rmem, 2249 .sysctl_rmem = sysctl_tcp_rmem,
2245 .max_header = MAX_TCP_HEADER, 2250 .max_header = MAX_TCP_HEADER,
2246 .obj_size = sizeof(struct tcp6_sock), 2251 .obj_size = sizeof(struct tcp6_sock),
2252 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
2247 .rsk_prot = &tcp6_request_sock_ops, 2253 .rsk_prot = &tcp6_request_sock_ops,
2248}; 2254};
2249 2255