Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c   38
1 file changed, 21 insertions, 17 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 581016a6a93f..a1f812159ced 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -273,6 +273,8 @@ DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
+EXPORT_SYMBOL_GPL(tcp_orphan_count);
+
 int sysctl_tcp_mem[3];
 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
@@ -454,12 +456,11 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	return put_user(answ, (int __user *)arg);
 }
 
-
-int tcp_listen_start(struct sock *sk)
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
+	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
 
 	if (rc != 0)
 		return rc;
@@ -488,12 +489,13 @@ int tcp_listen_start(struct sock *sk)
 	return -EADDRINUSE;
 }
 
+EXPORT_SYMBOL_GPL(inet_csk_listen_start);
+
 /*
  * This routine closes sockets which have been at least partially
  * opened, but not yet accepted.
  */
-
-static void tcp_listen_stop (struct sock *sk)
+static void inet_csk_listen_stop(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *acc_req;
@@ -524,13 +526,13 @@ static void tcp_listen_stop (struct sock *sk)
 		BUG_TRAP(!sock_owned_by_user(child));
 		sock_hold(child);
 
-		tcp_disconnect(child, O_NONBLOCK);
+		sk->sk_prot->disconnect(child, O_NONBLOCK);
 
 		sock_orphan(child);
 
-		atomic_inc(&tcp_orphan_count);
+		atomic_inc(sk->sk_prot->orphan_count);
 
-		tcp_destroy_sock(child);
+		inet_csk_destroy_sock(child);
 
 		bh_unlock_sock(child);
 		local_bh_enable();
@@ -542,6 +544,8 @@ static void tcp_listen_stop (struct sock *sk)
 	BUG_TRAP(!sk->sk_ack_backlog);
 }
 
+EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
+
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
@@ -1561,7 +1565,7 @@ void tcp_shutdown(struct sock *sk, int how)
  * can assume the socket waitqueue is inactive and nobody will
  * try to jump onto it.
  */
-void tcp_destroy_sock(struct sock *sk)
+void inet_csk_destroy_sock(struct sock *sk)
 {
 	BUG_TRAP(sk->sk_state == TCP_CLOSE);
 	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
@@ -1580,7 +1584,7 @@ void tcp_destroy_sock(struct sock *sk)
 
 	sk_refcnt_debug_release(sk);
 
-	atomic_dec(&tcp_orphan_count);
+	atomic_dec(sk->sk_prot->orphan_count);
 	sock_put(sk);
 }
 
@@ -1596,7 +1600,7 @@ void tcp_close(struct sock *sk, long timeout)
 		tcp_set_state(sk, TCP_CLOSE);
 
 		/* Special case. */
-		tcp_listen_stop(sk);
+		inet_csk_listen_stop(sk);
 
 		goto adjudge_to_death;
 	}
@@ -1704,7 +1708,7 @@ adjudge_to_death:
 		if (tmo > TCP_TIMEWAIT_LEN) {
 			inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
 		} else {
-			atomic_inc(&tcp_orphan_count);
+			atomic_inc(sk->sk_prot->orphan_count);
 			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 			goto out;
 		}
@@ -1712,7 +1716,7 @@ adjudge_to_death:
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_stream_mem_reclaim(sk);
-		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
+		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
 		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
 			if (net_ratelimit())
@@ -1723,10 +1727,10 @@ adjudge_to_death:
 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
-	atomic_inc(&tcp_orphan_count);
+	atomic_inc(sk->sk_prot->orphan_count);
 
 	if (sk->sk_state == TCP_CLOSE)
-		tcp_destroy_sock(sk);
+		inet_csk_destroy_sock(sk);
 	/* Otherwise, socket is reprieved until protocol close. */
 
 out:
@@ -1757,7 +1761,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
 	/* ABORT function of RFC793 */
 	if (old_state == TCP_LISTEN) {
-		tcp_listen_stop(sk);
+		inet_csk_listen_stop(sk);
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2253,7 +2257,7 @@ void __init tcp_init(void)
 }
 
 EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_destroy_sock);
+EXPORT_SYMBOL(inet_csk_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 EXPORT_SYMBOL(tcp_getsockopt);
 EXPORT_SYMBOL(tcp_ioctl);