author		Ingo Molnar <mingo@elte.hu>	2008-07-21 11:19:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-21 11:19:50 -0400
commit		eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (patch)
tree		5ac6f43899648abeab1d43aad3107f664e7f13d5 /net/ipv4/tcp.c
parent		c4762aba0b1f72659aae9ce37b772ca8bd8f06f4 (diff)
parent		14b395e35d1afdd8019d11b92e28041fad591b71 (diff)
Merge branch 'linus' into cpus4096-for-linus
Conflicts:
net/sunrpc/svc.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	102
1 file changed, 78 insertions(+), 24 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d723de18686..0b491bf03db4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,8 +5,6 @@
  *
  * Implementation of the Transmission Control Protocol(TCP).
  *
- * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
- *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -279,8 +277,6 @@
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
-
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -318,10 +314,10 @@ int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
-void tcp_enter_memory_pressure(void)
+void tcp_enter_memory_pressure(struct sock *sk)
 {
 	if (!tcp_memory_pressure) {
-		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 		tcp_memory_pressure = 1;
 	}
 }
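[Editor's note] The hunk above is part of a broader change brought in by this merge: the protocol's enter_memory_pressure callback now receives the socket, so the MIB counter can be charged to that socket's network namespace via sock_net(sk). A minimal sketch of the new call shape follows; it is illustration only, the example_* function name is hypothetical, and it relies solely on symbols visible in this diff.

#include <net/sock.h>

/* Hypothetical caller, not from the patch: the per-protocol callback is now
 * invoked with the socket, matching sk_stream_alloc_skb() further below.
 */
static void example_note_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->enter_memory_pressure)
		/* for TCP sockets this resolves to tcp_enter_memory_pressure(sk) */
		sk->sk_prot->enter_memory_pressure(sk);
}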
@@ -346,8 +342,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
-	   by poll logic and correct handling of state changes
-	   made by another threads is impossible in any case.
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
 	 */
 
 	mask = 0;
@@ -373,10 +369,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
 	 * if and only if shutdown has been made in both directions.
 	 * Actually, it is interesting to look how Solaris and DUX
-	 * solve this dilemma. I would prefer, if PULLHUP were maskable,
+	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
 	 * then we could set it on SND_SHUTDOWN. BTW examples given
 	 * in Stevens' books assume exactly this behaviour, it explains
-	 * why PULLHUP is incompatible with POLLOUT.	--ANK
+	 * why POLLHUP is incompatible with POLLOUT.	--ANK
 	 *
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
@@ -651,7 +647,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 		}
 		__kfree_skb(skb);
 	} else {
-		sk->sk_prot->enter_memory_pressure();
+		sk->sk_prot->enter_memory_pressure(sk);
 		sk_stream_moderate_sndbuf(sk);
 	}
 	return NULL;
@@ -1155,7 +1151,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1477,7 +1473,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Restore normal policy in scheduler __ */
 
 			if ((chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1488,7 +1484,7 @@ do_prequeue:
 				tcp_prequeue_process(sk);
 
 				if ((chunk = len - tp->ucopy.len) != 0) {
-					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1603,7 +1599,7 @@ skip_copy:
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1670,12 +1666,12 @@ void tcp_set_state(struct sock *sk, int state)
 	switch (state) {
 	case TCP_ESTABLISHED:
 		if (oldstate != TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_CURRESTAB);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 		break;
 
 	case TCP_CLOSE:
 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
 		if (inet_csk(sk)->icsk_bind_hash &&
@@ -1684,7 +1680,7 @@ void tcp_set_state(struct sock *sk, int state)
 		/* fall through */
 	default:
 		if (oldstate==TCP_ESTABLISHED)
-			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 	}
 
 	/* Change state AFTER socket is unhashed to avoid closed
@@ -1795,13 +1791,13 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_KERNEL);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -1873,7 +1869,8 @@ adjudge_to_death:
 	if (tp->linger2 < 0) {
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_ATOMIC);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+		NET_INC_STATS_BH(sock_net(sk),
+				LINUX_MIB_TCPABORTONLINGER);
 	} else {
 		const int tmo = tcp_fin_time(sk);
 
@@ -1895,7 +1892,8 @@ adjudge_to_death:
 			       "sockets\n");
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
 
@@ -2590,12 +2588,69 @@ void __tcp_put_md5sig_pool(void)
 }
 
 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+
+int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
+			struct tcphdr *th)
+{
+	struct scatterlist sg;
+	int err;
+
+	__sum16 old_checksum = th->check;
+	th->check = 0;
+	/* options aren't included in the hash */
+	sg_init_one(&sg, th, sizeof(struct tcphdr));
+	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
+	th->check = old_checksum;
+	return err;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_header);
+
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
+			  struct sk_buff *skb, unsigned header_len)
+{
+	struct scatterlist sg;
+	const struct tcphdr *tp = tcp_hdr(skb);
+	struct hash_desc *desc = &hp->md5_desc;
+	unsigned i;
+	const unsigned head_data_len = skb_headlen(skb) > header_len ?
+				       skb_headlen(skb) - header_len : 0;
+	const struct skb_shared_info *shi = skb_shinfo(skb);
+
+	sg_init_table(&sg, 1);
+
+	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
+	if (crypto_hash_update(desc, &sg, head_data_len))
+		return 1;
+
+	for (i = 0; i < shi->nr_frags; ++i) {
+		const struct skb_frag_struct *f = &shi->frags[i];
+		sg_set_page(&sg, f->page, f->size, f->page_offset);
+		if (crypto_hash_update(desc, &sg, f->size))
+			return 1;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_skb_data);
+
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, key->key, key->keylen);
+	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_key);
+
 #endif
 
 void tcp_done(struct sock *sk)
 {
 	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
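[Editor's note] The three tcp_md5_hash_* helpers added in the hunk above factor the common pieces of a TCP MD5 signature (RFC 2385) digest out of the address-family code, so IPv4/IPv6 callers only supply their own pseudo-header step. Below is a hedged usage sketch of how a caller might chain them; example_md5_hash_skb() and the commented-out pseudo-header hook are hypothetical, and tcp_get_md5sig_pool()/tcp_put_md5sig_pool() are assumed to behave as declared in this kernel's include/net/tcp.h.

#include <linux/crypto.h>
#include <net/tcp.h>

/* Hypothetical caller, not part of the patch. */
static int example_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
				struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
	struct tcphdr *th = tcp_hdr(skb);
	int err;

	if (!hp)
		return 1;

	err = crypto_hash_init(&hp->md5_desc) ||
	      /* an address-family specific pseudo-header step would go here */
	      tcp_md5_hash_header(hp, th) ||
	      tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
	      tcp_md5_hash_key(hp, key) ||
	      crypto_hash_final(&hp->md5_desc, md5_hash);

	tcp_put_md5sig_pool();
	return err ? 1 : 0;
}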
@@ -2732,4 +2787,3 @@ EXPORT_SYMBOL(tcp_splice_read);
 EXPORT_SYMBOL(tcp_sendpage);
 EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_statistics);