diff options
author | YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> | 2007-02-09 09:24:47 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-02-11 02:19:39 -0500 |
commit | e905a9edab7f4f14f9213b52234e4a346c690911 (patch) | |
tree | 9e52a5f47eec47c5685c347ff7af22290a10305b /net/ipv4/tcp_ipv4.c | |
parent | 642656518b2e64fd59d9bbd15b6885cac5fe99b1 (diff) |
[NET] IPV4: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f51d6404c61c..0ba74bbe7d30 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -303,7 +303,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) | |||
303 | /* We don't check in the destentry if pmtu discovery is forbidden | 303 | /* We don't check in the destentry if pmtu discovery is forbidden |
304 | * on this route. We just assume that no packet_to_big packets | 304 | * on this route. We just assume that no packet_to_big packets |
305 | * are send back when pmtu discovery is not active. | 305 | * are send back when pmtu discovery is not active. |
306 | * There is a small race when the user changes this flag in the | 306 | * There is a small race when the user changes this flag in the |
307 | * route, but I think that's acceptable. | 307 | * route, but I think that's acceptable. |
308 | */ | 308 | */ |
309 | if ((dst = __sk_dst_check(sk, 0)) == NULL) | 309 | if ((dst = __sk_dst_check(sk, 0)) == NULL) |
@@ -880,7 +880,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
880 | 880 | ||
881 | if (md5sig->alloced4 == md5sig->entries4) { | 881 | if (md5sig->alloced4 == md5sig->entries4) { |
882 | keys = kmalloc((sizeof(*keys) * | 882 | keys = kmalloc((sizeof(*keys) * |
883 | (md5sig->entries4 + 1)), GFP_ATOMIC); | 883 | (md5sig->entries4 + 1)), GFP_ATOMIC); |
884 | if (!keys) { | 884 | if (!keys) { |
885 | kfree(newkey); | 885 | kfree(newkey); |
886 | tcp_free_md5sig_pool(); | 886 | tcp_free_md5sig_pool(); |
@@ -934,7 +934,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
934 | memcpy(&tp->md5sig_info->keys4[i], | 934 | memcpy(&tp->md5sig_info->keys4[i], |
935 | &tp->md5sig_info->keys4[i+1], | 935 | &tp->md5sig_info->keys4[i+1], |
936 | (tp->md5sig_info->entries4 - i) * | 936 | (tp->md5sig_info->entries4 - i) * |
937 | sizeof(struct tcp4_md5sig_key)); | 937 | sizeof(struct tcp4_md5sig_key)); |
938 | } | 938 | } |
939 | tcp_free_md5sig_pool(); | 939 | tcp_free_md5sig_pool(); |
940 | return 0; | 940 | return 0; |
@@ -1388,7 +1388,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1388 | goto drop_and_free; | 1388 | goto drop_and_free; |
1389 | 1389 | ||
1390 | if (want_cookie) { | 1390 | if (want_cookie) { |
1391 | reqsk_free(req); | 1391 | reqsk_free(req); |
1392 | } else { | 1392 | } else { |
1393 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | 1393 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1394 | } | 1394 | } |
@@ -1704,7 +1704,7 @@ bad_packet: | |||
1704 | discard_it: | 1704 | discard_it: |
1705 | /* Discard frame. */ | 1705 | /* Discard frame. */ |
1706 | kfree_skb(skb); | 1706 | kfree_skb(skb); |
1707 | return 0; | 1707 | return 0; |
1708 | 1708 | ||
1709 | discard_and_relse: | 1709 | discard_and_relse: |
1710 | sock_put(sk); | 1710 | sock_put(sk); |
@@ -1890,10 +1890,10 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1890 | tcp_cleanup_congestion_control(sk); | 1890 | tcp_cleanup_congestion_control(sk); |
1891 | 1891 | ||
1892 | /* Cleanup up the write buffer. */ | 1892 | /* Cleanup up the write buffer. */ |
1893 | sk_stream_writequeue_purge(sk); | 1893 | sk_stream_writequeue_purge(sk); |
1894 | 1894 | ||
1895 | /* Cleans up our, hopefully empty, out_of_order_queue. */ | 1895 | /* Cleans up our, hopefully empty, out_of_order_queue. */ |
1896 | __skb_queue_purge(&tp->out_of_order_queue); | 1896 | __skb_queue_purge(&tp->out_of_order_queue); |
1897 | 1897 | ||
1898 | #ifdef CONFIG_TCP_MD5SIG | 1898 | #ifdef CONFIG_TCP_MD5SIG |
1899 | /* Clean up the MD5 key list, if any */ | 1899 | /* Clean up the MD5 key list, if any */ |
@@ -1906,7 +1906,7 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1906 | 1906 | ||
1907 | #ifdef CONFIG_NET_DMA | 1907 | #ifdef CONFIG_NET_DMA |
1908 | /* Cleans up our sk_async_wait_queue */ | 1908 | /* Cleans up our sk_async_wait_queue */ |
1909 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1909 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1910 | #endif | 1910 | #endif |
1911 | 1911 | ||
1912 | /* Clean prequeue, it must be empty really */ | 1912 | /* Clean prequeue, it must be empty really */ |
@@ -1983,7 +1983,7 @@ get_req: | |||
1983 | st->state = TCP_SEQ_STATE_LISTENING; | 1983 | st->state = TCP_SEQ_STATE_LISTENING; |
1984 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 1984 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1985 | } else { | 1985 | } else { |
1986 | icsk = inet_csk(sk); | 1986 | icsk = inet_csk(sk); |
1987 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 1987 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1988 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) | 1988 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) |
1989 | goto start_req; | 1989 | goto start_req; |
@@ -1996,7 +1996,7 @@ get_sk: | |||
1996 | cur = sk; | 1996 | cur = sk; |
1997 | goto out; | 1997 | goto out; |
1998 | } | 1998 | } |
1999 | icsk = inet_csk(sk); | 1999 | icsk = inet_csk(sk); |
2000 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 2000 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
2001 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { | 2001 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { |
2002 | start_req: | 2002 | start_req: |