Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 net/ipv4/tcp_ipv4.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 12de90a5047c..0ba74bbe7d30 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -191,7 +191,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
 			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 			       IPPROTO_TCP,
-			       inet->sport, usin->sin_port, sk);
+			       inet->sport, usin->sin_port, sk, 1);
 	if (tmp < 0)
 		return tmp;
 
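The hunk above tracks a change to the ip_route_connect() prototype: callers now pass an extra trailing integer (1 here). A rough sketch of what the updated declaration presumably looks like; the parameter names and the meaning of the final flags value are assumptions, not taken from this diff:

int ip_route_connect(struct rtable **rp, __be32 dst, __be32 src,
		     u32 tos, int oif, u8 protocol,
		     __be16 sport, __be16 dport,
		     struct sock *sk, int flags);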
@@ -303,7 +303,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
 	/* We don't check in the destentry if pmtu discovery is forbidden
 	 * on this route. We just assume that no packet_to_big packets
 	 * are send back when pmtu discovery is not active.
 	 * There is a small race when the user changes this flag in the
 	 * route, but I think that's acceptable.
 	 */
 	if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -502,11 +502,11 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 	struct tcphdr *th = skb->h.th;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		th->check = ~tcp_v4_check(th, len,
-					  inet->saddr, inet->daddr, 0);
+		th->check = ~tcp_v4_check(len, inet->saddr,
+					  inet->daddr, 0);
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
-		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
+		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
 					 csum_partial((char *)th,
 						      th->doff << 2,
 						      skb->csum));
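This hunk, together with the ones in tcp_v4_gso_send_check(), tcp_v4_send_synack() and tcp_v4_checksum_init() below, drops the struct tcphdr pointer from tcp_v4_check(): the header pointer is not needed to compute the pseudo-header checksum. A minimal sketch of what the helper presumably reduces to after this change; the exact definition is an assumption inferred from the call sites shown in this diff:

static inline __sum16 tcp_v4_check(int len, __be32 saddr, __be32 daddr,
				   __wsum base)
{
	/* TCP pseudo-header checksum over saddr/daddr/len, folded with base */
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}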
@@ -525,7 +525,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 	th = skb->h.th;
 
 	th->check = 0;
-	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
 	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	return 0;
@@ -747,7 +747,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	if (skb) {
 		struct tcphdr *th = skb->h.th;
 
-		th->check = tcp_v4_check(th, skb->len,
+		th->check = tcp_v4_check(skb->len,
 					 ireq->loc_addr,
 					 ireq->rmt_addr,
 					 csum_partial((char *)th, skb->len,
@@ -880,7 +880,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 
 	if (md5sig->alloced4 == md5sig->entries4) {
 		keys = kmalloc((sizeof(*keys) *
 				(md5sig->entries4 + 1)), GFP_ATOMIC);
 		if (!keys) {
 			kfree(newkey);
 			tcp_free_md5sig_pool();
@@ -934,7 +934,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 				memcpy(&tp->md5sig_info->keys4[i],
 				       &tp->md5sig_info->keys4[i+1],
 				       (tp->md5sig_info->entries4 - i) *
 				       sizeof(struct tcp4_md5sig_key));
 		}
 		tcp_free_md5sig_pool();
 		return 0;
@@ -1388,7 +1388,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	if (want_cookie) {
 		reqsk_free(req);
 	} else {
 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	}
@@ -1514,7 +1514,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
+		if (!tcp_v4_check(skb->len, skb->nh.iph->saddr,
 				  skb->nh.iph->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
@@ -1704,7 +1704,7 @@ bad_packet:
 discard_it:
 	/* Discard frame. */
 	kfree_skb(skb);
 	return 0;
 
 discard_and_relse:
 	sock_put(sk);
@@ -1890,10 +1890,10 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	tcp_cleanup_congestion_control(sk);
 
 	/* Cleanup up the write buffer. */
 	sk_stream_writequeue_purge(sk);
 
 	/* Cleans up our, hopefully empty, out_of_order_queue. */
 	__skb_queue_purge(&tp->out_of_order_queue);
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Clean up the MD5 key list, if any */
@@ -1906,7 +1906,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
 #ifdef CONFIG_NET_DMA
 	/* Cleans up our sk_async_wait_queue */
 	__skb_queue_purge(&sk->sk_async_wait_queue);
 #endif
 
 	/* Clean prequeue, it must be empty really */
@@ -1983,7 +1983,7 @@ get_req:
 			st->state = TCP_SEQ_STATE_LISTENING;
 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		} else {
 			icsk = inet_csk(sk);
 			read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 			if (reqsk_queue_len(&icsk->icsk_accept_queue))
 				goto start_req;
@@ -1996,7 +1996,7 @@ get_sk:
 			cur = sk;
 			goto out;
 		}
 		icsk = inet_csk(sk);
 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
@@ -2051,7 +2051,7 @@ static void *established_get_first(struct seq_file *seq)
 		}
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		inet_twsk_for_each(tw, node,
-				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
+				   &tcp_hashinfo.ehash[st->bucket].twchain) {
 			if (tw->tw_family != st->family) {
 				continue;
 			}
@@ -2107,7 +2107,7 @@ get_tw:
 	}
 
 	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
+	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
 	goto get_tw;
 found:
 	cur = sk;
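The last two hunks change how the /proc/net/tcp iterator finds TIME_WAIT sockets: instead of indexing a second half of the established hash table (ehash[st->bucket + tcp_hashinfo.ehash_size].chain), each bucket now carries its own twchain list head. A rough sketch of the assumed bucket layout after this change; the field set and ordering beyond chain/twchain are guesses:

struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;	/* established sockets */
	struct hlist_head twchain;	/* TIME_WAIT sockets */
};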