Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	137
1 file changed, 85 insertions(+), 52 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5c8fa7f1e327..10172487921b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -97,11 +97,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
 }
 #endif
 
-struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
-	.lhash_users = ATOMIC_INIT(0),
-	.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
-};
+struct inet_hashinfo tcp_hashinfo;
 
 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
@@ -492,7 +488,7 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
-					 csum_partial((char *)th,
+					 csum_partial(th,
 						      th->doff << 2,
 						      skb->csum));
 	}
@@ -726,7 +722,7 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	th->check = tcp_v4_check(skb->len,
 				 ireq->loc_addr,
 				 ireq->rmt_addr,
-				 csum_partial((char *)th, skb->len,
+				 csum_partial(th, skb->len,
 					      skb->csum));
 
 	err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
@@ -1139,10 +1135,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 		if (net_ratelimit()) {
-			printk(KERN_INFO "MD5 Hash failed for "
-			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
-			       NIPQUAD(iph->saddr), ntohs(th->source),
-			       NIPQUAD(iph->daddr), ntohs(th->dest),
+			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+			       &iph->saddr, ntohs(th->source),
+			       &iph->daddr, ntohs(th->dest),
 			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
 		}
 		return 1;
@@ -1297,10 +1292,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			 * to destinations, already remembered
 			 * to the moment of synflood.
 			 */
-			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
-				       "request from " NIPQUAD_FMT "/%u\n",
-				       NIPQUAD(saddr),
-				       ntohs(tcp_hdr(skb)->source));
+			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
+				       &saddr, ntohs(tcp_hdr(skb)->source));
 			goto drop_and_release;
 		}
 
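
The two logging hunks above drop the old NIPQUAD_FMT/NIPQUAD macro pair in favour of the %pI4 printk extension, which takes a pointer to a big-endian 32-bit address and renders it in dotted-quad form with a single format specifier. A rough userspace illustration of the same formatting (printf has no %pI4; print_be32_addr is an invented helper name, not kernel code):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>          /* htonl() */

    /* Print a network-byte-order IPv4 address, roughly what the old
     * NIPQUAD() macro expanded to and what the kernel's %pI4 now does. */
    static void print_be32_addr(const void *addr)
    {
        const unsigned char *p = addr;

        printf("%u.%u.%u.%u\n", p[0], p[1], p[2], p[3]);
    }

    int main(void)
    {
        uint32_t saddr = htonl(0xc0a80001);     /* 192.168.0.1 */

        print_be32_addr(&saddr);                /* prints 192.168.0.1 */
        return 0;
    }

Passing &iph->saddr instead of expanding four bytes by hand is what lets the new printk lines collapse into one format string.
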
@@ -1804,7 +1797,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
-	atomic_inc(&tcp_sockets_allocated);
+	percpu_counter_inc(&tcp_sockets_allocated);
 
 	return 0;
 }
@@ -1852,7 +1845,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 		sk->sk_sndmsg_page = NULL;
 	}
 
-	atomic_dec(&tcp_sockets_allocated);
+	percpu_counter_dec(&tcp_sockets_allocated);
 }
 
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
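
tcp_sockets_allocated changes from an atomic_t to a percpu_counter, so the inc/dec done for every socket created or destroyed touches a CPU-local counter instead of bouncing one shared cache line across all CPUs; the per-CPU parts are folded together only when the total is actually read. A rough userspace analogue of that idea (the slot array, counter_inc() and counter_sum() are invented for this sketch; the kernel API is percpu_counter_inc()/percpu_counter_sum()):

    #include <pthread.h>
    #include <stdio.h>

    #define NCOUNTERS 8             /* stand-in for "one slot per CPU" */
    #define NTHREADS  4
    #define NITERS    100000

    /* Each slot padded to its own cache line so writers never contend. */
    static struct { long v; char pad[64 - sizeof(long)]; } slot[NCOUNTERS];

    static void counter_inc(unsigned int cpu)
    {
        __atomic_fetch_add(&slot[cpu % NCOUNTERS].v, 1, __ATOMIC_RELAXED);
    }

    static long counter_sum(void)           /* slow path: fold all slots */
    {
        long sum = 0;

        for (int i = 0; i < NCOUNTERS; i++)
            sum += __atomic_load_n(&slot[i].v, __ATOMIC_RELAXED);
        return sum;
    }

    static void *worker(void *arg)
    {
        unsigned int cpu = (unsigned int)(unsigned long)arg;

        for (int i = 0; i < NITERS; i++)
            counter_inc(cpu);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];

        for (unsigned long i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);

        printf("total = %ld\n", counter_sum());     /* 400000 */
        return 0;
    }

Updates stay cheap and uncontended; only the comparatively rare read pays the cost of summing every slot, which is the same trade percpu_counter makes.
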
@@ -1860,32 +1853,35 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
+static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
 {
-	return hlist_empty(head) ? NULL :
+	return hlist_nulls_empty(head) ? NULL :
 		list_entry(head->first, struct inet_timewait_sock, tw_node);
 }
 
 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
 {
-	return tw->tw_node.next ?
-		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
+	return !is_a_nulls(tw->tw_node.next) ?
+		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
 }
 
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
 	struct inet_connection_sock *icsk;
-	struct hlist_node *node;
+	struct hlist_nulls_node *node;
 	struct sock *sk = cur;
-	struct tcp_iter_state* st = seq->private;
+	struct inet_listen_hashbucket *ilb;
+	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 
 	if (!sk) {
 		st->bucket = 0;
-		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
+		ilb = &tcp_hashinfo.listening_hash[0];
+		spin_lock_bh(&ilb->lock);
+		sk = sk_nulls_head(&ilb->head);
 		goto get_sk;
 	}
-
+	ilb = &tcp_hashinfo.listening_hash[st->bucket];
 	++st->num;
 
 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
@@ -1918,7 +1914,7 @@ get_req:
 		sk = sk_next(sk);
 	}
 get_sk:
-	sk_for_each_from(sk, node) {
+	sk_nulls_for_each_from(sk, node) {
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
 			cur = sk;
 			goto out;
@@ -1935,8 +1931,11 @@ start_req:
 		}
 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	}
+	spin_unlock_bh(&ilb->lock);
 	if (++st->bucket < INET_LHTABLE_SIZE) {
-		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
+		ilb = &tcp_hashinfo.listening_hash[st->bucket];
+		spin_lock_bh(&ilb->lock);
+		sk = sk_nulls_head(&ilb->head);
 		goto get_sk;
 	}
 	cur = NULL;
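
The listening-hash hunks replace the single global lhash_lock with one spinlock per listening_hash bucket: listening_get_next() now takes ilb->lock for the bucket it is walking and drops it before moving on, and tcp_seq_stop() later releases whichever bucket lock is still held. A toy userspace sketch of that walk pattern (struct bucket and walk_all() are invented names; pthread mutexes stand in for the per-bucket spinlocks):

    #include <pthread.h>
    #include <stdio.h>

    #define NBUCKETS 32

    /* Toy sharded table: one lock per bucket, as the listening hash now
     * has.  A full walk locks one bucket at a time instead of holding a
     * single global lock for the whole dump. */
    struct bucket {
        pthread_mutex_t lock;
        int nitems;                 /* stand-in for a chain of sockets */
    };

    static struct bucket table[NBUCKETS];

    static int walk_all(void)
    {
        int total = 0;

        for (int i = 0; i < NBUCKETS; i++) {
            pthread_mutex_lock(&table[i].lock);
            total += table[i].nitems;       /* visit only this bucket */
            pthread_mutex_unlock(&table[i].lock);
        }
        return total;
    }

    int main(void)
    {
        for (int i = 0; i < NBUCKETS; i++)
            pthread_mutex_init(&table[i].lock, NULL);

        table[3].nitems = 2;
        printf("items = %d\n", walk_all());     /* prints "items = 2" */
        return 0;
    }

Writers touching other buckets no longer serialize against a /proc/net/tcp dump; they only contend on the one bucket currently being printed.
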
@@ -1957,28 +1956,28 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
 
 static inline int empty_bucket(struct tcp_iter_state *st)
 {
-	return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
-		hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
+		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
 }
 
 static void *established_get_first(struct seq_file *seq)
 {
-	struct tcp_iter_state* st = seq->private;
+	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 	void *rc = NULL;
 
 	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
 		struct sock *sk;
-		struct hlist_node *node;
+		struct hlist_nulls_node *node;
 		struct inet_timewait_sock *tw;
-		rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
+		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
 		/* Lockless fast path for the common case of empty buckets */
 		if (empty_bucket(st))
 			continue;
 
-		read_lock_bh(lock);
-		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
+		spin_lock_bh(lock);
+		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family ||
 			    !net_eq(sock_net(sk), net)) {
 				continue;
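
The established-hash hunks above switch the chains from hlist under a rwlock to hlist_nulls under a spinlock, which is what lets lookups run locklessly under RCU. The "nulls" trick ends every chain with an odd sentinel value that encodes the bucket number; a reader that was raced onto a different chain (because the entry it was standing on got moved) reaches a terminating nulls value that does not match the bucket it started in and simply restarts the lookup. A small userspace sketch of the marker encoding only (make_nulls() is an invented helper; is_a_nulls() and get_nulls_value() mirror the real helpers in include/linux/list_nulls.h):

    #include <stdio.h>
    #include <stdint.h>

    /* A "nulls" marker: bucket number shifted left one bit, low bit set.
     * Real node pointers are aligned, so their low bit is always clear. */
    static inline void *make_nulls(unsigned long bucket)
    {
        return (void *)((bucket << 1) | 1UL);
    }

    static inline int is_a_nulls(const void *ptr)
    {
        return (uintptr_t)ptr & 1UL;
    }

    static inline unsigned long get_nulls_value(const void *ptr)
    {
        return (uintptr_t)ptr >> 1;
    }

    int main(void)
    {
        void *end = make_nulls(42);     /* terminator of bucket 42 */

        if (is_a_nulls(end))
            printf("chain ends in bucket %lu\n", get_nulls_value(end));
        return 0;
    }

If the value recovered at the end of a traversal differs from the bucket the reader hashed to, the walk raced with a move and must be retried; that check is what the sk_nulls_* iterators make possible.
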
@@ -1996,7 +1995,7 @@ static void *established_get_first(struct seq_file *seq)
 			rc = tw;
 			goto out;
 		}
-		read_unlock_bh(lock);
+		spin_unlock_bh(lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
@@ -2007,8 +2006,8 @@ static void *established_get_next(struct seq_file *seq, void *cur)
 {
 	struct sock *sk = cur;
 	struct inet_timewait_sock *tw;
-	struct hlist_node *node;
-	struct tcp_iter_state* st = seq->private;
+	struct hlist_nulls_node *node;
+	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 
 	++st->num;
@@ -2024,7 +2023,7 @@ get_tw:
 			cur = tw;
 			goto out;
 		}
-		read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* Look for next non empty bucket */
@@ -2034,12 +2033,12 @@ get_tw:
 		if (st->bucket >= tcp_hashinfo.ehash_size)
 			return NULL;
 
-		read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-		sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
+		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
 	} else
-		sk = sk_next(sk);
+		sk = sk_nulls_next(sk);
 
-	sk_for_each_from(sk, node) {
+	sk_nulls_for_each_from(sk, node) {
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
 			goto found;
 	}
@@ -2067,14 +2066,12 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos)
 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 {
 	void *rc;
-	struct tcp_iter_state* st = seq->private;
+	struct tcp_iter_state *st = seq->private;
 
-	inet_listen_lock(&tcp_hashinfo);
 	st->state = TCP_SEQ_STATE_LISTENING;
 	rc = listening_get_idx(seq, &pos);
 
 	if (!rc) {
-		inet_listen_unlock(&tcp_hashinfo);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		rc = established_get_idx(seq, pos);
 	}
@@ -2084,7 +2081,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 
 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct tcp_iter_state* st = seq->private;
+	struct tcp_iter_state *st = seq->private;
 	st->state = TCP_SEQ_STATE_LISTENING;
 	st->num = 0;
 	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
@@ -2093,7 +2090,7 @@ static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	void *rc = NULL;
-	struct tcp_iter_state* st;
+	struct tcp_iter_state *st;
 
 	if (v == SEQ_START_TOKEN) {
 		rc = tcp_get_idx(seq, 0);
@@ -2106,7 +2103,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	case TCP_SEQ_STATE_LISTENING:
 		rc = listening_get_next(seq, v);
 		if (!rc) {
-			inet_listen_unlock(&tcp_hashinfo);
 			st->state = TCP_SEQ_STATE_ESTABLISHED;
 			rc = established_get_first(seq);
 		}
@@ -2123,7 +2119,7 @@ out:
 
 static void tcp_seq_stop(struct seq_file *seq, void *v)
 {
-	struct tcp_iter_state* st = seq->private;
+	struct tcp_iter_state *st = seq->private;
 
 	switch (st->state) {
 	case TCP_SEQ_STATE_OPENREQ:
@@ -2133,12 +2129,12 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 		}
 	case TCP_SEQ_STATE_LISTENING:
 		if (v != SEQ_START_TOKEN)
-			inet_listen_unlock(&tcp_hashinfo);
+			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
 		break;
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		break;
 	}
 }
@@ -2284,7 +2280,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
 
 static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
-	struct tcp_iter_state* st;
+	struct tcp_iter_state *st;
 	int len;
 
 	if (v == SEQ_START_TOKEN) {
@@ -2350,6 +2346,41 @@ void tcp4_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
+struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp4_gro_receive);
+
+int tcp4_gro_complete(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+				  iph->saddr, iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp4_gro_complete);
+
 struct proto tcp_prot = {
 	.name			= "TCP",
 	.owner			= THIS_MODULE,
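
The new tcp4_gro_receive() validates a CHECKSUM_COMPLETE packet by folding the IPv4 pseudo-header into the checksum the hardware already accumulated over the payload, and tcp4_gro_complete() seeds th->check with the negated pseudo-header sum for the segmentation path to finish later. A hedged userspace sketch of that pseudo-header arithmetic (ones'-complement addition per RFC 1071; csum_fold() and tcp_v4_pseudo_check() here are simplified stand-ins, not the kernel's csum_fold()/csum_tcpudp_magic()):

    #include <stdint.h>
    #include <stdio.h>
    #include <netinet/in.h>         /* IPPROTO_TCP */

    /* Fold a 32-bit accumulator into a 16-bit ones'-complement checksum. */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    /* Sum the IPv4 pseudo-header (saddr, daddr, protocol, TCP length)
     * on top of a partial sum already taken over the TCP header+payload. */
    static uint16_t tcp_v4_pseudo_check(uint32_t saddr, uint32_t daddr,
                                        uint16_t len, uint32_t partial)
    {
        uint32_t sum = partial;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += IPPROTO_TCP;
        sum += len;
        return csum_fold(sum);
    }

    int main(void)
    {
        /* Host-order constants for readability; real code sums the wire
         * (big-endian) representation, as tcp_v4_check() does. */
        uint16_t check = tcp_v4_pseudo_check(0xc0a80001, 0xc0a80002, 20, 0);

        printf("pseudo-header checksum: 0x%04x\n", check);
        return 0;
    }

A result of zero after folding the received checksum this way is what lets tcp4_gro_receive() mark the skb CHECKSUM_UNNECESSARY instead of flushing it out of the GRO path.
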
@@ -2378,6 +2409,7 @@ struct proto tcp_prot = {
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp_sock),
+	.slab_flags		= SLAB_DESTROY_BY_RCU,
 	.twsk_prot		= &tcp_timewait_sock_ops,
 	.rsk_prot		= &tcp_request_sock_ops,
 	.h.hashinfo		= &tcp_hashinfo,
@@ -2407,6 +2439,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
 
 void __init tcp_v4_init(void)
 {
+	inet_hashinfo_init(&tcp_hashinfo);
 	if (register_pernet_device(&tcp_sk_ops))
 		panic("Failed to create the TCP control socket.\n");
 }
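
Two of the remaining changes belong together: tcp_hashinfo loses its static initializer at the top of the file and is instead set up at boot by inet_hashinfo_init(), and tcp_prot gains .slab_flags = SLAB_DESTROY_BY_RCU. With that flag a freed tcp_sock's memory may be reused for a new socket before an RCU grace period has elapsed (only the slab page itself is RCU-deferred), so a lockless lookup must take a reference on a candidate socket and then re-check the keys it matched, backing off if the object was recycled underneath it. A toy userspace model of that take-ref-then-revalidate pattern (struct entry, get_ref_if_live() and lookup() are invented names, not the kernel's __inet_lookup_established()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the SLAB_DESTROY_BY_RCU lookup pattern: the slot may
     * be recycled for a different key at any time, so after taking a
     * reference the reader re-checks the key and backs off on mismatch. */
    struct entry {
        _Atomic unsigned int refcnt;    /* 0 means "being recycled" */
        _Atomic unsigned int key;
    };

    static bool get_ref_if_live(struct entry *e)
    {
        unsigned int old = atomic_load(&e->refcnt);

        while (old != 0)
            if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
                return true;
        return false;
    }

    static struct entry *lookup(struct entry *e, unsigned int key)
    {
        for (;;) {
            if (atomic_load(&e->key) != key)
                return NULL;                    /* not our object */
            if (!get_ref_if_live(e))
                return NULL;                    /* freed under us */
            if (atomic_load(&e->key) == key)
                return e;                       /* still the right one */
            atomic_fetch_sub(&e->refcnt, 1);    /* recycled: retry */
        }
    }

    int main(void)
    {
        struct entry e = { .refcnt = 1, .key = 4242 };

        printf("hit:  %d\n", lookup(&e, 4242) != NULL);
        printf("miss: %d\n", lookup(&e, 7) != NULL);
        return 0;
    }

Because memory can be recycled that quickly, any match found without a lock is only trustworthy after this reference-plus-recheck step.
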