about | summary | refs | log | tree | commit | diff | stats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  130
1 files changed, 118 insertions, 12 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c5aca0bb116a..1f3d52946b3b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -277,8 +277,7 @@
277 277
278int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; 278int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
279 279
280atomic_t tcp_orphan_count = ATOMIC_INIT(0); 280struct percpu_counter tcp_orphan_count;
281
282EXPORT_SYMBOL_GPL(tcp_orphan_count); 281EXPORT_SYMBOL_GPL(tcp_orphan_count);
283 282
284int sysctl_tcp_mem[3] __read_mostly; 283int sysctl_tcp_mem[3] __read_mostly;
@@ -290,9 +289,12 @@ EXPORT_SYMBOL(sysctl_tcp_rmem);
290EXPORT_SYMBOL(sysctl_tcp_wmem); 289EXPORT_SYMBOL(sysctl_tcp_wmem);
291 290
292atomic_t tcp_memory_allocated; /* Current allocated memory. */ 291atomic_t tcp_memory_allocated; /* Current allocated memory. */
293atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
294
295EXPORT_SYMBOL(tcp_memory_allocated); 292EXPORT_SYMBOL(tcp_memory_allocated);
293
294/*
295 * Current number of TCP sockets.
296 */
297struct percpu_counter tcp_sockets_allocated;
296EXPORT_SYMBOL(tcp_sockets_allocated); 298EXPORT_SYMBOL(tcp_sockets_allocated);
297 299
298/* 300/*
@@ -1680,7 +1682,7 @@ void tcp_set_state(struct sock *sk, int state)
1680 inet_put_port(sk); 1682 inet_put_port(sk);
1681 /* fall through */ 1683 /* fall through */
1682 default: 1684 default:
1683 if (oldstate==TCP_ESTABLISHED) 1685 if (oldstate == TCP_ESTABLISHED)
1684 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1686 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1685 } 1687 }
1686 1688
@@ -1690,7 +1692,7 @@ void tcp_set_state(struct sock *sk, int state)
1690 sk->sk_state = state; 1692 sk->sk_state = state;
1691 1693
1692#ifdef STATE_TRACE 1694#ifdef STATE_TRACE
1693 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); 1695 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1694#endif 1696#endif
1695} 1697}
1696EXPORT_SYMBOL_GPL(tcp_set_state); 1698EXPORT_SYMBOL_GPL(tcp_set_state);
@@ -1834,7 +1836,7 @@ adjudge_to_death:
1834 state = sk->sk_state; 1836 state = sk->sk_state;
1835 sock_hold(sk); 1837 sock_hold(sk);
1836 sock_orphan(sk); 1838 sock_orphan(sk);
1837 atomic_inc(sk->sk_prot->orphan_count); 1839 percpu_counter_inc(sk->sk_prot->orphan_count);
1838 1840
1839 /* It is the last release_sock in its life. It will remove backlog. */ 1841 /* It is the last release_sock in its life. It will remove backlog. */
1840 release_sock(sk); 1842 release_sock(sk);
@@ -1885,9 +1887,11 @@ adjudge_to_death:
1885 } 1887 }
1886 } 1888 }
1887 if (sk->sk_state != TCP_CLOSE) { 1889 if (sk->sk_state != TCP_CLOSE) {
1890 int orphan_count = percpu_counter_read_positive(
1891 sk->sk_prot->orphan_count);
1892
1888 sk_mem_reclaim(sk); 1893 sk_mem_reclaim(sk);
1889 if (tcp_too_many_orphans(sk, 1894 if (tcp_too_many_orphans(sk, orphan_count)) {
1890 atomic_read(sk->sk_prot->orphan_count))) {
1891 if (net_ratelimit()) 1895 if (net_ratelimit())
1892 printk(KERN_INFO "TCP: too many of orphaned " 1896 printk(KERN_INFO "TCP: too many of orphaned "
1893 "sockets\n"); 1897 "sockets\n");
@@ -2461,6 +2465,106 @@ out:
2461} 2465}
2462EXPORT_SYMBOL(tcp_tso_segment); 2466EXPORT_SYMBOL(tcp_tso_segment);
2463 2467
/*
 * tcp_gro_receive - try to merge an incoming TCP segment into a held packet.
 * @head: list of packets currently held by GRO
 * @skb:  the newly received segment (network header already pulled)
 *
 * Returns a pointer into the hold list when the matched (or current) packet
 * must be flushed up the stack, or NULL to keep holding.  Also sets
 * NAPI_GRO_CB(skb)->flush when @skb itself cannot be held.
 */
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;	/* packet to hand up the stack, if any */
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int thlen;
	unsigned int flags;
	unsigned int total;
	unsigned int mss = 1;
	int flush = 1;			/* pessimistic: flush unless proven mergeable */

	/* Need at least the fixed TCP header to read doff. */
	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;		/* full header length, including options */
	if (thlen < sizeof(*th))
		goto out;

	/* Make the whole header (with options) linear. */
	if (!pskb_may_pull(skb, thlen))
		goto out;

	/* pskb_may_pull() may reallocate; re-read the header pointer. */
	th = tcp_hdr(skb);
	__skb_pull(skb, thlen);

	flags = tcp_flag_word(th);

	/* Find a held packet of the same flow; ports are compared here
	 * (addresses were already matched by the lower layer via same_flow).
	 */
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (th->source != th2->source || th->dest != th2->dest) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Accumulate reasons the pair cannot be merged: lower-layer flush,
	 * congestion signal (CWR), any flag change other than CWR/FIN/PSH,
	 * differing ack/window, or differing TCP options.
	 */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= flags & TCP_FLAG_CWR;
	flush |= (flags ^ tcp_flag_word(th2)) &
		 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
	flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
	flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));

	/* Derive the flow's segment size from the held packet: the first
	 * frag_list member's length if already merged, else its full length.
	 */
	total = p->len;
	mss = total;
	if (skb_shinfo(p)->frag_list)
		mss = skb_shinfo(p)->frag_list->len;

	/* Only equal-or-smaller, non-empty, exactly in-sequence segments merge. */
	flush |= skb->len > mss || skb->len <= 0;
	flush |= ntohl(th2->seq) + total != ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;	/* force the skb->len < mss test below to fail */
		goto out_check_final;
	}

	/* Merge succeeded: propagate FIN/PSH from the new segment onto the
	 * (possibly relocated) head packet's header.
	 */
	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* A runt segment or any of these flags ends the aggregation. */
	flush = skb->len < mss;
	flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
			  TCP_FLAG_SYN | TCP_FLAG_FIN);

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
2550
/*
 * tcp_gro_complete - finalize a GRO-merged TCP packet before it goes up.
 * @skb: the aggregated packet
 *
 * Fills in the checksum-offload and GSO metadata so the merged packet is
 * indistinguishable from a hardware-GSO'd one.  Always returns 0.
 */
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	/* Hand the TCP checksum to later stages as CHECKSUM_PARTIAL. */
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Segment size is the length of the first merged segment; the
	 * segment count was tracked by GRO during merging.
	 * NOTE(review): assumes frag_list is non-NULL, i.e. at least one
	 * merge happened before completion — confirm against callers.
	 */
	skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	/* Preserve the ECN-CWR marking in the GSO type. */
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
2567
2464#ifdef CONFIG_TCP_MD5SIG 2568#ifdef CONFIG_TCP_MD5SIG
2465static unsigned long tcp_md5sig_users; 2569static unsigned long tcp_md5sig_users;
2466static struct tcp_md5sig_pool **tcp_md5sig_pool; 2570static struct tcp_md5sig_pool **tcp_md5sig_pool;
@@ -2650,7 +2754,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
2650 2754
2651void tcp_done(struct sock *sk) 2755void tcp_done(struct sock *sk)
2652{ 2756{
2653 if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 2757 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2654 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 2758 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
2655 2759
2656 tcp_set_state(sk, TCP_CLOSE); 2760 tcp_set_state(sk, TCP_CLOSE);
@@ -2685,6 +2789,8 @@ void __init tcp_init(void)
2685 2789
2686 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 2790 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
2687 2791
2792 percpu_counter_init(&tcp_sockets_allocated, 0);
2793 percpu_counter_init(&tcp_orphan_count, 0);
2688 tcp_hashinfo.bind_bucket_cachep = 2794 tcp_hashinfo.bind_bucket_cachep =
2689 kmem_cache_create("tcp_bind_bucket", 2795 kmem_cache_create("tcp_bind_bucket",
2690 sizeof(struct inet_bind_bucket), 0, 2796 sizeof(struct inet_bind_bucket), 0,
@@ -2707,8 +2813,8 @@ void __init tcp_init(void)
2707 thash_entries ? 0 : 512 * 1024); 2813 thash_entries ? 0 : 512 * 1024);
2708 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; 2814 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2709 for (i = 0; i < tcp_hashinfo.ehash_size; i++) { 2815 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2710 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); 2816 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
2711 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain); 2817 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
2712 } 2818 }
2713 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 2819 if (inet_ehash_locks_alloc(&tcp_hashinfo))
2714 panic("TCP: failed to alloc ehash_locks"); 2820 panic("TCP: failed to alloc ehash_locks");