Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdaf965a6794..2cbfa6df7976 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -308,7 +308,7 @@ struct tcp_splice_state {
 /*
  * Pressure flag: try to collapse.
  * Technical note: it is used by multiple contexts non atomically.
- * All the sk_stream_mem_schedule() is of this nature: accounting
+ * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
 int tcp_memory_pressure __read_mostly;
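The comment above tracks the rename of sk_stream_mem_schedule() to __sk_mem_schedule(), the core of the protocol-independent memory accounting this series introduces. TCP's send path reaches it through a thin wrapper; a rough sketch, assuming the sk_has_account() test and SK_MEM_SEND direction flag that the same series adds to include/net/sock.h:

	static inline int sk_wmem_schedule(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))	/* protocol does no accounting */
			return 1;
		/* fast path: enough prepaid quota, else ask the global pool */
		return size <= sk->sk_forward_alloc ||
			__sk_mem_schedule(sk, size, SK_MEM_SEND);
	}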
@@ -485,7 +485,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tcb->sacked = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk_charge_skb(sk, skb);
+	sk->sk_wmem_queued += skb->truesize;
+	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
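sk_charge_skb(sk, skb) is split into its two effects: the queued-byte count is bumped open-coded, and the forward-alloc debit goes through sk_mem_charge(). A sketch of the helper, assuming the form it takes in include/net/sock.h in this series:

	static inline void sk_mem_charge(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))
			return;
		sk->sk_forward_alloc -= size;	/* consume prepaid quota */
	}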
@@ -638,7 +639,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
-		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
+		if (sk_wmem_schedule(sk, skb->truesize)) {
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
@@ -707,7 +708,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (!sk_stream_wmem_schedule(sk, copy))
+		if (!sk_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {
@@ -721,7 +722,7 @@ new_segment:
 		skb->data_len += copy;
 		skb->truesize += copy;
 		sk->sk_wmem_queued += copy;
-		sk->sk_forward_alloc -= copy;
+		sk_mem_charge(sk, copy);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
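Replacing the bare sk->sk_forward_alloc -= copy with sk_mem_charge() keeps the quota arithmetic behind one helper that can no-op for protocols without accounting. Its counterpart on the free path, sketched under the same assumption about its shape:

	static inline void sk_mem_uncharge(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))
			return;
		sk->sk_forward_alloc += size;	/* return quota on free */
	}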
@@ -928,7 +929,7 @@ new_segment:
 				if (copy > PAGE_SIZE - off)
 					copy = PAGE_SIZE - off;
 
-				if (!sk_stream_wmem_schedule(sk, copy))
+				if (!sk_wmem_schedule(sk, copy))
 					goto wait_for_memory;
 
 				if (!page) {
@@ -1019,7 +1020,7 @@ do_fault:
 		 * reset, where we can be unlinking the send_head.
 		 */
 		tcp_check_send_head(sk, skb);
-		sk_stream_free_skb(sk, skb);
+		sk_wmem_free_skb(sk, skb);
 	}
 
 do_error:
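sk_stream_free_skb() becomes sk_wmem_free_skb(), unwinding exactly what skb_entail() charged above. A plausible sketch, assuming it keeps the old helper's SOCK_QUEUE_SHRUNK signalling:

	static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
	{
		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);	/* write queue shrank */
		sk->sk_wmem_queued -= skb->truesize;
		sk_mem_uncharge(sk, skb->truesize);
		__kfree_skb(skb);
	}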
@@ -1738,7 +1739,7 @@ void tcp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
 	 * data was lost. To witness the awful effects of the old behavior of
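sk_mem_reclaim() hands surplus forward-allocated memory back to the protocol's global pool; on close this releases quota a dying socket would otherwise pin. A sketch, assuming reclaim only triggers once at least one whole quantum is spare:

	static inline void sk_mem_reclaim(struct sock *sk)
	{
		if (!sk_has_account(sk))
			return;
		if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
			__sk_mem_reclaim(sk);	/* return whole quanta */
	}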
@@ -1841,7 +1842,7 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 		if (tcp_too_many_orphans(sk,
 				atomic_read(sk->sk_prot->orphan_count))) {
 			if (net_ratelimit())
@@ -2658,11 +2659,11 @@ void __init tcp_init(void)
 	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
-	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
 	sysctl_tcp_wmem[2] = max(64*1024, max_share);
 
-	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
 	sysctl_tcp_rmem[2] = max(87380, max_share);
 
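SK_MEM_QUANTUM replaces SK_STREAM_MEM_QUANTUM as the unit in which sockets borrow from the global pool, so the wmem/rmem floors stay at one quantum. Assuming it keeps the old page-sized definition:

	/* assumed definition, matching the old stream quantum */
	#define SK_MEM_QUANTUM ((int)PAGE_SIZE)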