diff options
author | Hideo Aoki <haoki@redhat.com> | 2007-12-31 03:11:19 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-28 18:00:18 -0500 |
commit | 3ab224be6d69de912ee21302745ea45a99274dbc (patch) | |
tree | 335dcef1cfacfefe3f36c21d5f144e011bc3bfba /net/ipv4/tcp.c | |
parent | a06b494b61de44617dd58612164bdde56fca7bfb (diff) |
[NET] CORE: Introducing new memory accounting interface.
This patch introduces new memory accounting functions for each network
protocol. Most of them are renamed from memory accounting functions
for stream protocols. At the same time, some stream memory accounting
functions are removed since other functions do the same thing.
Renaming:
sk_stream_free_skb() -> sk_wmem_free_skb()
__sk_stream_mem_reclaim() -> __sk_mem_reclaim()
sk_stream_mem_reclaim() -> sk_mem_reclaim()
sk_stream_mem_schedule() -> __sk_mem_schedule()
sk_stream_pages() -> sk_mem_pages()
sk_stream_rmem_schedule() -> sk_rmem_schedule()
sk_stream_wmem_schedule() -> sk_wmem_schedule()
sk_charge_skb() -> sk_mem_charge()
Removing:
sk_stream_rfree(): consolidates into sock_rfree()
sk_stream_set_owner_r(): consolidates into skb_set_owner_r()
sk_stream_mem_schedule()
The following functions are added.
sk_has_account(): check if the protocol supports accounting
sk_mem_uncharge(): do the opposite of sk_mem_charge()
In addition, to achieve consolidation, updating sk_wmem_queued is
removed from sk_mem_charge().
Next, to consolidate memory accounting functions, this patch adds
memory accounting calls to network core functions. Moreover, present
the present memory accounting calls are renamed to the new accounting calls.
Finally, we replace the present memory accounting calls with the new interface
in TCP and SCTP.
Signed-off-by: Takahiro Yasui <tyasui@redhat.com>
Signed-off-by: Hideo Aoki <haoki@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r-- | net/ipv4/tcp.c | 23 |
1 files changed, 12 insertions, 11 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fdaf965a6794..2cbfa6df7976 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -308,7 +308,7 @@ struct tcp_splice_state { | |||
308 | /* | 308 | /* |
309 | * Pressure flag: try to collapse. | 309 | * Pressure flag: try to collapse. |
310 | * Technical note: it is used by multiple contexts non atomically. | 310 | * Technical note: it is used by multiple contexts non atomically. |
311 | * All the sk_stream_mem_schedule() is of this nature: accounting | 311 | * All the __sk_mem_schedule() is of this nature: accounting |
312 | * is strict, actions are advisory and have some latency. | 312 | * is strict, actions are advisory and have some latency. |
313 | */ | 313 | */ |
314 | int tcp_memory_pressure __read_mostly; | 314 | int tcp_memory_pressure __read_mostly; |
@@ -485,7 +485,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb) | |||
485 | tcb->sacked = 0; | 485 | tcb->sacked = 0; |
486 | skb_header_release(skb); | 486 | skb_header_release(skb); |
487 | tcp_add_write_queue_tail(sk, skb); | 487 | tcp_add_write_queue_tail(sk, skb); |
488 | sk_charge_skb(sk, skb); | 488 | sk->sk_wmem_queued += skb->truesize; |
489 | sk_mem_charge(sk, skb->truesize); | ||
489 | if (tp->nonagle & TCP_NAGLE_PUSH) | 490 | if (tp->nonagle & TCP_NAGLE_PUSH) |
490 | tp->nonagle &= ~TCP_NAGLE_PUSH; | 491 | tp->nonagle &= ~TCP_NAGLE_PUSH; |
491 | } | 492 | } |
@@ -638,7 +639,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
638 | 639 | ||
639 | skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); | 640 | skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); |
640 | if (skb) { | 641 | if (skb) { |
641 | if (sk_stream_wmem_schedule(sk, skb->truesize)) { | 642 | if (sk_wmem_schedule(sk, skb->truesize)) { |
642 | /* | 643 | /* |
643 | * Make sure that we have exactly size bytes | 644 | * Make sure that we have exactly size bytes |
644 | * available to the caller, no more, no less. | 645 | * available to the caller, no more, no less. |
@@ -707,7 +708,7 @@ new_segment: | |||
707 | tcp_mark_push(tp, skb); | 708 | tcp_mark_push(tp, skb); |
708 | goto new_segment; | 709 | goto new_segment; |
709 | } | 710 | } |
710 | if (!sk_stream_wmem_schedule(sk, copy)) | 711 | if (!sk_wmem_schedule(sk, copy)) |
711 | goto wait_for_memory; | 712 | goto wait_for_memory; |
712 | 713 | ||
713 | if (can_coalesce) { | 714 | if (can_coalesce) { |
@@ -721,7 +722,7 @@ new_segment: | |||
721 | skb->data_len += copy; | 722 | skb->data_len += copy; |
722 | skb->truesize += copy; | 723 | skb->truesize += copy; |
723 | sk->sk_wmem_queued += copy; | 724 | sk->sk_wmem_queued += copy; |
724 | sk->sk_forward_alloc -= copy; | 725 | sk_mem_charge(sk, copy); |
725 | skb->ip_summed = CHECKSUM_PARTIAL; | 726 | skb->ip_summed = CHECKSUM_PARTIAL; |
726 | tp->write_seq += copy; | 727 | tp->write_seq += copy; |
727 | TCP_SKB_CB(skb)->end_seq += copy; | 728 | TCP_SKB_CB(skb)->end_seq += copy; |
@@ -928,7 +929,7 @@ new_segment: | |||
928 | if (copy > PAGE_SIZE - off) | 929 | if (copy > PAGE_SIZE - off) |
929 | copy = PAGE_SIZE - off; | 930 | copy = PAGE_SIZE - off; |
930 | 931 | ||
931 | if (!sk_stream_wmem_schedule(sk, copy)) | 932 | if (!sk_wmem_schedule(sk, copy)) |
932 | goto wait_for_memory; | 933 | goto wait_for_memory; |
933 | 934 | ||
934 | if (!page) { | 935 | if (!page) { |
@@ -1019,7 +1020,7 @@ do_fault: | |||
1019 | * reset, where we can be unlinking the send_head. | 1020 | * reset, where we can be unlinking the send_head. |
1020 | */ | 1021 | */ |
1021 | tcp_check_send_head(sk, skb); | 1022 | tcp_check_send_head(sk, skb); |
1022 | sk_stream_free_skb(sk, skb); | 1023 | sk_wmem_free_skb(sk, skb); |
1023 | } | 1024 | } |
1024 | 1025 | ||
1025 | do_error: | 1026 | do_error: |
@@ -1738,7 +1739,7 @@ void tcp_close(struct sock *sk, long timeout) | |||
1738 | __kfree_skb(skb); | 1739 | __kfree_skb(skb); |
1739 | } | 1740 | } |
1740 | 1741 | ||
1741 | sk_stream_mem_reclaim(sk); | 1742 | sk_mem_reclaim(sk); |
1742 | 1743 | ||
1743 | /* As outlined in RFC 2525, section 2.17, we send a RST here because | 1744 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
1744 | * data was lost. To witness the awful effects of the old behavior of | 1745 | * data was lost. To witness the awful effects of the old behavior of |
@@ -1841,7 +1842,7 @@ adjudge_to_death: | |||
1841 | } | 1842 | } |
1842 | } | 1843 | } |
1843 | if (sk->sk_state != TCP_CLOSE) { | 1844 | if (sk->sk_state != TCP_CLOSE) { |
1844 | sk_stream_mem_reclaim(sk); | 1845 | sk_mem_reclaim(sk); |
1845 | if (tcp_too_many_orphans(sk, | 1846 | if (tcp_too_many_orphans(sk, |
1846 | atomic_read(sk->sk_prot->orphan_count))) { | 1847 | atomic_read(sk->sk_prot->orphan_count))) { |
1847 | if (net_ratelimit()) | 1848 | if (net_ratelimit()) |
@@ -2658,11 +2659,11 @@ void __init tcp_init(void) | |||
2658 | limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); | 2659 | limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); |
2659 | max_share = min(4UL*1024*1024, limit); | 2660 | max_share = min(4UL*1024*1024, limit); |
2660 | 2661 | ||
2661 | sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM; | 2662 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; |
2662 | sysctl_tcp_wmem[1] = 16*1024; | 2663 | sysctl_tcp_wmem[1] = 16*1024; |
2663 | sysctl_tcp_wmem[2] = max(64*1024, max_share); | 2664 | sysctl_tcp_wmem[2] = max(64*1024, max_share); |
2664 | 2665 | ||
2665 | sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM; | 2666 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; |
2666 | sysctl_tcp_rmem[1] = 87380; | 2667 | sysctl_tcp_rmem[1] = 87380; |
2667 | sysctl_tcp_rmem[2] = max(87380, max_share); | 2668 | sysctl_tcp_rmem[2] = max(87380, max_share); |
2668 | 2669 | ||