author      Hideo Aoki <haoki@redhat.com>            2007-12-31 03:11:19 -0500
committer   David S. Miller <davem@davemloft.net>    2008-01-28 18:00:18 -0500
commit      3ab224be6d69de912ee21302745ea45a99274dbc (patch)
tree        335dcef1cfacfefe3f36c21d5f144e011bc3bfba /include/net/sock.h
parent      a06b494b61de44617dd58612164bdde56fca7bfb (diff)
[NET] CORE: Introducing new memory accounting interface.
This patch introduces new memory accounting functions for each network
protocol. Most of them are renamed from memory accounting functions
for stream protocols. At the same time, some stream memory accounting
functions are removed since other functions do the same thing.
Renaming:
sk_stream_free_skb() -> sk_wmem_free_skb()
__sk_stream_mem_reclaim() -> __sk_mem_reclaim()
sk_stream_mem_reclaim() -> sk_mem_reclaim()
sk_stream_mem_schedule() -> __sk_mem_schedule()
sk_stream_pages() -> sk_mem_pages()
sk_stream_rmem_schedule() -> sk_rmem_schedule()
sk_stream_wmem_schedule() -> sk_wmem_schedule()
sk_charge_skb() -> sk_mem_charge()
Removing:
sk_stream_rfree(): consolidates into sock_rfree()
sk_stream_set_owner_r(): consolidates into skb_set_owner_r()
sk_stream_mem_schedule()
The following functions are added.
sk_has_account(): check if the protocol supports accounting
sk_mem_uncharge(): do the opposite of sk_mem_charge()
In addition, to achieve consolidation, updating sk_wmem_queued is
removed from sk_mem_charge().
Next, to consolidate memory accounting functions, this patch adds
memory accounting calls to network core functions. Moreover, the existing
memory accounting calls are renamed to the new accounting calls.
Finally, we replace the existing memory accounting calls with the new
interface in TCP and SCTP.
Signed-off-by: Takahiro Yasui <tyasui@redhat.com>
Signed-off-by: Hideo Aoki <haoki@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
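
To make the intended calling pattern concrete, here is a minimal send-side
sketch, assuming a protocol that sets ->memory_allocated. It is illustrative
only and not part of the patch; my_proto_queue_for_send and its queueing
details are hypothetical.

/*
 * Illustrative sketch only, not part of this patch.  Send-side usage of
 * the new accounting helpers; my_proto_queue_for_send is a made-up name.
 * Assumes the caller holds the socket lock.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int my_proto_queue_for_send(struct sock *sk, struct sk_buff *skb)
{
	/* Reserve forward-allocated memory (in SK_MEM_QUANTUM units via
	 * __sk_mem_schedule()).  Protocols without ->memory_allocated
	 * pass through unconditionally thanks to sk_has_account().
	 */
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOBUFS;

	/* sk_mem_charge() no longer updates sk_wmem_queued, so the
	 * caller does it explicitly; together these two lines replace
	 * the removed sk_charge_skb().
	 */
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);

	__skb_queue_tail(&sk->sk_write_queue, skb);
	return 0;
}

When such an skb is freed, sk_wmem_free_skb() reverses both updates, and
sk_mem_reclaim() may then return whole unused SK_MEM_QUANTUM units to the
protocol-wide counter.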
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--   include/net/sock.h   98
1 file changed, 57 insertions(+), 41 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index d27ba6fdd039..3d938f6c6725 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -460,25 +460,6 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-extern void sk_stream_rfree(struct sk_buff *skb);
-
-static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb->sk = sk;
-	skb->destructor = sk_stream_rfree;
-	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
-	sk->sk_forward_alloc -= skb->truesize;
-}
-
-static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
-{
-	skb_truesize_check(skb);
-	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-	sk->sk_wmem_queued -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
-	__kfree_skb(skb);
-}
-
 /* The per-socket spinlock must be held here. */
 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
@@ -576,7 +557,7 @@ struct proto {
 	/*
 	 * Pressure flag: try to collapse.
 	 * Technical note: it is used by multiple contexts non atomically.
-	 * All the sk_stream_mem_schedule() is of this nature: accounting
+	 * All the __sk_mem_schedule() is of this nature: accounting
 	 * is strict, actions are advisory and have some latency.
 	 */
 	int	*memory_pressure;
@@ -712,33 +693,73 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
 
-extern void __sk_stream_mem_reclaim(struct sock *sk);
-extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+/*
+ * Functions for memory accounting
+ */
+extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+extern void __sk_mem_reclaim(struct sock *sk);
 
-#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
-#define SK_STREAM_MEM_QUANTUM_SHIFT ilog2(SK_STREAM_MEM_QUANTUM)
+#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+#define SK_MEM_SEND	0
+#define SK_MEM_RECV	1
 
-static inline int sk_stream_pages(int amt)
+static inline int sk_mem_pages(int amt)
 {
-	return (amt + SK_STREAM_MEM_QUANTUM - 1) >> SK_STREAM_MEM_QUANTUM_SHIFT;
+	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
 }
 
-static inline void sk_stream_mem_reclaim(struct sock *sk)
+static inline int sk_has_account(struct sock *sk)
 {
-	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
-		__sk_stream_mem_reclaim(sk);
+	/* return true if protocol supports memory accounting */
+	return !!sk->sk_prot->memory_allocated;
 }
 
-static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+static inline int sk_wmem_schedule(struct sock *sk, int size)
 {
-	return (int)skb->truesize <= sk->sk_forward_alloc ||
-		sk_stream_mem_schedule(sk, skb->truesize, 1);
+	if (!sk_has_account(sk))
+		return 1;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, int size)
 {
+	if (!sk_has_account(sk))
+		return 1;
 	return size <= sk->sk_forward_alloc ||
-		sk_stream_mem_schedule(sk, size, 0);
+		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc += size;
+}
+
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	skb_truesize_check(skb);
+	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	sk->sk_wmem_queued -= skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
+	__kfree_skb(skb);
 }
 
 /* Used by processes to "lock" a socket state, so that
@@ -1076,12 +1097,6 @@ static inline int sk_can_gso(const struct sock *sk)
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
-{
-	sk->sk_wmem_queued += skb->truesize;
-	sk->sk_forward_alloc -= skb->truesize;
-}
-
 static inline int skb_copy_to_page(struct sock *sk, char __user *from,
 				   struct sk_buff *skb, struct page *page,
 				   int off, int copy)
@@ -1101,7 +1116,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from,
 		skb->data_len += copy;
 		skb->truesize += copy;
 		sk->sk_wmem_queued += copy;
-		sk->sk_forward_alloc -= copy;
+		sk_mem_charge(sk, copy);
 		return 0;
 	}
 
@@ -1127,6 +1142,7 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, skb->truesize);
 }
 
 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
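
A receive-side counterpart, again a hypothetical sketch rather than code from
the patch (my_proto_queue_rcv is an assumed name):

/*
 * Illustrative sketch only.  sk_rmem_schedule() reserves the memory and
 * skb_set_owner_r() performs the charge, as added in the last hunk above.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int my_proto_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb->truesize))
		return -ENOBUFS;	/* over the protocol's memory limits */

	skb_set_owner_r(skb, sk);	/* now also charges skb->truesize */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}

When the skb is later freed, sock_rfree() (into which sk_stream_rfree() was
consolidated) uncharges it again, and sk_mem_reclaim() returns accumulated
whole quanta to the protocol's memory_allocated counter.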