author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2010-03-05 16:41:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>       2010-03-06 14:26:23 -0500
commit     d559db086ff5be9bcc259e5aa50bf3d881eaf1d1 (patch)
tree       aa968c8a4093234e4623a34c0415bf9d8683671c /include/linux/mm.h
parent     19b629f581320999ddb9f6597051b79cdb53459c (diff)
mm: clean up mm_counter
Presently, the per-mm statistics counters are defined by macros in sched.h.

This patch modifies them to be
 - defined in mm.h as inline functions
 - backed by an array instead of macro-generated counter names.

This patch reduces the size of a future patch that will change the
implementation of the per-mm counters.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
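
The conversion described above assumes an array-backed rss_stat field in
struct mm_struct, indexed by the MM_FILEPAGES/MM_ANONPAGES members used in
the hunk below. A minimal sketch of that layout follows; the exact
definitions live in mm_types.h, outside the diffstat shown here, so this is
an assumption reconstructed from the helpers in the diff, not the committed
mm_types.h text.

/*
 * Sketch (assumption): per-mm counters stored in an array indexed by an
 * enum, replacing the old per-name macro counters.  The atomic vs. plain
 * long split mirrors the two USE_SPLIT_PTLOCKS cases in the hunk below.
 */
enum {
	MM_FILEPAGES,		/* pages backed by a file mapping */
	MM_ANONPAGES,		/* anonymous pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];	/* updated atomically */
};
#else
struct mm_rss_stat {
	unsigned long count[NR_MM_COUNTERS];	/* protected by page_table_lock */
};
#endif

With this layout, the helpers in the hunk differ only in whether they use
the atomic_long_* API or plain arithmetic on count[member].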
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--   include/linux/mm.h   104
1 files changed, 104 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 90957f14195c..2124cdb2d1d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -870,6 +870,110 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if USE_SPLIT_PTLOCKS
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else  /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if ((mm)->hiwater_rss < _rss)
+		(mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
 
 /*
  * A callback you can register to apply pressure to ageable caches.
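
For context, a hedged sketch of how a caller might use the helpers added in
this hunk; the wrapper function and its arguments are hypothetical
illustration only and not part of this commit.

/*
 * Hypothetical caller (illustration only): adjust the per-mm counters
 * when a page is mapped or unmapped, using the inc/dec helpers and the
 * enum indices referenced in the hunk above.
 */
static void account_page_mapping(struct mm_struct *mm, bool file_backed,
				 bool mapped)
{
	int member = file_backed ? MM_FILEPAGES : MM_ANONPAGES;

	if (mapped)
		inc_mm_counter(mm, member);
	else
		dec_mm_counter(mm, member);
}

Because the member index is now an ordinary integer, callers can pick the
counter at runtime instead of expanding a differently named macro per
counter, which is what makes the array-based layout smaller to change in
follow-up patches.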