aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2010-03-05 16:41:39 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2010-03-06 14:26:23 -0500
commitd559db086ff5be9bcc259e5aa50bf3d881eaf1d1 (patch)
treeaa968c8a4093234e4623a34c0415bf9d8683671c /include
parent19b629f581320999ddb9f6597051b79cdb53459c (diff)
mm: clean up mm_counter
Presently, the per-mm statistics counter is defined by macro in sched.h. This patch modifies it to be - defined in mm.h as inline functions - use an array instead of macro name creation. This patch is for reducing patch size in a future patch to modify the implementation of the per-mm counter. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Lee Schermerhorn <lee.schermerhorn@hp.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/mm.h104
-rw-r--r--include/linux/mm_types.h33
-rw-r--r--include/linux/sched.h54
3 files changed, 126 insertions, 65 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 90957f14195c..2124cdb2d1d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -870,6 +870,110 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
870 */ 870 */
871int __get_user_pages_fast(unsigned long start, int nr_pages, int write, 871int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
872 struct page **pages); 872 struct page **pages);
873/*
874 * per-process(per-mm_struct) statistics.
875 */
876#if USE_SPLIT_PTLOCKS
877/*
878 * The mm counters are not protected by its page_table_lock,
879 * so must be incremented atomically.
880 */
881static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
882{
883 atomic_long_set(&mm->rss_stat.count[member], value);
884}
885
886static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
887{
888 return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
889}
890
891static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
892{
893 atomic_long_add(value, &mm->rss_stat.count[member]);
894}
895
896static inline void inc_mm_counter(struct mm_struct *mm, int member)
897{
898 atomic_long_inc(&mm->rss_stat.count[member]);
899}
900
901static inline void dec_mm_counter(struct mm_struct *mm, int member)
902{
903 atomic_long_dec(&mm->rss_stat.count[member]);
904}
905
906#else /* !USE_SPLIT_PTLOCKS */
907/*
908 * The mm counters are protected by its page_table_lock,
909 * so can be incremented directly.
910 */
911static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
912{
913 mm->rss_stat.count[member] = value;
914}
915
916static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
917{
918 return mm->rss_stat.count[member];
919}
920
921static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
922{
923 mm->rss_stat.count[member] += value;
924}
925
926static inline void inc_mm_counter(struct mm_struct *mm, int member)
927{
928 mm->rss_stat.count[member]++;
929}
930
931static inline void dec_mm_counter(struct mm_struct *mm, int member)
932{
933 mm->rss_stat.count[member]--;
934}
935
936#endif /* !USE_SPLIT_PTLOCKS */
937
938static inline unsigned long get_mm_rss(struct mm_struct *mm)
939{
940 return get_mm_counter(mm, MM_FILEPAGES) +
941 get_mm_counter(mm, MM_ANONPAGES);
942}
943
944static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
945{
946 return max(mm->hiwater_rss, get_mm_rss(mm));
947}
948
949static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
950{
951 return max(mm->hiwater_vm, mm->total_vm);
952}
953
954static inline void update_hiwater_rss(struct mm_struct *mm)
955{
956 unsigned long _rss = get_mm_rss(mm);
957
958 if ((mm)->hiwater_rss < _rss)
959 (mm)->hiwater_rss = _rss;
960}
961
962static inline void update_hiwater_vm(struct mm_struct *mm)
963{
964 if (mm->hiwater_vm < mm->total_vm)
965 mm->hiwater_vm = mm->total_vm;
966}
967
968static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
969 struct mm_struct *mm)
970{
971 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
972
973 if (*maxrss < hiwater_rss)
974 *maxrss = hiwater_rss;
975}
976
873 977
874/* 978/*
875 * A callback you can register to apply pressure to ageable caches. 979 * A callback you can register to apply pressure to ageable caches.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36f96271306c..e1ca64be6678 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -24,12 +24,6 @@ struct address_space;
24 24
25#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 25#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
26 26
27#if USE_SPLIT_PTLOCKS
28typedef atomic_long_t mm_counter_t;
29#else /* !USE_SPLIT_PTLOCKS */
30typedef unsigned long mm_counter_t;
31#endif /* !USE_SPLIT_PTLOCKS */
32
33/* 27/*
34 * Each physical page in the system has a struct page associated with 28 * Each physical page in the system has a struct page associated with
35 * it to keep track of whatever it is we are using the page for at the 29 * it to keep track of whatever it is we are using the page for at the
@@ -201,6 +195,22 @@ struct core_state {
201 struct completion startup; 195 struct completion startup;
202}; 196};
203 197
198enum {
199 MM_FILEPAGES,
200 MM_ANONPAGES,
201 NR_MM_COUNTERS
202};
203
204#if USE_SPLIT_PTLOCKS
205struct mm_rss_stat {
206 atomic_long_t count[NR_MM_COUNTERS];
207};
208#else /* !USE_SPLIT_PTLOCKS */
209struct mm_rss_stat {
210 unsigned long count[NR_MM_COUNTERS];
211};
212#endif /* !USE_SPLIT_PTLOCKS */
213
204struct mm_struct { 214struct mm_struct {
205 struct vm_area_struct * mmap; /* list of VMAs */ 215 struct vm_area_struct * mmap; /* list of VMAs */
206 struct rb_root mm_rb; 216 struct rb_root mm_rb;
@@ -227,11 +237,6 @@ struct mm_struct {
227 * by mmlist_lock 237 * by mmlist_lock
228 */ 238 */
229 239
230 /* Special counters, in some configurations protected by the
231 * page_table_lock, in other configurations by being atomic.
232 */
233 mm_counter_t _file_rss;
234 mm_counter_t _anon_rss;
235 240
236 unsigned long hiwater_rss; /* High-watermark of RSS usage */ 241 unsigned long hiwater_rss; /* High-watermark of RSS usage */
237 unsigned long hiwater_vm; /* High-water virtual memory usage */ 242 unsigned long hiwater_vm; /* High-water virtual memory usage */
@@ -244,6 +249,12 @@ struct mm_struct {
244 249
245 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ 250 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
246 251
252 /*
253 * Special counters, in some configurations protected by the
254 * page_table_lock, in other configurations by being atomic.
255 */
256 struct mm_rss_stat rss_stat;
257
247 struct linux_binfmt *binfmt; 258 struct linux_binfmt *binfmt;
248 259
249 cpumask_t cpu_vm_mask; 260 cpumask_t cpu_vm_mask;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b1753f7e48e..cbeafa49a53b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -396,60 +396,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
396static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} 396static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
397#endif 397#endif
398 398
399#if USE_SPLIT_PTLOCKS
400/*
401 * The mm counters are not protected by its page_table_lock,
402 * so must be incremented atomically.
403 */
404#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
405#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
406#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
407#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
408#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
409
410#else /* !USE_SPLIT_PTLOCKS */
411/*
412 * The mm counters are protected by its page_table_lock,
413 * so can be incremented directly.
414 */
415#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
416#define get_mm_counter(mm, member) ((mm)->_##member)
417#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
418#define inc_mm_counter(mm, member) (mm)->_##member++
419#define dec_mm_counter(mm, member) (mm)->_##member--
420
421#endif /* !USE_SPLIT_PTLOCKS */
422
423#define get_mm_rss(mm) \
424 (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
425#define update_hiwater_rss(mm) do { \
426 unsigned long _rss = get_mm_rss(mm); \
427 if ((mm)->hiwater_rss < _rss) \
428 (mm)->hiwater_rss = _rss; \
429} while (0)
430#define update_hiwater_vm(mm) do { \
431 if ((mm)->hiwater_vm < (mm)->total_vm) \
432 (mm)->hiwater_vm = (mm)->total_vm; \
433} while (0)
434
435static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
436{
437 return max(mm->hiwater_rss, get_mm_rss(mm));
438}
439
440static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
441 struct mm_struct *mm)
442{
443 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
444
445 if (*maxrss < hiwater_rss)
446 *maxrss = hiwater_rss;
447}
448
449static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
450{
451 return max(mm->hiwater_vm, mm->total_vm);
452}
453 399
454extern void set_dumpable(struct mm_struct *mm, int value); 400extern void set_dumpable(struct mm_struct *mm, int value);
455extern int get_dumpable(struct mm_struct *mm); 401extern int get_dumpable(struct mm_struct *mm);