about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKonstantin Khlebnikov <khlebnikov@openvz.org>2012-03-21 19:33:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-21 20:54:55 -0400
commit69c978232aaa99476f9bd002c2a29a84fa3779b5 (patch)
tree7edb0da034b8824040f4f7327dd31ad260532167
parent6131728914810a6c02e08750e13e45870101e862 (diff)
mm: make get_mm_counter static-inline
Make get_mm_counter() always static inline; it is simple enough for that. And remove unused set_mm_counter(). bloat-o-meter: add/remove: 0/1 grow/shrink: 4/12 up/down: 99/-341 (-242) function old new delta try_to_unmap_one 886 952 +66 sys_remap_file_pages 1214 1230 +16 dup_mm 1684 1700 +16 do_exit 2277 2278 +1 zap_page_range 208 205 -3 unmap_region 304 296 -8 static.oom_kill_process 554 546 -8 try_to_unmap_file 1716 1700 -16 getrusage 925 909 -16 flush_old_exec 1704 1688 -16 static.dump_header 416 390 -26 acct_update_integrals 218 187 -31 do_task_stat 2986 2954 -32 get_mm_counter 34 - -34 xacct_add_tsk 371 334 -37 task_statm 172 118 -54 task_mem 383 323 -60 try_to_unmap_one() grows because update_hiwater_rss() is now completely inline. Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Kirill A. Shutemov <kirill@shutemov.name> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mm.h21
-rw-r--r--mm/memory.c18
2 files changed, 11 insertions, 28 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 17b27cd269c4..378bccebc26c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1058,19 +1058,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1058/* 1058/*
1059 * per-process(per-mm_struct) statistics. 1059 * per-process(per-mm_struct) statistics.
1060 */ 1060 */
1061static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
1062{
1063 atomic_long_set(&mm->rss_stat.count[member], value);
1064}
1065
1066#if defined(SPLIT_RSS_COUNTING)
1067unsigned long get_mm_counter(struct mm_struct *mm, int member);
1068#else
1069static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) 1061static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1070{ 1062{
1071 return atomic_long_read(&mm->rss_stat.count[member]); 1063 long val = atomic_long_read(&mm->rss_stat.count[member]);
1072} 1064
1065#ifdef SPLIT_RSS_COUNTING
1066 /*
1067 * counter is updated in asynchronous manner and may go to minus.
1068 * But it's never be expected number for users.
1069 */
1070 if (val < 0)
1071 val = 0;
1073#endif 1072#endif
1073 return (unsigned long)val;
1074}
1074 1075
1075static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 1076static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1076{ 1077{
diff --git a/mm/memory.c b/mm/memory.c
index e01abb908b6b..a5de734e14a7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -160,24 +160,6 @@ static void check_sync_rss_stat(struct task_struct *task)
160 __sync_task_rss_stat(task, task->mm); 160 __sync_task_rss_stat(task, task->mm);
161} 161}
162 162
163unsigned long get_mm_counter(struct mm_struct *mm, int member)
164{
165 long val = 0;
166
167 /*
168 * Don't use task->mm here...for avoiding to use task_get_mm()..
169 * The caller must guarantee task->mm is not invalid.
170 */
171 val = atomic_long_read(&mm->rss_stat.count[member]);
172 /*
173 * counter is updated in asynchronous manner and may go to minus.
174 * But it's never be expected number for users.
175 */
176 if (val < 0)
177 return 0;
178 return (unsigned long)val;
179}
180
181void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) 163void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
182{ 164{
183 __sync_task_rss_stat(task, mm); 165 __sync_task_rss_stat(task, mm);