author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2010-03-05 16:41:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 14:26:24 -0500
commit	34e55232e59f7b19050267a05ff1226e5cd122a5 (patch)
tree	6b94e776e87d2a2fe1ceca7c5606901575323900 /include/linux/mm.h
parent	d559db086ff5be9bcc259e5aa50bf3d881eaf1d1 (diff)
mm: avoid false sharing of mm_counter
Considering the nature of the per-mm stats, they are an object shared among threads and can be a cache-miss point in the page fault path.

This patch adds a per-thread cache for mm_counter. RSS values are accumulated in a structure in task_struct and synchronized with the mm's counters at certain events. In this patch, the event is the number of calls to handle_mm_fault: the per-thread values are folded back into the mm every 64 calls.

A rough estimate with a small benchmark running parallel threads (2 threads) shows:

[before] 4.5 cache-misses/fault
[after]  4.0 cache-misses/fault

Either way, the most contended object becomes mmap_sem as the number of threads grows.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
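The batching scheme the changelog describes can be illustrated outside the kernel. Below is a minimal user-space sketch, not part of the patch; names such as thread_cache, cache_add and SYNC_EVENTS_THRESH are made up for the illustration. Each thread accumulates increments in a private structure and folds them into the shared atomic counter only once every 64 events, so the shared cache line is written far less often than with one atomic add per fault:

/*
 * Hypothetical user-space illustration (not kernel code): each thread
 * keeps a private delta and commits it to the shared counter only
 * every SYNC_EVENTS_THRESH events.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SYNC_EVENTS_THRESH	64	/* mirrors the "every 64 calls" batching */

static atomic_long shared_rss;		/* stand-in for mm->rss_stat */

struct thread_cache {			/* stand-in for the per-task cache */
	long count;
	int events;
};

static void cache_add(struct thread_cache *tc, long val)
{
	tc->count += val;
	if (++tc->events >= SYNC_EVENTS_THRESH) {
		atomic_fetch_add(&shared_rss, tc->count);
		tc->count = 0;
		tc->events = 0;
	}
}

static void *worker(void *arg)
{
	struct thread_cache tc = { 0, 0 };
	int i;

	for (i = 0; i < 100000; i++)
		cache_add(&tc, 1);	/* e.g. one page faulted in */

	/* final sync, as a task would do when it drops the mm */
	atomic_fetch_add(&shared_rss, tc.count);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	printf("shared_rss = %ld\n", atomic_load(&shared_rss));
	return 0;
}

Built with e.g. gcc -pthread, both threads report into the same shared_rss but touch its cache line roughly 1/64th as often as a naive per-event atomic add would.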
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2124cdb2d1d0..8e580c07d171 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -873,7 +873,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 /*
  * per-process(per-mm_struct) statistics.
  */
-#if USE_SPLIT_PTLOCKS
+#if defined(SPLIT_RSS_COUNTING)
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -883,10 +883,7 @@ static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
 	atomic_long_set(&mm->rss_stat.count[member], value);
 }
 
-static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
-{
-	return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
-}
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
@@ -974,6 +971,7 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 		*maxrss = hiwater_rss;
 }
 
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
 
 /*
  * A callback you can register to apply pressure to ageable caches.
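For reference, the header now merely declares get_mm_counter() and sync_mm_rss(); their definitions move to mm/memory.c, which is outside this diff. A rough, simplified sketch of what that side looks like under SPLIT_RSS_COUNTING, assuming the per-task cache added to task_struct by the same patch has the shape { int events; int count[NR_MM_COUNTERS]; } (helper names and the exact threshold handling may differ from the committed code):

#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	/* fold the task-local deltas into the shared mm counters */
	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}

/* called from the page fault path; sync roughly once per 64 faults */
#define TASK_RSS_EVENTS_THRESH	(64)

static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task, task->mm);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

	/*
	 * With per-thread caching the global value can transiently go
	 * negative while some thread still holds uncommitted increments;
	 * never report that to callers.
	 */
	return val < 0 ? 0 : (unsigned long)val;
}

#endif /* SPLIT_RSS_COUNTING */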