diff options
author | Matt Fleming <matt.fleming@linux.intel.com> | 2011-05-24 20:12:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 11:39:30 -0400 |
commit | 172703b08cd05e2d5196ac13e94cc186f629d58b (patch) | |
tree | d7c3f04008aa3327fb00e49e392bbd97948ff266 /include/linux/mm.h | |
parent | a197b59ae6e8bee56fcef37ea2482dc08414e2ac (diff) |
mm: delete non-atomic mm counter implementation
The problem with having two different types of counters is that developers
adding new code need to keep in mind whether it's safe to use both the
atomic and non-atomic implementations. For example, when adding new
callers of the *_mm_counter() functions a developer needs to ensure that
those paths are always executed with page_table_lock held, in case we're
using the non-atomic implementation of mm counters.
Hugh Dickins introduced the atomic mm counters in commit f412ac08c986
("[PATCH] mm: fix rss and mmlist locking"). When asked why he left the
non-atomic counters around he said,
| The only reason was to avoid adding costly atomic operations into a
| configuration that had no need for them there: the page_table_lock
| sufficed.
|
| Certainly it would be simpler just to delete the non-atomic variant.
|
| And I think it's fair to say that any configuration on which we're
| measuring performance to that degree (rather than "does it boot fast?"
| type measurements), would already be going the split ptlocks route.
Removing the non-atomic counters eases the maintenance burden because
developers no longer have to be mindful of the two implementations when using
*_mm_counter().
Note that all architectures provide a means of atomically updating
atomic_long_t variables, even if they have to revert to the generic
spinlock implementation because they don't support 64-bit atomic
instructions (see lib/atomic64.c).
Signed-off-by: Matt Fleming <matt.fleming@linux.intel.com>
Acked-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 44 |
1 file changed, 7 insertions, 37 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 32309f6542e8..48e458190d88 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1053,65 +1053,35 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
1053 | /* | 1053 | /* |
1054 | * per-process(per-mm_struct) statistics. | 1054 | * per-process(per-mm_struct) statistics. |
1055 | */ | 1055 | */ |
1056 | #if defined(SPLIT_RSS_COUNTING) | ||
1057 | /* | ||
1058 | * The mm counters are not protected by its page_table_lock, | ||
1059 | * so must be incremented atomically. | ||
1060 | */ | ||
1061 | static inline void set_mm_counter(struct mm_struct *mm, int member, long value) | 1056 | static inline void set_mm_counter(struct mm_struct *mm, int member, long value) |
1062 | { | 1057 | { |
1063 | atomic_long_set(&mm->rss_stat.count[member], value); | 1058 | atomic_long_set(&mm->rss_stat.count[member], value); |
1064 | } | 1059 | } |
1065 | 1060 | ||
1061 | #if defined(SPLIT_RSS_COUNTING) | ||
1066 | unsigned long get_mm_counter(struct mm_struct *mm, int member); | 1062 | unsigned long get_mm_counter(struct mm_struct *mm, int member); |
1067 | 1063 | #else | |
1068 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) | ||
1069 | { | ||
1070 | atomic_long_add(value, &mm->rss_stat.count[member]); | ||
1071 | } | ||
1072 | |||
1073 | static inline void inc_mm_counter(struct mm_struct *mm, int member) | ||
1074 | { | ||
1075 | atomic_long_inc(&mm->rss_stat.count[member]); | ||
1076 | } | ||
1077 | |||
1078 | static inline void dec_mm_counter(struct mm_struct *mm, int member) | ||
1079 | { | ||
1080 | atomic_long_dec(&mm->rss_stat.count[member]); | ||
1081 | } | ||
1082 | |||
1083 | #else /* !USE_SPLIT_PTLOCKS */ | ||
1084 | /* | ||
1085 | * The mm counters are protected by its page_table_lock, | ||
1086 | * so can be incremented directly. | ||
1087 | */ | ||
1088 | static inline void set_mm_counter(struct mm_struct *mm, int member, long value) | ||
1089 | { | ||
1090 | mm->rss_stat.count[member] = value; | ||
1091 | } | ||
1092 | |||
1093 | static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) | 1064 | static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) |
1094 | { | 1065 | { |
1095 | return mm->rss_stat.count[member]; | 1066 | return atomic_long_read(&mm->rss_stat.count[member]); |
1096 | } | 1067 | } |
1068 | #endif | ||
1097 | 1069 | ||
1098 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) | 1070 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) |
1099 | { | 1071 | { |
1100 | mm->rss_stat.count[member] += value; | 1072 | atomic_long_add(value, &mm->rss_stat.count[member]); |
1101 | } | 1073 | } |
1102 | 1074 | ||
1103 | static inline void inc_mm_counter(struct mm_struct *mm, int member) | 1075 | static inline void inc_mm_counter(struct mm_struct *mm, int member) |
1104 | { | 1076 | { |
1105 | mm->rss_stat.count[member]++; | 1077 | atomic_long_inc(&mm->rss_stat.count[member]); |
1106 | } | 1078 | } |
1107 | 1079 | ||
1108 | static inline void dec_mm_counter(struct mm_struct *mm, int member) | 1080 | static inline void dec_mm_counter(struct mm_struct *mm, int member) |
1109 | { | 1081 | { |
1110 | mm->rss_stat.count[member]--; | 1082 | atomic_long_dec(&mm->rss_stat.count[member]); |
1111 | } | 1083 | } |
1112 | 1084 | ||
1113 | #endif /* !USE_SPLIT_PTLOCKS */ | ||
1114 | |||
1115 | static inline unsigned long get_mm_rss(struct mm_struct *mm) | 1085 | static inline unsigned long get_mm_rss(struct mm_struct *mm) |
1116 | { | 1086 | { |
1117 | return get_mm_counter(mm, MM_FILEPAGES) + | 1087 | return get_mm_counter(mm, MM_FILEPAGES) + |