diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-22 12:04:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-22 12:04:48 -0400 |
commit | 95211279c5ad00a317c98221d7e4365e02f20836 (patch) | |
tree | 2ddc8625378d2915b8c96392f3cf6663b705ed55 /include/linux/mm.h | |
parent | 5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff) | |
parent | 12724850e8064f64b6223d26d78c0597c742c65a (diff) |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton:
"A few misc things and all the MM queue"
* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
memcg: avoid THP split in task migration
thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
memcg: clean up existing move charge code
mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
mm/memcontrol.c: s/stealed/stolen/
memcg: fix performance of mem_cgroup_begin_update_page_stat()
memcg: remove PCG_FILE_MAPPED
memcg: use new logic for page stat accounting
memcg: remove PCG_MOVE_LOCK flag from page_cgroup
memcg: simplify move_account() check
memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
memcg: kill dead prev_priority stubs
memcg: remove PCG_CACHE page_cgroup flag
memcg: let css_get_next() rely upon rcu_read_lock()
cgroup: revert ss_id_lock to spinlock
idr: make idr_get_next() good for rcu_read_lock()
memcg: remove unnecessary thp check in page stat accounting
memcg: remove redundant returns
memcg: enum lru_list lru
...
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 30 |
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b5bb54d6d667..ee67e326b6f8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1040,6 +1040,9 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, | |||
1040 | !vma_growsup(vma->vm_next, addr); | 1040 | !vma_growsup(vma->vm_next, addr); |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | extern pid_t | ||
1044 | vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); | ||
1045 | |||
1043 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 1046 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
1044 | unsigned long old_addr, struct vm_area_struct *new_vma, | 1047 | unsigned long old_addr, struct vm_area_struct *new_vma, |
1045 | unsigned long new_addr, unsigned long len); | 1048 | unsigned long new_addr, unsigned long len); |
@@ -1058,19 +1061,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
1058 | /* | 1061 | /* |
1059 | * per-process(per-mm_struct) statistics. | 1062 | * per-process(per-mm_struct) statistics. |
1060 | */ | 1063 | */ |
1061 | static inline void set_mm_counter(struct mm_struct *mm, int member, long value) | ||
1062 | { | ||
1063 | atomic_long_set(&mm->rss_stat.count[member], value); | ||
1064 | } | ||
1065 | |||
1066 | #if defined(SPLIT_RSS_COUNTING) | ||
1067 | unsigned long get_mm_counter(struct mm_struct *mm, int member); | ||
1068 | #else | ||
1069 | static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) | 1064 | static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) |
1070 | { | 1065 | { |
1071 | return atomic_long_read(&mm->rss_stat.count[member]); | 1066 | long val = atomic_long_read(&mm->rss_stat.count[member]); |
1072 | } | 1067 | |
1068 | #ifdef SPLIT_RSS_COUNTING | ||
1069 | /* | ||
1070 | * counter is updated in asynchronous manner and may go to minus. | ||
1071 | * But it's never be expected number for users. | ||
1072 | */ | ||
1073 | if (val < 0) | ||
1074 | val = 0; | ||
1073 | #endif | 1075 | #endif |
1076 | return (unsigned long)val; | ||
1077 | } | ||
1074 | 1078 | ||
1075 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) | 1079 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) |
1076 | { | 1080 | { |
@@ -1127,9 +1131,9 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, | |||
1127 | } | 1131 | } |
1128 | 1132 | ||
1129 | #if defined(SPLIT_RSS_COUNTING) | 1133 | #if defined(SPLIT_RSS_COUNTING) |
1130 | void sync_mm_rss(struct task_struct *task, struct mm_struct *mm); | 1134 | void sync_mm_rss(struct mm_struct *mm); |
1131 | #else | 1135 | #else |
1132 | static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) | 1136 | static inline void sync_mm_rss(struct mm_struct *mm) |
1133 | { | 1137 | { |
1134 | } | 1138 | } |
1135 | #endif | 1139 | #endif |
@@ -1291,8 +1295,6 @@ extern void get_pfn_range_for_nid(unsigned int nid, | |||
1291 | extern unsigned long find_min_pfn_with_active_regions(void); | 1295 | extern unsigned long find_min_pfn_with_active_regions(void); |
1292 | extern void free_bootmem_with_active_regions(int nid, | 1296 | extern void free_bootmem_with_active_regions(int nid, |
1293 | unsigned long max_low_pfn); | 1297 | unsigned long max_low_pfn); |
1294 | int add_from_early_node_map(struct range *range, int az, | ||
1295 | int nr_range, int nid); | ||
1296 | extern void sparse_memory_present_with_active_regions(int nid); | 1298 | extern void sparse_memory_present_with_active_regions(int nid); |
1297 | 1299 | ||
1298 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 1300 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |