author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 12:04:48 -0400
commit		95211279c5ad00a317c98221d7e4365e02f20836
tree		2ddc8625378d2915b8c96392f3cf6663b705ed55	/include/linux
parent		5375871d432ae9fc581014ac117b96aaee3cd0c7
parent		12724850e8064f64b6223d26d78c0597c742c65a
Merge branch 'akpm' (Andrew's patch-bomb)

Merge first batch of patches from Andrew Morton:
 "A few misc things and all the MM queue"

* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
  memcg: avoid THP split in task migration
  thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
  memcg: clean up existing move charge code
  mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
  mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
  mm/memcontrol.c: s/stealed/stolen/
  memcg: fix performance of mem_cgroup_begin_update_page_stat()
  memcg: remove PCG_FILE_MAPPED
  memcg: use new logic for page stat accounting
  memcg: remove PCG_MOVE_LOCK flag from page_cgroup
  memcg: simplify move_account() check
  memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
  memcg: kill dead prev_priority stubs
  memcg: remove PCG_CACHE page_cgroup flag
  memcg: let css_get_next() rely upon rcu_read_lock()
  cgroup: revert ss_id_lock to spinlock
  idr: make idr_get_next() good for rcu_read_lock()
  memcg: remove unnecessary thp check in page stat accounting
  memcg: remove redundant returns
  memcg: enum lru_list lru
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cgroup.h             |  2
-rw-r--r--  include/linux/compaction.h         | 20
-rw-r--r--  include/linux/cpuset.h             | 47
-rw-r--r--  include/linux/huge_mm.h            | 28
-rw-r--r--  include/linux/hugetlb.h            | 45
-rw-r--r--  include/linux/init_task.h          |  8
-rw-r--r--  include/linux/kernel-page-flags.h  |  1
-rw-r--r--  include/linux/memcontrol.h         | 58
-rw-r--r--  include/linux/migrate.h            |  2
-rw-r--r--  include/linux/mm.h                 | 30
-rw-r--r--  include/linux/mmzone.h             |  1
-rw-r--r--  include/linux/oom.h                |  2
-rw-r--r--  include/linux/page-flags.h         | 20
-rw-r--r--  include/linux/page_cgroup.h        | 33
-rw-r--r--  include/linux/rmap.h               |  1
-rw-r--r--  include/linux/sched.h              |  2
-rw-r--r--  include/linux/swap.h               |  2
17 files changed, 165 insertions, 137 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 501adb1b2f43..5a85b3415c1b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -498,7 +498,7 @@ struct cgroup_subsys {
 	struct list_head sibling;
 	/* used when use_id == true */
 	struct idr idr;
-	rwlock_t id_lock;
+	spinlock_t id_lock;
 
 	/* should be defined only by modular subsystems */
 	struct module *module;
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index bb2bbdbe5464..51a90b7f2d60 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -23,6 +23,7 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
 			bool sync);
+extern int compact_pgdat(pg_data_t *pgdat, int order);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 
 /* Do not skip compaction more than 64 times */
@@ -33,20 +34,26 @@ extern unsigned long compaction_suitable(struct zone *zone, int order);
  * allocation success. 1 << compact_defer_limit compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 	zone->compact_considered = 0;
 	zone->compact_defer_shift++;
 
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 }
 
 /* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 
+	if (order < zone->compact_order_failed)
+		return false;
+
 	/* Avoid possible overflow */
 	if (++zone->compact_considered > defer_limit)
 		zone->compact_considered = defer_limit;
@@ -62,16 +69,21 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	return COMPACT_CONTINUE;
 }
 
+static inline int compact_pgdat(pg_data_t *pgdat, int order)
+{
+	return COMPACT_CONTINUE;
+}
+
 static inline unsigned long compaction_suitable(struct zone *zone, int order)
 {
 	return COMPACT_SKIPPED;
 }
 
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 }
 
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	return 1;
 }
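
The net effect of this hunk is per-order deferral: a failed compaction at some order also gates later attempts at that order and above. A hedged sketch of the caller pattern, loosely modeled on the direct-compaction path in mm/page_alloc.c; alloc_after_compact() is a hypothetical stand-in for retrying the freelists:

/* Sketch only: how an allocation path could consult the deferral API. */
static struct page *compact_then_alloc(struct zonelist *zonelist, int order,
				       gfp_t gfp_mask, nodemask_t *nodemask,
				       struct zone *preferred_zone)
{
	struct page *page;

	/* Recent failures at this order or above: skip the expensive run. */
	if (compaction_deferred(preferred_zone, order))
		return NULL;

	if (try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				 true) == COMPACT_SKIPPED)
		return NULL;

	page = alloc_after_compact(zonelist, order);	/* hypothetical helper */
	if (!page)
		defer_compaction(preferred_zone, order);	/* back off harder */
	return page;
}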
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e9eaec522655..7a7e5fd2a277 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * reading current mems_allowed and mempolicy in the fastpath must protected
- * by get_mems_allowed()
+ * get_mems_allowed is required when making decisions involving mems_allowed
+ * such as during page allocation. mems_allowed can be updated in parallel
+ * and depending on the new value an operation can fail potentially causing
+ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+ * prevents these artificial failures.
  */
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
-	current->mems_allowed_change_disable++;
-
-	/*
-	 * ensure that reading mems_allowed and mempolicy happens after the
-	 * update of ->mems_allowed_change_disable.
-	 *
-	 * the write-side task finds ->mems_allowed_change_disable is not 0,
-	 * and knows the read-side task is reading mems_allowed or mempolicy,
-	 * so it will clear old bits lazily.
-	 */
-	smp_mb();
+	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
-static inline void put_mems_allowed(void)
+/*
+ * If this returns false, the operation that took place after get_mems_allowed
+ * may have failed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool put_mems_allowed(unsigned int seq)
 {
-	/*
-	 * ensure that reading mems_allowed and mempolicy before reducing
-	 * mems_allowed_change_disable.
-	 *
-	 * the write-side task will know that the read-side task is still
-	 * reading mems_allowed or mempolicy, don't clears old bits in the
-	 * nodemask.
-	 */
-	smp_mb();
-	--ACCESS_ONCE(current->mems_allowed_change_disable);
+	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
 	task_lock(current);
+	write_seqcount_begin(&current->mems_allowed_seq);
 	current->mems_allowed = nodemask;
+	write_seqcount_end(&current->mems_allowed_seq);
 	task_unlock(current);
 }
 
@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
+	return 0;
 }
 
-static inline void put_mems_allowed(void)
+static inline bool put_mems_allowed(unsigned int seq)
 {
+	return true;
 }
 
 #endif /* !CONFIG_CPUSETS */
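
The new comment describes a seqcount retry loop; a minimal sketch of that pattern, with alloc_something() standing in for a real allocation that reads current->mems_allowed:

/* Sketch of the retry loop described above; not a real allocator path. */
struct page *page;
unsigned int cpuset_mems_cookie;

do {
	cpuset_mems_cookie = get_mems_allowed();
	page = alloc_something();	/* may fail if mems_allowed changed */
	/*
	 * Retry only when the failure may be an artifact of a concurrent
	 * mems_allowed update, i.e. put_mems_allowed() reports a change.
	 */
} while (!put_mems_allowed(cpuset_mems_cookie) && !page);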
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1b921299abc4..c8af7a2efb52 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -51,6 +51,9 @@ extern pmd_t *page_check_address_pmd(struct page *page,
 				      unsigned long address,
 				      enum page_check_address_pmd_flag flag);
 
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define HPAGE_PMD_SHIFT HPAGE_SHIFT
 #define HPAGE_PMD_MASK HPAGE_MASK
@@ -102,8 +105,6 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 			BUG_ON(pmd_trans_splitting(*____pmd) ||		\
 			       pmd_trans_huge(*____pmd));		\
 	} while (0)
-#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
-#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -113,6 +114,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
+extern int __pmd_trans_huge_lock(pmd_t *pmd,
+				 struct vm_area_struct *vma);
+/* mmap_sem must be held on entry */
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+	if (pmd_trans_huge(*pmd))
+		return __pmd_trans_huge_lock(pmd, vma);
+	else
+		return 0;
+}
 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 unsigned long start,
 					 unsigned long end,
@@ -146,9 +159,9 @@ static inline struct page *compound_trans_head(struct page *page)
 	return page;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
-#define HPAGE_PMD_MASK ({ BUG(); 0; })
-#define HPAGE_PMD_SIZE ({ BUG(); 0; })
+#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
 
 #define hpage_nr_pages(x) 1
 
@@ -176,6 +189,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
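
pmd_trans_huge_lock() returns 1 with the page-table lock held when the pmd really is a stable huge entry; a hedged caller sketch (the counting done under the lock is illustrative, and the lock dropped is mm->page_table_lock in this era of the kernel):

/* Sketch: count the pages covered if a huge pmd is mapped at @pmd.
 * mmap_sem must already be held, per the comment in huge_mm.h. */
static int count_huge_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		/* Lock held: *pmd is a stable trans-huge entry. */
		spin_unlock(&vma->vm_mm->page_table_lock);
		return HPAGE_PMD_NR;
	}
	return 0;	/* not huge (or being split): use the pte path */
}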
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d9d6c868b86b..000837e126e6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,6 +14,15 @@ struct user_struct;
 #include <linux/shm.h>
 #include <asm/tlbflush.h>
 
+struct hugepage_subpool {
+	spinlock_t lock;
+	long count;
+	long max_hpages, used_hpages;
+};
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+void hugepage_put_subpool(struct hugepage_subpool *spool);
+
 int PageHuge(struct page *page);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
@@ -128,35 +137,14 @@ enum {
 };
 
 #ifdef CONFIG_HUGETLBFS
-struct hugetlbfs_config {
-	uid_t   uid;
-	gid_t   gid;
-	umode_t mode;
-	long	nr_blocks;
-	long	nr_inodes;
-	struct hstate *hstate;
-};
-
 struct hugetlbfs_sb_info {
-	long	max_blocks;	/* blocks allowed */
-	long	free_blocks;	/* blocks free */
 	long	max_inodes;	/* inodes allowed */
 	long	free_inodes;	/* inodes free */
 	spinlock_t	stat_lock;
 	struct hstate *hstate;
+	struct hugepage_subpool *spool;
 };
 
-
-struct hugetlbfs_inode_info {
-	struct shared_policy policy;
-	struct inode vfs_inode;
-};
-
-static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
-{
-	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
-}
-
 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 {
 	return sb->s_fs_info;
@@ -164,10 +152,9 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 
 extern const struct file_operations hugetlbfs_file_operations;
 extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, unsigned long addr,
+				size_t size, vm_flags_t acct,
 				struct user_struct **user, int creat_flags);
-int hugetlb_get_quota(struct address_space *mapping, long delta);
-void hugetlb_put_quota(struct address_space *mapping, long delta);
 
 static inline int is_file_hugepages(struct file *file)
 {
@@ -179,15 +166,11 @@ static inline int is_file_hugepages(struct file *file)
 	return 0;
 }
 
-static inline void set_file_hugepages(struct file *file)
-{
-	file->f_op = &hugetlbfs_file_operations;
-}
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file) 0
-#define set_file_hugepages(file) BUG()
-static inline struct file *hugetlb_file_setup(const char *name, size_t size,
+static inline struct file *
+hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
 		vm_flags_t acctflag, struct user_struct **user, int creat_flags)
 {
 	return ERR_PTR(-ENOSYS);
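
The subpool replaces the old per-mapping quota calls with per-superblock accounting; a hedged sketch of how a filesystem might wire it up at mount time (everything beyond the two subpool calls is illustrative):

/* Sketch: attach a page-accounting pool to a hugetlbfs-like superblock.
 * nr_blocks would come from mount options; -1 means "no limit". */
static int example_fill_super(struct super_block *sb, long nr_blocks)
{
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	if (nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(nr_blocks);
		if (!sbinfo->spool) {
			kfree(sbinfo);
			return -ENOMEM;
		}
	}
	sb->s_fs_info = sbinfo;
	return 0;
}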
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f994d51f70f2..e4baff5f7ff4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -29,6 +29,13 @@ extern struct fs_struct init_fs;
 #define INIT_GROUP_RWSEM(sig)
 #endif
 
+#ifdef CONFIG_CPUSETS
+#define INIT_CPUSET_SEQ							\
+	.mems_allowed_seq = SEQCNT_ZERO,
+#else
+#define INIT_CPUSET_SEQ
+#endif
+
 #define INIT_SIGNALS(sig) {						\
 	.nr_threads	= 1,						\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -192,6 +199,7 @@ extern struct cred init_cred;
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
+	INIT_CPUSET_SEQ							\
 }
 
 
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index bd92a89f4b0a..26a65711676f 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -30,6 +30,7 @@
 #define KPF_NOPAGE		20
 
 #define KPF_KSM			21
+#define KPF_THP			22
 
 /* kernel hacking assistances
  * WARNING: subject to change, never rely on them!
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b80de520670b..f94efd2f6c27 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -77,7 +77,8 @@ extern void mem_cgroup_uncharge_end(void);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 
-extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
+extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+				     int order);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -140,6 +141,34 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
+void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
+					 unsigned long *flags);
+
+extern atomic_t memcg_moving;
+
+static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+	if (mem_cgroup_disabled())
+		return;
+	rcu_read_lock();
+	*locked = false;
+	if (atomic_read(&memcg_moving))
+		__mem_cgroup_begin_update_page_stat(page, locked, flags);
+}
+
+void __mem_cgroup_end_update_page_stat(struct page *page,
+				unsigned long *flags);
+static inline void mem_cgroup_end_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+	if (mem_cgroup_disabled())
+		return;
+	if (*locked)
+		__mem_cgroup_end_update_page_stat(page, flags);
+	rcu_read_unlock();
+}
+
 void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_page_stat_item idx,
 				 int val);
@@ -298,21 +327,6 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
 {
 }
 
-static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
-{
-	return 0;
-}
-
-static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
-						int priority)
-{
-}
-
-static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
-						int priority)
-{
-}
-
 static inline bool mem_cgroup_disabled(void)
 {
 	return true;
@@ -355,6 +369,16 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
+static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+}
+
+static inline void mem_cgroup_end_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_page_stat_item idx)
 {
@@ -391,7 +415,7 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage)
 {
 }
-#endif /* CONFIG_CGROUP_MEM_CONT */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
 static inline bool
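
The begin/end pair brackets a page-stat update so it cannot race with a memcg charge move, taking the heavier locked path only while memcg_moving is set. A sketch of the caller discipline (update_mapping_state() is a hypothetical stand-in for a real rmap change; MEMCG_NR_FILE_MAPPED is the existing stat index):

/* Sketch of the locking discipline around a memcg page-stat update. */
bool locked;
unsigned long flags;

mem_cgroup_begin_update_page_stat(page, &locked, &flags);
if (update_mapping_state(page))		/* hypothetical rmap change */
	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
mem_cgroup_end_update_page_stat(page, &locked, &flags);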
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 05ed2828a553..855c337b20c3 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -8,7 +8,6 @@
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
-#define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
@@ -32,7 +31,6 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 #else
-#define PAGE_MIGRATION 0
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b5bb54d6d667..ee67e326b6f8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1040,6 +1040,9 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
 		!vma_growsup(vma->vm_next, addr);
 }
 
+extern pid_t
+vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
@@ -1058,19 +1061,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 /*
  * per-process(per-mm_struct) statistics.
  */
-static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
-{
-	atomic_long_set(&mm->rss_stat.count[member], value);
-}
-
-#if defined(SPLIT_RSS_COUNTING)
-unsigned long get_mm_counter(struct mm_struct *mm, int member);
-#else
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return atomic_long_read(&mm->rss_stat.count[member]);
-}
+	long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+#ifdef SPLIT_RSS_COUNTING
+	/*
+	 * counter is updated in asynchronous manner and may go to minus.
+	 * But it's never be expected number for users.
+	 */
+	if (val < 0)
+		val = 0;
 #endif
+	return (unsigned long)val;
+}
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
@@ -1127,9 +1131,9 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 }
 
 #if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+void sync_mm_rss(struct mm_struct *mm);
 #else
-static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+static inline void sync_mm_rss(struct mm_struct *mm)
 {
 }
 #endif
@@ -1291,8 +1295,6 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
-int add_from_early_node_map(struct range *range, int az,
-					int nr_range, int nid);
 extern void sparse_memory_present_with_active_regions(int nid);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 650ba2fb3301..dff711509661 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -365,6 +365,7 @@ struct zone {
 	 */
 	unsigned int		compact_considered;
 	unsigned int		compact_defer_shift;
+	int			compact_order_failed;
 #endif
 
 	ZONE_PADDING(_pad1_)
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 552fba9c7d5a..3d7647536b03 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -49,7 +49,7 @@ extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *mask);
+		int order, nodemask_t *mask, bool force_kill);
 extern int register_oom_notifier(struct notifier_block *nb);
 extern int unregister_oom_notifier(struct notifier_block *nb);
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e90a673be67e..6b25758e028e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -414,11 +414,26 @@ static inline int PageTransHuge(struct page *page)
 	return PageHead(page);
 }
 
+/*
+ * PageTransCompound returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
 static inline int PageTransCompound(struct page *page)
 {
 	return PageCompound(page);
 }
 
+/*
+ * PageTransTail returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
+static inline int PageTransTail(struct page *page)
+{
+	return PageTail(page);
+}
+
 #else
 
 static inline int PageTransHuge(struct page *page)
@@ -430,6 +445,11 @@ static inline int PageTransCompound(struct page *page)
 {
 	return 0;
 }
+
+static inline int PageTransTail(struct page *page)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_MMU
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index a2d11771c84b..a88cdba27809 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -4,12 +4,8 @@
 enum {
 	/* flags for mem_cgroup */
 	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
-	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
 	PCG_MIGRATION, /* under page migration */
-	/* flags for mem_cgroup and file and I/O status */
-	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
 	__NR_PCG_FLAGS,
 };
 
@@ -64,19 +60,10 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
 	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
 
-/* Cache flag is set only once (at allocation) */
-TESTPCGFLAG(Cache, CACHE)
-CLEARPCGFLAG(Cache, CACHE)
-SETPCGFLAG(Cache, CACHE)
-
 TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 SETPCGFLAG(Used, USED)
 
-SETPCGFLAG(FileMapped, FILE_MAPPED)
-CLEARPCGFLAG(FileMapped, FILE_MAPPED)
-TESTPCGFLAG(FileMapped, FILE_MAPPED)
-
 SETPCGFLAG(Migration, MIGRATION)
 CLEARPCGFLAG(Migration, MIGRATION)
 TESTPCGFLAG(Migration, MIGRATION)
@@ -85,7 +72,7 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
 	/*
 	 * Don't take this lock in IRQ context.
-	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+	 * This lock is for pc->mem_cgroup, USED, MIGRATION
 	 */
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
@@ -95,24 +82,6 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
-static inline void move_lock_page_cgroup(struct page_cgroup *pc,
-	unsigned long *flags)
-{
-	/*
-	 * We know updates to pc->flags of page cache's stats are from both of
-	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
-	 */
-	local_irq_save(*flags);
-	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
-}
-
-static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
-	unsigned long *flags)
-{
-	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
-	local_irq_restore(*flags);
-}
-
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1cdd62a2788a..fd07c4542cee 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -122,7 +122,6 @@ void unlink_anon_vmas(struct vm_area_struct *);
122int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); 122int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
123void anon_vma_moveto_tail(struct vm_area_struct *); 123void anon_vma_moveto_tail(struct vm_area_struct *);
124int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); 124int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
125void __anon_vma_link(struct vm_area_struct *);
126 125
127static inline void anon_vma_merge(struct vm_area_struct *vma, 126static inline void anon_vma_merge(struct vm_area_struct *vma,
128 struct vm_area_struct *next) 127 struct vm_area_struct *next)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e074e1e54f85..0c147a4260a5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1514,7 +1514,7 @@ struct task_struct {
1514#endif 1514#endif
1515#ifdef CONFIG_CPUSETS 1515#ifdef CONFIG_CPUSETS
1516 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1516 nodemask_t mems_allowed; /* Protected by alloc_lock */
1517 int mems_allowed_change_disable; 1517 seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
1518 int cpuset_mem_spread_rotor; 1518 int cpuset_mem_spread_rotor;
1519 int cpuset_slab_spread_rotor; 1519 int cpuset_slab_spread_rotor;
1520#endif 1520#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3e60228e7299..b86b5c20617d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -223,6 +223,7 @@ extern void lru_add_page_tail(struct zone* zone,
223extern void activate_page(struct page *); 223extern void activate_page(struct page *);
224extern void mark_page_accessed(struct page *); 224extern void mark_page_accessed(struct page *);
225extern void lru_add_drain(void); 225extern void lru_add_drain(void);
226extern void lru_add_drain_cpu(int cpu);
226extern int lru_add_drain_all(void); 227extern int lru_add_drain_all(void);
227extern void rotate_reclaimable_page(struct page *page); 228extern void rotate_reclaimable_page(struct page *page);
228extern void deactivate_page(struct page *page); 229extern void deactivate_page(struct page *page);
@@ -329,7 +330,6 @@ extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
-extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);