author     Linus Torvalds <torvalds@linux-foundation.org>   2012-01-12 23:42:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-01-12 23:42:54 -0500
commit     099469502f62fbe0d7e4f0b83a2f22538367f734 (patch)
tree       5229c3818b2e6e09d35026d49314047121130536 /include/linux
parent     7c17d86a8502c2e30c2eea777ed1b830aa3b447b (diff)
parent     35f1526845a9d804206883e19bd257d3dcef758f (diff)
Merge branch 'akpm' (aka "Andrew's patch-bomb, take two")
Andrew explains:

 - various misc stuff

 - Most of the rest of MM: memcg, threaded hugepages, others.

 - cpumask

 - kexec

 - kdump

 - some direct-io performance tweaking

 - radix-tree optimisations

 - new selftests code

   A note on this: often people will develop a new userspace-visible feature
   and will develop userspace code to exercise/test that feature.  Then they
   merge the patch and the selftest code dies.  Sometimes we paste it into the
   changelog.  Sometimes the code gets thrown into Documentation/(!).

   This saddens me.  So this patch creates a bare-bones framework which will
   henceforth allow me to ask people to include their test apps in the kernel
   tree so we can keep them alive.  Then when people enhance or fix the
   feature, I can ask them to update the test app too.

   The infrastructure is terribly trivial at present - let's see how it
   evolves.

 - checkpoint/restart feature work.

   A note on this: this is a project by various mad Russians to perform c/r
   mainly from userspace, with various oddball helper code added into the
   kernel where the need is demonstrated.

   So rather than some large central lump of code, what we have is little
   bits and pieces popping up in various places which either expose something
   new or which permit something which is normally kernel-private to be
   modified.

   The overall project is an ongoing thing.  I've judged that the size and
   scope of the thing means that we're more likely to be successful with it
   if we integrate the support into mainline piecemeal rather than allowing
   it all to develop out-of-tree.

   However I'm less confident than the developers that it will all eventually
   work!  So what I'm asking them to do is to wrap each piece of new code
   inside CONFIG_CHECKPOINT_RESTORE.  So if it all eventually comes to tears
   and the project as a whole fails, it should be a simple matter to go
   through and delete all trace of it.

This lot pretty much wraps up the -rc1 merge for me.

* akpm: (96 commits)
  unlzo: fix input buffer free
  ramoops: update parameters only after successful init
  ramoops: fix use of rounddown_pow_of_two()
  c/r: prctl: add PR_SET_MM codes to set up mm_struct entries
  c/r: procfs: add start_data, end_data, start_brk members to /proc/$pid/stat v4
  c/r: introduce CHECKPOINT_RESTORE symbol
  selftests: new x86 breakpoints selftest
  selftests: new very basic kernel selftests directory
  radix_tree: take radix_tree_path off stack
  radix_tree: remove radix_tree_indirect_to_ptr()
  dio: optimize cache misses in the submission path
  vfs: cache request_queue in struct block_device
  fs/direct-io.c: calculate fs_count correctly in get_more_blocks()
  drivers/parport/parport_pc.c: fix warnings
  panic: don't print redundant backtraces on oops
  sysctl: add the kernel.ns_last_pid control
  kdump: add udev events for memory online/offline
  include/linux/crash_dump.h needs elf.h
  kdump: fix crash_kexec()/smp_send_stop() race in panic()
  kdump: crashk_res init check for /sys/kernel/kexec_crash_size
  ...
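As a rough illustration of the CONFIG_CHECKPOINT_RESTORE wrapping Andrew describes above, a minimal sketch follows; the helper name and body are hypothetical, only the Kconfig symbol comes from this merge:

#ifdef CONFIG_CHECKPOINT_RESTORE
/* c/r-only helper: compiled out entirely when the feature is not selected */
static int example_cr_expose_state(struct task_struct *tsk)
{
        /* expose or modify some normally kernel-private state here */
        return 0;
}
#else
static inline int example_cr_expose_state(struct task_struct *tsk)
{
        return -ENOSYS;
}
#endif

If the project fails, grepping for the symbol and deleting the guarded blocks removes all trace of it, which is the point of the guard.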
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/crash_dump.h   |   1
-rw-r--r--  include/linux/eventpoll.h    |   1
-rw-r--r--  include/linux/fs.h           |  14
-rw-r--r--  include/linux/huge_mm.h      |   2
-rw-r--r--  include/linux/kernel.h       |  13
-rw-r--r--  include/linux/kmsg_dump.h    |   1
-rw-r--r--  include/linux/linkage.h      |   4
-rw-r--r--  include/linux/memcontrol.h   | 105
-rw-r--r--  include/linux/migrate.h      |  23
-rw-r--r--  include/linux/mm_inline.h    |  44
-rw-r--r--  include/linux/mm_types.h     |   9
-rw-r--r--  include/linux/mmzone.h       |  28
-rw-r--r--  include/linux/oom.h          |   2
-rw-r--r--  include/linux/page_cgroup.h  |  46
-rw-r--r--  include/linux/pagevec.h      |  12
-rw-r--r--  include/linux/prctl.h        |  12
-rw-r--r--  include/linux/radix-tree.h   |   3
-rw-r--r--  include/linux/rmap.h         |   4
-rw-r--r--  include/linux/sched.h        |   2
19 files changed, 169 insertions(+), 157 deletions(-)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 5c4abce94ad1..b936763f2236 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,6 +5,7 @@
 #include <linux/kexec.h>
 #include <linux/device.h>
 #include <linux/proc_fs.h>
+#include <linux/elf.h>
 
 #define ELFCORE_ADDR_MAX	(-1ULL)
 #define ELFCORE_ADDR_ERR	(-2ULL)
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index f362733186a5..657ab55beda0 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,7 @@ struct file;
 static inline void eventpoll_init_file(struct file *file)
 {
 	INIT_LIST_HEAD(&file->f_ep_links);
+	INIT_LIST_HEAD(&file->f_tfile_llink);
 }
 
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7aacf31418fe..4bc8169fb5a1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -525,6 +525,7 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
+enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;
@@ -609,9 +610,12 @@ struct address_space_operations {
 			loff_t offset, unsigned long nr_segs);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
 				void **, unsigned long *);
-	/* migrate the contents of a page to the specified target */
+	/*
+	 * migrate the contents of a page to the specified target. If sync
+	 * is false, it must not block.
+	 */
 	int (*migratepage) (struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
@@ -656,6 +660,7 @@ struct address_space {
  * must be enforced here for CRIS, to let the least significant bit
  * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
  */
+struct request_queue;
 
 struct block_device {
 	dev_t			bd_dev;	/* not a kdev_t - it's a search key */
@@ -678,6 +683,7 @@ struct block_device {
 	unsigned		bd_part_count;
 	int			bd_invalidated;
 	struct gendisk *	bd_disk;
+	struct request_queue *	bd_queue;
 	struct list_head	bd_list;
 	/*
 	 * Private data.  You must have bd_claim'ed the block_device
@@ -1001,6 +1007,7 @@ struct file {
 #ifdef CONFIG_EPOLL
 	/* Used by fs/eventpoll.c to link all the hooks to this file */
 	struct list_head	f_ep_links;
+	struct list_head	f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
 	struct address_space	*f_mapping;
 #ifdef CONFIG_DEBUG_WRITECOUNT
@@ -2536,7 +2543,8 @@ extern int generic_check_addressable(unsigned, u64);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
-				struct page *, struct page *);
+				struct page *, struct page *,
+				enum migrate_mode);
 #else
 #define buffer_migrate_page NULL
 #endif
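The ->migratepage() change above is mechanical for most filesystems. As a hedged sketch (the aops name is illustrative; only buffer_migrate_page comes from this header), a buffer-backed filesystem keeps pointing the hook at the stock helper and picks up the new enum migrate_mode argument automatically:

static const struct address_space_operations example_aops = {
#ifdef CONFIG_MIGRATION
        /* MIGRATE_ASYNC callers expect this hook not to block */
        .migratepage    = buffer_migrate_page,
#endif
};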
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a9ace9c32507..1b921299abc4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 					  unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
-			pmd_t *pmd);
+			pmd_t *pmd, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d0a7a0c71661..e8343422240a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -185,16 +185,17 @@ static inline void might_fault(void)
 
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(int state);
-NORET_TYPE void panic(const char * fmt, ...)
-	__attribute__ ((NORET_AND format (printf, 1, 2))) __cold;
+__printf(1, 2)
+void panic(const char *fmt, ...)
+	__noreturn __cold;
 extern void oops_enter(void);
 extern void oops_exit(void);
 void print_oops_end_marker(void);
 extern int oops_may_print(void);
-NORET_TYPE void do_exit(long error_code)
-	ATTRIB_NORET;
-NORET_TYPE void complete_and_exit(struct completion *, long)
-	ATTRIB_NORET;
+void do_exit(long error_code)
+	__noreturn;
+void complete_and_exit(struct completion *, long)
+	__noreturn;
 
 /* Internal, do not use. */
 int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index ee0c952188de..fee66317e071 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,7 +18,6 @@
 enum kmsg_dump_reason {
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
-	KMSG_DUMP_KEXEC,
 	KMSG_DUMP_RESTART,
 	KMSG_DUMP_HALT,
 	KMSG_DUMP_POWEROFF,
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 3f46aedea42f..807f1e533226 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -88,8 +88,4 @@
 
 #endif
 
-#define NORET_TYPE    /**/
-#define ATTRIB_NORET  __attribute__((noreturn))
-#define NORET_AND     noreturn,
-
 #endif
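Call sites converted away from the removed macros read as in the sketch below; the function name is hypothetical, and __noreturn is the existing annotation from <linux/compiler.h> that kernel.h now uses instead:

/* before this merge: */
NORET_TYPE void example_halt(void) ATTRIB_NORET;

/* after: */
void example_halt(void) __noreturn;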
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f944591765eb..4d34356fe644 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -32,13 +32,11 @@ enum mem_cgroup_page_stat_item {
 	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
 };
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
+struct mem_cgroup_reclaim_cookie {
+	struct zone *zone;
+	int priority;
+	unsigned int generation;
+};
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
@@ -56,20 +54,21 @@ extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
 extern void mem_cgroup_commit_charge_swapin(struct page *page,
-					struct mem_cgroup *ptr);
-extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+					struct mem_cgroup *memcg);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
 
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
-extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page,
-				  enum lru_list from, enum lru_list to);
+
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
+struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
+				       enum lru_list);
+void mem_cgroup_lru_del_list(struct page *, enum lru_list);
+void mem_cgroup_lru_del(struct page *);
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
+					 enum lru_list, enum lru_list);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -102,10 +101,15 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
 
 extern int
 mem_cgroup_prepare_migration(struct page *page,
-	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
+	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
 /*
  * For memory reclaim.
  */
@@ -122,7 +126,10 @@ struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+					struct page *newpage);
 
+extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -157,7 +164,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
 
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
 
 #ifdef CONFIG_DEBUG_VM
@@ -180,17 +187,17 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 }
 
 static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
 	return 0;
 }
 
 static inline void mem_cgroup_commit_charge_swapin(struct page *page,
-					struct mem_cgroup *ptr)
+					struct mem_cgroup *memcg)
 {
 }
 
-static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
 {
 }
 
@@ -210,33 +217,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
-{
-}
-
-static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+						    struct mem_cgroup *memcg)
 {
-	return ;
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
+						     struct page *page,
+						     enum lru_list lru)
 {
-	return ;
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 {
-	return ;
 }
 
-static inline void mem_cgroup_del_lru(struct page *page)
+static inline void mem_cgroup_lru_del(struct page *page)
 {
-	return ;
 }
 
-static inline void
-mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
+static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+						       struct page *page,
+						       enum lru_list from,
+						       enum lru_list to)
 {
+	return &zone->lruvec;
 }
 
 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -269,7 +276,7 @@ static inline struct cgroup_subsys_state
 
 static inline int
 mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
-	struct mem_cgroup **ptr, gfp_t gfp_mask)
+	struct mem_cgroup **memcgp, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -279,6 +286,19 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
 }
 
+static inline struct mem_cgroup *
+mem_cgroup_iter(struct mem_cgroup *root,
+		struct mem_cgroup *prev,
+		struct mem_cgroup_reclaim_cookie *reclaim)
+{
+	return NULL;
+}
+
+static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
+		struct mem_cgroup *prev)
+{
+}
+
 static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
 {
 	return 0;
@@ -360,8 +380,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 	return 0;
 }
 
-static inline void mem_cgroup_split_huge_fixup(struct page *head,
-					       struct page *tail)
+static inline void mem_cgroup_split_huge_fixup(struct page *head)
 {
 }
 
@@ -369,6 +388,14 @@ static inline
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+				struct page *newpage)
+{
+}
+
+static inline void mem_cgroup_reset_owner(struct page *page)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
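A hedged sketch of how reclaim code might use the new mem_cgroup_iter()/mem_cgroup_iter_break() pair declared above; the variable names and the early-exit test are illustrative, not from this merge:

static void example_walk_hierarchy(struct mem_cgroup *root, struct zone *zone)
{
        struct mem_cgroup_reclaim_cookie reclaim = {
                .zone = zone,
                .priority = 0,
        };
        struct mem_cgroup *memcg;

        for (memcg = mem_cgroup_iter(root, NULL, &reclaim);
             memcg;
             memcg = mem_cgroup_iter(root, memcg, &reclaim)) {
                /* ... scan this memcg's LRU lists ... */
                if (0 /* e.g. enough pages reclaimed */) {
                        /* tell the iterator to drop its reference early */
                        mem_cgroup_iter_break(root, memcg);
                        break;
                }
        }
}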
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e39aeecfe9a2..eaf867412f7a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *	on most operations but not ->writepage as the potential stall time
+ *	is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+	MIGRATE_ASYNC,
+	MIGRATE_SYNC_LIGHT,
+	MIGRATE_SYNC,
+};
+
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
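A hedged sketch of a migrate_pages() caller under the new signature; the allocation callback and the page-list handling are illustrative only:

static struct page *example_new_page(struct page *page, unsigned long private,
                                     int **result)
{
        return alloc_page(GFP_HIGHUSER_MOVABLE);
}

static int example_migrate(struct list_head *pagelist, bool cheap_only)
{
        /* MIGRATE_ASYNC never blocks; MIGRATE_SYNC may wait on writeback */
        enum migrate_mode mode = cheap_only ? MIGRATE_ASYNC : MIGRATE_SYNC;

        return migrate_pages(pagelist, example_new_page, 0, false, mode);
}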
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8f7d24712dc1..227fd3e9a9c9 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,26 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
-		       struct list_head *head)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
-	list_add(&page->lru, head);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
-	mem_cgroup_add_lru_list(page, l);
-}
+	struct lruvec *lruvec;
 
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
-{
-	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	list_add(&page->lru, &lruvec->lists[lru]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
 }
 
 static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
+	mem_cgroup_lru_del_list(page, lru);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
@@ -59,24 +54,28 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 	return LRU_INACTIVE_ANON;
 }
 
-static inline void
-del_page_from_lru(struct zone *zone, struct page *page)
+/**
+ * page_off_lru - which LRU list was page on? clearing its lru flags.
+ * @page: the page to test
+ *
+ * Returns the LRU list a page was on, as an index into the array of LRU
+ * lists; and clears its Unevictable or Active flags, ready for freeing.
+ */
+static inline enum lru_list page_off_lru(struct page *page)
 {
-	enum lru_list l;
+	enum lru_list lru;
 
-	list_del(&page->lru);
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
-		l = LRU_UNEVICTABLE;
+		lru = LRU_UNEVICTABLE;
 	} else {
-		l = page_lru_base_type(page);
+		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			__ClearPageActive(page);
-			l += LRU_ACTIVE;
+			lru += LRU_ACTIVE;
 		}
 	}
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
+	return lru;
 }
 
 /**
@@ -97,7 +96,6 @@ static inline enum lru_list page_lru(struct page *page)
 		if (PageActive(page))
 			lru += LRU_ACTIVE;
 	}
-
 	return lru;
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5b42f1b34eb7..3cc3062b3767 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -151,12 +151,11 @@ struct page {
 #endif
 }
 /*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
  */
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
-	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+	__aligned(2 * sizeof(unsigned long))
 #endif
 ;
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca6ca92418a6..650ba2fb3301 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,25 +140,29 @@ enum lru_list {
 	NR_LRU_LISTS
 };
 
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
 
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
 {
-	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
 {
-	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
 {
-	return (l == LRU_UNEVICTABLE);
+	return (lru == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -173,6 +177,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -364,10 +370,8 @@ struct zone {
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
 
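With the per-zone LRU heads collected into zone->lruvec, list walkers index the array directly. A hedged sketch (the counting helper is illustrative, not from this merge):

static unsigned long example_count_lru(struct zone *zone, enum lru_list lru)
{
        struct page *page;
        unsigned long nr = 0;

        spin_lock_irq(&zone->lru_lock);
        list_for_each_entry(page, &zone->lruvec.lists[lru], lru)
                nr++;
        spin_unlock_irq(&zone->lru_lock);

        return nr;
}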
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6f9d04a85336..552fba9c7d5a 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,7 +43,7 @@ enum oom_constraint {
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 			const nodemask_t *nodemask, unsigned long totalpages);
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 961ecc7d30bc..a2d11771c84b 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -10,8 +10,6 @@ enum {
 	/* flags for mem_cgroup and file and I/O status */
 	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
 	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
-	/* No lock in page_cgroup */
-	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
 	__NR_PCG_FLAGS,
 };
 
@@ -31,7 +29,6 @@ enum {
 struct page_cgroup {
 	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
-	struct list_head lru; /* per cgroup LRU list */
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
@@ -76,12 +73,6 @@ TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 SETPCGFLAG(Used, USED)
 
-SETPCGFLAG(AcctLRU, ACCT_LRU)
-CLEARPCGFLAG(AcctLRU, ACCT_LRU)
-TESTPCGFLAG(AcctLRU, ACCT_LRU)
-TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
-
-
 SETPCGFLAG(FileMapped, FILE_MAPPED)
 CLEARPCGFLAG(FileMapped, FILE_MAPPED)
 TESTPCGFLAG(FileMapped, FILE_MAPPED)
@@ -122,39 +113,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
 	local_irq_restore(*flags);
 }
 
-#ifdef CONFIG_SPARSEMEM
-#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
-#else
-#define PCG_ARRAYID_WIDTH	NODES_SHIFT
-#endif
-
-#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
-#error Not enough space left in pc->flags to store page_cgroup array IDs
-#endif
-
-/* pc->flags: ARRAY-ID | FLAGS */
-
-#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)
-
-#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
-/*
- * Zero the shift count for non-existent fields, to prevent compiler
- * warnings and ensure references are optimized away.
- */
-#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
-
-static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
-					    unsigned long id)
-{
-	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
-	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
-}
-
-static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
-{
-	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
-}
-
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
@@ -183,7 +141,7 @@ static inline void __init page_cgroup_init_flatmem(void)
 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 					unsigned short old, unsigned short new);
 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
-extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
 extern void swap_cgroup_swapoff(int type);
 #else
@@ -195,7 +153,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
 }
 
 static inline
-unsigned short lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
 {
 	return 0;
 }
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index ed17024d2ebe..2aa12b8499c0 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -21,8 +21,7 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
-void pagevec_strip(struct pagevec *pvec);
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -59,7 +58,6 @@ static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
 	return pagevec_space(pvec);
 }
 
-
 static inline void pagevec_release(struct pagevec *pvec)
 {
 	if (pagevec_count(pvec))
@@ -68,22 +66,22 @@ static inline void pagevec_release(struct pagevec *pvec)
 
 static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
+	__pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
 }
 
 static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
+	__pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
 }
 
 static inline void __pagevec_lru_add_file(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
+	__pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
 }
 
 static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
+	__pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
 }
 
 static inline void pagevec_lru_add_file(struct pagevec *pvec)
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index a3baeb2c2161..7ddc7f1b480f 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -102,4 +102,16 @@
 
 #define PR_MCE_KILL_GET 34
 
+/*
+ * Tune up process memory map specifics.
+ */
+#define PR_SET_MM		35
+# define PR_SET_MM_START_CODE	1
+# define PR_SET_MM_END_CODE	2
+# define PR_SET_MM_START_DATA	3
+# define PR_SET_MM_END_DATA	4
+# define PR_SET_MM_START_STACK	5
+# define PR_SET_MM_START_BRK	6
+# define PR_SET_MM_BRK		7
+
 #endif /* _LINUX_PRCTL_H */
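From userspace the new interface is a plain prctl() call. A hedged sketch follows; the constants are the ones defined above, the address is made up, and per the commit description the kernel side is a privileged checkpoint/restore helper (so expect it to fail without the right capability):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MM
# define PR_SET_MM              35
# define PR_SET_MM_START_BRK    6
#endif

int main(void)
{
        unsigned long new_start_brk = 0x10000000UL;     /* example address */

        if (prctl(PR_SET_MM, PR_SET_MM_START_BRK, new_start_brk, 0, 0))
                perror("prctl(PR_SET_MM)");
        return 0;
}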
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 9d4539c52e53..07e360b1b282 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -49,9 +49,6 @@
 #define RADIX_TREE_EXCEPTIONAL_ENTRY	2
 #define RADIX_TREE_EXCEPTIONAL_SHIFT	2
 
-#define radix_tree_indirect_to_ptr(ptr) \
-	radix_tree_indirect_to_ptr((void __force *)(ptr))
-
 static inline int radix_tree_is_indirect_ptr(void *ptr)
 {
 	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1afb9954bbf1..1cdd62a2788a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ static inline void page_dup_rmap(struct page *page)
  * Called from mm/vmscan.c to handle paging out
  */
 int page_referenced(struct page *, int is_locked,
-			struct mem_cgroup *cnt, unsigned long *vm_flags);
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
@@ -236,7 +236,7 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 #define anon_vma_link(vma)	do {} while (0)
 
 static inline int page_referenced(struct page *page, int is_locked,
-				  struct mem_cgroup *cnt,
+				  struct mem_cgroup *memcg,
 				  unsigned long *vm_flags)
 {
 	*vm_flags = 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21cd0303af51..4032ec1cf836 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2275,7 +2275,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
 extern void exit_itimers(struct signal_struct *);
 extern void flush_itimer_signals(void);
 
-extern NORET_TYPE void do_group_exit(int);
+extern void do_group_exit(int);
 
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);