author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:42:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:42:54 -0500
commit     099469502f62fbe0d7e4f0b83a2f22538367f734
tree       5229c3818b2e6e09d35026d49314047121130536  /include/linux
parent     7c17d86a8502c2e30c2eea777ed1b830aa3b447b
parent     35f1526845a9d804206883e19bd257d3dcef758f
Merge branch 'akpm' (aka "Andrew's patch-bomb, take two")
Andrew explains:
- various misc stuff
- Most of the rest of MM: memcg, threaded hugepages, others.
- cpumask
- kexec
- kdump
- some direct-io performance tweaking
- radix-tree optimisations
- new selftests code
A note on this: often people will develop a new userspace-visible
feature and will develop userspace code to exercise/test that
feature. Then they merge the patch and the selftest code dies.
Sometimes we paste it into the changelog. Sometimes the code gets
thrown into Documentation/(!).
This saddens me. So this patch creates a bare-bones framework which
will henceforth allow me to ask people to include their test apps in
the kernel tree so we can keep them alive. Then when people enhance
or fix the feature, I can ask them to update the test app too.
The infrastructure is terribly trivial at present - let's see how it
evolves. (A rough sketch of such a test app follows this message.)
- checkpoint/restart feature work.
A note on this: this is a project by various mad Russians to perform
c/r mainly from userspace, with various oddball helper code added
into the kernel where the need is demonstrated.
So rather than some large central lump of code, what we have is
little bits and pieces popping up in various places which either
expose something new or which permit something which is normally
kernel-private to be modified.
The overall project is an ongoing thing. I've judged that the size
and scope of the thing means that we're more likely to be successful
with it if we integrate the support into mainline piecemeal rather
than allowing it all to develop out-of-tree.
However I'm less confident than the developers that it will all
eventually work! So what I'm asking them to do is to wrap each piece
of new code inside CONFIG_CHECKPOINT_RESTORE. So if it all
eventually comes to tears and the project as a whole fails, it should
be a simple matter to go through and delete all trace of it.
This lot pretty much wraps up the -rc1 merge for me.
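As a rough illustration of what a test app under the new selftests directory might look like (the layout and pass/fail reporting convention here are invented for illustration, not part of this merge), a selftest usually boils down to a small standalone program that exercises one interface and prints a result:

	/* Hypothetical minimal selftest; real tests live under tools/testing/selftests/. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	/* Exercise one interface end to end: fork a child and reap it. */
	static int run_test(void)
	{
		pid_t pid = fork();

		if (pid < 0)
			return 1;	/* could not even start the test */
		if (pid == 0)
			_exit(0);	/* child side: nothing to do */
		if (waitpid(pid, NULL, 0) != pid)
			return 1;	/* parent failed to reap the child */
		return 0;
	}

	int main(void)
	{
		if (run_test()) {
			printf("[FAIL]\n");
			return EXIT_FAILURE;
		}
		printf("[OK]\n");
		return EXIT_SUCCESS;
	}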
* akpm: (96 commits)
unlzo: fix input buffer free
ramoops: update parameters only after successful init
ramoops: fix use of rounddown_pow_of_two()
c/r: prctl: add PR_SET_MM codes to set up mm_struct entries
c/r: procfs: add start_data, end_data, start_brk members to /proc/$pid/stat v4
c/r: introduce CHECKPOINT_RESTORE symbol
selftests: new x86 breakpoints selftest
selftests: new very basic kernel selftests directory
radix_tree: take radix_tree_path off stack
radix_tree: remove radix_tree_indirect_to_ptr()
dio: optimize cache misses in the submission path
vfs: cache request_queue in struct block_device
fs/direct-io.c: calculate fs_count correctly in get_more_blocks()
drivers/parport/parport_pc.c: fix warnings
panic: don't print redundant backtraces on oops
sysctl: add the kernel.ns_last_pid control
kdump: add udev events for memory online/offline
include/linux/crash_dump.h needs elf.h
kdump: fix crash_kexec()/smp_send_stop() race in panic()
kdump: crashk_res init check for /sys/kernel/kexec_crash_size
...
Diffstat (limited to 'include/linux')
 include/linux/crash_dump.h   |   1
 include/linux/eventpoll.h    |   1
 include/linux/fs.h           |  14
 include/linux/huge_mm.h      |   2
 include/linux/kernel.h       |  13
 include/linux/kmsg_dump.h    |   1
 include/linux/linkage.h      |   4
 include/linux/memcontrol.h   | 105
 include/linux/migrate.h      |  23
 include/linux/mm_inline.h    |  44
 include/linux/mm_types.h     |   9
 include/linux/mmzone.h       |  28
 include/linux/oom.h          |   2
 include/linux/page_cgroup.h  |  46
 include/linux/pagevec.h      |  12
 include/linux/prctl.h        |  12
 include/linux/radix-tree.h   |   3
 include/linux/rmap.h         |   4
 include/linux/sched.h        |   2
 19 files changed, 169 insertions(+), 157 deletions(-)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 5c4abce94ad1..b936763f2236 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,6 +5,7 @@
 #include <linux/kexec.h>
 #include <linux/device.h>
 #include <linux/proc_fs.h>
+#include <linux/elf.h>
 
 #define ELFCORE_ADDR_MAX	(-1ULL)
 #define ELFCORE_ADDR_ERR	(-2ULL)
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index f362733186a5..657ab55beda0 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,7 @@ struct file;
 static inline void eventpoll_init_file(struct file *file)
 {
 	INIT_LIST_HEAD(&file->f_ep_links);
+	INIT_LIST_HEAD(&file->f_tfile_llink);
 }
 
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7aacf31418fe..4bc8169fb5a1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -525,6 +525,7 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
+enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;
@@ -609,9 +610,12 @@ struct address_space_operations {
 			loff_t offset, unsigned long nr_segs);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
 						void **, unsigned long *);
-	/* migrate the contents of a page to the specified target */
+	/*
+	 * migrate the contents of a page to the specified target. If sync
+	 * is false, it must not block.
+	 */
 	int (*migratepage) (struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
@@ -656,6 +660,7 @@ struct address_space {
  * must be enforced here for CRIS, to let the least significant bit
  * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
  */
+struct request_queue;
 
 struct block_device {
 	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
@@ -678,6 +683,7 @@ struct block_device {
 	unsigned		bd_part_count;
 	int			bd_invalidated;
 	struct gendisk *	bd_disk;
+	struct request_queue *  bd_queue;
 	struct list_head	bd_list;
 	/*
 	 * Private data.  You must have bd_claim'ed the block_device
@@ -1001,6 +1007,7 @@ struct file {
 #ifdef CONFIG_EPOLL
 	/* Used by fs/eventpoll.c to link all the hooks to this file */
 	struct list_head	f_ep_links;
+	struct list_head	f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
 	struct address_space	*f_mapping;
 #ifdef CONFIG_DEBUG_WRITECOUNT
@@ -2536,7 +2543,8 @@ extern int generic_check_addressable(unsigned, u64);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
-				struct page *, struct page *);
+				struct page *, struct page *,
+				enum migrate_mode);
 #else
 #define buffer_migrate_page NULL
 #endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a9ace9c32507..1b921299abc4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 			       unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
-			pmd_t *pmd);
+			pmd_t *pmd, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d0a7a0c71661..e8343422240a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -185,16 +185,17 @@ static inline void might_fault(void)
 
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(int state);
-NORET_TYPE void panic(const char * fmt, ...)
-	__attribute__ ((NORET_AND format (printf, 1, 2))) __cold;
+__printf(1, 2)
+void panic(const char *fmt, ...)
+	__noreturn __cold;
 extern void oops_enter(void);
 extern void oops_exit(void);
 void print_oops_end_marker(void);
 extern int oops_may_print(void);
-NORET_TYPE void do_exit(long error_code)
-	ATTRIB_NORET;
-NORET_TYPE void complete_and_exit(struct completion *, long)
-	ATTRIB_NORET;
+void do_exit(long error_code)
+	__noreturn;
+void complete_and_exit(struct completion *, long)
+	__noreturn;
 
 /* Internal, do not use. */
 int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index ee0c952188de..fee66317e071 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,7 +18,6 @@
 enum kmsg_dump_reason {
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
-	KMSG_DUMP_KEXEC,
 	KMSG_DUMP_RESTART,
 	KMSG_DUMP_HALT,
 	KMSG_DUMP_POWEROFF,
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 3f46aedea42f..807f1e533226 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -88,8 +88,4 @@
 
 #endif
 
-#define NORET_TYPE    /**/
-#define ATTRIB_NORET  __attribute__((noreturn))
-#define NORET_AND     noreturn,
-
 #endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f944591765eb..4d34356fe644 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -32,13 +32,11 @@ enum mem_cgroup_page_stat_item {
 	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
 };
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
+struct mem_cgroup_reclaim_cookie {
+	struct zone *zone;
+	int priority;
+	unsigned int generation;
+};
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
@@ -56,20 +54,21 @@ extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
 extern void mem_cgroup_commit_charge_swapin(struct page *page,
-					struct mem_cgroup *ptr);
-extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+					struct mem_cgroup *memcg);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
 
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
-extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page,
-				  enum lru_list from, enum lru_list to);
+
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
+struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
+				       enum lru_list);
+void mem_cgroup_lru_del_list(struct page *, enum lru_list);
+void mem_cgroup_lru_del(struct page *);
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
+					 enum lru_list, enum lru_list);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -102,10 +101,15 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
 
 extern int
 mem_cgroup_prepare_migration(struct page *page,
-	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
+	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
 /*
  * For memory reclaim.
  */
@@ -122,7 +126,10 @@ struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+					struct page *newpage);
 
+extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -157,7 +164,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
 
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
 
 #ifdef CONFIG_DEBUG_VM
@@ -180,17 +187,17 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 }
 
 static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
 	return 0;
 }
 
 static inline void mem_cgroup_commit_charge_swapin(struct page *page,
-					  struct mem_cgroup *ptr)
+					  struct mem_cgroup *memcg)
 {
 }
 
-static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
 {
 }
 
@@ -210,33 +217,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
-{
-}
-
-static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+						    struct mem_cgroup *memcg)
 {
-	return ;
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
+						     struct page *page,
+						     enum lru_list lru)
 {
-	return ;
+	return &zone->lruvec;
 }
 
-static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 {
-	return ;
 }
 
-static inline void mem_cgroup_del_lru(struct page *page)
+static inline void mem_cgroup_lru_del(struct page *page)
 {
-	return ;
 }
 
-static inline void
-mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
+static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+						       struct page *page,
+						       enum lru_list from,
+						       enum lru_list to)
 {
+	return &zone->lruvec;
 }
 
 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -269,7 +276,7 @@ static inline struct cgroup_subsys_state
 
 static inline int
 mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
-	struct mem_cgroup **ptr, gfp_t gfp_mask)
+	struct mem_cgroup **memcgp, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -279,6 +286,19 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
 }
 
+static inline struct mem_cgroup *
+mem_cgroup_iter(struct mem_cgroup *root,
+		struct mem_cgroup *prev,
+		struct mem_cgroup_reclaim_cookie *reclaim)
+{
+	return NULL;
+}
+
+static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
+					 struct mem_cgroup *prev)
+{
+}
+
 static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
 {
 	return 0;
@@ -360,8 +380,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 	return 0;
 }
 
-static inline void mem_cgroup_split_huge_fixup(struct page *head,
-						struct page *tail)
+static inline void mem_cgroup_split_huge_fixup(struct page *head)
 {
 }
 
@@ -369,6 +388,14 @@ static inline
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+				struct page *newpage)
+{
+}
+
+static inline void mem_cgroup_reset_owner(struct page *page)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e39aeecfe9a2..eaf867412f7a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *	on most operations but not ->writepage as the potential stall time
+ *	is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+	MIGRATE_ASYNC,
+	MIGRATE_SYNC_LIGHT,
+	MIGRATE_SYNC,
+};
+
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
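The updated comment in fs.h above says the ->migratepage callback must not block when invoked with a non-blocking mode. A rough, hypothetical sketch of a filesystem callback honouring the new enum migrate_mode argument (example_migratepage is an invented name; real filesystems typically just point this at migrate_page() or buffer_migrate_page()):

	/*
	 * Hypothetical ->migratepage implementation; illustrates the new
	 * enum migrate_mode argument only, not any real filesystem.
	 */
	static int example_migratepage(struct address_space *mapping,
				       struct page *newpage, struct page *page,
				       enum migrate_mode mode)
	{
		/*
		 * MIGRATE_ASYNC callers must never be blocked; back off with
		 * -EAGAIN if this page would need writeback or lock waits.
		 */
		if (mode == MIGRATE_ASYNC && PageDirty(page))
			return -EAGAIN;

		/* Otherwise fall back to the generic helper. */
		return migrate_page(mapping, newpage, page, mode);
	}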
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8f7d24712dc1..227fd3e9a9c9 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,26 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
-		       struct list_head *head)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
-	list_add(&page->lru, head);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
-	mem_cgroup_add_lru_list(page, l);
-}
+	struct lruvec *lruvec;
 
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
-{
-	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	list_add(&page->lru, &lruvec->lists[lru]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
 }
 
 static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
+	mem_cgroup_lru_del_list(page, lru);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
@@ -59,24 +54,28 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 	return LRU_INACTIVE_ANON;
 }
 
-static inline void
-del_page_from_lru(struct zone *zone, struct page *page)
+/**
+ * page_off_lru - which LRU list was page on? clearing its lru flags.
+ * @page: the page to test
+ *
+ * Returns the LRU list a page was on, as an index into the array of LRU
+ * lists; and clears its Unevictable or Active flags, ready for freeing.
+ */
+static inline enum lru_list page_off_lru(struct page *page)
 {
-	enum lru_list l;
+	enum lru_list lru;
 
-	list_del(&page->lru);
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
-		l = LRU_UNEVICTABLE;
+		lru = LRU_UNEVICTABLE;
 	} else {
-		l = page_lru_base_type(page);
+		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			__ClearPageActive(page);
-			l += LRU_ACTIVE;
+			lru += LRU_ACTIVE;
 		}
 	}
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
+	return lru;
 }
 
 /**
@@ -97,7 +96,6 @@ static inline enum lru_list page_lru(struct page *page)
 		if (PageActive(page))
 			lru += LRU_ACTIVE;
 	}
-
 	return lru;
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5b42f1b34eb7..3cc3062b3767 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -151,12 +151,11 @@ struct page {
 #endif
 }
 /*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
  */
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
-	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+	__aligned(2 * sizeof(unsigned long))
 #endif
 ;
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca6ca92418a6..650ba2fb3301 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,25 +140,29 @@ enum lru_list {
 	NR_LRU_LISTS
 };
 
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
 
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
 {
-	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
 {
-	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
 {
-	return (l == LRU_UNEVICTABLE);
+	return (lru == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -173,6 +177,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -364,10 +370,8 @@ struct zone {
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
 
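With struct lruvec replacing the old open-coded zone->lru[] array, code that walks a zone's LRU lists now indexes zone->lruvec.lists[] directly. A small illustrative helper, assuming the caller holds zone->lru_lock (the function name is made up for this sketch):

	/* Hypothetical helper: count all pages on a zone's LRU lists. */
	static unsigned long count_zone_lru_pages(struct zone *zone)
	{
		unsigned long nr = 0;
		struct page *page;
		enum lru_list lru;

		/* zone->lru_lock is assumed held by the caller. */
		for_each_lru(lru)
			list_for_each_entry(page, &zone->lruvec.lists[lru], lru)
				nr++;

		return nr;
	}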
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6f9d04a85336..552fba9c7d5a 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,7 +43,7 @@ enum oom_constraint {
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 		const nodemask_t *nodemask, unsigned long totalpages);
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 961ecc7d30bc..a2d11771c84b 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -10,8 +10,6 @@ enum {
 	/* flags for mem_cgroup and file and I/O status */
 	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
 	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
-	/* No lock in page_cgroup */
-	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
 	__NR_PCG_FLAGS,
 };
 
@@ -31,7 +29,6 @@ enum {
 struct page_cgroup {
 	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
-	struct list_head lru;		/* per cgroup LRU list */
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
@@ -76,12 +73,6 @@ TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 SETPCGFLAG(Used, USED)
 
-SETPCGFLAG(AcctLRU, ACCT_LRU)
-CLEARPCGFLAG(AcctLRU, ACCT_LRU)
-TESTPCGFLAG(AcctLRU, ACCT_LRU)
-TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
-
-
 SETPCGFLAG(FileMapped, FILE_MAPPED)
 CLEARPCGFLAG(FileMapped, FILE_MAPPED)
 TESTPCGFLAG(FileMapped, FILE_MAPPED)
@@ -122,39 +113,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
 	local_irq_restore(*flags);
 }
 
-#ifdef CONFIG_SPARSEMEM
-#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
-#else
-#define PCG_ARRAYID_WIDTH	NODES_SHIFT
-#endif
-
-#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
-#error Not enough space left in pc->flags to store page_cgroup array IDs
-#endif
-
-/* pc->flags: ARRAY-ID | FLAGS */
-
-#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)
-
-#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
-/*
- * Zero the shift count for non-existent fields, to prevent compiler
- * warnings and ensure references are optimized away.
- */
-#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
-
-static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
-					    unsigned long id)
-{
-	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
-	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
-}
-
-static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
-{
-	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
-}
-
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
@@ -183,7 +141,7 @@ static inline void __init page_cgroup_init_flatmem(void)
 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 					unsigned short old, unsigned short new);
 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
-extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
 extern void swap_cgroup_swapoff(int type);
 #else
@@ -195,7 +153,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
 }
 
 static inline
-unsigned short lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
 {
 	return 0;
 }
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index ed17024d2ebe..2aa12b8499c0 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -21,8 +21,7 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
-void pagevec_strip(struct pagevec *pvec);
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -59,7 +58,6 @@ static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
 	return pagevec_space(pvec);
 }
 
-
 static inline void pagevec_release(struct pagevec *pvec)
 {
 	if (pagevec_count(pvec))
@@ -68,22 +66,22 @@ static inline void pagevec_release(struct pagevec *pvec)
 
 static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
+	__pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
 }
 
 static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
+	__pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
 }
 
 static inline void __pagevec_lru_add_file(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
+	__pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
 }
 
 static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
 {
-	____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
+	__pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
 }
 
 static inline void pagevec_lru_add_file(struct pagevec *pvec)
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index a3baeb2c2161..7ddc7f1b480f 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -102,4 +102,16 @@
 
 #define PR_MCE_KILL_GET 34
 
+/*
+ * Tune up process memory map specifics.
+ */
+#define PR_SET_MM		35
+# define PR_SET_MM_START_CODE	1
+# define PR_SET_MM_END_CODE	2
+# define PR_SET_MM_START_DATA	3
+# define PR_SET_MM_END_DATA	4
+# define PR_SET_MM_START_STACK	5
+# define PR_SET_MM_START_BRK	6
+# define PR_SET_MM_BRK		7
+
 #endif /* _LINUX_PRCTL_H */
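The new PR_SET_MM codes are aimed at userspace checkpoint/restore tools that rewrite their own mm_struct fields after reconstructing a process image. A hedged userspace sketch of such a call (names and values are illustrative; it needs sufficient privileges and a kernel built with CONFIG_CHECKPOINT_RESTORE):

	/* Hypothetical restore helper using the new prctl codes. */
	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_MM
	# define PR_SET_MM		35
	# define PR_SET_MM_START_BRK	6
	# define PR_SET_MM_BRK		7
	#endif

	static int restore_brk(unsigned long start_brk, unsigned long brk)
	{
		if (prctl(PR_SET_MM, PR_SET_MM_START_BRK, start_brk, 0, 0)) {
			perror("PR_SET_MM_START_BRK");
			return -1;
		}
		if (prctl(PR_SET_MM, PR_SET_MM_BRK, brk, 0, 0)) {
			perror("PR_SET_MM_BRK");
			return -1;
		}
		return 0;
	}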
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 9d4539c52e53..07e360b1b282 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -49,9 +49,6 @@
 #define RADIX_TREE_EXCEPTIONAL_ENTRY	2
 #define RADIX_TREE_EXCEPTIONAL_SHIFT	2
 
-#define radix_tree_indirect_to_ptr(ptr) \
-	radix_tree_indirect_to_ptr((void __force *)(ptr))
-
 static inline int radix_tree_is_indirect_ptr(void *ptr)
 {
 	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1afb9954bbf1..1cdd62a2788a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ static inline void page_dup_rmap(struct page *page)
 * Called from mm/vmscan.c to handle paging out
 */
 int page_referenced(struct page *, int is_locked,
-			struct mem_cgroup *cnt, unsigned long *vm_flags);
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
 int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
@@ -236,7 +236,7 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 #define anon_vma_link(vma)	do {} while (0)
 
 static inline int page_referenced(struct page *page, int is_locked,
-				  struct mem_cgroup *cnt,
+				  struct mem_cgroup *memcg,
 				  unsigned long *vm_flags)
 {
	*vm_flags = 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21cd0303af51..4032ec1cf836 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2275,7 +2275,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
 extern void exit_itimers(struct signal_struct *);
 extern void flush_itimer_signals(void);
 
-extern NORET_TYPE void do_group_exit(int);
+extern void do_group_exit(int);
 
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);