author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 22:25:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 22:25:39 -0400
commit		ac694dbdbc403c00e2c14d10bc7b8412cc378259 (patch)
tree		e37328cfbeaf43716dd5914cad9179e57e84df76 /include/linux
parent		a40a1d3d0a2fd613fdec6d89d3c053268ced76ed (diff)
parent		437ea90cc3afdca5229b41c6b1d38c4842756cb9 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneeded devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: fix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/backing-dev.h      |   3
-rw-r--r--  include/linux/blk_types.h        |   2
-rw-r--r--  include/linux/cgroup_subsys.h    |   8
-rw-r--r--  include/linux/compaction.h       |   4
-rw-r--r--  include/linux/fs.h               |   8
-rw-r--r--  include/linux/gfp.h              |  13
-rw-r--r--  include/linux/highmem.h          |   7
-rw-r--r--  include/linux/hugetlb.h          |  50
-rw-r--r--  include/linux/hugetlb_cgroup.h   | 126
-rw-r--r--  include/linux/memcontrol.h       |  34
-rw-r--r--  include/linux/migrate.h          |   4
-rw-r--r--  include/linux/mm.h               |  31
-rw-r--r--  include/linux/mm_types.h         |   9
-rw-r--r--  include/linux/mmzone.h           |  26
-rw-r--r--  include/linux/nfs_fs.h           |   4
-rw-r--r--  include/linux/oom.h              |  21
-rw-r--r--  include/linux/page-flags.h       |  29
-rw-r--r--  include/linux/page-isolation.h   |  13
-rw-r--r--  include/linux/page_cgroup.h      |  10
-rw-r--r--  include/linux/pagemap.h          |   5
-rw-r--r--  include/linux/sched.h            |   9
-rw-r--r--  include/linux/shrinker.h         |   1
-rw-r--r--  include/linux/skbuff.h           |  80
-rw-r--r--  include/linux/sunrpc/xprt.h      |   3
-rw-r--r--  include/linux/swap.h             |  14
-rw-r--r--  include/linux/vm_event_item.h    |   1
-rw-r--r--  include/linux/vmstat.h           |   5
-rw-r--r--  include/linux/writeback.h        |   5
28 files changed, 456 insertions(+), 69 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 489de625cd2..c97c6b9cd38 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,6 +17,7 @@
 #include <linux/timer.h>
 #include <linux/writeback.h>
 #include <linux/atomic.h>
+#include <linux/sysctl.h>
 
 struct page;
 struct device;
@@ -304,6 +305,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
 void set_bdi_congested(struct backing_dev_info *bdi, int sync);
 long congestion_wait(int sync, long timeout);
 long wait_iff_congested(struct zone *zone, int sync, long timeout);
+int pdflush_proc_obsolete(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
 
 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
 {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0edb65dd8ed..7b7ac9ccec7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -160,6 +160,7 @@ enum rq_flag_bits {
 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+	__REQ_KERNEL,		/* direct IO to kernel pages */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -201,5 +202,6 @@ enum rq_flag_bits {
 #define REQ_IO_STAT		(1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE		(1 << __REQ_SECURE)
+#define REQ_KERNEL		(1 << __REQ_KERNEL)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 0bd390ce98b..dfae957398c 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -31,7 +31,7 @@ SUBSYS(cpuacct)
 
 /* */
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 SUBSYS(mem_cgroup)
 #endif
 
@@ -72,3 +72,9 @@ SUBSYS(net_prio)
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_HUGETLB
+SUBSYS(hugetlb)
+#endif
+
+/* */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d6..133ddcf8339 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -58,7 +58,7 @@ static inline bool compaction_deferred(struct zone *zone, int order)
 	if (++zone->compact_considered > defer_limit)
 		zone->compact_considered = defer_limit;
 
-	return zone->compact_considered < (1UL << zone->compact_defer_shift);
+	return zone->compact_considered < defer_limit;
 }
 
 #else
@@ -85,7 +85,7 @@ static inline void defer_compaction(struct zone *zone, int order)
 
 static inline bool compaction_deferred(struct zone *zone, int order)
 {
-	return 1;
+	return true;
 }
 
 #endif /* CONFIG_COMPACTION */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b178f9e91e2..d7eed5b98ae 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -165,6 +165,8 @@ struct inodes_stat_t {
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
+#define KERNEL_READ		(READ|REQ_KERNEL)
+#define KERNEL_WRITE		(WRITE|REQ_KERNEL)
 
 #define READ_SYNC		(READ | REQ_SYNC)
 #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -427,6 +429,7 @@ struct kstatfs;
 struct vm_area_struct;
 struct vfsmount;
 struct cred;
+struct swap_info_struct;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -636,6 +639,11 @@ struct address_space_operations {
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
 	int (*error_remove_page)(struct address_space *, struct page *);
+
+	/* swapfile support */
+	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
+				sector_t *span);
+	void (*swap_deactivate)(struct file *file);
 };
 
 extern const struct address_space_operations empty_aops;
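
The new swap_activate/swap_deactivate hooks hand swapfile setup and teardown to the owning filesystem. A minimal sketch of how a filesystem might wire them into its address_space_operations (the myfs_* names are hypothetical, not part of this merge):

	/* Hypothetical glue for the new swapfile hooks. */
	static int myfs_swap_activate(struct swap_info_struct *sis,
				      struct file *file, sector_t *span)
	{
		/* Pin any transport or extent state needed for swap IO. */
		*span = sis->pages;	/* cover the whole swap extent */
		return 0;		/* on success the core sets SWP_FILE */
	}

	static void myfs_swap_deactivate(struct file *file)
	{
		/* Undo whatever myfs_swap_activate() pinned. */
	}

	static const struct address_space_operations myfs_aops = {
		/* ... the usual read/write aops ... */
		.swap_activate	 = myfs_swap_activate,
		.swap_deactivate = myfs_swap_deactivate,
	};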
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1e49be49d32..4883f393f50 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -23,6 +23,7 @@ struct vm_area_struct;
 #define ___GFP_REPEAT		0x400u
 #define ___GFP_NOFAIL		0x800u
 #define ___GFP_NORETRY		0x1000u
+#define ___GFP_MEMALLOC		0x2000u
 #define ___GFP_COMP		0x4000u
 #define ___GFP_ZERO		0x8000u
 #define ___GFP_NOMEMALLOC	0x10000u
@@ -76,9 +77,14 @@ struct vm_area_struct;
 #define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
 #define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
 #define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
 #define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
+							 * This takes precedence over the
+							 * __GFP_MEMALLOC flag if both are
+							 * set
+							 */
 #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
@@ -129,7 +135,7 @@ struct vm_area_struct;
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-			__GFP_NORETRY|__GFP_NOMEMALLOC)
+			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
@@ -379,6 +385,9 @@ void drain_local_pages(void *dummy);
  */
 extern gfp_t gfp_allowed_mask;
 
+/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
+bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+
 extern void pm_restrict_gfp_mask(void);
 extern void pm_restore_gfp_mask(void);
 
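
__GFP_MEMALLOC grants a single allocation access to the emergency (pfmemalloc) reserves without the caller running with PF_MEMALLOC, and __GFP_NOMEMALLOC still overrides it when both are set. A hedged illustration of the intended call pattern, not code taken from this merge:

	/* An allocation that must succeed for reclaim to make forward
	 * progress (e.g. on the swap-over-network path) may be allowed
	 * to dip into the reserves. */
	static void *alloc_on_reclaim_path(size_t size)
	{
		return kmalloc(size, GFP_ATOMIC | __GFP_MEMALLOC);
	}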
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 774fa47b3b5..ef788b5b4a3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
+struct page *kmap_to_page(void *addr);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
 
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
 #define totalhigh_pages 0UL
 
 #ifndef ARCH_HAS_KMAP
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5d6bbe2259..225164842ab 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -4,9 +4,11 @@
 #include <linux/mm_types.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
+#include <linux/cgroup.h>
 
 struct ctl_table;
 struct user_struct;
+struct mmu_gather;
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -20,6 +22,11 @@ struct hugepage_subpool {
 	long max_hpages, used_hpages;
 };
 
+extern spinlock_t hugetlb_lock;
+extern int hugetlb_max_hstate __read_mostly;
+#define for_each_hstate(h) \
+	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
+
 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
@@ -40,9 +47,14 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			struct page **, struct vm_area_struct **,
 			unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
 			unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range(struct vm_area_struct *,
-			unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(int, char *);
@@ -98,7 +110,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
-#define unmap_hugepage_range(vma, start, end, page)	BUG()
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
@@ -112,13 +123,31 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
-#define dequeue_hwpoisoned_huge_page(page)	0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+	return 0;
+}
+
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #define HUGETLB_ANON_FILE "anon_hugepage"
@@ -199,10 +228,15 @@ struct hstate {
 	unsigned long resv_huge_pages;
 	unsigned long surplus_huge_pages;
 	unsigned long nr_overcommit_huge_pages;
+	struct list_head hugepage_activelist;
 	struct list_head hugepage_freelists[MAX_NUMNODES];
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_CGROUP_HUGETLB
+	/* cgroup control files */
+	struct cftype cgroup_files[5];
+#endif
 	char name[HSTATE_NAME_LEN];
 };
 
@@ -302,6 +336,11 @@ static inline unsigned hstate_index_to_shift(unsigned index)
 	return hstates[index].order + PAGE_SHIFT;
 }
 
+static inline int hstate_index(struct hstate *h)
+{
+	return h - hstates;
+}
+
 #else
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -320,6 +359,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 	return 1;
 }
 #define hstate_index_to_shift(index) 0
+#define hstate_index(h) 0
 #endif
 
 #endif /* _LINUX_HUGETLB_H */
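
With hugetlb_max_hstate and for_each_hstate() now visible outside mm/hugetlb.c, other code can walk the configured huge page sizes. A small sketch, assuming CONFIG_HUGETLB_PAGE (the function name is illustrative only):

	static void report_hstates(void)
	{
		struct hstate *h;

		/* iterate every registered huge page size */
		for_each_hstate(h)
			pr_info("hstate %s: index %d\n",
				h->name, hstate_index(h));
	}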
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
new file mode 100644
index 00000000000..d73878c694b
--- /dev/null
+++ b/include/linux/hugetlb_cgroup.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright IBM Corporation, 2012
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#ifndef _LINUX_HUGETLB_CGROUP_H
+#define _LINUX_HUGETLB_CGROUP_H
+
+#include <linux/res_counter.h>
+
+struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER	2
+
+#ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+	VM_BUG_ON(!PageHuge(page));
+
+	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+		return NULL;
+	return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+	VM_BUG_ON(!PageHuge(page));
+
+	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+		return -1;
+	page[2].lru.next = (void *)h_cg;
+	return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+	if (hugetlb_subsys.disabled)
+		return true;
+	return false;
+}
+
+extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+					struct hugetlb_cgroup **ptr);
+extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+					 struct hugetlb_cgroup *h_cg,
+					 struct page *page);
+extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+					 struct page *page);
+extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+					   struct hugetlb_cgroup *h_cg);
+extern int hugetlb_cgroup_file_init(int idx) __init;
+extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+				   struct page *newhpage);
+
+#else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+	return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+	return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+	return true;
+}
+
+static inline int
+hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+			     struct hugetlb_cgroup **ptr)
+{
+	return 0;
+}
+
+static inline void
+hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+			     struct hugetlb_cgroup *h_cg,
+			     struct page *page)
+{
+	return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+{
+	return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+			       struct hugetlb_cgroup *h_cg)
+{
+	return;
+}
+
+static inline int __init hugetlb_cgroup_file_init(int idx)
+{
+	return 0;
+}
+
+static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
+					  struct page *newhpage)
+{
+	return;
+}
+
+#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
+#endif
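
The charge interface is two-phase: reserve against the cgroup first, then commit the charge to a specific page once allocation succeeds. A sketch of the protocol under CONFIG_CGROUP_HUGETLB, error paths simplified, roughly in the shape mm/hugetlb.c consumes it (the wrapper name is illustrative):

	static struct page *alloc_charged_huge_page(struct hstate *h, int nid)
	{
		int idx = hstate_index(h);
		unsigned long nr = pages_per_huge_page(h);
		struct hugetlb_cgroup *h_cg;
		struct page *page;

		if (hugetlb_cgroup_charge_cgroup(idx, nr, &h_cg))
			return NULL;			/* over the cgroup limit */

		page = alloc_huge_page_node(h, nid);
		if (!page) {
			hugetlb_cgroup_uncharge_cgroup(idx, nr, h_cg);
			return NULL;
		}

		hugetlb_cgroup_commit_charge(idx, nr, h_cg, page);
		return page;
	}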
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 83e7ba90d6e..8d9489fdab2 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -38,7 +38,7 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
  * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
@@ -72,8 +72,6 @@ extern void mem_cgroup_uncharge_end(void);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 
-extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
-				     int order);
 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 				  struct mem_cgroup *memcg);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
@@ -100,9 +98,9 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 
 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
 
-extern int
-mem_cgroup_prepare_migration(struct page *page,
-	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
+extern void
+mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
+			     struct mem_cgroup **memcgp);
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
@@ -124,7 +122,7 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
 #endif
 
@@ -182,7 +180,6 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
 
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -193,7 +190,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 bool mem_cgroup_bad_page_check(struct page *page);
 void mem_cgroup_print_bad_page(struct page *page);
 #endif
-#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+#else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
 static inline int mem_cgroup_newpage_charge(struct page *page,
@@ -279,11 +276,10 @@ static inline struct cgroup_subsys_state
 	return NULL;
 }
 
-static inline int
+static inline void
 mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
-	struct mem_cgroup **memcgp, gfp_t gfp_mask)
+	struct mem_cgroup **memcgp)
 {
-	return 0;
 }
 
 static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
@@ -366,12 +362,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 	return 0;
 }
 
-static inline
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
-{
-	return 0;
-}
-
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
 {
 }
@@ -384,9 +374,9 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage)
 {
 }
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
+#endif /* CONFIG_MEMCG */
 
-#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
 static inline bool
 mem_cgroup_bad_page_check(struct page *page)
 {
@@ -406,7 +396,7 @@ enum {
 };
 
 struct sock;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
 #else
@@ -416,6 +406,6 @@ static inline void sock_update_memcg(struct sock *sk)
 static inline void sock_release_memcg(struct sock *sk)
 {
 }
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 855c337b20c..ce7e6671968 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -15,7 +15,7 @@ extern int migrate_page(struct address_space *,
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
 			enum migrate_mode mode);
-extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+extern int migrate_huge_page(struct page *, new_page_t x,
 			unsigned long private, bool offlining,
 			enum migrate_mode mode);
 
@@ -36,7 +36,7 @@ static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
 		enum migrate_mode mode) { return -ENOSYS; }
-static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+static inline int migrate_huge_page(struct page *page, new_page_t x,
 		unsigned long private, bool offlining,
 		enum migrate_mode mode) { return -ENOSYS; }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f9f279cf5b1..bd079a1b0fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,17 @@ static inline void *page_rmapping(struct page *page)
 	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 }
 
+extern struct address_space *__page_file_mapping(struct page *);
+
+static inline
+struct address_space *page_file_mapping(struct page *page)
+{
+	if (unlikely(PageSwapCache(page)))
+		return __page_file_mapping(page);
+
+	return page->mapping;
+}
+
 static inline int PageAnon(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -821,6 +832,20 @@ static inline pgoff_t page_index(struct page *page)
 	return page->index;
 }
 
+extern pgoff_t __page_file_index(struct page *page);
+
+/*
+ * Return the file index of the page. Regular pagecache pages use ->index
+ * whereas swapcache pages use swp_offset(->private)
+ */
+static inline pgoff_t page_file_index(struct page *page)
+{
+	if (unlikely(PageSwapCache(page)))
+		return __page_file_index(page);
+
+	return page->index;
+}
+
 /*
  * Return true if this page is mapped into pagetables.
  */
@@ -994,6 +1019,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+struct kvec;
+int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
+			struct page **pages);
+int get_kernel_page(unsigned long start, int write, struct page **pages);
 struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
@@ -1331,6 +1360,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
 extern void setup_per_cpu_pageset(void);
 
 extern void zone_pcp_update(struct zone *zone);
+extern void zone_pcp_reset(struct zone *zone);
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
@@ -1528,6 +1558,7 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 static inline void vm_stat_account(struct mm_struct *mm,
 			unsigned long flags, struct file *file, long pages)
 {
+	mm->total_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
 
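
get_kernel_pages()/get_kernel_page() are the kernel-buffer analogue of get_user_pages(): they look up the struct page behind already-resident kernel addresses. A sketch of pinning one page of a (non-vmalloc) kernel buffer; the wrapper name is illustrative:

	static int pin_kernel_buf(void *buf, struct page **pages)
	{
		struct kvec iov = {
			.iov_base = buf,
			.iov_len  = PAGE_SIZE,
		};

		/* write=0: the pages will only be read */
		return get_kernel_pages(&iov, 1, 0, pages);
	}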
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 074eb98fe15..bf7867200b9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -54,6 +54,15 @@ struct page {
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
 		void *freelist;		/* slub/slob first free object */
+		bool pfmemalloc;	/* If set by the page allocator,
+					 * ALLOC_NO_WATERMARKS was set
+					 * and the low watermark was not
+					 * met implying that the system
+					 * is under some pressure. The
+					 * caller should try ensure
+					 * this page is only used to
+					 * free other pages.
+					 */
 	};
 
 	union {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 458988bd55a..2daa54f55db 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -201,7 +201,7 @@ struct zone_reclaim_stat {
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
 	struct zone_reclaim_stat reclaim_stat;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 	struct zone *zone;
 #endif
 };
@@ -209,7 +209,6 @@ struct lruvec {
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
 /* Isolate clean file */
@@ -369,6 +368,10 @@ struct zone {
 	 */
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	/* pfn where the last incremental compaction isolated free pages */
+	unsigned long		compact_cached_free_pfn;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
@@ -475,6 +478,14 @@ struct zone {
 	 * rarely used fields:
 	 */
 	const char		*name;
+#ifdef CONFIG_MEMORY_ISOLATION
+	/*
+	 * the number of MIGRATE_ISOLATE *pageblock*.
+	 * We need this for free page counting. Look at zone_watermark_ok_safe.
+	 * It's protected by zone->lock
+	 */
+	int			nr_pageblock_isolate;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
@@ -671,7 +682,7 @@ typedef struct pglist_data {
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
 	struct page *node_mem_map;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 	struct page_cgroup *node_page_cgroup;
 #endif
 #endif
@@ -694,6 +705,7 @@ typedef struct pglist_data {
 					     range, including holes */
 	int node_id;
 	wait_queue_head_t kswapd_wait;
+	wait_queue_head_t pfmemalloc_wait;
 	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
@@ -718,7 +730,7 @@ typedef struct pglist_data {
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
-void build_all_zonelists(void *data);
+void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -736,7 +748,7 @@ extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
 
 static inline struct zone *lruvec_zone(struct lruvec *lruvec)
 {
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 	return lruvec->zone;
 #else
 	return container_of(lruvec, struct zone, lruvec);
@@ -773,7 +785,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -1052,7 +1064,7 @@ struct mem_section {
 
 	/* See declaration of similar field in struct zone */
 	unsigned long *pageblock_flags;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2889877318b..1f8fc7f9bcd 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -473,10 +473,10 @@ extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
 			unsigned long);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
 			const struct iovec *iov, unsigned long nr_segs,
-			loff_t pos);
+			loff_t pos, bool uio);
 extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
 			const struct iovec *iov, unsigned long nr_segs,
-			loff_t pos);
+			loff_t pos, bool uio);
 
 /*
  * linux/fs/nfs/dir.c
diff --git a/include/linux/oom.h b/include/linux/oom.h
index e4c29bc72e7..49a3031fda5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -40,15 +40,36 @@ enum oom_constraint {
 	CONSTRAINT_MEMCG,
 };
 
+enum oom_scan_t {
+	OOM_SCAN_OK,		/* scan thread and find its badness */
+	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
+	OOM_SCAN_ABORT,		/* abort the iteration and return */
+	OOM_SCAN_SELECT,	/* always select this thread first */
+};
+
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
 		unsigned long totalpages);
+extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+			     unsigned int points, unsigned long totalpages,
+			     struct mem_cgroup *memcg, nodemask_t *nodemask,
+			     const char *message);
+
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
+extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
+			       int order, const nodemask_t *nodemask);
+
+extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+		unsigned long totalpages, const nodemask_t *nodemask,
+		bool force_kill);
+extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+				     int order);
+
 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		int order, nodemask_t *mask, bool force_kill);
 extern int register_oom_notifier(struct notifier_block *nb);
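
oom_scan_process_thread() centralises the per-task OOM policy so the global and memcg killers can share one verdict enum. A sketch of the consuming loop, loosely modelled on the reworked task-selection logic (simplified: locking and the memcg variant are omitted):

	static struct task_struct *pick_victim(unsigned long totalpages,
					       const nodemask_t *nodemask)
	{
		struct task_struct *p, *chosen = NULL;
		unsigned long points, chosen_points = 0;

		for_each_process(p) {
			switch (oom_scan_process_thread(p, totalpages,
							nodemask, false)) {
			case OOM_SCAN_SELECT:
				chosen = p;
				chosen_points = ULONG_MAX;
				continue;
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				return NULL;	/* a task is already dying */
			case OOM_SCAN_OK:
				break;
			}
			points = oom_badness(p, NULL, nodemask, totalpages);
			if (points > chosen_points) {
				chosen = p;
				chosen_points = points;
			}
		}
		return chosen;
	}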
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c88d2a9451a..b5d13841604 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/mmdebug.h>
 #ifndef __GENERATING_BOUNDS_H
 #include <linux/mm_types.h>
 #include <generated/bounds.h>
@@ -453,6 +454,34 @@ static inline int PageTransTail(struct page *page)
 }
 #endif
 
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline int PageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON(!PageSlab(page));
+	return PageActive(page);
+}
+
+static inline void SetPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON(!PageSlab(page));
+	SetPageActive(page);
+}
+
+static inline void __ClearPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON(!PageSlab(page));
+	__ClearPageActive(page);
+}
+
+static inline void ClearPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON(!PageSlab(page));
+	ClearPageActive(page);
+}
+
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
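
The SlabPfmemalloc bit aliases PG_active, which is safe because slab pages never appear on the LRU. A sketch of how an allocator might record the state when it accepts a fresh slab page (illustrative only, not the sl*b patch itself):

	static void note_new_slab_page(struct page *page)
	{
		__SetPageSlab(page);
		if (page->pfmemalloc)		/* came from the reserves */
			SetPageSlabPfmemalloc(page);
	}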
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3bdcab30ca4..105077aa768 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,6 +1,11 @@
 #ifndef __LINUX_PAGEISOLATION_H
 #define __LINUX_PAGEISOLATION_H
 
+
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
+void set_pageblock_migratetype(struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype);
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
@@ -10,7 +15,7 @@
  * free all pages in the range. test_page_isolated() can be used for
  * test it.
  */
-extern int
+int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			 unsigned migratetype);
 
@@ -18,7 +23,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
-extern int
+int
 undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			unsigned migratetype);
 
@@ -30,8 +35,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 /*
  * Internal functions. Changes pageblock's migrate type.
  */
-extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+int set_migratetype_isolate(struct page *page);
+void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index a88cdba2780..777a524716d 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -12,7 +12,7 @@ enum {
 #ifndef __GENERATING_BOUNDS_H
 #include <generated/bounds.h>
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 #include <linux/bit_spinlock.h>
 
 /*
@@ -82,7 +82,7 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
-#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+#else /* CONFIG_MEMCG */
 struct page_cgroup;
 
 static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
@@ -102,11 +102,11 @@ static inline void __init page_cgroup_init_flatmem(void)
 {
 }
 
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
+#endif /* CONFIG_MEMCG */
 
 #include <linux/swap.h>
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 					unsigned short old, unsigned short new);
 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
@@ -138,7 +138,7 @@ static inline void swap_cgroup_swapoff(int type)
 	return;
 }
 
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+#endif /* CONFIG_MEMCG_SWAP */
 
 #endif /* !__GENERATING_BOUNDS_H */
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7cfad3bbb0c..e42c762f0dc 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -286,6 +286,11 @@ static inline loff_t page_offset(struct page *page)
 	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
 }
 
+static inline loff_t page_file_offset(struct page *page)
+{
+	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+}
+
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 				     unsigned long address);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68dcffaa62a..c147e7024f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1584,7 +1584,7 @@ struct task_struct {
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
 		struct mem_cgroup *memcg; /* target memcg of uncharge */
@@ -1894,6 +1894,13 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
+static inline void tsk_restore_flags(struct task_struct *task,
+				unsigned long orig_flags, unsigned long flags)
+{
+	task->flags &= ~flags;
+	task->flags |= orig_flags & flags;
+}
+
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p,
 			       const struct cpumask *new_mask);
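
tsk_restore_flags() restores only the bits named in 'flags', leaving every other flag as the intervening code left it. The save/set/restore pattern it supports, as the swap-over-network paths in this series use it with PF_MEMALLOC:

	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... processing that must not recurse into reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);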
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 07ceb97d53f..ac6b8ee0782 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,7 +20,6 @@ struct shrink_control {
  * 'nr_to_scan' entries and attempt to free them up.  It should return
  * the number of objects which remain in the cache.  If it returns -1, it means
  * it cannot do any scanning at this time (eg. there is a risk of deadlock).
- * The callback must not return -1 if nr_to_scan is zero.
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d205c4be7f5..7632c87da2c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -462,6 +462,7 @@ struct sk_buff {
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
+	__u8			pfmemalloc:1;
 	__u8			ooo_okay:1;
 	__u8			l4_rxhash:1;
 	__u8			wifi_acked_valid:1;
@@ -502,6 +503,15 @@ struct sk_buff {
 #include <linux/slab.h>
 
 
+#define SKB_ALLOC_FCLONE	0x01
+#define SKB_ALLOC_RX		0x02
+
+/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+static inline bool skb_pfmemalloc(const struct sk_buff *skb)
+{
+	return unlikely(skb->pfmemalloc);
+}
+
 /*
  * skb might have a dst pointer attached, refcounted or not.
  * _skb_refdst low order bit is set if refcount was _not_ taken
@@ -565,7 +575,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 			      bool *fragstolen, int *delta_truesize);
 
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone, int node);
+				   gfp_t priority, int flags, int node);
 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
@@ -576,7 +586,7 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
+	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
 extern void skb_recycle(struct sk_buff *skb);
@@ -1237,6 +1247,17 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 {
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+	/*
+	 * Propagate page->pfmemalloc to the skb if we can. The problem is
+	 * that not all callers have unique ownership of the page. If
+	 * pfmemalloc is set, we check the mapping as a mapping implies
+	 * page->index is set (index and pfmemalloc share space).
+	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
+	 * do not lose pfmemalloc information as the pages would not be
+	 * allocated using __GFP_MEMALLOC.
+	 */
+	if (page->pfmemalloc && !page->mapping)
+		skb->pfmemalloc	= true;
 	frag->page.p		  = page;
 	frag->page_offset	  = off;
 	skb_frag_size_set(frag, size);
@@ -1753,6 +1774,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
+/**
+ *	__skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
+ *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
+ *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ *	@order: size of the allocation
+ *
+ *	Allocate a new page.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
+					     struct sk_buff *skb,
+					     unsigned int order)
+{
+	struct page *page;
+
+	gfp_mask |= __GFP_COLD;
+
+	if (!(gfp_mask & __GFP_NOMEMALLOC))
+		gfp_mask |= __GFP_MEMALLOC;
+
+	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+	if (skb && page && page->pfmemalloc)
+		skb->pfmemalloc = true;
+
+	return page;
+}
+
+/**
+ *	__skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
+ *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
+ *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ *
+ *	Allocate a new page.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
+					    struct sk_buff *skb)
+{
+	return __skb_alloc_pages(gfp_mask, skb, 0);
+}
+
+/**
+ *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
+ *	@page: The page that was allocated from skb_alloc_page
+ *	@skb: The skb that may need pfmemalloc set
+ */
+static inline void skb_propagate_pfmemalloc(struct page *page,
+					    struct sk_buff *skb)
+{
+	if (page && page->pfmemalloc)
+		skb->pfmemalloc = true;
+}
+
 /**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment
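
Together these helpers keep pfmemalloc state intact from page allocation through to the skb a driver builds. A hedged sketch of a receive-path refill using them (the mynic_* naming is hypothetical):

	static struct sk_buff *mynic_build_rx_skb(struct net_device *dev)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, 128);
		struct page *page;

		if (!skb)
			return NULL;

		page = __skb_alloc_page(GFP_ATOMIC, skb);
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		/* skb_fill_page_desc() also propagates page->pfmemalloc
		 * when the page has no mapping. */
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		return skb;
	}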
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 77d278defa7..cff40aa7db6 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -174,6 +174,8 @@ struct rpc_xprt {
 	unsigned long		state;		/* transport state */
 	unsigned char		shutdown   : 1,	/* being shut down */
 				resvport   : 1; /* use a reserved port */
+	unsigned int		swapper;	/* we're swapping over this
+						   transport */
 	unsigned int		bind_index;	/* bind function index */
 
 	/*
@@ -316,6 +318,7 @@ void xprt_release_rqst_cong(struct rpc_task *task);
 void xprt_disconnect_done(struct rpc_xprt *xprt);
 void xprt_force_disconnect(struct rpc_xprt *xprt);
 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
+int xs_swapper(struct rpc_xprt *xprt, int enable);
 
 /*
  * Reserved bit positions in xprt->state
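
Editor's note: a hedged sketch of the intended call pattern for the
new xs_swapper() hook, which marks a transport that carries swap
traffic so its socket keeps access to emergency reserves. The caller
below is hypothetical, not from this patch:

	static int example_swap_over_xprt(struct rpc_xprt *xprt)
	{
		int err;

		err = xs_swapper(xprt, 1);	/* entering swap use */
		if (err)
			return err;
		/* ... swap I/O runs over this transport ... */
		xs_swapper(xprt, 0);		/* leaving swap use */
		return 0;
	}
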
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c84ec68eaec..388e7060141 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -151,6 +151,7 @@ enum {
 	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
 	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
 	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
+	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
 	/* add others here before... */
 	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
 };
@@ -301,7 +302,7 @@ static inline void scan_unevictable_unregister_node(struct node *node)
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
 #else
 static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
@@ -309,7 +310,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 	return vm_swappiness;
 }
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
 extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
 #else
 static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
@@ -320,8 +321,14 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
+extern int swap_set_page_dirty(struct page *page);
 extern void end_swap_bio_read(struct bio *bio, int err);
 
+int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
+		unsigned long nr_pages, sector_t start_block);
+int generic_swapfile_activate(struct swap_info_struct *, struct file *,
+		sector_t *);
+
 /* linux/mm/swap_state.c */
 extern struct address_space swapper_space;
 #define total_swapcache_pages swapper_space.nrpages
@@ -356,11 +363,12 @@ extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct page *, struct block_device **);
 extern sector_t swapdev_block(int, pgoff_t);
 extern int page_swapcount(struct page *);
+extern struct swap_info_struct *page_swap_info(struct page *);
 extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
 #else
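
Editor's note: a hedged sketch of how a filesystem could use the new
generic_swapfile_activate() helper from a swap_activate hook (the
address_space operation added elsewhere in this series). "examplefs"
is made up; the swap core is expected to set SWP_FILE once activation
succeeds:

	#include <linux/fs.h>
	#include <linux/swap.h>

	static int examplefs_swap_activate(struct swap_info_struct *sis,
					   struct file *file, sector_t *span)
	{
		/* Walk the file's block mappings and record swap extents. */
		return generic_swapfile_activate(sis, file, span);
	}

	static const struct address_space_operations examplefs_aops = {
		.swap_activate	= examplefs_swap_activate,
	};
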
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 06f8e385825..57f7b109151 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 	FOR_ALL_ZONES(PGSTEAL_DIRECT),
 	FOR_ALL_ZONES(PGSCAN_KSWAPD),
 	FOR_ALL_ZONES(PGSCAN_DIRECT),
+	PGSCAN_DIRECT_THROTTLE,
 #ifdef CONFIG_NUMA
 	PGSCAN_ZONE_RECLAIM_FAILED,
 #endif
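
Editor's note: a new vm_event_item only shows up once something
accounts it. An illustrative (not from this patch) accounting site for
the new counter, which then appears in /proc/vmstat:

	#include <linux/vmstat.h>

	static void example_note_reclaim_throttled(void)
	{
		/* One hit each time a direct reclaimer is throttled. */
		count_vm_event(PGSCAN_DIRECT_THROTTLE);
	}
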
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92da99..ad2cfd53dad 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -179,11 +179,6 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
-static inline void zap_zone_vm_stats(struct zone *zone)
-{
-	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
-}
-
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
 
 #ifdef CONFIG_SMP
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 6d0a0fcd80e..c66fe3332d8 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -189,9 +189,4 @@ void tag_pages_for_writeback(struct address_space *mapping,
 
 void account_page_redirty(struct page *page);
 
-/* pdflush.c */
-extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
-				   read-only. */
-
-
 #endif /* WRITEBACK_H */