Diffstat (limited to 'include')
-rw-r--r--  include/linux/bitmap.h             |  62
-rw-r--r--  include/linux/byteorder/generic.h  |   2
-rw-r--r--  include/linux/cma.h                |  27
-rw-r--r--  include/linux/dma-contiguous.h     |  11
-rw-r--r--  include/linux/fs.h                 |   2
-rw-r--r--  include/linux/fsnotify_backend.h   |  14
-rw-r--r--  include/linux/gfp.h                |   2
-rw-r--r--  include/linux/glob.h               |   9
-rw-r--r--  include/linux/highmem.h            |   2
-rw-r--r--  include/linux/huge_mm.h            |   4
-rw-r--r--  include/linux/hugetlb.h            |   1
-rw-r--r--  include/linux/kernel.h             |   5
-rw-r--r--  include/linux/klist.h              |   2
-rw-r--r--  include/linux/list.h               |  14
-rw-r--r--  include/linux/memblock.h           |   4
-rw-r--r--  include/linux/memory_hotplug.h     |  10
-rw-r--r--  include/linux/mmdebug.h            |   2
-rw-r--r--  include/linux/mmu_notifier.h       |   6
-rw-r--r--  include/linux/mmzone.h             | 219
-rw-r--r--  include/linux/nodemask.h           |  11
-rw-r--r--  include/linux/oom.h                |   4
-rw-r--r--  include/linux/page-flags.h         |  21
-rw-r--r--  include/linux/pagemap.h            |   3
-rw-r--r--  include/linux/printk.h             |   2
-rw-r--r--  include/linux/rculist.h            |   8
-rw-r--r--  include/linux/swap.h               |   1
-rw-r--r--  include/linux/vmalloc.h            |   2
-rw-r--r--  include/linux/zbud.h               |   2
-rw-r--r--  include/linux/zlib.h               | 118
-rw-r--r--  include/linux/zpool.h              | 106
-rw-r--r--  include/trace/events/migrate.h     |   1
-rw-r--r--  include/trace/events/pagemap.h     |  16
32 files changed, 371 insertions(+), 322 deletions(-)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 7ad634501e48..e1c8d080c427 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -88,32 +88,32 @@
  * lib/bitmap.c provides these functions:
  */
 
-extern int __bitmap_empty(const unsigned long *bitmap, int bits);
-extern int __bitmap_full(const unsigned long *bitmap, int bits);
+extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
+extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
 extern int __bitmap_equal(const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
-			int bits);
+			unsigned int nbits);
 extern void __bitmap_shift_right(unsigned long *dst,
 			const unsigned long *src, int shift, int bits);
 extern void __bitmap_shift_left(unsigned long *dst,
 			const unsigned long *src, int shift, int bits);
 extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern int __bitmap_intersects(const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
 extern int __bitmap_subset(const unsigned long *bitmap1,
-			const unsigned long *bitmap2, int bits);
-extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+			const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
 
-extern void bitmap_set(unsigned long *map, int i, int len);
-extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern void bitmap_set(unsigned long *map, unsigned int start, int len);
+extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
 extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
 					 unsigned long size,
 					 unsigned long start,
@@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
 			const unsigned long *relmap, int bits);
 extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
 			int sz, int bits);
-extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
 extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
 
@@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 }
 
 static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
-		return (*dst = *src1 & *src2) != 0;
+		return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
 	return __bitmap_and(dst, src1, src2, nbits);
 }
 
 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		*dst = *src1 | *src2;
@@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 }
 
 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		*dst = *src1 ^ *src2;
@@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 }
 
 static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
-		return (*dst = *src1 & ~(*src2)) != 0;
+		return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
 	return __bitmap_andnot(dst, src1, src2, nbits);
 }
 
 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
-			int nbits)
+			unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
-		*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
+		*dst = ~(*src);
 	else
 		__bitmap_complement(dst, src, nbits);
 }
 
 static inline int bitmap_equal(const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
 }
 
 static inline int bitmap_intersects(const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
 }
 
 static inline int bitmap_subset(const unsigned long *src1,
-			const unsigned long *src2, int nbits)
+			const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
 	return __bitmap_subset(src1, src2, nbits);
 }
 
-static inline int bitmap_empty(const unsigned long *src, int nbits)
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
 	return __bitmap_empty(src, nbits);
 }
 
-static inline int bitmap_full(const unsigned long *src, int nbits)
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
 	return __bitmap_full(src, nbits);
 }
 
-static inline int bitmap_weight(const unsigned long *src, int nbits)
+static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
 			const unsigned long *src, int n, int nbits)
 {
 	if (small_const_nbits(nbits))
-		*dst = *src >> n;
+		*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
 	else
 		__bitmap_shift_right(dst, src, n, nbits);
 }
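
Beyond the int-to-unsigned conversion of nbits, the single-word fast paths above change behaviour: bitmap_and() and bitmap_andnot() now AND in BITMAP_LAST_WORD_MASK(nbits) so stray bits past nbits can no longer produce a spurious nonzero return, bitmap_shift_right() masks before shifting for the same reason, and bitmap_complement() stops masking because every reader now masks for itself. A small userspace sketch of the idiom, with the mask macro restated here for illustration (not taken verbatim from the header):

#include <stdio.h>

/* Userspace restatement of the kernel idiom, for illustration only. */
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BITMAP_LAST_WORD_MASK(nbits) \
	(((nbits) % BITS_PER_LONG) ? (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

int main(void)
{
	unsigned long a = 0xffUL;	/* bits 0-7 set */
	unsigned long b = 0xf0UL;	/* bits 4-7 set */
	unsigned int nbits = 4;		/* the bitmap proper is bits 0-3 */
	unsigned long dst;

	/* Old single-word bitmap_and(): tail bits 4-7 leak into the result. */
	dst = a & b;
	printf("unmasked: %#lx (nonzero despite an empty intersection)\n", dst);

	/* New behaviour: the last (here, only) word is masked to nbits. */
	dst = a & b & BITMAP_LAST_WORD_MASK(nbits);
	printf("masked:   %#lx\n", dst);
	return 0;
}
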
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 0846e6b931ce..89f67c1c3160 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -2,7 +2,7 @@
 #define _LINUX_BYTEORDER_GENERIC_H
 
 /*
- * linux/byteorder_generic.h
+ * linux/byteorder/generic.h
  * Generic Byte-reordering support
  *
  * The "... p" macros, like le64_to_cpup, can be used with pointers
diff --git a/include/linux/cma.h b/include/linux/cma.h
new file mode 100644
index 000000000000..371b93042520
--- /dev/null
+++ b/include/linux/cma.h
@@ -0,0 +1,27 @@
+#ifndef __CMA_H__
+#define __CMA_H__
+
+/*
+ * There is always at least global CMA area and a few optional
+ * areas configured in kernel .config.
+ */
+#ifdef CONFIG_CMA_AREAS
+#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
+
+#else
+#define MAX_CMA_AREAS	(0)
+
+#endif
+
+struct cma;
+
+extern phys_addr_t cma_get_base(struct cma *cma);
+extern unsigned long cma_get_size(struct cma *cma);
+
+extern int __init cma_declare_contiguous(phys_addr_t size,
+			phys_addr_t base, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			bool fixed, struct cma **res_cma);
+extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
+extern bool cma_release(struct cma *cma, struct page *pages, int count);
+#endif
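
The new header exposes the CMA allocator directly instead of only through dma-contiguous. A hedged sketch of a driver-style caller built only on the declarations above; my_cma, the 16 MiB size and the alignment order are hypothetical, and error handling is elided:

#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *my_cma;	/* hypothetical consumer state */

static int __init my_cma_setup(void)
{
	/* Reserve 16 MiB anywhere below the default limit, with a
	 * per-bit granularity of one page (order_per_bit == 0). */
	return cma_declare_contiguous(SZ_16M, 0, 0, 0, 0, false, &my_cma);
}

static struct page *my_buf_alloc(void)
{
	/* 64 pages, aligned to 2^4 pages. */
	return cma_alloc(my_cma, 64, 4);
}

static void my_buf_free(struct page *pages)
{
	cma_release(my_cma, pages, 64);
}
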
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 772eab5d524a..569bbd039896 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -53,18 +53,13 @@
 
 #ifdef __KERNEL__
 
+#include <linux/device.h>
+
 struct cma;
 struct page;
-struct device;
 
 #ifdef CONFIG_DMA_CMA
 
-/*
- * There is always at least global CMA area and a few optional device
- * private areas configured in kernel .config.
- */
-#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
-
 extern struct cma *dma_contiguous_default_area;
 
 static inline struct cma *dev_get_cma_area(struct device *dev)
@@ -123,8 +118,6 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 #else
 
-#define MAX_CMA_AREAS	(0)
-
 static inline struct cma *dev_get_cma_area(struct device *dev)
 {
 	return NULL;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2daccaf4b547..1ab6c6913040 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2688,7 +2688,7 @@ static const struct file_operations __fops = { \
 	.read	 = simple_attr_read,					\
 	.write	 = simple_attr_write,					\
 	.llseek	 = generic_file_llseek,					\
-};
+}
 
 static inline __printf(1, 2)
 void __simple_attr_check_format(const char *fmt, ...)
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index fc7718c6bd3e..ca060d7c4fa6 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -322,16 +322,18 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
 extern void fsnotify_destroy_event(struct fsnotify_group *group,
 				   struct fsnotify_event *event);
 /* attach the event to the group notification queue */
-extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+extern int fsnotify_add_event(struct fsnotify_group *group,
 			      struct fsnotify_event *event,
 			      int (*merge)(struct list_head *,
 					   struct fsnotify_event *));
+/* Remove passed event from groups notification queue */
+extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
-extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
+extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
 /* return AND dequeue the first event on the notification queue */
-extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
+extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
 
 /* functions used to manipulate the marks attached to inodes */
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6eb1fb37de9a..5e7219dc0fae 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -360,7 +360,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
 /* This is different from alloc_pages_exact_node !!! */
-void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
 		__get_free_pages((gfp_mask), 0)
diff --git a/include/linux/glob.h b/include/linux/glob.h
new file mode 100644
index 000000000000..861d8347d08e
--- /dev/null
+++ b/include/linux/glob.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_GLOB_H
+#define _LINUX_GLOB_H
+
+#include <linux/types.h>	/* For bool */
+#include <linux/compiler.h>	/* For __pure */
+
+bool __pure glob_match(char const *pat, char const *str);
+
+#endif	/* _LINUX_GLOB_H */
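
A minimal illustration of the new predicate; the wrapper name is hypothetical, and the pattern syntax ('*', '?' and '[...]' classes) is the one implemented by the accompanying lib/glob.c:

#include <linux/glob.h>

static bool looks_like_vt(const char *name)
{
	return glob_match("tty[0-9]*", name);
}
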
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7fb31da45d03..9286a46b7d69 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -93,7 +93,7 @@ static inline int kmap_atomic_idx_push(void)
 
 #ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
-	BUG_ON(idx > KM_TYPE_NR);
+	BUG_ON(idx >= KM_TYPE_NR);
 #endif
 	return idx;
 }
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b826239bdce0..63579cb8d3dc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
 #endif /* CONFIG_DEBUG_VM */
 
 extern unsigned long transparent_hugepage_flags;
-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-			  pmd_t *dst_pmd, pmd_t *src_pmd,
-			  struct vm_area_struct *vma,
-			  unsigned long addr, unsigned long end);
 extern int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a23c096b3080..6e6d338641fe 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -87,7 +87,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
 #endif
 
 extern unsigned long hugepages_treat_as_movable;
-extern const unsigned long hugetlb_zero, hugetlb_infinity;
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages;
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a9e2268ecccb..3dc22abbc68a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -493,11 +493,6 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
 	return buf;
 }
 
-static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
-{
-	return hex_byte_pack(buf, byte);
-}
-
 extern int hex_to_bin(char ch);
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 
diff --git a/include/linux/klist.h b/include/linux/klist.h
index a370ce57cf1d..61e5b723ae73 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -44,7 +44,7 @@ struct klist_node {
 
 extern void klist_add_tail(struct klist_node *n, struct klist *k);
 extern void klist_add_head(struct klist_node *n, struct klist *k);
-extern void klist_add_after(struct klist_node *n, struct klist_node *pos);
+extern void klist_add_behind(struct klist_node *n, struct klist_node *pos);
 extern void klist_add_before(struct klist_node *n, struct klist_node *pos);
 
 extern void klist_del(struct klist_node *n);
diff --git a/include/linux/list.h b/include/linux/list.h
index ef9594171062..cbbb96fcead9 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -654,15 +654,15 @@ static inline void hlist_add_before(struct hlist_node *n,
 	*(n->pprev) = n;
 }
 
-static inline void hlist_add_after(struct hlist_node *n,
-				   struct hlist_node *next)
+static inline void hlist_add_behind(struct hlist_node *n,
+				    struct hlist_node *prev)
 {
-	next->next = n->next;
-	n->next = next;
-	next->pprev = &n->next;
+	n->next = prev->next;
+	prev->next = n;
+	n->pprev = &prev->next;
 
-	if(next->next)
-		next->next->pprev = &next->next;
+	if (n->next)
+		n->next->pprev = &n->next;
 }
 
 /* after that we'll appear to be on some hlist and hlist_del will work */
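
The rewrite is not just a rename: the new element now comes first and the existing element second, mirroring hlist_add_before(), and the body is redone in those terms. A sketch of a sorted-insert caller under the new convention (the item type is illustrative):

#include <linux/list.h>

struct item {
	int key;
	struct hlist_node node;
};

static void item_insert_sorted(struct hlist_head *head, struct item *new)
{
	struct item *cur, *prev = NULL;

	hlist_for_each_entry(cur, head, node) {
		if (cur->key > new->key)
			break;
		prev = cur;
	}
	if (!prev)
		hlist_add_head(&new->node, head);
	else
		hlist_add_behind(&new->node, &prev->node);	/* new, then prev */
}
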
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b660e05b63d4..e8cc45307f8f 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -249,7 +249,7 @@ phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline void memblock_set_bottom_up(bool enable)
+static inline void __init memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }
@@ -264,7 +264,7 @@ static inline bool memblock_bottom_up(void)
 	return memblock.bottom_up;
 }
 #else
-static inline void memblock_set_bottom_up(bool enable) {}
+static inline void __init memblock_set_bottom_up(bool enable) {}
 static inline bool memblock_bottom_up(void) { return false; }
 #endif
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 010d125bffbf..d9524c49d767 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -26,11 +26,12 @@ enum {
 	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
 };
 
-/* Types for control the zone type of onlined memory */
+/* Types for control the zone type of onlined and offlined memory */
 enum {
-	ONLINE_KEEP,
-	ONLINE_KERNEL,
-	ONLINE_MOVABLE,
+	MMOP_OFFLINE = -1,
+	MMOP_ONLINE_KEEP,
+	MMOP_ONLINE_KERNEL,
+	MMOP_ONLINE_MOVABLE,
 };
 
 /*
@@ -258,6 +259,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
 extern int add_memory(int nid, u64 start, u64 size);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
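
Folding MMOP_OFFLINE into the same enum at -1 lets one integer carry both online and offline requests. A hedged sketch of mapping a sysfs "state" string onto the new constants; the in-tree parser lives in drivers/base/memory.c and may differ in detail:

#include <linux/memory_hotplug.h>
#include <linux/string.h>
#include <linux/errno.h>

static int state_to_mmop(const char *buf)
{
	if (sysfs_streq(buf, "online_kernel"))
		return MMOP_ONLINE_KERNEL;
	if (sysfs_streq(buf, "online_movable"))
		return MMOP_ONLINE_MOVABLE;
	if (sysfs_streq(buf, "online"))
		return MMOP_ONLINE_KEEP;
	if (sysfs_streq(buf, "offline"))
		return MMOP_OFFLINE;
	return -EINVAL;
}
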
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index edd82a105220..2f348d02f640 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -20,11 +20,13 @@ extern void dump_page_badflags(struct page *page, const char *reason,
 	} while (0)
 #define VM_WARN_ON(cond) WARN_ON(cond)
 #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
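
VM_WARN_ONCE() is used like VM_WARN_ON_ONCE() plus a format string; on !CONFIG_DEBUG_VM builds the condition only has to compile, it is never evaluated. An illustrative caller (names hypothetical):

#include <linux/mmdebug.h>

static void check_pfn_range(unsigned long pfn, unsigned long max_pfn)
{
	VM_WARN_ONCE(pfn >= max_pfn,
		     "pfn %lu outside range (max %lu)\n", pfn, max_pfn);
}
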
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index deca87452528..27288692241e 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -170,6 +170,8 @@ extern int __mmu_notifier_register(struct mmu_notifier *mn,
 				   struct mm_struct *mm);
 extern void mmu_notifier_unregister(struct mmu_notifier *mn,
 				    struct mm_struct *mm);
+extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
+					       struct mm_struct *mm);
 extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
@@ -288,6 +290,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	set_pte_at(___mm, ___address, __ptep, ___pte);			\
 })
 
+extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
+				   void (*func)(struct rcu_head *rcu));
+extern void mmu_notifier_synchronize(void);
+
 #else /* CONFIG_MMU_NOTIFIER */
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6cbd1b6c3d20..318df7051850 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -143,6 +143,7 @@ enum zone_stat_item {
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
+	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
@@ -324,19 +325,12 @@ enum zone_type {
 #ifndef __GENERATING_BOUNDS_H
 
 struct zone {
-	/* Fields commonly accessed by the page allocator */
+	/* Read-mostly fields */
 
 	/* zone watermarks, access with *_wmark_pages(zone) macros */
 	unsigned long watermark[NR_WMARK];
 
 	/*
-	 * When free pages are below this point, additional steps are taken
-	 * when reading the number of free pages to avoid per-cpu counter
-	 * drift allowing watermarks to be breached
-	 */
-	unsigned long percpu_drift_mark;
-
-	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several
 	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -344,41 +338,26 @@ struct zone {
 	 * on the higher zones). This array is recalculated at runtime if the
 	 * sysctl_lowmem_reserve_ratio sysctl changes.
 	 */
-	unsigned long lowmem_reserve[MAX_NR_ZONES];
-
-	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
-	 */
-	unsigned long		dirty_balance_reserve;
+	long lowmem_reserve[MAX_NR_ZONES];
 
 #ifdef CONFIG_NUMA
 	int node;
+#endif
+
 	/*
-	 * zone reclaim becomes active if more unmapped pages exist.
+	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+	 * this zone's LRU.  Maintained by the pageout code.
 	 */
-	unsigned long		min_unmapped_pages;
-	unsigned long		min_slab_pages;
-#endif
+	unsigned int inactive_ratio;
+
+	struct pglist_data	*zone_pgdat;
 	struct per_cpu_pageset __percpu *pageset;
+
 	/*
-	 * free areas of different sizes
+	 * This is a per-zone reserve of pages that should not be
+	 * considered dirtyable memory.
 	 */
-	spinlock_t		lock;
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-	/* Set to true when the PG_migrate_skip bits should be cleared */
-	bool			compact_blockskip_flush;
-
-	/* pfn where compaction free scanner should start */
-	unsigned long		compact_cached_free_pfn;
-	/* pfn where async and sync compaction migration scanner should start */
-	unsigned long		compact_cached_migrate_pfn[2];
-#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
-	/* see spanned/present_pages for more description */
-	seqlock_t		span_seqlock;
-#endif
-	struct free_area	free_area[MAX_ORDER];
+	unsigned long		dirty_balance_reserve;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
@@ -388,74 +367,14 @@ struct zone {
 	unsigned long		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
-#ifdef CONFIG_COMPACTION
-	/*
-	 * On compaction failure, 1<<compact_defer_shift compactions
-	 * are skipped before trying again. The number attempted since
-	 * last failure is tracked with compact_considered.
-	 */
-	unsigned int		compact_considered;
-	unsigned int		compact_defer_shift;
-	int			compact_order_failed;
-#endif
-
-	ZONE_PADDING(_pad1_)
-
-	/* Fields commonly accessed by the page reclaim scanner */
-	spinlock_t		lru_lock;
-	struct lruvec		lruvec;
-
-	/* Evictions & activations on the inactive file list */
-	atomic_long_t		inactive_age;
-
-	unsigned long		pages_scanned;	   /* since last reclaim */
-	unsigned long		flags;		   /* zone flags, see below */
-
-	/* Zone statistics */
-	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
-
-	/*
-	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
-	 * this zone's LRU.  Maintained by the pageout code.
-	 */
-	unsigned int		inactive_ratio;
-
-
-	ZONE_PADDING(_pad2_)
-	/* Rarely used or read-mostly fields */
-
+#ifdef CONFIG_NUMA
 	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_hash_nr_entries	-- the size of the hash table array
-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
+	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	wait_queue_head_t	*wait_table;
-	unsigned long		wait_table_hash_nr_entries;
-	unsigned long		wait_table_bits;
-
-	/*
-	 * Discontig memory support fields.
-	 */
-	struct pglist_data	*zone_pgdat;
+	unsigned long		min_unmapped_pages;
+	unsigned long		min_slab_pages;
+#endif /* CONFIG_NUMA */
+
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long		zone_start_pfn;
 
@@ -500,9 +419,11 @@ struct zone {
 	 * adjust_managed_page_count() should be used instead of directly
 	 * touching zone->managed_pages and totalram_pages.
 	 */
+	unsigned long		managed_pages;
 	unsigned long		spanned_pages;
 	unsigned long		present_pages;
-	unsigned long		managed_pages;
+
+	const char		*name;
 
 	/*
 	 * Number of MIGRATE_RESEVE page block. To maintain for just
@@ -510,10 +431,94 @@ struct zone {
 	 */
 	int			nr_migrate_reserve_block;
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* see spanned/present_pages for more description */
+	seqlock_t		span_seqlock;
+#endif
+
 	/*
-	 * rarely used fields:
+	 * wait_table		-- the array holding the hash table
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
+	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
+	 *
+	 * The purpose of all these is to keep track of the people
+	 * waiting for a page to become available and make them
+	 * runnable again when possible. The trouble is that this
+	 * consumes a lot of space, especially when so few things
+	 * wait on pages at a given time. So instead of using
+	 * per-page waitqueues, we use a waitqueue hash table.
+	 *
+	 * The bucket discipline is to sleep on the same queue when
+	 * colliding and wake all in that wait queue when removing.
+	 * When something wakes, it must check to be sure its page is
+	 * truly available, a la thundering herd. The cost of a
+	 * collision is great, but given the expected load of the
+	 * table, they should be so rare as to be outweighed by the
+	 * benefits from the saved space.
+	 *
+	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
+	 * primary users of these fields, and in mm/page_alloc.c
+	 * free_area_init_core() performs the initialization of them.
 	 */
-	const char		*name;
+	wait_queue_head_t	*wait_table;
+	unsigned long		wait_table_hash_nr_entries;
+	unsigned long		wait_table_bits;
+
+	ZONE_PADDING(_pad1_)
+
+	/* Write-intensive fields used from the page allocator */
+	spinlock_t		lock;
+
+	/* free areas of different sizes */
+	struct free_area	free_area[MAX_ORDER];
+
+	/* zone flags, see below */
+	unsigned long		flags;
+
+	ZONE_PADDING(_pad2_)
+
+	/* Write-intensive fields used by page reclaim */
+
+	/* Fields commonly accessed by the page reclaim scanner */
+	spinlock_t		lru_lock;
+	struct lruvec		lruvec;
+
+	/* Evictions & activations on the inactive file list */
+	atomic_long_t		inactive_age;
+
+	/*
+	 * When free pages are below this point, additional steps are taken
+	 * when reading the number of free pages to avoid per-cpu counter
+	 * drift allowing watermarks to be breached
+	 */
+	unsigned long		percpu_drift_mark;
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	/* pfn where compaction free scanner should start */
+	unsigned long		compact_cached_free_pfn;
+	/* pfn where async and sync compaction migration scanner should start */
+	unsigned long		compact_cached_migrate_pfn[2];
+#endif
+
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int		compact_considered;
+	unsigned int		compact_defer_shift;
+	int			compact_order_failed;
+#endif
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	/* Set to true when the PG_migrate_skip bits should be cleared */
+	bool			compact_blockskip_flush;
+#endif
+
+	ZONE_PADDING(_pad3_)
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
@@ -529,6 +534,7 @@ typedef enum {
 	ZONE_WRITEBACK,			/* reclaim scanning has recently found
 					 * many pages under writeback
 					 */
+	ZONE_FAIR_DEPLETED,		/* fair zone policy batch depleted */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -566,6 +572,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
 }
 
+static inline int zone_is_fair_depleted(const struct zone *zone)
+{
+	return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
+}
+
 static inline int zone_is_oom_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
@@ -872,6 +883,8 @@ static inline int zone_movable_is_highmem(void)
 {
 #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 	return movable_zone == ZONE_HIGHMEM;
+#elif defined(CONFIG_HIGHMEM)
+	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
 #else
 	return 0;
 #endif
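
The net effect of the struct zone reshuffle is grouping by access pattern: read-mostly fields up front, then the allocator's write-hot fields, then reclaim's, with ZONE_PADDING pushing each group onto its own cacheline so zone->lock and zone->lru_lock stop bouncing the same line between CPUs. The shape of the idiom, reduced to a toy struct rather than the real layout:

#include <linux/mmzone.h>
#include <linux/spinlock.h>

/* Each ZONE_PADDING() expands to a cacheline-aligned spacer on SMP
 * builds (and to nothing on UP), so the groups below never share a
 * cacheline. */
struct hot_cold_split {
	unsigned long	read_mostly_a;	/* read by everyone, rarely written */
	unsigned long	read_mostly_b;

	ZONE_PADDING(_pad1_)
	spinlock_t	alloc_lock;	/* write-hot on the allocator side */

	ZONE_PADDING(_pad2_)
	spinlock_t	reclaim_lock;	/* write-hot on the reclaim side */
} ____cacheline_internodealigned_in_smp;
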
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 58b9a02c38d2..83a6aeda899d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -430,7 +430,15 @@ static inline int num_node_state(enum node_states state)
 		for_each_node_mask((__node), node_states[__state])
 
 #define first_online_node	first_node(node_states[N_ONLINE])
-#define next_online_node(nid)	next_node((nid), node_states[N_ONLINE])
+#define first_memory_node	first_node(node_states[N_MEMORY])
+static inline int next_online_node(int nid)
+{
+	return next_node(nid, node_states[N_ONLINE]);
+}
+static inline int next_memory_node(int nid)
+{
+	return next_node(nid, node_states[N_MEMORY]);
+}
 
 extern int nr_node_ids;
 extern int nr_online_nodes;
@@ -471,6 +479,7 @@ static inline int num_node_state(enum node_states state)
 	for ( (node) = 0; (node) == 0; (node) = 1)
 
 #define first_online_node	0
+#define first_memory_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1
 #define nr_online_nodes		1
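
first_memory_node/next_memory_node mirror the online-node helpers for N_MEMORY. A sketch of a walker built on them; note the single-node stub above defines only first_memory_node, so this form assumes a NUMA build:

#include <linux/nodemask.h>

static int count_memory_nodes(void)
{
	int nid, count = 0;

	for (nid = first_memory_node; nid < MAX_NUMNODES;
	     nid = next_memory_node(nid))
		count++;
	return count;
}
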
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 4cd62677feb9..647395a1a550 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -55,8 +55,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			     struct mem_cgroup *memcg, nodemask_t *nodemask,
 			     const char *message);
 
-extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 			       int order, const nodemask_t *nodemask);
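
The bool return makes the trylock nature explicit. A hedged sketch of the caller pattern the new names suggest; the real callers are in mm/page_alloc.c and mm/oom_kill.c and may differ:

#include <linux/oom.h>

static bool try_oom_kill(struct zonelist *zonelist, gfp_t gfp_mask)
{
	if (!oom_zonelist_trylock(zonelist, gfp_mask))
		return false;	/* another OOM kill is already in flight */
	/* ... select and kill a victim here ... */
	oom_zonelist_unlock(zonelist, gfp_mask);
	return true;
}
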
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8304959ad336..e1f5fcd79792 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -171,13 +171,12 @@ static inline int __TestClearPage##uname(struct page *page) \
 #define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
 	__SETPAGEFLAG(uname, lname)  __CLEARPAGEFLAG(uname, lname)
 
-#define PAGEFLAG_FALSE(uname)						\
-static inline int Page##uname(const struct page *page)			\
-			{ return 0; }
-
 #define TESTSCFLAG(uname, lname)					\
 	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
 
+#define TESTPAGEFLAG_FALSE(uname)					\
+static inline int Page##uname(const struct page *page) { return 0; }
+
 #define SETPAGEFLAG_NOOP(uname)						\
 static inline void SetPage##uname(struct page *page) {  }
 
@@ -187,12 +186,21 @@ static inline void ClearPage##uname(struct page *page) { }
 #define __CLEARPAGEFLAG_NOOP(uname)					\
 static inline void __ClearPage##uname(struct page *page) {  }
 
+#define TESTSETFLAG_FALSE(uname)					\
+static inline int TestSetPage##uname(struct page *page) { return 0; }
+
 #define TESTCLEARFLAG_FALSE(uname)					\
 static inline int TestClearPage##uname(struct page *page) { return 0; }
 
 #define __TESTCLEARFLAG_FALSE(uname)					\
 static inline int __TestClearPage##uname(struct page *page) { return 0; }
 
+#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
+	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+
+#define TESTSCFLAG_FALSE(uname)						\
+	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+
 struct page;	/* forward declaration */
 
 TESTPAGEFLAG(Locked, locked)
@@ -248,7 +256,6 @@ PAGEFLAG_FALSE(HighMem)
 PAGEFLAG(SwapCache, swapcache)
 #else
 PAGEFLAG_FALSE(SwapCache)
-	SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
@@ -258,8 +265,8 @@ PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
 	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
 #else
-PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
-	TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
+	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
 #endif
 
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
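
With the regrouping, a single PAGEFLAG_FALSE(uname) now emits the whole no-op family, which is why the SwapCache and Mlocked #else branches above shed their hand-stacked *_NOOP lines. Roughly, PAGEFLAG_FALSE(HighMem) expands to:

static inline int PageHighMem(const struct page *page) { return 0; }
static inline void SetPageHighMem(struct page *page) { }
static inline void ClearPageHighMem(struct page *page) { }
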
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e1474ae18c88..3df8c7db7a4e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -484,6 +484,9 @@ static inline int lock_page_killable(struct page *page)
 /*
  * lock_page_or_retry - Lock the page, unless this would block and the
  * caller indicated that it can handle a retry.
+ *
+ * Return value and mmap_sem implications depend on flags; see
+ * __lock_page_or_retry().
  */
 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				     unsigned int flags)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 319ff7e53efb..0990997a5304 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -31,7 +31,7 @@ static inline const char *printk_skip_level(const char *buffer)
 }
 
 /* printk's without a loglevel use this.. */
-#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
+#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
 
 /* We show everything that is MORE important than this.. */
 #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8183b46fbaa2..372ad5e0dcb8 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -432,9 +432,9 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
 }
 
 /**
- * hlist_add_after_rcu
- * @prev: the existing element to add the new element after.
+ * hlist_add_behind_rcu
  * @n: the new element to add to the hash list.
+ * @prev: the existing element to add the new element after.
  *
  * Description:
  * Adds the specified element to the specified hlist
@@ -449,8 +449,8 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
  * hlist_for_each_entry_rcu(), used to prevent memory-consistency
  * problems on Alpha CPUs.
  */
-static inline void hlist_add_after_rcu(struct hlist_node *prev,
-				       struct hlist_node *n)
+static inline void hlist_add_behind_rcu(struct hlist_node *n,
+					struct hlist_node *prev)
 {
 	n->next = prev->next;
 	n->pprev = &prev->next;
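
Same convention flip as the plain hlist_add_behind(): new element first, existing element second. A short sketch of a writer-side insert; readers traversing with hlist_for_each_entry_rcu() remain safe because the new node is fully initialized before it is linked in:

#include <linux/rculist.h>

struct entry {
	int key;
	struct hlist_node node;
};

/* Called with the writer-side lock held; pos is already on the list. */
static void entry_insert_behind(struct entry *new, struct entry *pos)
{
	hlist_add_behind_rcu(&new->node, &pos->node);
}
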
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4bdbee80eede..1eb64043c076 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -311,7 +311,6 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 				 struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
-extern void init_page_accessed(struct page *page);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_all(void);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b8a89189a29..b87696fdf06a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -113,7 +113,7 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page ***pages);
+			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index 13af0d450bf6..f9d41a6e361f 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -11,7 +11,7 @@ struct zbud_ops {
 
 struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
 void zbud_destroy_pool(struct zbud_pool *pool);
-int zbud_alloc(struct zbud_pool *pool, unsigned int size, gfp_t gfp,
+int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
 	unsigned long *handle);
 void zbud_free(struct zbud_pool *pool, unsigned long handle);
 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
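
With size_t, callers can pass a compressed length straight through from the compressor. A hedged sketch in the zswap style, assuming the zbud_map()/zbud_unmap() helpers declared elsewhere in this header; the zero-handle failure convention here is hypothetical:

#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/string.h>

static unsigned long store_compressed(struct zbud_pool *pool,
				      const void *data, size_t len)
{
	unsigned long handle;

	if (zbud_alloc(pool, len, GFP_KERNEL, &handle))
		return 0;
	memcpy(zbud_map(pool, handle), data, len);
	zbud_unmap(pool, handle);
	return handle;
}
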
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 9c5a6b4de0a3..197abb2a54c5 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -493,64 +493,6 @@ extern int deflateInit2 (z_streamp strm,
493 method). msg is set to null if there is no error message. deflateInit2 does 493 method). msg is set to null if there is no error message. deflateInit2 does
494 not perform any compression: this will be done by deflate(). 494 not perform any compression: this will be done by deflate().
495*/ 495*/
496
497#if 0
498extern int zlib_deflateSetDictionary (z_streamp strm,
499 const Byte *dictionary,
500 uInt dictLength);
501#endif
502/*
503 Initializes the compression dictionary from the given byte sequence
504 without producing any compressed output. This function must be called
505 immediately after deflateInit, deflateInit2 or deflateReset, before any
506 call of deflate. The compressor and decompressor must use exactly the same
507 dictionary (see inflateSetDictionary).
508
509 The dictionary should consist of strings (byte sequences) that are likely
510 to be encountered later in the data to be compressed, with the most commonly
511 used strings preferably put towards the end of the dictionary. Using a
512 dictionary is most useful when the data to be compressed is short and can be
513 predicted with good accuracy; the data can then be compressed better than
514 with the default empty dictionary.
515
516 Depending on the size of the compression data structures selected by
517 deflateInit or deflateInit2, a part of the dictionary may in effect be
518 discarded, for example if the dictionary is larger than the window size in
519 deflate or deflate2. Thus the strings most likely to be useful should be
520 put at the end of the dictionary, not at the front.
521
522 Upon return of this function, strm->adler is set to the Adler32 value
523 of the dictionary; the decompressor may later use this value to determine
524 which dictionary has been used by the compressor. (The Adler32 value
525 applies to the whole dictionary even if only a subset of the dictionary is
526 actually used by the compressor.)
527
528 deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
529 parameter is invalid (such as NULL dictionary) or the stream state is
530 inconsistent (for example if deflate has already been called for this stream
531 or if the compression method is bsort). deflateSetDictionary does not
532 perform any compression: this will be done by deflate().
533*/
534
535#if 0
536extern int zlib_deflateCopy (z_streamp dest, z_streamp source);
537#endif
538
539/*
540 Sets the destination stream as a complete copy of the source stream.
541
542 This function can be useful when several compression strategies will be
543 tried, for example when there are several ways of pre-processing the input
544 data with a filter. The streams that will be discarded should then be freed
545 by calling deflateEnd. Note that deflateCopy duplicates the internal
546 compression state which can be quite large, so this strategy is slow and
547 can consume lots of memory.
548
549 deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
550 enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
551 (such as zalloc being NULL). msg is left unchanged in both source and
552 destination.
553*/
554 496
555extern int zlib_deflateReset (z_streamp strm); 497extern int zlib_deflateReset (z_streamp strm);
556/* 498/*
@@ -568,27 +510,6 @@ static inline unsigned long deflateBound(unsigned long s)
568 return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11; 510 return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
569} 511}
570 512
571#if 0
572extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
573#endif
574/*
575 Dynamically update the compression level and compression strategy. The
576 interpretation of level and strategy is as in deflateInit2. This can be
577 used to switch between compression and straight copy of the input data, or
578 to switch to a different kind of input data requiring a different
579 strategy. If the compression level is changed, the input available so far
580 is compressed with the old level (and may be flushed); the new level will
581 take effect only at the next call of deflate().
582
583 Before the call of deflateParams, the stream state must be set as for
584 a call of deflate(), since the currently available input may have to
585 be compressed and flushed. In particular, strm->avail_out must be non-zero.
586
587 deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
588 stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
589 if strm->avail_out was zero.
590*/
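A hedged sketch of the mid-stream switch described above, assuming the #if 0 guard is dropped; switch_to_stored is an illustrative name. avail_out must be non-zero because input buffered at the old level may be compressed and flushed here.

static int switch_to_stored(z_streamp strm, Byte *out, uInt out_len)
{
	strm->next_out = out;
	strm->avail_out = out_len;
	return zlib_deflateParams(strm, Z_NO_COMPRESSION,
				  Z_DEFAULT_STRATEGY);
}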
591
592/* 513/*
593extern int inflateInit2 (z_streamp strm, int windowBits); 514extern int inflateInit2 (z_streamp strm, int windowBits);
594 515
@@ -631,45 +552,6 @@ extern int inflateInit2 (z_streamp strm, int windowBits);
631 and avail_out are unchanged.) 552 and avail_out are unchanged.)
632*/ 553*/
633 554
634extern int zlib_inflateSetDictionary (z_streamp strm,
635 const Byte *dictionary,
636 uInt dictLength);
637/*
638 Initializes the decompression dictionary from the given uncompressed byte
639 sequence. This function must be called immediately after a call of inflate,
640 if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
641 can be determined from the adler32 value returned by that call of inflate.
642 The compressor and decompressor must use exactly the same dictionary (see
643 deflateSetDictionary). For raw inflate, this function can be called
644 immediately after inflateInit2() or inflateReset() and before any call of
645 inflate() to set the dictionary. The application must ensure that the
646 dictionary that was used for compression is provided.
647
648 inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
649 parameter is invalid (such as NULL dictionary) or the stream state is
650 inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
651 expected one (incorrect adler32 value). inflateSetDictionary does not
652 perform any decompression: this will be done by subsequent calls of
653 inflate().
654*/
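A minimal sketch of the Z_NEED_DICT handshake described above, reusing the illustrative sample_dict from the deflate sketch earlier; the bytes must match what the compressor used, and after Z_NEED_DICT strm->adler identifies the expected dictionary.

static int inflate_with_dict(z_streamp strm)
{
	int ret = zlib_inflate(strm, Z_NO_FLUSH);

	if (ret == Z_NEED_DICT)
		ret = zlib_inflateSetDictionary(strm, sample_dict,
						sizeof(sample_dict) - 1);
	if (ret == Z_OK)
		ret = zlib_inflate(strm, Z_NO_FLUSH);	/* resume */
	return ret;
}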
655
656#if 0
657extern int zlib_inflateSync (z_streamp strm);
658#endif
659/*
660 Skips invalid compressed data until a full flush point (see above the
661 description of deflate with Z_FULL_FLUSH) can be found, or until all
662 available input is skipped. No output is provided.
663
664 inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
665 if no more input was provided, Z_DATA_ERROR if no flush point has been found,
666 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
667 case, the application may save the current value of total_in which
668 indicates where valid compressed data was found. In the error case, the
669 application may repeatedly call inflateSync, providing more input each time,
670 until success or end of the input data.
671*/
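A hedged sketch of the retry loop described above, assuming the #if 0 guard is dropped; refill_input() is a hypothetical helper that tops up strm->next_in/avail_in and returns 0 at end of data.

static int skip_to_flush_point(z_streamp strm)
{
	int ret;

	do {
		ret = zlib_inflateSync(strm);
		if (ret == Z_OK || ret == Z_STREAM_ERROR)
			break;	/* flush point found, or bad stream state */
		/* Z_BUF_ERROR or Z_DATA_ERROR: retry with more input */
	} while (refill_input(strm));

	return ret;	/* on Z_OK, strm->total_in marks valid data */
}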
672
673extern int zlib_inflateReset (z_streamp strm); 555extern int zlib_inflateReset (z_streamp strm);
674/* 556/*
675 This function is equivalent to inflateEnd followed by inflateInit, 557 This function is equivalent to inflateEnd followed by inflateInit,
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
new file mode 100644
index 000000000000..f14bd75f08b3
--- /dev/null
+++ b/include/linux/zpool.h
@@ -0,0 +1,106 @@
1/*
2 * zpool memory storage api
3 *
4 * Copyright (C) 2014 Dan Streetman
5 *
6 * This is a common frontend for the zbud and zsmalloc memory
7 * storage pool implementations. Typically, this is used to
8 * store compressed memory.
9 */
10
11#ifndef _ZPOOL_H_
12#define _ZPOOL_H_
13
14struct zpool;
15
16struct zpool_ops {
17 int (*evict)(struct zpool *pool, unsigned long handle);
18};
19
20/*
21 * Control how a handle is mapped. It will be ignored if the
22 * implementation does not support it. Its use is optional.
23 * Note that this does not refer to memory protection; it
24 * refers to how the memory will be copied in/out if copying
25 * is necessary during mapping; read-write is the safest as
26 * it copies the existing memory in on map, and copies the
27 * changed memory back out on unmap. Write-only does not copy
28 * in the memory and should only be used for initialization.
29 * If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
30 */
31enum zpool_mapmode {
32 ZPOOL_MM_RW, /* normal read-write mapping */
33 ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
34 ZPOOL_MM_WO, /* write-only (no copy-in at map time) */
35
36 ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
37};
38
39struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops);
40
41char *zpool_get_type(struct zpool *pool);
42
43void zpool_destroy_pool(struct zpool *pool);
44
45int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
46 unsigned long *handle);
47
48void zpool_free(struct zpool *pool, unsigned long handle);
49
50int zpool_shrink(struct zpool *pool, unsigned int pages,
51 unsigned int *reclaimed);
52
53void *zpool_map_handle(struct zpool *pool, unsigned long handle,
54 enum zpool_mapmode mm);
55
56void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
57
58u64 zpool_get_total_size(struct zpool *pool);
59
60
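A minimal sketch of the consumer API declared above: create a pool, store one object, map it for its first write, and tear everything down. "zbud" is one backend this frontend wraps and must be built in; passing NULL ops simply wires up no eviction callback here. zpool_smoke_test is an illustrative name; memset and GFP_KERNEL come from the usual kernel headers.

static int zpool_smoke_test(void)
{
	struct zpool *pool;
	unsigned long handle;
	char *buf;
	int ret;

	pool = zpool_create_pool("zbud", GFP_KERNEL, NULL);
	if (!pool)
		return -ENOMEM;

	ret = zpool_malloc(pool, 64, GFP_KERNEL, &handle);
	if (ret) {
		zpool_destroy_pool(pool);
		return ret;
	}

	/* ZPOOL_MM_WO skips the copy-in, which is fine for a first write. */
	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memset(buf, 0, 64);
	zpool_unmap_handle(pool, handle);

	zpool_free(pool, handle);
	zpool_destroy_pool(pool);
	return 0;
}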
61/**
62 * struct zpool_driver - driver implementation for zpool
63 * @type: name of the driver.
64 * @list: entry in the list of zpool drivers.
65 * @create: create a new pool.
66 * @destroy: destroy a pool.
67 * @malloc: allocate mem from a pool.
68 * @free: free mem from a pool.
69 * @shrink: shrink the pool.
70 * @map: map a handle.
71 * @unmap: unmap a handle.
72 * @total_size: get total size of a pool.
73 *
74 * This is created by a zpool implementation and registered
75 * with zpool.
76 */
77struct zpool_driver {
78 char *type;
79 struct module *owner;
80 atomic_t refcount;
81 struct list_head list;
82
83 void *(*create)(gfp_t gfp, struct zpool_ops *ops);
84 void (*destroy)(void *pool);
85
86 int (*malloc)(void *pool, size_t size, gfp_t gfp,
87 unsigned long *handle);
88 void (*free)(void *pool, unsigned long handle);
89
90 int (*shrink)(void *pool, unsigned int pages,
91 unsigned int *reclaimed);
92
93 void *(*map)(void *pool, unsigned long handle,
94 enum zpool_mapmode mm);
95 void (*unmap)(void *pool, unsigned long handle);
96
97 u64 (*total_size)(void *pool);
98};
99
100void zpool_register_driver(struct zpool_driver *driver);
101
102int zpool_unregister_driver(struct zpool_driver *driver);
103
104int zpool_evict(void *pool, unsigned long handle);
105
106#endif
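A hedged sketch of the implementation side: a backend fills in struct zpool_driver and registers it at module init. Only create/destroy are spelled out; the remaining ops follow the same shape. All "toy_" names are illustrative; kzalloc/kfree come from <linux/slab.h> and THIS_MODULE/module_init from <linux/module.h>.

struct toy_pool {
	struct zpool_ops *ops;	/* backend state would live here */
};

static void *toy_create(gfp_t gfp, struct zpool_ops *ops)
{
	struct toy_pool *pool = kzalloc(sizeof(*pool), gfp);

	if (pool)
		pool->ops = ops;
	return pool;
}

static void toy_destroy(void *pool)
{
	kfree(pool);
}

static struct zpool_driver toy_driver = {
	.type		= "toy",
	.owner		= THIS_MODULE,
	.create		= toy_create,
	.destroy	= toy_destroy,
	/* .malloc, .free, .shrink, .map, .unmap, .total_size likewise */
};

static int __init toy_zpool_init(void)
{
	zpool_register_driver(&toy_driver);
	return 0;
}
module_init(toy_zpool_init);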
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 4e4f2f8b1ac2..dd2b5467d905 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -17,6 +17,7 @@
17 {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \ 17 {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
18 {MR_SYSCALL, "syscall_or_cpuset"}, \ 18 {MR_SYSCALL, "syscall_or_cpuset"}, \
19 {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \ 19 {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
20 {MR_NUMA_MISPLACED, "numa_misplaced"}, \
20 {MR_CMA, "cma"} 21 {MR_CMA, "cma"}
21 22
22TRACE_EVENT(mm_migrate_pages, 23TRACE_EVENT(mm_migrate_pages,
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index 1c9fabde69e4..ce0803b8d05f 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion,
28 28
29 TP_PROTO( 29 TP_PROTO(
30 struct page *page, 30 struct page *page,
31 unsigned long pfn, 31 int lru
32 int lru,
33 unsigned long flags
34 ), 32 ),
35 33
36 TP_ARGS(page, pfn, lru, flags), 34 TP_ARGS(page, lru),
37 35
38 TP_STRUCT__entry( 36 TP_STRUCT__entry(
39 __field(struct page *, page ) 37 __field(struct page *, page )
@@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion,
44 42
45 TP_fast_assign( 43 TP_fast_assign(
46 __entry->page = page; 44 __entry->page = page;
47 __entry->pfn = pfn; 45 __entry->pfn = page_to_pfn(page);
48 __entry->lru = lru; 46 __entry->lru = lru;
49 __entry->flags = flags; 47 __entry->flags = trace_pagemap_flags(page);
50 ), 48 ),
51 49
52 /* Flag format is based on page-types.c formatting for pagemap */ 50 /* Flag format is based on page-types.c formatting for pagemap */
@@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion,
64 62
65TRACE_EVENT(mm_lru_activate, 63TRACE_EVENT(mm_lru_activate,
66 64
67 TP_PROTO(struct page *page, unsigned long pfn), 65 TP_PROTO(struct page *page),
68 66
69 TP_ARGS(page, pfn), 67 TP_ARGS(page),
70 68
71 TP_STRUCT__entry( 69 TP_STRUCT__entry(
72 __field(struct page *, page ) 70 __field(struct page *, page )
@@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate,
75 73
76 TP_fast_assign( 74 TP_fast_assign(
77 __entry->page = page; 75 __entry->page = page;
78 __entry->pfn = pfn; 76 __entry->pfn = page_to_pfn(page);
79 ), 77 ),
80 78
81 /* Flag format is based on page-types.c formatting for pagemap */ 79 /* Flag format is based on page-types.c formatting for pagemap */
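The call-site effect of the two event changes above, as a sketch (the real callers live in mm/swap.c): pfn and flags are now derived inside TP_fast_assign at trace time, so callers pass only the page.

	/* before */
	trace_mm_lru_activate(page, page_to_pfn(page));
	/* after */
	trace_mm_lru_activate(page);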