 drivers/base/node.c        |  4
 fs/proc/meminfo.c          |  4
 fs/proc/page.c             |  2
 include/linux/mmzone.h     | 13
 include/linux/page-flags.h | 16
 include/linux/pagemap.h    | 12
 include/linux/rmap.h       |  7
 include/linux/swap.h       | 19
 include/linux/vmstat.h     |  2
 kernel/sysctl.c            |  2
 mm/Kconfig                 | 14
 mm/internal.h              |  6
 mm/mlock.c                 | 22
 mm/page_alloc.c            |  9
 mm/rmap.c                  |  3
 mm/vmscan.c                | 17
 mm/vmstat.c                |  4
 17 files changed, 3 insertions(+), 153 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 40b809742a1c..91d4087b4039 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 	       "Node %d Inactive(anon): %8lu kB\n"
 	       "Node %d Active(file): %8lu kB\n"
 	       "Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
 	       "Node %d Unevictable: %8lu kB\n"
 	       "Node %d Mlocked: %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
 	       "Node %d HighTotal: %8lu kB\n"
 	       "Node %d HighFree: %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 	       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
 	       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
 	       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
 	       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
 	       nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
 	       nid, K(i.totalhigh),
 	       nid, K(i.freehigh),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c6b0302af4c4..d5c410d47fae 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		"Inactive(anon): %8lu kB\n"
 		"Active(file): %8lu kB\n"
 		"Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
 		"Unevictable: %8lu kB\n"
 		"Mlocked: %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
 		"HighTotal: %8lu kB\n"
 		"HighFree: %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(pages[LRU_INACTIVE_ANON]),
 		K(pages[LRU_ACTIVE_FILE]),
 		K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		K(pages[LRU_UNEVICTABLE]),
 		K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
 		K(i.totalhigh),
 		K(i.freehigh),
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 9d926bd279a4..2707c6c7a20f 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -172,10 +172,8 @@ static u64 get_uflags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
 	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
 	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
-#endif
 
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index db976b9f8791..889598537370 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,13 +83,8 @@ enum zone_stat_item {
 	NR_ACTIVE_ANON,		/* " " " " " */
 	NR_INACTIVE_FILE,	/* " " " " " */
 	NR_ACTIVE_FILE,		/* " " " " " */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	NR_UNEVICTABLE,		/* " " " " " */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
-#else
-	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-	NR_MLOCK = NR_ACTIVE_FILE,
-#endif
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
@@ -132,11 +127,7 @@ enum lru_list {
 	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
 	LRU_UNEVICTABLE,
-#else
-	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
 	NR_LRU_LISTS
 };
 
@@ -156,11 +147,7 @@ static inline int is_active_lru(enum lru_list l)
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
 	return (l == LRU_UNEVICTABLE);
-#else
-	return 0;
-#endif
 }
 
 enum zone_watermarks {
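For reference, this is how the LRU list definitions read once the hunks above are applied, reconstructed from the right-hand side of the diff and wrapped in a small standalone program. The LRU_BASE/LRU_ACTIVE/LRU_FILE values (0, 1 and 2, as conventionally defined earlier in this header) are assumptions for the sketch and are not part of the patch.

/* Sketch: post-patch shape of enum lru_list and is_unevictable_lru(),
 * reconstructed from the hunks above.  LRU_BASE, LRU_ACTIVE and LRU_FILE
 * are assumed to be 0, 1 and 2; they are defined elsewhere in mmzone.h
 * and are not touched by this diff. */
#include <stdio.h>

#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,		/* now unconditional */
	NR_LRU_LISTS
};

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);	/* no #else branch returning 0 anymore */
}

int main(void)
{
	enum lru_list l;

	for (l = 0; l < NR_LRU_LISTS; l++)
		printf("lru %d unevictable=%d\n", (int)l, is_unevictable_lru(l));
	return 0;
}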
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 62214c7d2d93..d6792f88a176 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -95,9 +95,7 @@ enum pageflags {
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable" */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
 	SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-	__CLEARPAGEFLAG_NOOP(Unevictable)
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE	0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
 	 1 << PG_private | 1 << PG_private_2 | \
 	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
-	 __PG_UNEVICTABLE | __PG_MLOCKED)
+	 1 << PG_unevictable | __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34da5230faab..aec3252afcf5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -22,9 +22,7 @@ enum mapping_flags {
 	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
 	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
 	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
-#endif
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
 	}
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
 	set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-	return 0;
-}
-#endif
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
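With the #else stubs gone, AS_UNEVICTABLE is a real flag bit in every configuration. Below is a minimal user-space model of these helpers, not kernel code: the bit index is arbitrary, plain C bit operations stand in for the kernel's atomic set_bit()/test_bit(), and struct address_space here is just a stub holding a flags word.

/* Illustration only: models the behaviour of the now-unconditional
 * mapping_*_unevictable() helpers with ordinary bit arithmetic.
 * AS_UNEVICTABLE's real value depends on __GFP_BITS_SHIFT; 3 is an
 * assumed stand-in for the sketch. */
#include <stdio.h>

#define AS_UNEVICTABLE 3	/* assumed bit position */

struct address_space {
	unsigned long flags;
};

static void mapping_set_unevictable(struct address_space *mapping)
{
	mapping->flags |= (1UL << AS_UNEVICTABLE);
}

static void mapping_clear_unevictable(struct address_space *mapping)
{
	mapping->flags &= ~(1UL << AS_UNEVICTABLE);
}

static int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return !!(mapping->flags & (1UL << AS_UNEVICTABLE));
	return 0;
}

int main(void)
{
	struct address_space ramdisk = { 0 };

	mapping_set_unevictable(&ramdisk);	/* e.g., ramdisk, SHM_LOCK */
	printf("unevictable=%d\n", mapping_unevictable(&ramdisk));
	mapping_clear_unevictable(&ramdisk);
	printf("unevictable=%d\n", mapping_unevictable(&ramdisk));
	return 0;
}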
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b35bc0e19cd9..619379a1dd98 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -105,18 +105,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int page_mkclean(struct page *);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-	return 0;	/* a.k.a. SWAP_SUCCESS */
-}
-#endif
 
 #else	/* !CONFIG_MMU */
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d476aad3ff57..f30c06908f09 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -235,7 +235,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
 extern void scan_mapping_unevictable_pages(struct address_space *);
 
@@ -244,24 +243,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
-	return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif
 
 extern int kswapd_run(int nid);
 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 524cd1b28ecb..ff4696c6dce3 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,7 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
 		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
 		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
@@ -50,7 +49,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
 		UNEVICTABLE_MLOCKFREED,
-#endif
 		NR_VM_EVENT_ITEMS
 };
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0e51a35a4486..2ccee08f92f1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1325,7 +1325,6 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one,
 	},
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "scan_unevictable_pages",
@@ -1334,7 +1333,6 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &scan_unevictable_handler,
 	},
-#endif
 	/*
 	 * NOTE: do not add new entries to this table unless you have read
 	 * Documentation/sysctl/ctl_unnumbered.txt
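Because the vm_table entry is no longer conditional, /proc/sys/vm/scan_unevictable_pages exists on every build of this kernel. The sketch below assumes only the procname and 0644 mode shown in the entry above; it needs root, and the actual rescan work is done by scan_unevictable_handler() in mm/vmscan.c.

/* Sketch: poke the now-always-present vm.scan_unevictable_pages sysctl.
 * Writing a non-zero value asks the kernel to rescan the unevictable
 * lists; requires root, and the knob only exists on kernels of this
 * vintage. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/scan_unevictable_pages", "w");

	if (!f) {
		perror("scan_unevictable_pages");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}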
diff --git a/mm/Kconfig b/mm/Kconfig
index 71830ba7b986..97d2c88b745e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
 
-config UNEVICTABLE_LRU
-	bool "Add LRU list to track non-evictable pages"
-	default y
-	help
-	  Keeps unevictable pages off of the active and inactive pageout
-	  lists, so kswapd will not waste CPU time or have its balancing
-	  algorithms thrown off by scanning these pages. Selecting this
-	  will use one page flag and increase the code size a little,
-	  say Y unless you know what you are doing.
-
-	  See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
 	bool
 	default y if MMU=y
 
 config HAVE_MLOCKED_PAGE_BIT
 	bool
-	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+	default y if HAVE_MLOCK=y
 
 config MMU_NOTIFIER
 	bool
diff --git a/mm/internal.h b/mm/internal.h
index b4ac332e8072..f02c7508068d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -73,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -85,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 	if (TestClearPageUnevictable(old))
 		SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
diff --git a/mm/mlock.c b/mm/mlock.c
index ac130433c7d3..45eb650b9654 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
 	return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED. No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-				    unsigned long start, unsigned long end,
-				    int mlock)
-{
-	if (mlock && (vma->vm_flags & VM_LOCKED))
-		return make_pages_present(start, end);
-	return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-	return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specfied address range
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00e293734fc9..c95a77cd581b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2077,19 +2077,14 @@ void show_free_areas(void)
 
 	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
 		" inactive_file:%lu"
-//TODO: check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
 		" unevictable:%lu"
-#endif
 		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		global_page_state(NR_UNEVICTABLE),
-#endif
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
@@ -2113,9 +2108,7 @@ void show_free_areas(void)
 			" inactive_anon:%lukB"
 			" active_file:%lukB"
 			" inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
 			" unevictable:%lukB"
-#endif
 			" present:%lukB"
 			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
@@ -2129,9 +2122,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
 			K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
 			K(zone->present_pages),
 			zone->pages_scanned,
 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1202,7 +1202,6 @@ int try_to_unmap(struct page *page, int migration)
 	return ret;
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1226,4 +1225,4 @@ int try_to_munlock(struct page *page)
 	else
 		return try_to_unmap_file(page, 1, 0);
 }
-#endif
+
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 879d034930c4..2c4b945b011f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -514,7 +514,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
 	int lru;
@@ -568,20 +567,6 @@ redo:
 	put_page(page);		/* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-	int lru;
-	VM_BUG_ON(PageLRU(page));
-
-	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-	lru_cache_add_lru(page, lru);
-	put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -2470,7 +2455,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2717,4 +2701,3 @@ void scan_unevictable_unregister_node(struct node *node)
 	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1e151cf6bf86..1e3aa8139f22 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
 	"nr_active_anon",
 	"nr_inactive_file",
 	"nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"nr_unevictable",
 	"nr_mlock",
-#endif
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -687,7 +685,6 @@ static const char * const vmstat_text[] = {
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -697,7 +694,6 @@ static const char * const vmstat_text[] = {
 	"unevictable_pgs_stranded",
 	"unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
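With the #ifdefs removed from vmstat_text[], the nr_unevictable/nr_mlock counters and the unevictable_pgs_* events appear in /proc/vmstat on every build. The sketch below is a simple reader that relies only on the standard "name value" line format of /proc/vmstat and the counter names listed in the hunks above.

/* Sketch: dump the unevictable-LRU counters that are now always present
 * in /proc/vmstat (nr_unevictable, nr_mlock, unevictable_pgs_*). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "nr_unevictable", 14) ||
		    !strncmp(line, "nr_mlock", 8) ||
		    !strncmp(line, "unevictable_pgs_", 16))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}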