author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2009-06-16 18:32:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 22:47:42 -0400
commit     6837765963f1723e80ca97b1fae660f3a60d77df (patch)
tree       a9a6ed4b7e3bf188966da78b04bf39298f24375a /mm
parent     bce7394a3ef82b8477952fbab838e4a6e8cb47d2 (diff)
mm: remove CONFIG_UNEVICTABLE_LRU config option
Currently, nobody wants to turn UNEVICTABLE_LRU off. Thus this
configurability is unnecessary.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <andi@firstfloor.org>
Acked-by: Minchan Kim <minchan.kim@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
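
The change is purely mechanical: each `#ifdef CONFIG_UNEVICTABLE_LRU` / `#else` / `#endif` guard in the files below is dropped, the formerly conditional code is kept unconditionally, and the stub fallbacks for the =n case are deleted. A minimal sketch of the pattern, adapted from the mm/internal.h hunk in this patch (not verbatim kernel source; the "before" and "after" states are shown together for illustration, only one exists in the tree at a time):

```c
/* Before: two compile-time variants of the same helper. */
#ifdef CONFIG_UNEVICTABLE_LRU
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	/* no-op stub for UNEVICTABLE_LRU=n builds */
}
#endif

/* After: only the real implementation remains; the guard and the stub are gone. */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
```

With UNEVICTABLE_LRU always built in, HAVE_MLOCKED_PAGE_BIT in mm/Kconfig now depends only on HAVE_MLOCK, as the first hunk below shows.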
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig      | 14
-rw-r--r--  mm/internal.h   |  6
-rw-r--r--  mm/mlock.c      | 22
-rw-r--r--  mm/page_alloc.c |  9
-rw-r--r--  mm/rmap.c       |  3
-rw-r--r--  mm/vmscan.c     | 17
-rw-r--r--  mm/vmstat.c     |  4

7 files changed, 2 insertions(+), 73 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 71830ba7b986..97d2c88b745e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
 
-config UNEVICTABLE_LRU
-	bool "Add LRU list to track non-evictable pages"
-	default y
-	help
-	  Keeps unevictable pages off of the active and inactive pageout
-	  lists, so kswapd will not waste CPU time or have its balancing
-	  algorithms thrown off by scanning these pages. Selecting this
-	  will use one page flag and increase the code size a little,
-	  say Y unless you know what you are doing.
-
-	  See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
 	bool
 	default y if MMU=y
 
 config HAVE_MLOCKED_PAGE_BIT
 	bool
-	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+	default y if HAVE_MLOCK=y
 
 config MMU_NOTIFIER
 	bool
diff --git a/mm/internal.h b/mm/internal.h
index b4ac332e8072..f02c7508068d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -73,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -85,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 	if (TestClearPageUnevictable(old))
 		SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
diff --git a/mm/mlock.c b/mm/mlock.c
index ac130433c7d3..45eb650b9654 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
 	return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED. No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-				    unsigned long start, unsigned long end,
-				    int mlock)
-{
-	if (mlock && (vma->vm_flags & VM_LOCKED))
-		return make_pages_present(start, end);
-	return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-	return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specfied address range
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00e293734fc9..c95a77cd581b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2077,19 +2077,14 @@ void show_free_areas(void)
 
 	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
 		" inactive_file:%lu"
-//TODO: check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
 		" unevictable:%lu"
-#endif
 		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		global_page_state(NR_UNEVICTABLE),
-#endif
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
@@ -2113,9 +2108,7 @@ void show_free_areas(void)
 		" inactive_anon:%lukB"
 		" active_file:%lukB"
 		" inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
 		" unevictable:%lukB"
-#endif
 		" present:%lukB"
 		" pages_scanned:%lu"
 		" all_unreclaimable? %s"
@@ -2129,9 +2122,7 @@ void show_free_areas(void)
 		K(zone_page_state(zone, NR_INACTIVE_ANON)),
 		K(zone_page_state(zone, NR_ACTIVE_FILE)),
 		K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
 		K(zone->present_pages),
 		zone->pages_scanned,
 		(zone_is_all_unreclaimable(zone) ? "yes" : "no")
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1202,7 +1202,6 @@ int try_to_unmap(struct page *page, int migration)
 	return ret;
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1226,4 +1225,4 @@ int try_to_munlock(struct page *page)
 	else
 		return try_to_unmap_file(page, 1, 0);
 }
-#endif
+
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 879d034930c4..2c4b945b011f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -514,7 +514,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
 	int lru;
@@ -568,20 +567,6 @@ redo:
 	put_page(page);		/* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-	int lru;
-	VM_BUG_ON(PageLRU(page));
-
-	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-	lru_cache_add_lru(page, lru);
-	put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -2470,7 +2455,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2717,4 +2701,3 @@
 	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1e151cf6bf86..1e3aa8139f22 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
 	"nr_active_anon",
 	"nr_inactive_file",
 	"nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"nr_unevictable",
 	"nr_mlock",
-#endif
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -687,7 +685,6 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -697,7 +694,6 @@
 	"unevictable_pgs_stranded",
 	"unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,