 include/linux/page-flags.h |  8 +++-----
 mm/Kconfig                 |  8 --------
 mm/internal.h              | 26 ++++++++++++--------------
 mm/memory-failure.c        |  2 --
 mm/page_alloc.c            |  4 ----
 mm/rmap.c                  | 15 ++++-----------
 6 files changed, 19 insertions(+), 44 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b173955..49e907bd067f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
 	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable"  */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
 	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
 #else
-#define MLOCK_PAGES 0
 PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
 	TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
 #endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_MLOCKED		0
diff --git a/mm/Kconfig b/mm/Kconfig
index 44cf6f0a3a6d..77b4980d6143 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -200,14 +200,6 @@ config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
 
-config HAVE_MLOCK
-	bool
-	default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
-	bool
-	default y if HAVE_MLOCK=y
-
 config MMU_NOTIFIER
 	bool
 
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..cb7d92d0a46d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 		SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ac49fef95ab..50d4f8d7024a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -582,10 +582,8 @@ static struct page_state {
 	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
 	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
 	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },
-#endif
 
 	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
 	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41e..59d2e88fb47c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * free_page_mlock() -- clean up attempts to free and mlocked() page.
  * Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
 	__dec_zone_page_state(page, NR_MLOCK);
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
 
 static inline int free_pages_check(struct page *page)
 {
diff --git a/mm/rmap.c b/mm/rmap.c
index c3d6dc4223a4..eb3dfc8355ea 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -788,7 +788,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
-		if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+		if (TTU_ACTION(flags) == TTU_MUNLOCK)
 			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -861,7 +861,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 
-	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+	if (ret == SWAP_MLOCK) {
 		ret = SWAP_AGAIN;
 		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 			if (vma->vm_flags & VM_LOCKED) {
@@ -938,11 +938,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		return ret;
 
 	/*
-	 * MLOCK_PAGES => feature is configured.
-	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
 	 * keep the sem while scanning the cluster for mlocking pages.
 	 */
-	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 		locked_vma = (vma->vm_flags & VM_LOCKED);
 		if (!locked_vma)
 			up_read(&vma->vm_mm->mmap_sem);	/* don't need it */
@@ -1075,9 +1074,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-		    (vma->vm_flags & VM_LOCKED))
-			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
 			max_nl_cursor = cursor;
@@ -1110,9 +1106,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-			    (vma->vm_flags & VM_LOCKED))
-				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
 				cursor < vma->vm_end - vma->vm_start) {