Diffstat (limited to 'mm'):
 mm/Kconfig    | 8 ++++++++
 mm/internal.h | 8 +++++---
 2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index a5b77811fdf2..8c895973dfba 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -214,5 +214,13 @@ config UNEVICTABLE_LRU
 	  will use one page flag and increase the code size a little,
 	  say Y unless you know what you are doing.
 
+config HAVE_MLOCK
+	bool
+	default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+	bool
+	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
 config MMU_NOTIFIER
 	bool
diff --git a/mm/internal.h b/mm/internal.h
index 478223b73a2a..987bb03fbdd8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+#ifdef CONFIG_HAVE_MLOCK
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
+#endif
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
 	}
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void free_page_mlock(struct page *page) { }
 
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
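
For context, the CONFIG_HAVE_MLOCKED_PAGE_BIT switch above relies on the
usual kernel stub idiom: when the symbol is set, the real mlock helpers are
compiled in; when it is clear, the #else branch supplies empty static
inlines so that call sites build unchanged on configurations without the
mlocked page bit. A minimal stand-alone sketch of that idiom, using
illustrative names (HAVE_FEATURE and feature_mark are not from the kernel
tree):

/* Illustrative sketch of the #ifdef/stub idiom used in mm/internal.h
 * above.  HAVE_FEATURE plays the role of CONFIG_HAVE_MLOCKED_PAGE_BIT;
 * build with -DHAVE_FEATURE for the real helper, without it for the
 * empty stub. */
#include <stdio.h>

#ifdef HAVE_FEATURE
/* Real helper, compiled only when the feature is configured in. */
static inline void feature_mark(int page)
{
	printf("marking page %d\n", page);
}
#else /* HAVE_FEATURE */
/* Empty stub: callers compile unchanged and the call optimizes away. */
static inline void feature_mark(int page) { }
#endif /* HAVE_FEATURE */

int main(void)
{
	feature_mark(42);	/* no #ifdef needed at the call site */
	return 0;
}

Compiled with -DHAVE_FEATURE the call prints; without it the stub is empty
and the compiler drops the call entirely. That is why the diff can guard
the extern declarations with #ifdef CONFIG_HAVE_MLOCK and retarget the
is_mlocked_vma()/mlock_vma_page()/free_page_mlock() stubs without touching
any caller.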