diff options
Diffstat (limited to 'mm/internal.h')
 -rw-r--r--  mm/internal.h | 8 +++++---
1 files changed, 5 insertions, 3 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 478223b73a2a..987bb03fbdd8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+#ifdef CONFIG_HAVE_MLOCK
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
+#endif
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
 	}
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void free_page_mlock(struct page *page) { }
 
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
