Diffstat (limited to 'mm/internal.h')
 mm/internal.h | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..cb7d92d0a46d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 	SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* !CONFIG_MMU */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
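
As the diff shows, the mlock_vma_pages_range()/munlock_vma_pages_range() declarations move out of their own CONFIG_HAVE_MLOCK guard into the existing block, now keyed on CONFIG_MMU, whose #else branch supplies empty inline stubs (is_mlocked_vma(), clear_page_mlock(), mlock_vma_page(), mlock_migrate_page()) so callers compile unchanged on nommu builds. A minimal userspace sketch of that stub pattern, with hypothetical names (mlock_pages() is not a kernel symbol):

#include <stdio.h>

#define CONFIG_MMU	/* comment this out to mimic a nommu build */

#ifdef CONFIG_MMU
/* "Real" implementation, built only when the feature is available. */
static void mlock_pages(unsigned long start, unsigned long end)
{
	printf("mlocking pages %lu..%lu\n", start, end);
}
#else
/* No-op stub with the same signature, so callers need no #ifdefs. */
static void mlock_pages(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}
#endif

int main(void)
{
	mlock_pages(0, 16);	/* identical call site under both configs */
	return 0;
}

Either way the call site is identical; the configuration decides whether the body does real work or compiles to nothing, which is presumably why the separate CONFIG_HAVE_MLOCK guard could be dropped here.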