Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 3db17b2a1ac6..4ebf0bef9a39 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -61,6 +61,10 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+extern int mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_all(struct vm_area_struct *vma);
+
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
@@ -79,6 +83,65 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * Called only in fault path via page_evictable() for a new page
+ * to determine if it's being mapped into a LOCKED vma.
+ * If so, mark page as mlocked.
+ */
+static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+{
+	VM_BUG_ON(PageLRU(page));
+
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+		return 0;
+
+	SetPageMlocked(page);
+	return 1;
+}
+
+/*
+ * must be called with vma's mmap_sem held for read, and page locked.
+ */
+extern void mlock_vma_page(struct page *page);
+
+/*
+ * Clear the page's PageMlocked().  This can be useful in a situation where
+ * we want to unconditionally remove a page from the pagecache -- e.g.,
+ * on truncation or freeing.
+ *
+ * It is legal to call this function for any page, mlocked or not.
+ * If called for a page that is still mapped by mlocked vmas, all we do
+ * is revert to lazy LRU behaviour -- semantics are not broken.
+ */
+extern void __clear_page_mlock(struct page *page);
+static inline void clear_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page)))
+		__clear_page_mlock(page);
+}
+
+/*
+ * mlock_migrate_page - called only from migrate_page_copy() to
+ * migrate the Mlocked page flag
+ */
+static inline void mlock_migrate_page(struct page *newpage, struct page *page)
+{
+	if (TestClearPageMlocked(page))
+		SetPageMlocked(newpage);
+}
+
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+{
+	return 0;
+}
+static inline void clear_page_mlock(struct page *page) { }
+static inline void mlock_vma_page(struct page *page) { }
+static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+
+#endif /* CONFIG_UNEVICTABLE_LRU */
 
 /*
  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
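
The predicate in is_mlocked_vma() above is a single mask-and-compare: (vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED holds both when VM_LOCKED is clear and when any VM_SPECIAL bit is set alongside it, so special mappings are never marked mlocked even inside a locked vma. A minimal userspace sketch of that test follows; the flag values are illustrative placeholders, not the kernel's real VM_* constants.

/* Userspace sketch of the is_mlocked_vma() predicate above.
 * Flag values are placeholders, not the kernel's VM_* constants. */
#include <stdio.h>

#define VM_LOCKED  0x1u			/* placeholder value */
#define VM_IO      0x2u			/* placeholder members of VM_SPECIAL */
#define VM_PFNMAP  0x4u
#define VM_SPECIAL (VM_IO | VM_PFNMAP)

/* Returns 1 only when the vma is locked and has no special bits set,
 * mirroring the inverted test in is_mlocked_vma(). */
static int would_mark_mlocked(unsigned int vm_flags)
{
	return (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
}

int main(void)
{
	printf("%d\n", would_mark_mlocked(0));				/* 0: not locked */
	printf("%d\n", would_mark_mlocked(VM_LOCKED));			/* 1: plain mlocked vma */
	printf("%d\n", would_mark_mlocked(VM_LOCKED | VM_PFNMAP));	/* 0: locked but special */
	return 0;
}

clear_page_mlock() and mlock_migrate_page() lean on the same single-bit discipline: TestClearPageMlocked() is an atomic test-and-clear, so even if two paths race, exactly one of them sees the bit set and performs the clear or the transfer to the new page.
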
@@ -148,4 +211,12 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
+#define GUP_FLAGS_WRITE				0x1
+#define GUP_FLAGS_FORCE				0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS	0x4
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, int flags,
+		     struct page **pages, struct vm_area_struct **vmas);
+
 #endif
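
The GUP_FLAGS_* bits fold the separate write/force arguments of a get_user_pages()-style caller into the single flags word that __get_user_pages() takes; judging by its name, GUP_FLAGS_IGNORE_VMA_PERMISSIONS lets a caller look up pages even where the vma's protections would normally refuse the access. A userspace sketch of the write/force-to-flags translation, with hypothetical gup_stub()/gup_wrapper() names standing in for the kernel functions:

/* Userspace sketch: how separate (write, force) ints fold into the
 * flags word taken by __get_user_pages().  gup_stub() and gup_wrapper()
 * are hypothetical names for illustration, not kernel functions. */
#include <stdio.h>

#define GUP_FLAGS_WRITE				0x1
#define GUP_FLAGS_FORCE				0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS	0x4

/* Stand-in for __get_user_pages(); just decodes the bits it was given. */
static int gup_stub(int flags)
{
	printf("flags=%#x write=%d force=%d ignore_perms=%d\n", flags,
	       !!(flags & GUP_FLAGS_WRITE),
	       !!(flags & GUP_FLAGS_FORCE),
	       !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS));
	return 0;
}

/* Mirrors the translation a wrapper around __get_user_pages() would do. */
static int gup_wrapper(int write, int force)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return gup_stub(flags);
}

int main(void)
{
	gup_wrapper(1, 0);	/* writable lookup */
	gup_wrapper(1, 1);	/* write + force */
	return 0;
}
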