Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	131
1 file changed, 131 insertions(+), 0 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 1f43f7416972..e4e728bdf324 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,6 +39,15 @@ static inline void __put_page(struct page *page)
 	atomic_dec(&page->_count);
 }
 
+/*
+ * in mm/vmscan.c:
+ */
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+
+/*
+ * in mm/page_alloc.c
+ */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
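
The hunk above exports two vmscan.c entry points to the rest of mm/. A minimal
sketch of how a caller is expected to pair them (hypothetical function, not part
of this patch; it assumes isolate_lru_page() returns 0 on success and takes its
own page reference, which putback_lru_page() drops when re-adding the page):

/* Sketch only: take a page off its LRU list, work on it, put it back. */
static int example_isolate_and_putback(struct page *page)
{
	int err = isolate_lru_page(page);	/* -EBUSY if page wasn't on an LRU */

	if (err)
		return err;

	/*
	 * The page is now off the LRU and we hold the reference that
	 * isolate_lru_page() took; do any private work on it here.
	 */

	putback_lru_page(page);	/* re-add to the appropriate LRU, drop that ref */
	return 0;
}
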
@@ -52,6 +61,120 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * unevictable_migrate_page() called only from migrate_page_copy() to
+ * migrate unevictable flag to new page.
+ * Note that the old page has been isolated from the LRU lists at this
+ * point so we don't need to worry about LRU statistics.
+ */
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+	if (TestClearPageUnevictable(old))
+		SetPageUnevictable(new);
+}
+#else
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+}
+#endif
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * Called only in fault path via page_evictable() for a new page
+ * to determine if it's being mapped into a LOCKED vma.
+ * If so, mark page as mlocked.
+ */
+static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+{
+	VM_BUG_ON(PageLRU(page));
+
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+		return 0;
+
+	if (!TestSetPageMlocked(page)) {
+		inc_zone_page_state(page, NR_MLOCK);
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	return 1;
+}
+
+/*
+ * must be called with vma's mmap_sem held for read, and page locked.
+ */
+extern void mlock_vma_page(struct page *page);
+
+/*
+ * Clear the page's PageMlocked().  This can be useful in a situation where
+ * we want to unconditionally remove a page from the pagecache -- e.g.,
+ * on truncation or freeing.
+ *
+ * It is legal to call this function for any page, mlocked or not.
+ * If called for a page that is still mapped by mlocked vmas, all we do
+ * is revert to lazy LRU behaviour -- semantics are not broken.
+ */
+extern void __clear_page_mlock(struct page *page);
+static inline void clear_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page)))
+		__clear_page_mlock(page);
+}
+
+/*
+ * mlock_migrate_page - called only from migrate_page_copy() to
+ * migrate the Mlocked page flag; update statistics.
+ */
+static inline void mlock_migrate_page(struct page *newpage, struct page *page)
+{
+	if (TestClearPageMlocked(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		SetPageMlocked(newpage);
+		__inc_zone_page_state(newpage, NR_MLOCK);
+		local_irq_restore(flags);
+	}
+}
+
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		__count_vm_event(UNEVICTABLE_MLOCKFREED);
+		local_irq_restore(flags);
+	}
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+{
+	return 0;
+}
+static inline void clear_page_mlock(struct page *page) { }
+static inline void mlock_vma_page(struct page *page) { }
+static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
+
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
 /*
  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
  * so all functions starting at paging_init should be marked __init
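
Taken together, the CONFIG_UNEVICTABLE_LRU helpers above split the work between
the fault path (is_mlocked_vma() marks a freshly mapped page PG_mlocked) and the
teardown paths (clear_page_mlock() and free_page_mlock() strip the flag again).
A hedged sketch of the intended call pattern, with hypothetical caller names not
taken from this patch:

/* Sketch only: how fault and truncation paths would use the helpers. */
static void example_fault_path(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * For a page being mapped into a VM_LOCKED (and non-special) vma:
	 * sets PG_mlocked, bumps NR_MLOCK, counts UNEVICTABLE_PGMLOCKED.
	 */
	if (is_mlocked_vma(vma, page))
		return;	/* vmscan will now treat the page as unevictable */
}

static void example_truncate_path(struct page *page)
{
	/* Legal for any page; a no-op unless PG_mlocked was set. */
	clear_page_mlock(page);
	/* ...then remove the page from the pagecache as usual... */
}
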
@@ -120,4 +243,12 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
+#define GUP_FLAGS_WRITE                  0x1
+#define GUP_FLAGS_FORCE                  0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, int flags,
+		     struct page **pages, struct vm_area_struct **vmas);
+
 #endif
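
The GUP_FLAGS_* bits fold get_user_pages()'s separate write/force arguments into
a single flags word for the new internal entry point. A sketch of an in-mm
caller (illustrative only, and assuming __get_user_pages() keeps the
get_user_pages() return convention of "number of pages pinned"):

/* Sketch only: pin one writable user page from the current task's mm. */
static int example_pin_user_page(unsigned long addr, struct page **pagep)
{
	int flags = GUP_FLAGS_WRITE | GUP_FLAGS_FORCE;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = __get_user_pages(current, current->mm, addr, 1,
			       flags, pagep, NULL);
	up_read(&current->mm->mmap_sem);

	return ret == 1 ? 0 : -EFAULT;
}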