Diffstat (limited to 'mm/internal.h')

 mm/internal.h | 33 ++++++---------------------------
 1 file changed, 6 insertions(+), 27 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 987bb03fbdd8..f290c4db528b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -16,9 +16,6 @@
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 
-extern void prep_compound_page(struct page *page, unsigned long order);
-extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
-
 static inline void set_page_count(struct page *page, int v)
 {
 	atomic_set(&page->_count, v);
@@ -51,6 +48,8 @@ extern void putback_lru_page(struct page *page);
  */
 extern unsigned long highest_memmap_pfn;
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void prep_compound_page(struct page *page, unsigned long order);
+
 
 /*
  * function for dealing with page's order in buddy system.
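
Note: prep_compound_page() just moves down to sit with the other page
allocator externs, while prep_compound_gigantic_page() loses its
declaration here entirely, presumably because it becomes static in its
defining file. For context, a sketch of what the helper does, assuming
the 2.6.30-era body in mm/page_alloc.c (a sketch, not the authoritative
implementation):

/*
 * Sketch only (assumed from mm/page_alloc.c of this era): mark the
 * first page of a high-order allocation as the compound head and
 * point every tail page back at it.
 */
void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);	/* order is kept in page[1] */
	__SetPageHead(page);			/* first page is the head  */
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);		/* the rest are tail pages */
		p->first_page = page;		/* tail -> head back-pointer */
	}
}
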
@@ -74,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 	if (TestClearPageUnevictable(old))
 		SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
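
With CONFIG_UNEVICTABLE_LRU gone as an option, the !CONFIG stub above is
dropped and unevictable_migrate_page() is always built. Its single caller
hands the flag across during migration; a hypothetical, condensed view of
that call site (the real one is migrate_page_copy() in mm/migrate.c,
which copies much more state than this):

/*
 * Hypothetical, condensed caller: only the unevictable hand-off shown.
 * TestClearPageUnevictable() on the old page reads and clears the bit
 * in one atomic step, so the flag is transferred exactly once even if
 * the old page is freed immediately afterwards.
 */
static void copy_page_state(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);			/* copy the data */
	unevictable_migrate_page(newpage, page);	/* move the flag */
}
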
@@ -150,23 +143,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	if (unlikely(TestClearPageMlocked(page))) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__dec_zone_page_state(page, NR_MLOCK);
-		__count_vm_event(UNEVICTABLE_MLOCKFREED);
-		local_irq_restore(flags);
-	}
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
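
This is the interesting removal: free_page_mlock() used to pay a
local_irq_save()/restore() round trip on every free of an mlocked page
just to keep the vm counters safe. The helper leaves the header as part
of this series; my reading (an assumption about the companion change in
mm/page_alloc.c, which is not visible in this diff) is that the
PageMlocked() test moves out to the caller on the free path, where
interrupts are already disabled, so the body shrinks to roughly:

/*
 * Assumed companion version in mm/page_alloc.c: called only after the
 * caller has tested PageMlocked() and disabled interrupts, so the
 * non-atomic bit clear suffices and the irq save/restore disappears.
 */
static inline void free_page_mlock(struct page *page)
{
	__ClearPageMlocked(page);
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
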
@@ -175,7 +151,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
@@ -284,4 +259,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int flags,
 		struct page **pages, struct vm_area_struct **vmas);
 
+#define ZONE_RECLAIM_NOSCAN	-2
+#define ZONE_RECLAIM_FULL	-1
+#define ZONE_RECLAIM_SOME	0
+#define ZONE_RECLAIM_SUCCESS	1
 #endif
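
The new ZONE_RECLAIM_* values give zone_reclaim() a small return
contract instead of a bare boolean: -2 means nothing was scanned, -1
means the zone was scanned but is effectively full, 0 means some
progress, and 1 means enough pages were reclaimed. A hedged sketch of
how a caller in the allocator fast path might branch on them (the
helper name and exact logic here are hypothetical; the real consumer
is get_page_from_freelist() in mm/page_alloc.c):

/*
 * Hypothetical helper: decide whether an allocation attempt should
 * stay on this zone after zone_reclaim(), based on the return codes.
 */
static bool zone_worth_retrying(struct zone *zone, gfp_t gfp_mask, int order,
				unsigned long mark, int classzone_idx,
				int alloc_flags)
{
	switch (zone_reclaim(zone, gfp_mask, order)) {
	case ZONE_RECLAIM_NOSCAN:	/* did not scan: try the next zone */
	case ZONE_RECLAIM_FULL:		/* scanned, freed nothing: give up */
		return false;
	default:
		/* SOME/SUCCESS: progress was made, so recheck the watermark */
		return zone_watermark_ok(zone, order, mark,
					 classzone_idx, alloc_flags);
	}
}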