Diffstat (limited to 'mm')

 mm/internal.h   | 13 -------------
 mm/page_alloc.c | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 58ec1bc262c3..4b1672a8cf76 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -150,18 +150,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__ClearPageMlocked(page);
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -170,7 +158,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
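For reference, a userspace analogue of the removed helper's three steps, going only by the body visible in the hunk above: clear the page's mlocked bit, decrement the NR_MLOCK-style zone counter, and bump a freed-while-mlocked event count. struct fake_page, PG_MLOCKED and both counters below are illustrative stand-ins, not the kernel's own symbols.

#include <stdio.h>

#define PG_MLOCKED	(1UL << 0)	/* stand-in for the real page flag bit */

struct fake_page {
	unsigned long flags;
};

static unsigned long nr_mlock;		/* stand-in for the NR_MLOCK zone stat */
static unsigned long mlock_freed;	/* stand-in for UNEVICTABLE_MLOCKFREED */

/* Mirrors the removed free_page_mlock(): flag clear, stat drop, event count. */
static inline void fake_free_page_mlock(struct fake_page *page)
{
	page->flags &= ~PG_MLOCKED;	/* __ClearPageMlocked(page)                 */
	nr_mlock--;			/* __dec_zone_page_state(..., NR_MLOCK)     */
	mlock_freed++;			/* __count_vm_event(UNEVICTABLE_MLOCKFREED) */
}

int main(void)
{
	struct fake_page page = { .flags = PG_MLOCKED };

	nr_mlock = 1;			/* pretend one page is accounted as mlocked */
	fake_free_page_mlock(&page);
	printf("flags=%#lx nr_mlock=%lu mlock_freed=%lu\n",
	       page.flags, nr_mlock, mlock_freed);
	return 0;
}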
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c9f406e3c44..5dac5d8cb148 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -493,6 +493,22 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * free_page_mlock() -- clean up attempts to free and mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
 static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
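The added block keeps call sites in page_alloc.c free of #ifdefs: when CONFIG_HAVE_MLOCKED_PAGE_BIT is off, free_page_mlock() collapses to an empty stub the compiler can discard, presumably so the freeing paths can call it unconditionally. A minimal, compilable sketch of the same pattern, with a hypothetical CONFIG_FEATURE_X option and feature_account()/feature_event_count() helpers standing in for the real symbols:

#include <stdio.h>

/* Build with -DCONFIG_FEATURE_X to enable the real helper. */
#ifdef CONFIG_FEATURE_X
static unsigned long feature_events;

static inline void feature_account(void)
{
	feature_events++;		/* real bookkeeping when enabled */
}

static inline unsigned long feature_event_count(void)
{
	return feature_events;
}
#else
/* Disabled: empty stubs, so callers need no #ifdef of their own. */
static inline void feature_account(void) { }
static inline unsigned long feature_event_count(void) { return 0; }
#endif

int main(void)
{
	feature_account();		/* unconditional call site */
	printf("events: %lu\n", feature_event_count());
	return 0;
}

With the option off, the stubs are empty static inlines, so the calls cost nothing at runtime; the same reasoning applies to the stub variant of free_page_mlock() added above.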