about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2009-06-16 18:32:17 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-16 22:47:35 -0400
commit092cead6175bb1b3d3078a34ba71c939d526c70b (patch)
tree84dfeda6c7ca85b6d68710c824e1ce59db16cc3b
parentb6e68bc1baed9b6972a250aba66b8c5276cf6fb1 (diff)
page allocator: move free_page_mlock() to page_alloc.c
Currently, free_page_mlock() is only called from page_alloc.c. Thus, we can move it to page_alloc.c.

Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/internal.h13
-rw-r--r--mm/page_alloc.c16
2 files changed, 16 insertions, 13 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 58ec1bc262c3..4b1672a8cf76 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -150,18 +150,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
150 } 150 }
151} 151}
152 152
153/*
154 * free_page_mlock() -- clean up attempts to free and mlocked() page.
155 * Page should not be on lru, so no need to fix that up.
156 * free_pages_check() will verify...
157 */
158static inline void free_page_mlock(struct page *page)
159{
160 __ClearPageMlocked(page);
161 __dec_zone_page_state(page, NR_MLOCK);
162 __count_vm_event(UNEVICTABLE_MLOCKFREED);
163}
164
165#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 153#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
166static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) 154static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
167{ 155{
@@ -170,7 +158,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
170static inline void clear_page_mlock(struct page *page) { } 158static inline void clear_page_mlock(struct page *page) { }
171static inline void mlock_vma_page(struct page *page) { } 159static inline void mlock_vma_page(struct page *page) { }
172static inline void mlock_migrate_page(struct page *new, struct page *old) { } 160static inline void mlock_migrate_page(struct page *new, struct page *old) { }
173static inline void free_page_mlock(struct page *page) { }
174 161
175#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 162#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
176 163
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c9f406e3c44..5dac5d8cb148 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -493,6 +493,22 @@ static inline void __free_one_page(struct page *page,
493 zone->free_area[order].nr_free++; 493 zone->free_area[order].nr_free++;
494} 494}
495 495
496#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
497/*
498 * free_page_mlock() -- clean up attempts to free and mlocked() page.
499 * Page should not be on lru, so no need to fix that up.
500 * free_pages_check() will verify...
501 */
502static inline void free_page_mlock(struct page *page)
503{
504 __ClearPageMlocked(page);
505 __dec_zone_page_state(page, NR_MLOCK);
506 __count_vm_event(UNEVICTABLE_MLOCKFREED);
507}
508#else
509static void free_page_mlock(struct page *page) { }
510#endif
511
496static inline int free_pages_check(struct page *page) 512static inline int free_pages_check(struct page *page)
497{ 513{
498 if (unlikely(page_mapcount(page) | 514 if (unlikely(page_mapcount(page) |