path: root/mm/page_alloc.c
author     Hugh Dickins <hughd@google.com>                  2012-10-08 19:33:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:22:56 -0400
commit     a0c5e813f087dffc0d9b173d2e7d3328b1482fd5 (patch)
tree       cc3fac50d76d0641722cb824fef825d5655e421e /mm/page_alloc.c
parent     e6c509f85455041d3d7c4b863bf80bc294288cc1 (diff)
mm: remove free_page_mlock
We should not be seeing non-0 unevictable_pgs_mlockfreed any longer.  So
remove free_page_mlock() from the page freeing paths: __PG_MLOCKED is
already in PAGE_FLAGS_CHECK_AT_FREE, so free_pages_check() will now be
checking it, reporting "BUG: Bad page state" if it's ever found set.
Comment that UNEVICTABLE_MLOCKFREED and unevictable_pgs_mlockfreed are
now always 0.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
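[Editorial note] The crux of the change is that freeing a still-mlocked page is now
treated as a bug to report rather than a state to quietly repair.  The following is
a minimal userspace sketch, not the kernel implementation: struct page, the flag
bits, and the mask are simplified stand-ins modelled on include/linux/page-flags.h,
kept only to illustrate how a PAGE_FLAGS_CHECK_AT_FREE-style mask lets
free_pages_check() catch a page freed with PG_mlocked set.

    /*
     * Simplified illustration only -- NOT the kernel code.  Because
     * PG_mlocked is part of the check-at-free mask, a page reaching
     * the free path with it still set is reported as a bad page,
     * instead of the removed free_page_mlock() fixing up accounting.
     */
    #include <stdio.h>

    enum pageflags {                /* a few bits, after page-flags.h */
    	PG_locked,
    	PG_lru,
    	PG_private,
    	PG_mlocked,
    };

    struct page {
    	unsigned long flags;
    };

    /* Flags that must already be clear by the time a page is freed. */
    #define PAGE_FLAGS_CHECK_AT_FREE \
    	((1UL << PG_lru) | (1UL << PG_private) | (1UL << PG_mlocked))

    /* Sketch of free_pages_check(): nonzero return means "bad page". */
    static int free_pages_check(struct page *page)
    {
    	if (page->flags & PAGE_FLAGS_CHECK_AT_FREE) {
    		fprintf(stderr, "BUG: Bad page state, flags=%#lx\n",
    			page->flags);
    		return 1;
    	}
    	return 0;
    }

    int main(void)
    {
    	struct page clean = { .flags = 0 };
    	struct page stale = { .flags = 1UL << PG_mlocked };

    	/* A properly munlocked page passes the check... */
    	printf("clean page: %s\n", free_pages_check(&clean) ? "bad" : "ok");
    	/* ...while a still-mlocked page is reported, not repaired. */
    	printf("stale page: %s\n", free_pages_check(&stale) ? "bad" : "ok");
    	return 0;
    }

A side effect visible in the hunks below is that both free paths drop their
unconditional __TestClearPageMlocked() call, so the common case saves a
test-and-clear on every page free.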
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 17 -----------------
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00750bc08a3a..dbb53866c3aa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -598,17 +598,6 @@ out:
 	zone->free_area[order].nr_free++;
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
@@ -728,15 +717,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
-	int wasMlocked = __TestClearPageMlocked(page);
 	int migratetype;
 
 	if (!free_pages_prepare(page, order))
 		return;
 
 	local_irq_save(flags);
-	if (unlikely(wasMlocked))
-		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
@@ -1310,7 +1296,6 @@ void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 	int migratetype;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	if (!free_pages_prepare(page, 0))
 		return;
@@ -1318,8 +1303,6 @@ void free_hot_cold_page(struct page *page, int cold)
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
 	local_irq_save(flags);
-	if (unlikely(wasMlocked))
-		free_page_mlock(page);
 	__count_vm_event(PGFREE);
 
 	/*