-rw-r--r--	mm/internal.h	11
-rw-r--r--	mm/page_alloc.c	8
2 files changed, 10 insertions, 9 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 987bb03fbdd8..58ec1bc262c3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -157,14 +157,9 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  */
 static inline void free_page_mlock(struct page *page)
 {
-	if (unlikely(TestClearPageMlocked(page))) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__dec_zone_page_state(page, NR_MLOCK);
-		__count_vm_event(UNEVICTABLE_MLOCKFREED);
-		local_irq_restore(flags);
-	}
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
 
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
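
After this hunk, free_page_mlock() no longer disables interrupts or uses the atomic TestClearPageMlocked(); it assumes its caller has already observed PageMlocked() and is running with interrupts disabled, which is what makes the non-atomic __ClearPageMlocked() and __dec_zone_page_state() safe on a page that is in the middle of being freed. A minimal annotated sketch of the helper as it stands after the hunk (the comments are editorial, not part of the patch):

static inline void free_page_mlock(struct page *page)
{
	/*
	 * Preconditions assumed by the new version: the caller has already
	 * seen PageMlocked(page) and interrupts are disabled, so the
	 * non-atomic flag clear and per-zone counter update are safe on a
	 * page with no remaining users.
	 */
	__ClearPageMlocked(page);
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
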
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8f334d339b08..03a386d24ef2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,7 +495,6 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
-	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
@@ -552,6 +551,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
+	int clearMlocked = PageMlocked(page);
 
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
@@ -567,6 +567,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
 				get_pageblock_migratetype(page));
@@ -1013,6 +1015,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int clearMlocked = PageMlocked(page);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -1028,7 +1031,10 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_event(PGFREE);
+
 	if (cold)
 		list_add_tail(&page->lru, &pcp->list);
 	else
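
Both page_alloc.c call sites converge on the same pattern: sample PageMlocked() before entering the interrupt-disabled section, then let free_page_mlock() piggyback on the local_irq_save() the free path already performs instead of disabling interrupts a second time. A hedged sketch of that pattern follows; free_path_example() is a hypothetical stand-in for __free_pages_ok()/free_hot_cold_page(), not a function in the kernel.

static void free_path_example(struct page *page)
{
	unsigned long flags;
	/* Sampled before the IRQ-off critical section, as in both hunks above. */
	int clearMlocked = PageMlocked(page);

	local_irq_save(flags);
	if (unlikely(clearMlocked))
		free_page_mlock(page);	/* relies on IRQs already being disabled */
	__count_vm_event(PGFREE);
	/* ... hand the page back to the buddy lists or the per-CPU list ... */
	local_irq_restore(flags);
}
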