author		Johannes Weiner <hannes@cmpxchg.org>		2009-06-19 13:30:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 19:08:22 -0400
commit		c277331d5fbaae5772ed19862feefa91f4e477d3 (patch)
tree		fcd980b58d9487421e9b0c45b7c082fa1302debb /mm/page_alloc.c
parent		9063c61fd5cbd6f42e95929aa0e02380c9e15656 (diff)
mm: page_alloc: clear PG_mlocked before checking flags on free
da456f1 "page allocator: do not disable interrupts in free_page_mlock()" moved
the PG_mlocked clearing after the flag sanity checking which makes mlocked
pages always trigger 'bad page'. Fix this by clearing the bit up front.
Reported-and-debugged-by: Peter Chubb <peter.chubb@nicta.com.au>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	9	++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
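For context, the ordering problem is easy to reproduce in miniature. The sketch below is an editor's illustration with a simplified single-word flags field, a one-bit check mask, and hypothetical names (free_page_broken, free_page_fixed); it is not the kernel's page-flags machinery. The premise, per the 2.6.31-era code, is that free_pages_check() reports 'bad page' when any flag in PAGE_FLAGS_CHECK_AT_FREE (which includes PG_mlocked) is still set:

/*
 * Editor's sketch of the ordering bug -- simplified, not kernel code.
 */
#include <stdio.h>

#define PG_MLOCKED			(1u << 0)
#define PAGE_FLAGS_CHECK_AT_FREE	PG_MLOCKED	/* real mask has more bits */

/* Like free_pages_check(): nonzero means the page looks bad. */
static int free_pages_check(unsigned int flags)
{
	return (flags & PAGE_FLAGS_CHECK_AT_FREE) != 0;
}

/* Ordering after da456f1: sanity check first, clear afterwards. */
static int free_page_broken(unsigned int *flags)
{
	if (free_pages_check(*flags))
		return -1;		/* 'bad page' fires for mlocked pages */
	*flags &= ~PG_MLOCKED;		/* cleared too late */
	return 0;
}

/* Ordering after this patch: test-and-clear before the check. */
static int free_page_fixed(unsigned int *flags)
{
	int wasMlocked = (*flags & PG_MLOCKED) != 0;	/* TestClearPageMlocked() */

	*flags &= ~PG_MLOCKED;
	if (free_pages_check(*flags))
		return -1;
	if (wasMlocked) {
		/* free_page_mlock(): NR_MLOCK / MLOCKFREED accounting */
	}
	return 0;
}

int main(void)
{
	unsigned int flags = PG_MLOCKED;

	printf("broken: %d\n", free_page_broken(&flags));	/* -1 */
	flags = PG_MLOCKED;
	printf("fixed:  %d\n", free_page_fixed(&flags));	/* 0 */
	return 0;
}

Running it prints broken: -1 and fixed: 0, mirroring the report that every mlocked page hit 'bad page' until the clearing moved ahead of the check.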
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6f0753fe694c..30d5093a099d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -488,7 +488,6 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-	__ClearPageMlocked(page);
 	__dec_zone_page_state(page, NR_MLOCK);
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
@@ -558,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, order);
 
@@ -576,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
@@ -1022,7 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, 0);
 
@@ -1041,7 +1040,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	set_page_private(page, get_pageblock_migratetype(page));
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_event(PGFREE);
 
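One idiom worth noting from the diff: the separate PageMlocked() read and the later __ClearPageMlocked() are folded into a single TestClearPageMlocked() at the top of each free path, which atomically returns the old bit value while clearing it. A rough equivalent in portable C11, as an illustration rather than the kernel's page-flags implementation:

#include <stdatomic.h>

/* Return the previous value of 'bit' in *flags and clear it -- roughly
 * what TestClearPageMlocked() does for PG_mlocked. */
static int test_and_clear_bit_sketch(atomic_uint *flags, unsigned int bit)
{
	unsigned int mask = 1u << bit;

	/* atomic_fetch_and returns the value held *before* the AND */
	return (atomic_fetch_and(flags, ~mask) & mask) != 0;
}

Because the bit is gone before free_pages_check() runs, the sanity check passes, while the saved return value still lets the interrupt-disabled section decide whether to call free_page_mlock() for the statistics.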