author		Mel Gorman <mel@csn.ul.ie>	2009-06-16 18:32:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:34 -0400
commit		da456f14d2f2d7350f2b9440af79c85a34c7eed5
tree		fb857a24a561153e25e91a2ad55fa4123f8b152c /mm
parent		ed0ae21dc5fe3b9ad4cf1c7bb2bfd2ad596c481c
page allocator: do not disable interrupts in free_page_mlock()
free_page_mlock() tests and clears PG_mlocked using locked versions of the
bit operations.  If the bit was set, it disables interrupts to update the
counters, and this happens on every page free even though interrupts are
disabled again very shortly afterwards by the main free path.  This is
wasteful.

This patch splits what free_page_mlock() does.  The bit check is still made,
but the counter update is delayed until the point where the free path has
already disabled interrupts, and the non-locked version of the bit clear is
used.  One potential weirdness with this split is that the counters do not
get updated if the bad_page() check is triggered, but a system showing bad
pages is getting screwed already.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
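For illustration only, the pattern the patch moves to can be modelled in a
small user-space sketch (this is not kernel code: page_t, fake_irq_disable(),
nr_mlock and mlock_freed_events are invented stand-ins for the real page
flags, local_irq_save() and the NR_MLOCK / UNEVICTABLE_MLOCKFREED counters).
The point it shows is the ordering: the PG_mlocked test is a plain read done
before interrupts are disabled, and the non-atomic clear plus counter update
are piggy-backed on the IRQ-disabled section the free path needs anyway.

#include <stdbool.h>
#include <stdio.h>

struct page_t { unsigned long flags; };	/* toy page descriptor        */
#define PG_MLOCKED (1UL << 0)

static long nr_mlock;			/* stand-in for NR_MLOCK              */
static long mlock_freed_events;		/* stand-in for UNEVICTABLE_MLOCKFREED */

static void fake_irq_disable(void) { }	/* stand-in for local_irq_save()    */
static void fake_irq_enable(void)  { }	/* stand-in for local_irq_restore() */

/* non-atomic clear + counter update; caller already has "IRQs" off */
static void free_page_mlock(struct page_t *page)
{
	page->flags &= ~PG_MLOCKED;	/* like __ClearPageMlocked()          */
	nr_mlock--;			/* like __dec_zone_page_state()       */
	mlock_freed_events++;		/* like __count_vm_event()            */
}

static void free_page_model(struct page_t *page)
{
	/* plain test before the IRQ-off section, like PageMlocked(page) */
	bool clear_mlocked = page->flags & PG_MLOCKED;

	fake_irq_disable();		/* needed by the free path anyway     */
	if (clear_mlocked)
		free_page_mlock(page);	/* no extra irq save/restore per page */
	/* ... rest of the free path (PGFREE accounting, buddy merge) ... */
	fake_irq_enable();
}

int main(void)
{
	struct page_t p = { .flags = PG_MLOCKED };

	nr_mlock = 1;
	free_page_model(&p);
	printf("NR_MLOCK=%ld UNEVICTABLE_MLOCKFREED=%ld\n",
	       nr_mlock, mlock_freed_events);
	return 0;
}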
Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	11
-rw-r--r--	mm/page_alloc.c	8
2 files changed, 10 insertions, 9 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 987bb03fbdd8..58ec1bc262c3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -157,14 +157,9 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  */
 static inline void free_page_mlock(struct page *page)
 {
-	if (unlikely(TestClearPageMlocked(page))) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__dec_zone_page_state(page, NR_MLOCK);
-		__count_vm_event(UNEVICTABLE_MLOCKFREED);
-		local_irq_restore(flags);
-	}
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
 
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8f334d339b08..03a386d24ef2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,7 +495,6 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
-	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
@@ -552,6 +551,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
+	int clearMlocked = PageMlocked(page);
 
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
@@ -567,6 +567,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
 			get_pageblock_migratetype(page));
@@ -1013,6 +1015,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int clearMlocked = PageMlocked(page);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -1028,7 +1031,10 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_event(PGFREE);
+
 	if (cold)
 		list_add_tail(&page->lru, &pcp->list);
 	else