Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  53
1 file changed, 39 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6b17aa4740b..a8182c89de59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -76,6 +76,31 @@ unsigned long totalreserve_pages __read_mostly;
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+#ifdef CONFIG_PM_SLEEP
+/*
+ * The following functions are used by the suspend/hibernate code to temporarily
+ * change gfp_allowed_mask in order to avoid using I/O during memory allocations
+ * while devices are suspended. To avoid races with the suspend/hibernate code,
+ * they should always be called with pm_mutex held (gfp_allowed_mask also should
+ * only be modified with pm_mutex held, unless the suspend/hibernate code is
+ * guaranteed not to run in parallel with that modification).
+ */
+void set_gfp_allowed_mask(gfp_t mask)
+{
+	WARN_ON(!mutex_is_locked(&pm_mutex));
+	gfp_allowed_mask = mask;
+}
+
+gfp_t clear_gfp_allowed_mask(gfp_t mask)
+{
+	gfp_t ret = gfp_allowed_mask;
+
+	WARN_ON(!mutex_is_locked(&pm_mutex));
+	gfp_allowed_mask &= ~mask;
+	return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
 #endif
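
For context, a minimal sketch (not part of this diff) of how a suspend/hibernate path could use the two new helpers. The saved_gfp_mask variable and the example_* function names are hypothetical, and the helpers are assumed to be declared in <linux/gfp.h>:

	#include <linux/gfp.h>

	static gfp_t saved_gfp_mask;

	/* Called with pm_mutex held, before devices are suspended:
	 * forbid I/O and filesystem allocations and remember the old mask. */
	static void example_disable_io_allocations(void)
	{
		saved_gfp_mask = clear_gfp_allowed_mask(__GFP_IO | __GFP_FS);
	}

	/* Called with pm_mutex held, after devices have been resumed. */
	static void example_restore_io_allocations(void)
	{
		set_gfp_allowed_mask(saved_gfp_mask);
	}
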
@@ -530,7 +555,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	int batch_free = 0;
 
 	spin_lock(&zone->lock);
-	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -568,7 +593,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 				int migratetype)
 {
 	spin_lock(&zone->lock);
-	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
@@ -583,6 +608,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int bad = 0;
 	int wasMlocked = __TestClearPageMlocked(page);
 
+	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
 	for (i = 0 ; i < (1 << order) ; ++i)
@@ -1073,8 +1099,9 @@ void mark_free_pages(struct zone *zone)
 
 /*
  * Free a 0-order page
+ * cold == 1 ? free a cold page : free a hot page
  */
-static void free_hot_cold_page(struct page *page, int cold)
+void free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1082,6 +1109,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
+	trace_mm_page_free_direct(page, 0);
 	kmemcheck_free_shadow(page, 0);
 
 	if (PageAnon(page))
@@ -1133,12 +1161,6 @@ out:
 	local_irq_restore(flags);
 }
 
-void free_hot_page(struct page *page)
-{
-	trace_mm_page_free_direct(page, 0);
-	free_hot_cold_page(page, 0);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -2008,9 +2030,8 @@ void __pagevec_free(struct pagevec *pvec)
 void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
-		trace_mm_page_free_direct(page, order);
 		if (order == 0)
-			free_hot_page(page);
+			free_hot_cold_page(page, 0);
 		else
 			__free_pages_ok(page, order);
 	}
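
With free_hot_page() removed above and free_hot_cold_page() no longer static, a caller that used to free a single order-0 page via free_hot_page() would switch to the now-global variant. A hypothetical sketch (the example_* wrapper is not from this diff, and free_hot_cold_page() is assumed to be declared in a shared header):

	#include <linux/mm.h>

	/* cold == 0 keeps the old free_hot_page() "hot" behaviour;
	 * the tracepoint now fires inside free_hot_cold_page() itself. */
	static void example_free_single_page(struct page *page)
	{
		if (put_page_testzero(page))
			free_hot_cold_page(page, 0);
	}
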
@@ -2266,7 +2287,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			zone->pages_scanned,
-			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
+			(zone->all_unreclaimable ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -4371,8 +4392,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
-		printk(" %-8s %0#10lx -> %0#10lx\n",
-				zone_names[i],
+		printk(" %-8s ", zone_names[i]);
+		if (arch_zone_lowest_possible_pfn[i] ==
+				arch_zone_highest_possible_pfn[i])
+			printk("empty\n");
+		else
+			printk("%0#10lx -> %0#10lx\n",
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
 	}
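
With this hunk, a zone whose lowest and highest possible PFNs coincide is reported as "empty" instead of a zero-length range in the boot-time zone table. Illustrative output only, with invented PFN values and zone selection:

	 DMA      0x00000010 -> 0x00001000
	 Normal   0x00001000 -> 0x00100000
	 HighMem  empty
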