Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 51 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41e..4e869657cb51 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
         zone->free_area[order].nr_free++;
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * free_page_mlock() -- clean up attempts to free and mlocked() page.
  * Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
         __dec_zone_page_state(page, NR_MLOCK);
         __count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
 
 static inline int free_pages_check(struct page *page)
 {
@@ -1658,12 +1654,22 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         if (page)
                 goto out;
 
-        /* The OOM killer will not help higher order allocs */
-        if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
-                goto out;
-
+        if (!(gfp_mask & __GFP_NOFAIL)) {
+                /* The OOM killer will not help higher order allocs */
+                if (order > PAGE_ALLOC_COSTLY_ORDER)
+                        goto out;
+                /*
+                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+                 * The caller should handle page allocation failure by itself if
+                 * it specifies __GFP_THISNODE.
+                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+                 */
+                if (gfp_mask & __GFP_THISNODE)
+                        goto out;
+        }
         /* Exhausted what can be done so it's blamo time */
-        out_of_memory(zonelist, gfp_mask, order);
+        out_of_memory(zonelist, gfp_mask, order, nodemask);
 
 out:
         clear_zonelist_oom(zonelist, gfp_mask);
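
The restructured branch above makes __GFP_THISNODE allocations bail out before the OOM killer is invoked, so such callers are expected to handle failure themselves. A minimal caller sketch, assuming a node-preferred allocation with a fallback (the helper name and fallback policy are hypothetical; GFP_THISNODE already bundles __GFP_NORETRY and __GFP_NOWARN on NUMA builds, as the comment in the hunk notes):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: try a node-local page first, then any node. */
static struct page *alloc_page_prefer_node(int nid)
{
        /*
         * With GFP_THISNODE the allocator returns NULL quickly instead
         * of OOM-killing (see the __GFP_THISNODE bail-out above), so
         * the fallback below is the caller's responsibility.
         */
        struct page *page = alloc_pages_node(nid, GFP_KERNEL | GFP_THISNODE, 0);

        if (!page)
                page = alloc_pages_node(nid, GFP_KERNEL, 0);
        return page;
}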
@@ -3127,7 +3133,7 @@ static int __cpuinit process_zones(int cpu)
 
                 if (percpu_pagelist_fraction)
                         setup_pagelist_highmark(zone_pcp(zone, cpu),
-                         	(zone->present_pages / percpu_pagelist_fraction));
+                                (zone->present_pages / percpu_pagelist_fraction));
         }
 
         return 0;
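
The argument computed above is the per-cpu high watermark: zone->present_pages / percpu_pagelist_fraction. A worked example with made-up numbers (the batch derivation is a sketch of what setup_pagelist_highmark() does in this era of the file: roughly high/4, capped at PAGE_SHIFT * 8):

/*
 * Hypothetical zone: 262144 present pages (1 GiB of 4 KiB pages),
 * percpu_pagelist_fraction = 8.
 *
 *   high  = 262144 / 8 = 32768 pages kept on each per-cpu list at most
 *   batch = min(high / 4, PAGE_SHIFT * 8) = min(8192, 96) = 96
 */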
@@ -3573,7 +3579,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
                                 unsigned long range_start_pfn,
                                 unsigned long range_end_pfn)
 {
@@ -4102,7 +4108,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
 }
 
 /* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
 {
         sort(early_node_map, (size_t)nr_nodemap_entries,
                         sizeof(struct node_active_region),
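
This hunk and the __absent_pages_in_range hunk above both drop static, which implies a consumer outside this file; the matching declarations would live in a shared header. The actual header change is not part of this diff, so the prototypes below are illustrative only:

/* Illustrative extern declarations for the newly global symbols. */
extern unsigned long __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn);
extern void sort_node_map(void);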
@@ -5085,3 +5091,24 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
         spin_unlock_irqrestore(&zone->lock, flags);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+        struct zone *zone = page_zone(page);
+        unsigned long pfn = page_to_pfn(page);
+        unsigned long flags;
+        int order;
+
+        spin_lock_irqsave(&zone->lock, flags);
+        for (order = 0; order < MAX_ORDER; order++) {
+                struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+                if (PageBuddy(page_head) && page_order(page_head) >= order)
+                        break;
+        }
+        spin_unlock_irqrestore(&zone->lock, flags);
+
+        return order < MAX_ORDER;
+}
+#endif
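
is_free_buddy_page() scans the candidate buddy head at each order under zone->lock and reports whether the page currently sits in the buddy allocator; it is compiled only for CONFIG_MEMORY_FAILURE. A hedged usage sketch (the wrapper name and policy are hypothetical, modeled on a hwpoison-style check):

#include <linux/mm.h>

/*
 * Hypothetical check: a poisoned PFN that is still a free buddy
 * page needs no further recovery beyond isolation from the free
 * lists. is_free_buddy_page() takes zone->lock internally, so no
 * locking is required here.
 */
static bool poisoned_pfn_is_free(unsigned long pfn)
{
        return is_free_buddy_page(pfn_to_page(pfn));
}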