Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 58 +++++++++++++++++++++++++++++++---------------------------
 1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a13ded1938f0..caea788628e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1968,7 +1968,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 			goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	out_of_memory(zonelist, gfp_mask, order, nodemask);
+	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 
 out:
 	clear_zonelist_oom(zonelist, gfp_mask);
@@ -1990,7 +1990,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (!order)
 		return NULL;
 
-	if (compaction_deferred(preferred_zone)) {
+	if (compaction_deferred(preferred_zone, order)) {
 		*deferred_compaction = true;
 		return NULL;
 	}
@@ -2012,6 +2012,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (page) {
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
+		if (order >= preferred_zone->compact_order_failed)
+			preferred_zone->compact_order_failed = order + 1;
 		count_vm_event(COMPACTSUCCESS);
 		return page;
 	}
@@ -2028,7 +2030,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 * defer if the failure was a sync compaction failure.
 	 */
 	if (sync_migration)
-		defer_compaction(preferred_zone);
+		defer_compaction(preferred_zone, order);
 
 	cond_resched();
 }
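Editorial note (not part of the diff): the hunks above move the compaction call sites to order-aware deferral, resetting compact_order_failed on success and passing the failing order into defer_compaction()/compaction_deferred(). A rough sketch of how the matching helpers in mm/compaction.c could look with the extra order argument is given below; it is reconstructed from the zone fields touched here (compact_considered, compact_defer_shift, compact_order_failed) and assumes the existing COMPACT_MAX_DEFER_SHIFT limit, so treat it as illustrative rather than a verbatim copy of that file.

/* Sketch only: order-aware deferral helpers assumed by the call sites above. */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	/* Remember the lowest order that had to be deferred */
	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction for this order should be skipped for now */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Orders below the last failed order are never deferred */
	if (order < zone->compact_order_failed)
		return false;

	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

With this shape, a large allocation that keeps failing backs compaction off exponentially, while smaller orders than the one that failed are still allowed to try compaction immediately.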
@@ -2378,8 +2380,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	struct zone *preferred_zone;
-	struct page *page;
+	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
+	unsigned int cpuset_mems_cookie;
 
 	gfp_mask &= gfp_allowed_mask;
@@ -2398,15 +2401,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
-	get_mems_allowed();
+retry_cpuset:
+	cpuset_mems_cookie = get_mems_allowed();
+
 	/* The preferred zone is used for statistics later */
 	first_zones_zonelist(zonelist, high_zoneidx,
 				nodemask ? : &cpuset_current_mems_allowed,
 				&preferred_zone);
-	if (!preferred_zone) {
-		put_mems_allowed();
-		return NULL;
-	}
+	if (!preferred_zone)
+		goto out;
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -2416,9 +2419,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	put_mems_allowed();
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+
+out:
+	/*
+	 * When updating a task's mems_allowed, it is possible to race with
+	 * parallel threads in such a way that an allocation can fail while
+	 * the mask is being updated. If a page allocation is about to fail,
+	 * check if the cpuset changed during allocation and if so, retry.
+	 */
+	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+		goto retry_cpuset;
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
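Editorial note (not part of the diff): the retry_cpuset logic above assumes get_mems_allowed() now returns a sequence cookie and put_mems_allowed() returns false when the task's mems_allowed changed while the cookie was held (that convention is visible in the hunks here). A minimal sketch of the caller-side pattern, under that assumption, is:

/* Sketch of the cookie-based read side assumed by this patch. */
unsigned int cpuset_mems_cookie;
nodemask_t nodes;

do {
	cpuset_mems_cookie = get_mems_allowed();	/* snapshot the sequence count */
	nodes = cpuset_current_mems_allowed;		/* read the mask under the cookie */
	/* ... use 'nodes' ... */
} while (!put_mems_allowed(cpuset_mems_cookie));	/* false => mask changed, retry */

The allocator variant above only retries when the allocation actually failed (!page), so the common successful path pays no extra cost for the race check.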
@@ -2632,13 +2645,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 bool skip_free_areas_node(unsigned int flags, int nid)
 {
 	bool ret = false;
+	unsigned int cpuset_mems_cookie;
 
 	if (!(flags & SHOW_MEM_FILTER_NODES))
 		goto out;
 
-	get_mems_allowed();
-	ret = !node_isset(nid, cpuset_current_mems_allowed);
-	put_mems_allowed();
+	do {
+		cpuset_mems_cookie = get_mems_allowed();
+		ret = !node_isset(nid, cpuset_current_mems_allowed);
+	} while (!put_mems_allowed(cpuset_mems_cookie));
 out:
 	return ret;
 }
@@ -3925,18 +3940,6 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 	}
 }
 
-int __init add_from_early_node_map(struct range *range, int az,
-				   int nr_range, int nid)
-{
-	unsigned long start_pfn, end_pfn;
-	int i;
-
-	/* need to go over early_node_map to find out good range for node */
-	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
-		nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
-	return nr_range;
-}
-
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -4521,7 +4524,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(void)
 {
 	int i, nid;
 	unsigned long usable_startpfn;
@@ -4713,7 +4716,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
-	find_zone_movable_pfns_for_nodes(zone_movable_pfn);
+	find_zone_movable_pfns_for_nodes();
 
 	/* Print out the zone ranges */
 	printk("Zone PFN ranges:\n");
@@ -4823,6 +4826,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		lru_add_drain_cpu(cpu);
 		drain_pages(cpu);
 
 		/*