Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 673596ad9c80..40de6854b980 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2380,8 +2380,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	struct zone *preferred_zone;
-	struct page *page;
+	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
+	unsigned int cpuset_mems_cookie;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2400,15 +2401,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
-	get_mems_allowed();
+retry_cpuset:
+	cpuset_mems_cookie = get_mems_allowed();
+
 	/* The preferred zone is used for statistics later */
 	first_zones_zonelist(zonelist, high_zoneidx,
 				nodemask ? : &cpuset_current_mems_allowed,
 				&preferred_zone);
-	if (!preferred_zone) {
-		put_mems_allowed();
-		return NULL;
-	}
+	if (!preferred_zone)
+		goto out;
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -2418,9 +2419,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	put_mems_allowed();
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+
+out:
+	/*
+	 * When updating a task's mems_allowed, it is possible to race with
+	 * parallel threads in such a way that an allocation can fail while
+	 * the mask is being updated. If a page allocation is about to fail,
+	 * check if the cpuset changed during allocation and if so, retry.
+	 */
+	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+		goto retry_cpuset;
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2634,13 +2645,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 bool skip_free_areas_node(unsigned int flags, int nid)
 {
 	bool ret = false;
+	unsigned int cpuset_mems_cookie;
 
 	if (!(flags & SHOW_MEM_FILTER_NODES))
 		goto out;
 
-	get_mems_allowed();
-	ret = !node_isset(nid, cpuset_current_mems_allowed);
-	put_mems_allowed();
+	do {
+		cpuset_mems_cookie = get_mems_allowed();
+		ret = !node_isset(nid, cpuset_current_mems_allowed);
+	} while (!put_mems_allowed(cpuset_mems_cookie));
 out:
 	return ret;
 }
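
For context, below is a minimal standalone sketch of the cookie-based read/retry pattern this diff switches to: a reader samples a generation value before reading shared state, and the matching "put" reports whether a writer raced with the read, in which case the reader retries. This is not kernel code; the names (mems_generation, get_mems_cookie, put_mems_cookie, read_allowed_node) are hypothetical stand-ins for get_mems_allowed()/put_mems_allowed(cookie), and the in-kernel mechanism differs in detail, but the retry-on-change control flow matches the loop in skip_free_areas_node() and the retry_cpuset path in __alloc_pages_nodemask() above.

/*
 * Userspace illustration of the cookie/retry pattern (C11 atomics).
 * Build: cc -std=c11 -pthread sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint mems_generation;	/* bumped by writers on every update */
static atomic_int current_allowed_node;	/* shared state protected by the pattern */

/* Analogue of get_mems_allowed(): sample the generation before reading. */
static unsigned int get_mems_cookie(void)
{
	return atomic_load(&mems_generation);
}

/* Analogue of put_mems_allowed(cookie): true if no writer ran meanwhile. */
static bool put_mems_cookie(unsigned int cookie)
{
	return atomic_load(&mems_generation) == cookie;
}

/* Reader: retry the whole read section if an update raced with it. */
static int read_allowed_node(void)
{
	unsigned int cookie;
	int node;

	do {
		cookie = get_mems_cookie();
		node = atomic_load(&current_allowed_node);
	} while (!put_mems_cookie(cookie));

	return node;
}

/* Writer: publish new state, then bump the generation to invalidate readers. */
static void update_allowed_node(int node)
{
	atomic_store(&current_allowed_node, node);
	atomic_fetch_add(&mems_generation, 1);
}

int main(void)
{
	update_allowed_node(1);
	printf("allowed node: %d\n", read_allowed_node());
	return 0;
}

In the uncontended case the reader pays only two counter loads, and a retry happens only when a writer actually changed the mask during the read, which is the effect the patch relies on when it rechecks the cookie before letting an allocation fail.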