Diffstat (limited to 'mm/oom_kill.c')

 -rw-r--r--  mm/oom_kill.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2c93502cfcb4..e41504aa5da9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 {
 #ifdef CONFIG_NUMA
         struct zone *zone;
-        struct zone **z;
+        struct zoneref *z;
         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
         nodemask_t nodes = node_states[N_HIGH_MEMORY];
 
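For context on the hunk above: the series replaces the zonelist's
NULL-terminated array of struct zone pointers with an array of struct
zoneref entries, so iterators can filter zones by index without
dereferencing every zone. A minimal sketch of the structure as the
series defines it (the authoritative layout is in
include/linux/mmzone.h):

        struct zoneref {
                struct zone *zone;      /* pointer to the actual zone */
                int zone_idx;           /* cached zone_idx(zone) */
        };

for_each_zone_zonelist(zone, z, zonelist, highidx) walks these entries
and yields only zones whose index is at or below highidx, here
gfp_zone(gfp_mask), derived from the new gfp_mask argument.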
@@ -462,29 +462,29 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zone_oom(struct zonelist *zonelist)
+int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-        struct zone **z;
+        struct zoneref *z;
+        struct zone *zone;
         int ret = 1;
 
-        z = zonelist->zones;
-
         spin_lock(&zone_scan_mutex);
-        do {
-                if (zone_is_oom_locked(*z)) {
+        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+                if (zone_is_oom_locked(zone)) {
                         ret = 0;
                         goto out;
                 }
-        } while (*(++z) != NULL);
+        }
+
+        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+                /*
+                 * Lock each zone in the zonelist under zone_scan_mutex so a
+                 * parallel invocation of try_set_zone_oom() doesn't succeed
+                 * when it shouldn't.
+                 */
+                zone_set_flag(zone, ZONE_OOM_LOCKED);
+        }
 
-        /*
-         * Lock each zone in the zonelist under zone_scan_mutex so a parallel
-         * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
-         */
-        z = zonelist->zones;
-        do {
-                zone_set_flag(*z, ZONE_OOM_LOCKED);
-        } while (*(++z) != NULL);
 out:
         spin_unlock(&zone_scan_mutex);
         return ret;
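The rewritten try_set_zone_oom() keeps the original check-then-commit
shape: under zone_scan_mutex it first scans every eligible zone for an
existing OOM lock and bails out, and only then flags them all, so a
parallel caller can never observe a half-locked zonelist. A
self-contained sketch of the same pattern in plain C, with a pthread
mutex standing in for zone_scan_mutex (try_lock_all and the locked[]
array are illustrative names, not kernel API):

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;

        /* Mark every resource, or fail with no side effects if any
         * resource was already marked by a parallel caller. */
        static int try_lock_all(bool locked[], int n)
        {
                int i, ret = 1;

                pthread_mutex_lock(&scan_mutex);
                for (i = 0; i < n; i++)         /* pass 1: check only */
                        if (locked[i]) {
                                ret = 0;
                                goto out;
                        }
                for (i = 0; i < n; i++)         /* pass 2: commit */
                        locked[i] = true;
        out:
                pthread_mutex_unlock(&scan_mutex);
                return ret;
        }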
@@ -495,16 +495,15 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist)
+void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-        struct zone **z;
-
-        z = zonelist->zones;
+        struct zoneref *z;
+        struct zone *zone;
 
         spin_lock(&zone_scan_mutex);
-        do {
-                zone_clear_flag(*z, ZONE_OOM_LOCKED);
-        } while (*(++z) != NULL);
+        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+                zone_clear_flag(zone, ZONE_OOM_LOCKED);
+        }
         spin_unlock(&zone_scan_mutex);
 }
 
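Callers are expected to pair the two entry points around the kill and
now pass the same gfp_mask to both, so the lock and unlock passes walk
an identical filtered set of zones. A simplified sketch modelled on the
allocator's OOM path (not the verbatim mm/page_alloc.c code; restart
labels the allocation retry loop there):

        /*
         * Serialize OOM killing per zonelist: if another task already
         * OOM-locked any zone we would scan, back off and retry the
         * allocation rather than killing a second task.
         */
        if (!try_set_zone_oom(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                goto restart;
        }
        out_of_memory(zonelist, gfp_mask, order);
        clear_zonelist_oom(zonelist, gfp_mask);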