-rw-r--r--  include/linux/oom.h |  4
-rw-r--r--  mm/oom_kill.c       | 30
-rw-r--r--  mm/page_alloc.c     |  6
3 files changed, 18 insertions(+), 22 deletions(-)
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 4cd62677feb9..647395a1a550 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -55,8 +55,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                             struct mem_cgroup *memcg, nodemask_t *nodemask,
                             const char *message);
 
-extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                               int order, const nodemask_t *nodemask);
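The header hunk above renames the pair into the usual trylock/unlock convention and switches the trylock's return type from int to bool. A minimal sketch of the calling pattern these prototypes imply (illustrative only, not part of the patch; the real callers updated by this patch appear in the mm/oom_kill.c and mm/page_alloc.c hunks below, and zonelist/gfp_mask here stand in for whatever the caller already has):

/*
 * Illustrative sketch: pairing the renamed functions around OOM-killer work.
 */
if (oom_zonelist_trylock(zonelist, gfp_mask)) {
        /* ... OOM killing serialized against other users of these zones ... */
        oom_zonelist_unlock(zonelist, gfp_mask);
} else {
        /* someone else already holds the oom lock for a zone in the zonelist */
}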
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b0a1e1ff0353..d33aca1552ad 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -559,28 +559,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
        struct zoneref *z;
        struct zone *zone;
-       int ret = 1;
+       bool ret = true;
 
        spin_lock(&zone_scan_lock);
-       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                if (zone_is_oom_locked(zone)) {
-                       ret = 0;
+                       ret = false;
                        goto out;
                }
-       }
 
-       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-               /*
-                * Lock each zone in the zonelist under zone_scan_lock so a
-                * parallel invocation of try_set_zonelist_oom() doesn't succeed
-                * when it shouldn't.
-                */
+       /*
+        * Lock each zone in the zonelist under zone_scan_lock so a parallel
+        * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
+        */
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                zone_set_flag(zone, ZONE_OOM_LOCKED);
-       }
 
 out:
        spin_unlock(&zone_scan_lock);
@@ -592,15 +589,14 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
        struct zoneref *z;
        struct zone *zone;
 
        spin_lock(&zone_scan_lock);
-       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                zone_clear_flag(zone, ZONE_OOM_LOCKED);
-       }
        spin_unlock(&zone_scan_lock);
 }
 
@@ -695,8 +691,8 @@ void pagefault_out_of_memory(void)
                return;
 
        zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
-       if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
+       if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
                out_of_memory(NULL, 0, 0, NULL, false);
-               clear_zonelist_oom(zonelist, GFP_KERNEL);
+               oom_zonelist_unlock(zonelist, GFP_KERNEL);
        }
 }
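For readability, here is oom_zonelist_trylock() as it reads after the first mm/oom_kill.c hunk above, assembled from the + side of the diff. The tail of the function after the unlock is outside the hunk and is assumed here to simply return ret:

bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;
        bool ret = true;

        spin_lock(&zone_scan_lock);
        /* if any zone is already oom-locked, a parallel OOM kill covers us */
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                if (zone_is_oom_locked(zone)) {
                        ret = false;
                        goto out;
                }

        /*
         * Lock each zone in the zonelist under zone_scan_lock so a parallel
         * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
         */
        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                zone_set_flag(zone, ZONE_OOM_LOCKED);

out:
        spin_unlock(&zone_scan_lock);
        return ret;     /* assumed: tail of the function is unchanged by this patch */
}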
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fb9908148474..578236089ec1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2246,8 +2246,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 {
        struct page *page;
 
-       /* Acquire the OOM killer lock for the zones in zonelist */
-       if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
+       /* Acquire the per-zone oom lock for each zone */
+       if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                return NULL;
        }
@@ -2285,7 +2285,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 
 out:
-       clear_zonelist_oom(zonelist, gfp_mask);
+       oom_zonelist_unlock(zonelist, gfp_mask);
        return page;
 }
 
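Taken together, the two mm/page_alloc.c hunks leave the allocator's OOM path with a trylock / back-off / unlock shape. A compressed sketch assembled from the + sides of the hunks (the parameter list is abbreviated and everything between the two hunks is elided; this is not the full upstream body):

static struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                      struct zonelist *zonelist, /* ... */ nodemask_t *nodemask)
{
        struct page *page = NULL;

        /* Acquire the per-zone oom lock for each zone */
        if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
                /* a parallel OOM kill covers these zones: back off briefly */
                schedule_timeout_uninterruptible(1);
                return NULL;
        }

        /* ... elided: retry the allocation and other checks, jumping to "out:" on success ... */

        out_of_memory(zonelist, gfp_mask, order, nodemask, false);

out:
        oom_zonelist_unlock(zonelist, gfp_mask);
        return page;
}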